From ee8954cb53e4964a5565833b5a937f1cbcb60d44 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Sun, 6 May 2007 11:17:30 +1000 Subject: [PATCH 001/437] drm/ttm: cleanup mm_ioctl ioctls to be separate ioctls. This is the first bunch of ioctls --- libdrm/xf86drm.c | 33 ++++---- linux-core/drm_bo.c | 162 ++++++++++++++++++++++++++------------- linux-core/drm_compat.c | 3 +- linux-core/drm_drv.c | 12 ++- linux-core/drm_objects.h | 3 + shared-core/drm.h | 54 ++++++++----- 6 files changed, 174 insertions(+), 93 deletions(-) diff --git a/libdrm/xf86drm.c b/libdrm/xf86drm.c index 09c4f298..d776fedd 100644 --- a/libdrm/xf86drm.c +++ b/libdrm/xf86drm.c @@ -3153,13 +3153,13 @@ int drmBOFenceList(int fd, drmBOList *list, unsigned fenceHandle) int drmMMInit(int fd, unsigned long pOffset, unsigned long pSize, unsigned memType) { - drm_mm_init_arg_t arg; + struct drm_mm_init_arg arg; memset(&arg, 0, sizeof(arg)); - arg.req.op = mm_init; - arg.req.p_offset = pOffset; - arg.req.p_size = pSize; - arg.req.mem_type = memType; + + arg.p_offset = pOffset; + arg.p_size = pSize; + arg.mem_type = memType; if (ioctl(fd, DRM_IOCTL_MM_INIT, &arg)) return -errno; @@ -3169,14 +3169,12 @@ int drmMMInit(int fd, unsigned long pOffset, unsigned long pSize, int drmMMTakedown(int fd, unsigned memType) { - drm_mm_init_arg_t arg; - + struct drm_mm_type_arg arg; memset(&arg, 0, sizeof(arg)); - arg.req.op = mm_takedown; - arg.req.mem_type = memType; + arg.mem_type = memType; - if (ioctl(fd, DRM_IOCTL_MM_INIT, &arg)) + if (ioctl(fd, DRM_IOCTL_MM_TAKEDOWN, &arg)) return -errno; return 0; @@ -3184,15 +3182,14 @@ int drmMMTakedown(int fd, unsigned memType) int drmMMLock(int fd, unsigned memType) { - drm_mm_init_arg_t arg; + struct drm_mm_type_arg arg; int ret; memset(&arg, 0, sizeof(arg)); - arg.req.op = mm_lock; - arg.req.mem_type = memType; + arg.mem_type = memType; do{ - ret = ioctl(fd, DRM_IOCTL_MM_INIT, &arg); + ret = ioctl(fd, DRM_IOCTL_MM_LOCK, &arg); } while (ret && errno == EAGAIN); return -errno; @@ -3200,15 +3197,15 @@ int drmMMLock(int fd, unsigned memType) int drmMMUnlock(int fd, unsigned memType) { - drm_mm_init_arg_t arg; + struct drm_mm_type_arg arg; int ret; memset(&arg, 0, sizeof(arg)); - arg.req.op = mm_unlock; - arg.req.mem_type = memType; + + arg.mem_type = memType; do{ - ret = ioctl(fd, DRM_IOCTL_MM_INIT, &arg); + ret = ioctl(fd, DRM_IOCTL_MM_UNLOCK, &arg); } while (ret && errno == EAGAIN); return -errno; diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 1c7013b3..f78a6f95 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -2158,11 +2158,48 @@ EXPORT_SYMBOL(drm_bo_driver_init); int drm_mm_init_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; - - int ret = 0; - drm_mm_init_arg_t arg; + struct drm_mm_init_arg arg; drm_buffer_manager_t *bm = &dev->bm; drm_bo_driver_t *driver = dev->driver->bo_driver; + int ret; + + if (!driver) { + DRM_ERROR("Buffer objects are not supported by this driver\n"); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + ret = -EINVAL; + mutex_lock(&dev->bm.init_mutex); + mutex_lock(&dev->struct_mutex); + if (!bm->initialized) { + DRM_ERROR("DRM memory manager was not initialized.\n"); + goto out; + } + if (arg.mem_type == 0) { + DRM_ERROR("System memory buffers already initialized.\n"); + goto out; + } + ret = drm_bo_init_mm(dev, arg.mem_type, + arg.p_offset, arg.p_size); + +out: + mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev->bm.init_mutex); + if (ret) + return ret; + + DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, 
sizeof(arg)); + return 0; +} + +int drm_mm_takedown_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + struct drm_mm_type_arg arg; + drm_buffer_manager_t *bm = &dev->bm; + drm_bo_driver_t *driver = dev->driver->bo_driver; + int ret; if (!driver) { DRM_ERROR("Buffer objects are not supported by this driver\n"); @@ -2171,59 +2208,78 @@ int drm_mm_init_ioctl(DRM_IOCTL_ARGS) DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - switch (arg.req.op) { - case mm_init: - ret = -EINVAL; - mutex_lock(&dev->bm.init_mutex); - mutex_lock(&dev->struct_mutex); - if (!bm->initialized) { - DRM_ERROR("DRM memory manager was not initialized.\n"); - break; - } - if (arg.req.mem_type == 0) { - DRM_ERROR - ("System memory buffers already initialized.\n"); - break; - } - ret = drm_bo_init_mm(dev, arg.req.mem_type, - arg.req.p_offset, arg.req.p_size); - break; - case mm_takedown: - LOCK_TEST_WITH_RETURN(dev, filp); - mutex_lock(&dev->bm.init_mutex); - mutex_lock(&dev->struct_mutex); - ret = -EINVAL; - if (!bm->initialized) { - DRM_ERROR("DRM memory manager was not initialized\n"); - break; - } - if (arg.req.mem_type == 0) { - DRM_ERROR("No takedown for System memory buffers.\n"); - break; - } - ret = 0; - if (drm_bo_clean_mm(dev, arg.req.mem_type)) { - DRM_ERROR("Memory manager type %d not clean. " - "Delaying takedown\n", arg.req.mem_type); - } - break; - case mm_lock: - LOCK_TEST_WITH_RETURN(dev, filp); - mutex_lock(&dev->bm.init_mutex); - mutex_lock(&dev->struct_mutex); - ret = drm_bo_lock_mm(dev, arg.req.mem_type); - break; - case mm_unlock: - LOCK_TEST_WITH_RETURN(dev, filp); - mutex_lock(&dev->bm.init_mutex); - mutex_lock(&dev->struct_mutex); - ret = 0; - break; - default: - DRM_ERROR("Function not implemented yet\n"); + LOCK_TEST_WITH_RETURN(dev, filp); + mutex_lock(&dev->bm.init_mutex); + mutex_lock(&dev->struct_mutex); + ret = -EINVAL; + if (!bm->initialized) { + DRM_ERROR("DRM memory manager was not initialized\n"); + goto out; + } + if (arg.mem_type == 0) { + DRM_ERROR("No takedown for System memory buffers.\n"); + goto out; + } + ret = 0; + if (drm_bo_clean_mm(dev, arg.mem_type)) { + DRM_ERROR("Memory manager type %d not clean. 
" + "Delaying takedown\n", arg.mem_type); + } +out: + mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev->bm.init_mutex); + if (ret) + return ret; + + DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); + return 0; +} + +int drm_mm_lock_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + struct drm_mm_type_arg arg; + drm_bo_driver_t *driver = dev->driver->bo_driver; + int ret; + + if (!driver) { + DRM_ERROR("Buffer objects are not supported by this driver\n"); return -EINVAL; } + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + + LOCK_TEST_WITH_RETURN(dev, filp); + mutex_lock(&dev->bm.init_mutex); + mutex_lock(&dev->struct_mutex); + ret = drm_bo_lock_mm(dev, arg.mem_type); + mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev->bm.init_mutex); + if (ret) + return ret; + + DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); + return 0; +} + +int drm_mm_unlock_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + struct drm_mm_type_arg arg; + drm_bo_driver_t *driver = dev->driver->bo_driver; + int ret; + + if (!driver) { + DRM_ERROR("Buffer objects are not supported by this driver\n"); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + LOCK_TEST_WITH_RETURN(dev, filp); + mutex_lock(&dev->bm.init_mutex); + mutex_lock(&dev->struct_mutex); + ret = 0; + mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->bm.init_mutex); if (ret) diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c index 9ac5658c..867cee85 100644 --- a/linux-core/drm_compat.c +++ b/linux-core/drm_compat.c @@ -184,7 +184,7 @@ static int drm_pte_is_clear(struct vm_area_struct *vma, spin_unlock(&mm->page_table_lock); return ret; } - +#if 0 static int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn) { @@ -195,6 +195,7 @@ static int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, ret = io_remap_pfn_range(vma, addr, pfn, PAGE_SIZE, vma->vm_page_prot); return ret; } +#endif static struct page *drm_bo_vm_fault(struct vm_area_struct *vma, struct fault_data *data) diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index e5788d76..55a3435b 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -119,10 +119,18 @@ static drm_ioctl_desc_t drm_ioctls[] = { [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = {drm_wait_vblank, 0}, [DRM_IOCTL_NR(DRM_IOCTL_FENCE)] = {drm_fence_ioctl, DRM_AUTH}, [DRM_IOCTL_NR(DRM_IOCTL_BUFOBJ)] = {drm_bo_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_IOCTL_MM_INIT)] = {drm_mm_init_ioctl, - DRM_AUTH }, [DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW)] = {drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, + + + [DRM_IOCTL_NR(DRM_IOCTL_MM_INIT)] = {drm_mm_init_ioctl, + DRM_AUTH }, + [DRM_IOCTL_NR(DRM_IOCTL_MM_TAKEDOWN)] = {drm_mm_takedown_ioctl, + DRM_AUTH }, + [DRM_IOCTL_NR(DRM_IOCTL_MM_LOCK)] = {drm_mm_lock_ioctl, + DRM_AUTH }, + [DRM_IOCTL_NR(DRM_IOCTL_MM_UNLOCK)] = {drm_mm_unlock_ioctl, + DRM_AUTH }, }; #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index ea300c18..e05b46cc 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -430,6 +430,9 @@ typedef struct drm_bo_driver { extern int drm_bo_ioctl(DRM_IOCTL_ARGS); extern int drm_mm_init_ioctl(DRM_IOCTL_ARGS); +extern int drm_mm_takedown_ioctl(DRM_IOCTL_ARGS); +extern int drm_mm_lock_ioctl(DRM_IOCTL_ARGS); +extern int drm_mm_unlock_ioctl(DRM_IOCTL_ARGS); extern int drm_bo_driver_finish(struct drm_device *dev); extern int drm_bo_driver_init(struct drm_device *dev); 
extern int drm_bo_pci_offset(struct drm_device *dev, diff --git a/shared-core/drm.h b/shared-core/drm.h index b4195419..d561a082 100644 --- a/shared-core/drm.h +++ b/shared-core/drm.h @@ -862,24 +862,14 @@ typedef struct drm_bo_arg{ #define DRM_BO_MEM_TYPES 8 /* For now. */ -typedef union drm_mm_init_arg{ - struct { - enum { - mm_init, - mm_takedown, - mm_query, - mm_lock, - mm_unlock - } op; - drm_u64_t p_offset; - drm_u64_t p_size; - unsigned mem_type; - unsigned expand_pad[8]; /*Future expansion */ - } req; - struct { - drm_handle_t mm_sarea; - unsigned expand_pad[8]; /*Future expansion */ - } rep; +typedef struct drm_mm_type_arg { + unsigned int mem_type; +} drm_mm_type_arg_t; + +typedef struct drm_mm_init_arg { + drm_u64_t p_offset; + drm_u64_t p_size; + unsigned int mem_type; } drm_mm_init_arg_t; /** @@ -949,10 +939,36 @@ typedef union drm_mm_init_arg{ #define DRM_IOCTL_FENCE DRM_IOWR(0x3b, drm_fence_arg_t) #define DRM_IOCTL_BUFOBJ DRM_IOWR(0x3d, drm_bo_arg_t) -#define DRM_IOCTL_MM_INIT DRM_IOWR(0x3e, drm_mm_init_arg_t) #define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, drm_update_draw_t) +#define DRM_IOCTL_MM_INIT DRM_IOWR(0xc0, drm_mm_init_arg_t) +#define DRM_IOCTL_MM_TAKEDOWN DRM_IOWR(0xc1, drm_mm_type_arg_t) +#define DRM_IOCTL_MM_LOCK DRM_IOWR(0xc2, drm_mm_type_arg_t) +#define DRM_IOCTL_MM_UNLOCK DRM_IOWR(0xc3, drm_mm_type_arg_t) + +#define DRM_IOCTL_FENCE_CREATE DRM_IOWR(0xc4, drm_fence_arg_t) +#define DRM_IOCTL_FENCE_DESTROY DRM_IOWR(0xc5, drm_fence_arg_t) +#define DRM_IOCTL_FENCE_REFERENCE DRM_IOWR(0xc6, drm_fence_arg_t) +#define DRM_IOCTL_FENCE_UNREFERENCE DRM_IOWR(0xc7, drm_fence_arg_t) +#define DRM_IOCTL_FENCE_SIGNALED DRM_IOWR(0xc8, drm_fence_arg_t) +#define DRM_IOCTL_FENCE_FLUSH DRM_IOWR(0xc9, drm_fence_arg_t) +#define DRM_IOCTL_FENCE_WAIT DRM_IOWR(0xca, drm_fence_arg_t) +#define DRM_IOCTL_FENCE_BUFFERS DRM_IOWR(0xcb, drm_fence_arg_t) + +#define DRM_IOCTL_BO_CREATE DRM_IOWR(0xcc, drm_bo_arg_t) +#define DRM_IOCTL_BO_DESTROY DRM_IOWR(0xcd, drm_bo_arg_t) +#define DRM_IOCTL_BO_MAP DRM_IOWR(0xce, drm_bo_arg_t) +#define DRM_IOCTL_BO_UNMAP DRM_IOWR(0xcf, drm_bo_arg_t) +#define DRM_IOCTL_BO_REFERENCE DRM_IOWR(0xd0, drm_bo_arg_t) +#define DRM_IOCTL_BO_UNREFERENCE DRM_IOWR(0xd1, drm_bo_arg_t) +#define DRM_IOCTL_BO_VALIDATE DRM_IOWR(0xd2, drm_bo_arg_t) +#define DRM_IOCTL_BO_FENCE DRM_IOWR(0xd3, drm_bo_arg_t) +#define DRM_IOCTL_BO_INFO DRM_IOWR(0xd4, drm_bo_arg_t) +#define DRM_IOCTL_BO_WAIT_IDLE DRM_IOWR(0xd5, drm_bo_arg_t) +#define DRM_IOCTL_BO_REF_FENCE DRM_IOWR(0xd6, drm_bo_arg_t) + + /*@}*/ /** From 6a62941ecaa7d2b8f14b30920856bfa52aee4775 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Sun, 6 May 2007 11:35:11 +1000 Subject: [PATCH 002/437] drm/ttm: cleanup most of fence ioctl split out --- libdrm/xf86drm.c | 35 +++-- linux-core/drm_drv.c | 13 +- linux-core/drm_fence.c | 310 +++++++++++++++++++++++++++++++++++++++ linux-core/drm_objects.h | 10 +- shared-core/drm.h | 36 ++--- 5 files changed, 361 insertions(+), 43 deletions(-) diff --git a/libdrm/xf86drm.c b/libdrm/xf86drm.c index d776fedd..b53fe2fb 100644 --- a/libdrm/xf86drm.c +++ b/libdrm/xf86drm.c @@ -2272,8 +2272,7 @@ int drmFenceCreate(int fd, unsigned flags, int class, unsigned type, arg.flags = flags; arg.type = type; arg.class = class; - arg.op = drm_fence_create; - if (ioctl(fd, DRM_IOCTL_FENCE, &arg)) + if (ioctl(fd, DRM_IOCTL_FENCE_CREATE, &arg)) return -errno; fence->handle = arg.handle; fence->class = arg.class; @@ -2295,8 +2294,8 @@ int drmFenceBuffers(int fd, unsigned flags, drmFence *fence) memset(&arg, 0, 
sizeof(arg)); arg.flags = flags; - arg.op = drm_fence_buffers; - if (ioctl(fd, DRM_IOCTL_FENCE, &arg)) + + if (ioctl(fd, DRM_IOCTL_FENCE_BUFFERS, &arg)) return -errno; fence->handle = arg.handle; fence->class = arg.class; @@ -2312,8 +2311,8 @@ int drmFenceDestroy(int fd, const drmFence *fence) memset(&arg, 0, sizeof(arg)); arg.handle = fence->handle; - arg.op = drm_fence_destroy; - if (ioctl(fd, DRM_IOCTL_FENCE, &arg)) + + if (ioctl(fd, DRM_IOCTL_FENCE_DESTROY, &arg)) return -errno; return 0; } @@ -2324,8 +2323,8 @@ int drmFenceReference(int fd, unsigned handle, drmFence *fence) memset(&arg, 0, sizeof(arg)); arg.handle = handle; - arg.op = drm_fence_reference; - if (ioctl(fd, DRM_IOCTL_FENCE, &arg)) + + if (ioctl(fd, DRM_IOCTL_FENCE_REFERENCE, &arg)) return -errno; fence->handle = arg.handle; fence->class = arg.class; @@ -2341,8 +2340,8 @@ int drmFenceUnreference(int fd, const drmFence *fence) memset(&arg, 0, sizeof(arg)); arg.handle = fence->handle; - arg.op = drm_fence_unreference; - if (ioctl(fd, DRM_IOCTL_FENCE, &arg)) + + if (ioctl(fd, DRM_IOCTL_FENCE_UNREFERENCE, &arg)) return -errno; return 0; } @@ -2354,8 +2353,8 @@ int drmFenceFlush(int fd, drmFence *fence, unsigned flush_type) memset(&arg, 0, sizeof(arg)); arg.handle = fence->handle; arg.type = flush_type; - arg.op = drm_fence_flush; - if (ioctl(fd, DRM_IOCTL_FENCE, &arg)) + + if (ioctl(fd, DRM_IOCTL_FENCE_FLUSH, &arg)) return -errno; fence->class = arg.class; fence->type = arg.type; @@ -2369,8 +2368,8 @@ int drmFenceUpdate(int fd, drmFence *fence) memset(&arg, 0, sizeof(arg)); arg.handle = fence->handle; - arg.op = drm_fence_signaled; - if (ioctl(fd, DRM_IOCTL_FENCE, &arg)) + + if (ioctl(fd, DRM_IOCTL_FENCE_SIGNALED, &arg)) return -errno; fence->class = arg.class; fence->type = arg.type; @@ -2413,8 +2412,8 @@ int drmFenceEmit(int fd, unsigned flags, drmFence *fence, unsigned emit_type) arg.flags = flags; arg.handle = fence->handle; arg.type = emit_type; - arg.op = drm_fence_emit; - if (ioctl(fd, DRM_IOCTL_FENCE, &arg)) + + if (ioctl(fd, DRM_IOCTL_FENCE_EMIT, &arg)) return -errno; fence->class = arg.class; fence->type = arg.type; @@ -2447,9 +2446,9 @@ int drmFenceWait(int fd, unsigned flags, drmFence *fence, unsigned flush_type) arg.handle = fence->handle; arg.type = flush_type; arg.flags = flags; - arg.op = drm_fence_wait; + do { - ret = ioctl(fd, DRM_IOCTL_FENCE, &arg); + ret = ioctl(fd, DRM_IOCTL_FENCE_WAIT, &arg); } while (ret != 0 && errno == EAGAIN); if (ret) diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index 55a3435b..6b98f2c1 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -117,7 +117,7 @@ static drm_ioctl_desc_t drm_ioctls[] = { [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = {drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = {drm_wait_vblank, 0}, - [DRM_IOCTL_NR(DRM_IOCTL_FENCE)] = {drm_fence_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_BUFOBJ)] = {drm_bo_ioctl, DRM_AUTH}, [DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW)] = {drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, @@ -131,6 +131,17 @@ static drm_ioctl_desc_t drm_ioctls[] = { DRM_AUTH }, [DRM_IOCTL_NR(DRM_IOCTL_MM_UNLOCK)] = {drm_mm_unlock_ioctl, DRM_AUTH }, + + [DRM_IOCTL_NR(DRM_IOCTL_FENCE_CREATE)] = {drm_fence_create_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_FENCE_DESTROY)] = {drm_fence_destroy_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_FENCE_REFERENCE)] = {drm_fence_reference_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_FENCE_UNREFERENCE)] = {drm_fence_unreference_ioctl, DRM_AUTH}, + 
[DRM_IOCTL_NR(DRM_IOCTL_FENCE_SIGNALED)] = {drm_fence_signaled_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_FENCE_FLUSH)] = {drm_fence_flush_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_FENCE_WAIT)] = {drm_fence_wait_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_FENCE_EMIT)] = {drm_fence_emit_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_FENCE_BUFFERS)] = {drm_fence_buffers_ioctl, DRM_AUTH}, + }; #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c index ce161dc3..fe11e87b 100644 --- a/linux-core/drm_fence.c +++ b/linux-core/drm_fence.c @@ -659,3 +659,313 @@ int drm_fence_ioctl(DRM_IOCTL_ARGS) DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); return ret; } + +int drm_fence_create_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + int ret; + drm_fence_manager_t *fm = &dev->fm; + drm_fence_arg_t arg; + drm_fence_object_t *fence; + unsigned long flags; + ret = 0; + + if (!fm->initialized) { + DRM_ERROR("The DRM driver does not support fencing.\n"); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + + if (arg.flags & DRM_FENCE_FLAG_EMIT) + LOCK_TEST_WITH_RETURN(dev, filp); + ret = drm_fence_object_create(dev, arg.class, + arg.type, arg.flags, &fence); + if (ret) + return ret; + ret = drm_fence_add_user_object(priv, fence, + arg.flags & + DRM_FENCE_FLAG_SHAREABLE); + if (ret) { + drm_fence_usage_deref_unlocked(dev, fence); + return ret; + } + + /* + * usage > 0. No need to lock dev->struct_mutex; + */ + + atomic_inc(&fence->usage); + arg.handle = fence->base.hash.key; + + read_lock_irqsave(&fm->lock, flags); + arg.class = fence->class; + arg.type = fence->type; + arg.signaled = fence->signaled; + read_unlock_irqrestore(&fm->lock, flags); + drm_fence_usage_deref_unlocked(dev, fence); + + DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); + return ret; +} + +int drm_fence_destroy_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + int ret; + drm_fence_manager_t *fm = &dev->fm; + drm_fence_arg_t arg; + drm_user_object_t *uo; + ret = 0; + + if (!fm->initialized) { + DRM_ERROR("The DRM driver does not support fencing.\n"); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + + mutex_lock(&dev->struct_mutex); + uo = drm_lookup_user_object(priv, arg.handle); + if (!uo || (uo->type != drm_fence_type) || uo->owner != priv) { + mutex_unlock(&dev->struct_mutex); + return -EINVAL; + } + ret = drm_remove_user_object(priv, uo); + mutex_unlock(&dev->struct_mutex); + return ret; +} + + +int drm_fence_reference_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + int ret; + drm_fence_manager_t *fm = &dev->fm; + drm_fence_arg_t arg; + drm_fence_object_t *fence; + drm_user_object_t *uo; + unsigned long flags; + ret = 0; + + if (!fm->initialized) { + DRM_ERROR("The DRM driver does not support fencing.\n"); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + ret = drm_user_object_ref(priv, arg.handle, drm_fence_type, &uo); + if (ret) + return ret; + fence = drm_lookup_fence_object(priv, arg.handle); + + read_lock_irqsave(&fm->lock, flags); + arg.class = fence->class; + arg.type = fence->type; + arg.signaled = fence->signaled; + read_unlock_irqrestore(&fm->lock, flags); + drm_fence_usage_deref_unlocked(dev, fence); + + DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); + return ret; +} + + +int drm_fence_unreference_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + int ret; + drm_fence_manager_t *fm = &dev->fm; + drm_fence_arg_t arg; + ret = 0; 
+ + if (!fm->initialized) { + DRM_ERROR("The DRM driver does not support fencing.\n"); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + return drm_user_object_unref(priv, arg.handle, drm_fence_type); +} + +int drm_fence_signaled_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + int ret; + drm_fence_manager_t *fm = &dev->fm; + drm_fence_arg_t arg; + drm_fence_object_t *fence; + unsigned long flags; + ret = 0; + + if (!fm->initialized) { + DRM_ERROR("The DRM driver does not support fencing.\n"); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + + fence = drm_lookup_fence_object(priv, arg.handle); + if (!fence) + return -EINVAL; + + read_lock_irqsave(&fm->lock, flags); + arg.class = fence->class; + arg.type = fence->type; + arg.signaled = fence->signaled; + read_unlock_irqrestore(&fm->lock, flags); + drm_fence_usage_deref_unlocked(dev, fence); + + DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); + return ret; +} + +int drm_fence_flush_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + int ret; + drm_fence_manager_t *fm = &dev->fm; + drm_fence_arg_t arg; + drm_fence_object_t *fence; + unsigned long flags; + ret = 0; + + if (!fm->initialized) { + DRM_ERROR("The DRM driver does not support fencing.\n"); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + + fence = drm_lookup_fence_object(priv, arg.handle); + if (!fence) + return -EINVAL; + ret = drm_fence_object_flush(dev, fence, arg.type); + + read_lock_irqsave(&fm->lock, flags); + arg.class = fence->class; + arg.type = fence->type; + arg.signaled = fence->signaled; + read_unlock_irqrestore(&fm->lock, flags); + drm_fence_usage_deref_unlocked(dev, fence); + + DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); + return ret; +} + + +int drm_fence_wait_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + int ret; + drm_fence_manager_t *fm = &dev->fm; + drm_fence_arg_t arg; + drm_fence_object_t *fence; + unsigned long flags; + ret = 0; + + if (!fm->initialized) { + DRM_ERROR("The DRM driver does not support fencing.\n"); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + + fence = drm_lookup_fence_object(priv, arg.handle); + if (!fence) + return -EINVAL; + ret = drm_fence_object_wait(dev, fence, + arg.flags & DRM_FENCE_FLAG_WAIT_LAZY, + 0, arg.type); + + read_lock_irqsave(&fm->lock, flags); + arg.class = fence->class; + arg.type = fence->type; + arg.signaled = fence->signaled; + read_unlock_irqrestore(&fm->lock, flags); + drm_fence_usage_deref_unlocked(dev, fence); + + DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); + return ret; +} + + +int drm_fence_emit_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + int ret; + drm_fence_manager_t *fm = &dev->fm; + drm_fence_arg_t arg; + drm_fence_object_t *fence; + unsigned long flags; + ret = 0; + + if (!fm->initialized) { + DRM_ERROR("The DRM driver does not support fencing.\n"); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + + LOCK_TEST_WITH_RETURN(dev, filp); + fence = drm_lookup_fence_object(priv, arg.handle); + if (!fence) + return -EINVAL; + ret = drm_fence_object_emit(dev, fence, arg.flags, arg.class, + arg.type); + + read_lock_irqsave(&fm->lock, flags); + arg.class = fence->class; + arg.type = fence->type; + arg.signaled = fence->signaled; + read_unlock_irqrestore(&fm->lock, flags); + drm_fence_usage_deref_unlocked(dev, fence); + + DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, 
sizeof(arg)); + return ret; +} + +int drm_fence_buffers_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + int ret; + drm_fence_manager_t *fm = &dev->fm; + drm_fence_arg_t arg; + drm_fence_object_t *fence; + unsigned long flags; + ret = 0; + + if (!fm->initialized) { + DRM_ERROR("The DRM driver does not support fencing.\n"); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + + if (!dev->bm.initialized) { + DRM_ERROR("Buffer object manager is not initialized\n"); + return -EINVAL; + } + LOCK_TEST_WITH_RETURN(dev, filp); + ret = drm_fence_buffer_objects(priv, NULL, arg.flags, + NULL, &fence); + if (ret) + return ret; + ret = drm_fence_add_user_object(priv, fence, + arg.flags & + DRM_FENCE_FLAG_SHAREABLE); + if (ret) + return ret; + atomic_inc(&fence->usage); + arg.handle = fence->base.hash.key; + + read_lock_irqsave(&fm->lock, flags); + arg.class = fence->class; + arg.type = fence->type; + arg.signaled = fence->signaled; + read_unlock_irqrestore(&fm->lock, flags); + drm_fence_usage_deref_unlocked(dev, fence); + + DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); + return ret; +} diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index e05b46cc..17338da5 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -211,8 +211,16 @@ extern int drm_fence_object_create(struct drm_device *dev, uint32_t type, drm_fence_object_t ** c_fence); extern int drm_fence_add_user_object(drm_file_t * priv, drm_fence_object_t * fence, int shareable); -extern int drm_fence_ioctl(DRM_IOCTL_ARGS); +extern int drm_fence_create_ioctl(DRM_IOCTL_ARGS); +extern int drm_fence_destroy_ioctl(DRM_IOCTL_ARGS); +extern int drm_fence_reference_ioctl(DRM_IOCTL_ARGS); +extern int drm_fence_unreference_ioctl(DRM_IOCTL_ARGS); +extern int drm_fence_signaled_ioctl(DRM_IOCTL_ARGS); +extern int drm_fence_flush_ioctl(DRM_IOCTL_ARGS); +extern int drm_fence_wait_ioctl(DRM_IOCTL_ARGS); +extern int drm_fence_emit_ioctl(DRM_IOCTL_ARGS); +extern int drm_fence_buffers_ioctl(DRM_IOCTL_ARGS); /************************************************** *TTMs */ diff --git a/shared-core/drm.h b/shared-core/drm.h index d561a082..9810321e 100644 --- a/shared-core/drm.h +++ b/shared-core/drm.h @@ -677,17 +677,6 @@ typedef struct drm_fence_arg { unsigned flags; unsigned signaled; unsigned expand_pad[4]; /*Future expansion */ - enum { - drm_fence_create, - drm_fence_destroy, - drm_fence_reference, - drm_fence_unreference, - drm_fence_signaled, - drm_fence_flush, - drm_fence_wait, - drm_fence_emit, - drm_fence_buffers - } op; } drm_fence_arg_t; /* Buffer permissions, referring to how the GPU uses the buffers. 
@@ -954,19 +943,20 @@ typedef struct drm_mm_init_arg { #define DRM_IOCTL_FENCE_SIGNALED DRM_IOWR(0xc8, drm_fence_arg_t) #define DRM_IOCTL_FENCE_FLUSH DRM_IOWR(0xc9, drm_fence_arg_t) #define DRM_IOCTL_FENCE_WAIT DRM_IOWR(0xca, drm_fence_arg_t) -#define DRM_IOCTL_FENCE_BUFFERS DRM_IOWR(0xcb, drm_fence_arg_t) +#define DRM_IOCTL_FENCE_EMIT DRM_IOWR(0xcb, drm_fence_arg_t) +#define DRM_IOCTL_FENCE_BUFFERS DRM_IOWR(0xcc, drm_fence_arg_t) -#define DRM_IOCTL_BO_CREATE DRM_IOWR(0xcc, drm_bo_arg_t) -#define DRM_IOCTL_BO_DESTROY DRM_IOWR(0xcd, drm_bo_arg_t) -#define DRM_IOCTL_BO_MAP DRM_IOWR(0xce, drm_bo_arg_t) -#define DRM_IOCTL_BO_UNMAP DRM_IOWR(0xcf, drm_bo_arg_t) -#define DRM_IOCTL_BO_REFERENCE DRM_IOWR(0xd0, drm_bo_arg_t) -#define DRM_IOCTL_BO_UNREFERENCE DRM_IOWR(0xd1, drm_bo_arg_t) -#define DRM_IOCTL_BO_VALIDATE DRM_IOWR(0xd2, drm_bo_arg_t) -#define DRM_IOCTL_BO_FENCE DRM_IOWR(0xd3, drm_bo_arg_t) -#define DRM_IOCTL_BO_INFO DRM_IOWR(0xd4, drm_bo_arg_t) -#define DRM_IOCTL_BO_WAIT_IDLE DRM_IOWR(0xd5, drm_bo_arg_t) -#define DRM_IOCTL_BO_REF_FENCE DRM_IOWR(0xd6, drm_bo_arg_t) +#define DRM_IOCTL_BO_CREATE DRM_IOWR(0xcd, drm_bo_arg_t) +#define DRM_IOCTL_BO_DESTROY DRM_IOWR(0xce, drm_bo_arg_t) +#define DRM_IOCTL_BO_MAP DRM_IOWR(0xcf, drm_bo_arg_t) +#define DRM_IOCTL_BO_UNMAP DRM_IOWR(0xd0, drm_bo_arg_t) +#define DRM_IOCTL_BO_REFERENCE DRM_IOWR(0xd1, drm_bo_arg_t) +#define DRM_IOCTL_BO_UNREFERENCE DRM_IOWR(0xd2, drm_bo_arg_t) +#define DRM_IOCTL_BO_VALIDATE DRM_IOWR(0xd3, drm_bo_arg_t) +#define DRM_IOCTL_BO_FENCE DRM_IOWR(0xd4, drm_bo_arg_t) +#define DRM_IOCTL_BO_INFO DRM_IOWR(0xd5, drm_bo_arg_t) +#define DRM_IOCTL_BO_WAIT_IDLE DRM_IOWR(0xd6, drm_bo_arg_t) +#define DRM_IOCTL_BO_REF_FENCE DRM_IOWR(0xd7, drm_bo_arg_t) /*@}*/ From 25c51f539f254937d116699e66f8c382d78e71d4 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 8 May 2007 17:53:58 +1000 Subject: [PATCH 003/437] drm/ttm: ioctl cleanup for buffer object - user side only This just cleans up the xf86drm.c to what I want and drm.h, I need to fix up the kernel internals to suit these changes now. I've moved to using struct instead of typedefs for the bo and it doesn't look that bad so I'll do the same thing for mm and fence.. 
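
As an illustration of the new convention (a minimal sketch based on the drmBOUnReference() change in this patch, assuming the struct drm_bo_handle_arg and DRM_IOCTL_BO_UNREFERENCE definitions it introduces): instead of filling a drm_bo_arg_t, setting req->op and then checking arg.handled / rep->ret after the multiplexed DRM_IOCTL_BUFOBJ call, a caller issues one dedicated ioctl with a small purpose-built argument struct:

    #include <errno.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include "drm.h"            /* shared-core/drm.h from this series */

    /* Drop a reference on a buffer object by handle. */
    static int bo_unreference(int fd, unsigned int handle)
    {
        struct drm_bo_handle_arg arg;

        memset(&arg, 0, sizeof(arg));
        arg.handle = handle;

        /* One ioctl per operation; the kernel return value maps
         * straight to errno, with no per-request reply struct. */
        if (ioctl(fd, DRM_IOCTL_BO_UNREFERENCE, &arg))
            return -errno;
        return 0;
    }
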
--- libdrm/xf86drm.c | 185 ++++++++++++++++---------------------------- libdrm/xf86mm.h | 2 +- linux-core/drm_bo.c | 74 ++++++++++++++++++ shared-core/drm.h | 118 ++++++++++++++++------------ 4 files changed, 209 insertions(+), 170 deletions(-) diff --git a/libdrm/xf86drm.c b/libdrm/xf86drm.c index b53fe2fb..63b9354f 100644 --- a/libdrm/xf86drm.c +++ b/libdrm/xf86drm.c @@ -2598,7 +2598,7 @@ int drmBOCreateList(int numTarget, drmBOList *list) return drmAdjustListNodes(list); } -static void drmBOCopyReply(const drm_bo_arg_reply_t *rep, +static void drmBOCopyReply(struct drm_bo_info_rep *rep, drmBO *buf) { buf->handle = rep->handle; @@ -2620,9 +2620,9 @@ int drmBOCreate(int fd, unsigned long start, unsigned long size, unsigned mask, unsigned hint, drmBO *buf) { - drm_bo_arg_t arg; - drm_bo_arg_request_t *req = &arg.d.req; - drm_bo_arg_reply_t *rep = &arg.d.rep; + struct drm_bo_create_arg arg; + struct drm_bo_create_req *req = &arg.d.req; + struct drm_bo_info_rep *rep = &arg.d.rep; int ret; memset(buf, 0, sizeof(*buf)); @@ -2649,21 +2649,13 @@ int drmBOCreate(int fd, unsigned long start, unsigned long size, default: return -EINVAL; } - req->op = drm_bo_create; do { - ret = ioctl(fd, DRM_IOCTL_BUFOBJ, &arg); + ret = ioctl(fd, DRM_IOCTL_BO_CREATE, &arg); } while (ret != 0 && errno == EAGAIN); if (ret) return -errno; - if (!arg.handled) { - return -EFAULT; - } - if (rep->ret) { - fprintf(stderr, "Error %d\n", rep->ret); - return rep->ret; - } drmBOCopyReply(rep, buf); buf->mapVirtual = NULL; @@ -2674,9 +2666,7 @@ int drmBOCreate(int fd, unsigned long start, unsigned long size, int drmBODestroy(int fd, drmBO *buf) { - drm_bo_arg_t arg; - drm_bo_arg_request_t *req = &arg.d.req; - drm_bo_arg_reply_t *rep = &arg.d.rep; + struct drm_bo_handle_arg arg; if (buf->mapVirtual && (buf->type != drm_bo_type_fake)) { (void) drmUnmap(buf->mapVirtual, buf->start + buf->size); @@ -2685,41 +2675,27 @@ int drmBODestroy(int fd, drmBO *buf) } memset(&arg, 0, sizeof(arg)); - req->handle = buf->handle; - req->op = drm_bo_destroy; + arg.handle = buf->handle; - if (ioctl(fd, DRM_IOCTL_BUFOBJ, &arg)) + if (ioctl(fd, DRM_IOCTL_BO_DESTROY, &arg)) return -errno; - if (!arg.handled) { - return -EFAULT; - } - if (rep->ret) { - return rep->ret; - } buf->handle = 0; return 0; } - + int drmBOReference(int fd, unsigned handle, drmBO *buf) { - drm_bo_arg_t arg; - drm_bo_arg_request_t *req = &arg.d.req; - drm_bo_arg_reply_t *rep = &arg.d.rep; + struct drm_bo_reference_info_arg arg; + struct drm_bo_handle_arg *req = &arg.d.req; + struct drm_bo_info_rep *rep = &arg.d.rep; memset(&arg, 0, sizeof(arg)); req->handle = handle; - req->op = drm_bo_reference; - if (ioctl(fd, DRM_IOCTL_BUFOBJ, &arg)) + if (ioctl(fd, DRM_IOCTL_BO_REFERENCE, &arg)) return -errno; - if (!arg.handled) { - return -EFAULT; - } - if (rep->ret) { - return rep->ret; - } drmBOCopyReply(rep, buf); buf->type = drm_bo_type_dc; @@ -2732,10 +2708,7 @@ int drmBOReference(int fd, unsigned handle, drmBO *buf) int drmBOUnReference(int fd, drmBO *buf) { - drm_bo_arg_t arg; - drm_bo_arg_request_t *req = &arg.d.req; - drm_bo_arg_reply_t *rep = &arg.d.rep; - + struct drm_bo_handle_arg arg; if (buf->mapVirtual && (buf->type != drm_bo_type_fake)) { (void) munmap(buf->mapVirtual, buf->start + buf->size); @@ -2744,22 +2717,16 @@ int drmBOUnReference(int fd, drmBO *buf) } memset(&arg, 0, sizeof(arg)); - req->handle = buf->handle; - req->op = drm_bo_unreference; + arg.handle = buf->handle; - if (ioctl(fd, DRM_IOCTL_BUFOBJ, &arg)) + if (ioctl(fd, DRM_IOCTL_BO_UNREFERENCE, &arg)) return 
-errno; - if (!arg.handled) { - return -EFAULT; - } - if (rep->ret) { - return rep->ret; - } buf->handle = 0; return 0; } + /* * Flags can be DRM_BO_FLAG_READ, DRM_BO_FLAG_WRITE or'ed together * Hint currently be DRM_BO_HINT_DONT_BLOCK, which makes the @@ -2770,9 +2737,9 @@ int drmBOMap(int fd, drmBO *buf, unsigned mapFlags, unsigned mapHint, void **address) { - drm_bo_arg_t arg; - drm_bo_arg_request_t *req = &arg.d.req; - drm_bo_arg_reply_t *rep = &arg.d.rep; + struct drm_bo_map_wait_idle_arg arg; + struct drm_bo_info_req *req = &arg.d.req; + struct drm_bo_info_rep *rep = &arg.d.rep; int ret = 0; /* @@ -2797,7 +2764,6 @@ int drmBOMap(int fd, drmBO *buf, unsigned mapFlags, unsigned mapHint, req->handle = buf->handle; req->mask = mapFlags; req->hint = mapHint; - req->op = drm_bo_map; /* * May hang if the buffer object is busy. @@ -2805,15 +2771,11 @@ int drmBOMap(int fd, drmBO *buf, unsigned mapFlags, unsigned mapHint, */ do { - ret = ioctl(fd, DRM_IOCTL_BUFOBJ, &arg); + ret = ioctl(fd, DRM_IOCTL_BO_MAP, &arg); } while (ret != 0 && errno == EAGAIN); if (ret) return -errno; - if (!arg.handled) - return -EFAULT; - if (rep->ret) - return rep->ret; drmBOCopyReply(rep, buf); buf->mapFlags = mapFlags; @@ -2823,45 +2785,37 @@ int drmBOMap(int fd, drmBO *buf, unsigned mapFlags, unsigned mapHint, return 0; } + int drmBOUnmap(int fd, drmBO *buf) { - drm_bo_arg_t arg; - drm_bo_arg_request_t *req = &arg.d.req; - drm_bo_arg_reply_t *rep = &arg.d.rep; - + struct drm_bo_handle_arg arg; memset(&arg, 0, sizeof(arg)); - req->handle = buf->handle; - req->op = drm_bo_unmap; + arg.handle = buf->handle; - if (ioctl(fd, DRM_IOCTL_BUFOBJ, &arg)) { + if (ioctl(fd, DRM_IOCTL_BO_UNMAP, &arg)) { return -errno; } - if (!arg.handled) - return -EFAULT; - if (rep->ret) - return rep->ret; - return 0; } int drmBOValidate(int fd, drmBO *buf, unsigned flags, unsigned mask, unsigned hint) { - drm_bo_arg_t arg; - drm_bo_arg_request_t *req = &arg.d.req; - drm_bo_arg_reply_t *rep = &arg.d.rep; + struct drm_bo_op_arg arg; + struct drm_bo_op_req *req = &arg.d.req; + struct drm_bo_arg_rep *rep = &arg.d.rep; int ret = 0; memset(&arg, 0, sizeof(arg)); - req->handle = buf->handle; - req->mask = flags; - req->hint = hint; + req->bo_req.handle = buf->handle; + req->bo_req.mask = flags; + req->bo_req.hint = hint; req->arg_handle = mask; /* Encode mask in the arg_handle field :/ */ req->op = drm_bo_validate; do{ - ret = ioctl(fd, DRM_IOCTL_BUFOBJ, &arg); + ret = ioctl(fd, DRM_IOCTL_BO_OP, &arg); } while (ret && errno == EAGAIN); if (ret) @@ -2871,25 +2825,25 @@ int drmBOValidate(int fd, drmBO *buf, unsigned flags, unsigned mask, if (rep->ret) return rep->ret; - drmBOCopyReply(rep, buf); + drmBOCopyReply(&rep->bo_info, buf); return 0; } int drmBOFence(int fd, drmBO *buf, unsigned flags, unsigned fenceHandle) { - drm_bo_arg_t arg; - drm_bo_arg_request_t *req = &arg.d.req; - drm_bo_arg_reply_t *rep = &arg.d.rep; + struct drm_bo_op_arg arg; + struct drm_bo_op_req *req = &arg.d.req; + struct drm_bo_arg_rep *rep = &arg.d.rep; int ret = 0; memset(&arg, 0, sizeof(arg)); - req->handle = buf->handle; - req->mask = flags; + req->bo_req.handle = buf->handle; + req->bo_req.mask = flags; req->arg_handle = fenceHandle; - req->op = drm_bo_validate; + req->op = drm_bo_fence; - ret = ioctl(fd, DRM_IOCTL_BUFOBJ, &arg); + ret = ioctl(fd, DRM_IOCTL_BO_OP, &arg); if (ret) return -errno; @@ -2902,51 +2856,43 @@ int drmBOFence(int fd, drmBO *buf, unsigned flags, unsigned fenceHandle) int drmBOInfo(int fd, drmBO *buf) { - drm_bo_arg_t arg; - drm_bo_arg_request_t 
*req = &arg.d.req; - drm_bo_arg_reply_t *rep = &arg.d.rep; + struct drm_bo_reference_info_arg arg; + struct drm_bo_handle_arg *req = &arg.d.req; + struct drm_bo_info_rep *rep = &arg.d.rep; int ret = 0; memset(&arg, 0, sizeof(arg)); req->handle = buf->handle; - req->op = drm_bo_info; - ret = ioctl(fd, DRM_IOCTL_BUFOBJ, &arg); + ret = ioctl(fd, DRM_IOCTL_BO_INFO, &arg); if (ret) return -errno; - if (!arg.handled) - return -EFAULT; - if (rep->ret) - return rep->ret; + drmBOCopyReply(rep, buf); return 0; } int drmBOWaitIdle(int fd, drmBO *buf, unsigned hint) { - drm_bo_arg_t arg; - drm_bo_arg_request_t *req = &arg.d.req; - drm_bo_arg_reply_t *rep = &arg.d.rep; + struct drm_bo_map_wait_idle_arg arg; + struct drm_bo_info_req *req = &arg.d.req; + struct drm_bo_info_rep *rep = &arg.d.rep; int ret = 0; if ((buf->flags & DRM_BO_FLAG_SHAREABLE) || (buf->replyFlags & DRM_BO_REP_BUSY)) { memset(&arg, 0, sizeof(arg)); req->handle = buf->handle; - req->op = drm_bo_wait_idle; req->hint = hint; do { - ret = ioctl(fd, DRM_IOCTL_BUFOBJ, &arg); + ret = ioctl(fd, DRM_IOCTL_BO_WAIT_IDLE, &arg); } while (ret && errno == EAGAIN); if (ret) return -errno; - if (!arg.handled) - return -EFAULT; - if (rep->ret) - return rep->ret; + drmBOCopyReply(rep, buf); } return 0; @@ -2967,7 +2913,6 @@ int drmBOBusy(int fd, drmBO *buf, int *busy) } } - int drmAddValidateItem(drmBOList *list, drmBO *buf, unsigned flags, unsigned mask, int *newItem) @@ -3029,9 +2974,9 @@ int drmBOValidateList(int fd, drmBOList *list) drmBONode *node; drmMMListHead *l; - drm_bo_arg_t *arg, *first; - drm_bo_arg_request_t *req; - drm_bo_arg_reply_t *rep; + struct drm_bo_op_arg *arg, *first; + struct drm_bo_op_req *req; + struct drm_bo_arg_rep *rep; drm_u64_t *prevNext = NULL; drmBO *buf; int ret; @@ -3052,10 +2997,10 @@ int drmBOValidateList(int fd, drmBOList *list) memset(arg, 0, sizeof(*arg)); prevNext = &arg->next; - req->handle = node->buf->handle; + req->bo_req.handle = node->buf->handle; req->op = drm_bo_validate; - req->mask = node->arg0; - req->hint = 0; + req->bo_req.mask = node->arg0; + req->bo_req.hint = 0; req->arg_handle = node->arg1; } @@ -3063,7 +3008,7 @@ int drmBOValidateList(int fd, drmBOList *list) return 0; do{ - ret = ioctl(fd, DRM_IOCTL_BUFOBJ, first); + ret = ioctl(fd, DRM_IOCTL_BO_OP, first); } while (ret && errno == EAGAIN); @@ -3083,7 +3028,7 @@ int drmBOValidateList(int fd, drmBOList *list) return rep->ret; buf = node->buf; - drmBOCopyReply(rep, buf); + drmBOCopyReply(&rep->bo_info, buf); } return 0; @@ -3095,9 +3040,9 @@ int drmBOFenceList(int fd, drmBOList *list, unsigned fenceHandle) drmBONode *node; drmMMListHead *l; - drm_bo_arg_t *arg, *first; - drm_bo_arg_request_t *req; - drm_bo_arg_reply_t *rep; + struct drm_bo_op_arg *arg, *first; + struct drm_bo_op_req *req; + struct drm_bo_arg_rep *rep; drm_u64_t *prevNext = NULL; drmBO *buf; unsigned fence_flags; @@ -3119,16 +3064,16 @@ int drmBOFenceList(int fd, drmBOList *list, unsigned fenceHandle) memset(arg, 0, sizeof(*arg)); prevNext = &arg->next; - req->handle = node->buf->handle; + req->bo_req.handle = node->buf->handle; req->op = drm_bo_fence; - req->mask = node->arg0; + req->bo_req.mask = node->arg0; req->arg_handle = fenceHandle; } if (!first) return 0; - ret = ioctl(fd, DRM_IOCTL_BUFOBJ, first); + ret = ioctl(fd, DRM_IOCTL_BO_OP, first); if (ret) return -errno; @@ -3143,7 +3088,7 @@ int drmBOFenceList(int fd, drmBOList *list, unsigned fenceHandle) return -EFAULT; if (rep->ret) return rep->ret; - drmBOCopyReply(rep, node->buf); + drmBOCopyReply(&rep->bo_info, 
node->buf); } return 0; diff --git a/libdrm/xf86mm.h b/libdrm/xf86mm.h index bd0d2812..0b284cc0 100644 --- a/libdrm/xf86mm.h +++ b/libdrm/xf86mm.h @@ -125,7 +125,7 @@ typedef struct _drmBO{ typedef struct _drmBONode { drmMMListHead head; drmBO *buf; - drm_bo_arg_t bo_arg; + struct drm_bo_op_arg bo_arg; unsigned long arg0; unsigned long arg1; } drmBONode; diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index f78a6f95..43be21a8 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1784,6 +1784,80 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS) return 0; } +int drm_bo_create_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + drm_bo_create_arg_t arg; + unsigned long next; + drm_user_object_t *uo; + drm_buffer_object_t *entry; + int ret = 0; + + if (!dev->bm.initialized) { + DRM_ERROR("Buffer object manager is not initialized.\n"); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + + ret = drm_bo_lock_test(dev, filp); + if (ret) + goto out; + + ret = drm_buffer_object_create(priv->head->dev, + req->size, req->type, req->mask, + req->hint, req->page_alignment, + req->buffer_start, &entry); + if (ret) + goto out; + + ret = drm_bo_add_user_object(priv, entry, + req->mask & DRM_BO_FLAG_SHAREABLE); + if (ret) { + drm_bo_usage_deref_unlocked(entry); + goto out; + } + + mutex_lock(&entry->mutex); + drm_bo_fill_rep_arg(entry, &rep); + mutex_unlock(&entry->mutex); + + DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); +out: + return 0; +} + +int drm_bo_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + drm_bo_arg_t arg; + drm_bo_arg_request_t *req = &arg.d.req; + drm_bo_arg_reply_t rep; + unsigned long next; + drm_user_object_t *uo; + drm_buffer_object_t *entry; + + if (!dev->bm.initialized) { + DRM_ERROR("Buffer object manager is not initialized.\n"); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + + rep.ret = 0; + + rep.ret = drm_buffer_object_unmap(priv, req->handle); + + + if (rep.ret == -EAGAIN) + return -EAGAIN; + + + DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); + + return 0; +} + /** *Clean the unfenced list and put on regular LRU. 
*This is part of the memory manager cleanup and should only be diff --git a/shared-core/drm.h b/shared-core/drm.h index 9810321e..7b3ee153 100644 --- a/shared-core/drm.h +++ b/shared-core/drm.h @@ -782,32 +782,29 @@ typedef enum { drm_bo_type_kernel, /* for initial kernel allocations */ }drm_bo_type_t; +struct drm_bo_info_req { + unsigned int handle; + unsigned int mask; + unsigned int hint; +}; -typedef struct drm_bo_arg_request { - unsigned handle; /* User space handle */ - unsigned mask; - unsigned hint; +struct drm_bo_create_req { + unsigned int mask; + unsigned int hint; + unsigned page_alignment; drm_u64_t size; drm_bo_type_t type; - unsigned arg_handle; drm_u64_t buffer_start; - unsigned page_alignment; - unsigned expand_pad[4]; /*Future expansion */ - enum { - drm_bo_create, - drm_bo_validate, - drm_bo_map, - drm_bo_unmap, - drm_bo_fence, - drm_bo_destroy, - drm_bo_reference, - drm_bo_unreference, - drm_bo_info, - drm_bo_wait_idle, - drm_bo_ref_fence - } op; -} drm_bo_arg_request_t; +}; +struct drm_bo_op_req { + struct drm_bo_info_req bo_req; + unsigned int arg_handle; + enum { + drm_bo_validate, + drm_bo_fence, + } op; +}; /* * Reply flags @@ -815,30 +812,58 @@ typedef struct drm_bo_arg_request { #define DRM_BO_REP_BUSY 0x00000001 -typedef struct drm_bo_arg_reply { - int ret; - unsigned handle; - unsigned flags; +struct drm_bo_info_rep { + unsigned int handle; + unsigned int flags; drm_u64_t size; drm_u64_t offset; drm_u64_t arg_handle; - unsigned mask; + unsigned int mask; drm_u64_t buffer_start; - unsigned fence_flags; - unsigned rep_flags; - unsigned page_alignment; - unsigned expand_pad[4]; /*Future expansion */ -}drm_bo_arg_reply_t; + unsigned int fence_flags; + unsigned int rep_flags; + unsigned int page_alignment; + unsigned int expand_pad[4]; /*Future expansion */ +}; +struct drm_bo_arg_rep { + int ret; + struct drm_bo_info_rep bo_info; +}; -typedef struct drm_bo_arg{ +struct drm_bo_create_arg { + union { + struct drm_bo_create_req req; + struct drm_bo_info_rep rep; + } d; +}; + +struct drm_bo_handle_arg { + unsigned int handle; +}; + +struct drm_bo_reference_info_arg { + union { + struct drm_bo_handle_arg req; + struct drm_bo_info_rep rep; + } d; +}; + +struct drm_bo_map_wait_idle_arg { + union { + struct drm_bo_info_req req; + struct drm_bo_info_rep rep; + } d; +}; + +struct drm_bo_op_arg { int handled; drm_u64_t next; union { - drm_bo_arg_request_t req; - drm_bo_arg_reply_t rep; + struct drm_bo_op_req req; + struct drm_bo_arg_rep rep; } d; -} drm_bo_arg_t; +}; #define DRM_BO_MEM_LOCAL 0 #define DRM_BO_MEM_TT 1 @@ -926,9 +951,6 @@ typedef struct drm_mm_init_arg { #define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, drm_wait_vblank_t) -#define DRM_IOCTL_FENCE DRM_IOWR(0x3b, drm_fence_arg_t) -#define DRM_IOCTL_BUFOBJ DRM_IOWR(0x3d, drm_bo_arg_t) - #define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, drm_update_draw_t) #define DRM_IOCTL_MM_INIT DRM_IOWR(0xc0, drm_mm_init_arg_t) @@ -946,17 +968,15 @@ typedef struct drm_mm_init_arg { #define DRM_IOCTL_FENCE_EMIT DRM_IOWR(0xcb, drm_fence_arg_t) #define DRM_IOCTL_FENCE_BUFFERS DRM_IOWR(0xcc, drm_fence_arg_t) -#define DRM_IOCTL_BO_CREATE DRM_IOWR(0xcd, drm_bo_arg_t) -#define DRM_IOCTL_BO_DESTROY DRM_IOWR(0xce, drm_bo_arg_t) -#define DRM_IOCTL_BO_MAP DRM_IOWR(0xcf, drm_bo_arg_t) -#define DRM_IOCTL_BO_UNMAP DRM_IOWR(0xd0, drm_bo_arg_t) -#define DRM_IOCTL_BO_REFERENCE DRM_IOWR(0xd1, drm_bo_arg_t) -#define DRM_IOCTL_BO_UNREFERENCE DRM_IOWR(0xd2, drm_bo_arg_t) -#define DRM_IOCTL_BO_VALIDATE DRM_IOWR(0xd3, drm_bo_arg_t) -#define DRM_IOCTL_BO_FENCE 
DRM_IOWR(0xd4, drm_bo_arg_t) -#define DRM_IOCTL_BO_INFO DRM_IOWR(0xd5, drm_bo_arg_t) -#define DRM_IOCTL_BO_WAIT_IDLE DRM_IOWR(0xd6, drm_bo_arg_t) -#define DRM_IOCTL_BO_REF_FENCE DRM_IOWR(0xd7, drm_bo_arg_t) +#define DRM_IOCTL_BO_CREATE DRM_IOWR(0xcd, struct drm_bo_create_arg) +#define DRM_IOCTL_BO_DESTROY DRM_IOWR(0xce, struct drm_bo_handle_arg) +#define DRM_IOCTL_BO_MAP DRM_IOWR(0xcf, struct drm_bo_map_wait_idle_arg) +#define DRM_IOCTL_BO_UNMAP DRM_IOWR(0xd0, struct drm_bo_handle_arg) +#define DRM_IOCTL_BO_REFERENCE DRM_IOWR(0xd1, struct drm_bo_reference_info_arg) +#define DRM_IOCTL_BO_UNREFERENCE DRM_IOWR(0xd2, struct drm_bo_handle_arg) +#define DRM_IOCTL_BO_OP DRM_IOWR(0xd3, struct drm_bo_op_arg) +#define DRM_IOCTL_BO_INFO DRM_IOWR(0xd4, struct drm_bo_reference_info_arg) +#define DRM_IOCTL_BO_WAIT_IDLE DRM_IOWR(0xd5, struct drm_bo_map_wait_idle_arg) /*@}*/ From ae677472af25786fe935309ff1ac287e1610c819 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 8 May 2007 17:55:57 +1000 Subject: [PATCH 004/437] drm/ttm: remove old fence ioctl --- linux-core/drm_fence.c | 120 ----------------------------------------- 1 file changed, 120 deletions(-) diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c index fe11e87b..3d928016 100644 --- a/linux-core/drm_fence.c +++ b/linux-core/drm_fence.c @@ -540,126 +540,6 @@ drm_fence_object_t *drm_lookup_fence_object(drm_file_t * priv, uint32_t handle) return fence; } -int drm_fence_ioctl(DRM_IOCTL_ARGS) -{ - DRM_DEVICE; - int ret; - drm_fence_manager_t *fm = &dev->fm; - drm_fence_arg_t arg; - drm_fence_object_t *fence; - drm_user_object_t *uo; - unsigned long flags; - ret = 0; - - if (!fm->initialized) { - DRM_ERROR("The DRM driver does not support fencing.\n"); - return -EINVAL; - } - - DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - switch (arg.op) { - case drm_fence_create: - if (arg.flags & DRM_FENCE_FLAG_EMIT) - LOCK_TEST_WITH_RETURN(dev, filp); - ret = drm_fence_object_create(dev, arg.class, - arg.type, arg.flags, &fence); - if (ret) - return ret; - ret = drm_fence_add_user_object(priv, fence, - arg.flags & - DRM_FENCE_FLAG_SHAREABLE); - if (ret) { - drm_fence_usage_deref_unlocked(dev, fence); - return ret; - } - - /* - * usage > 0. 
No need to lock dev->struct_mutex; - */ - - atomic_inc(&fence->usage); - arg.handle = fence->base.hash.key; - break; - case drm_fence_destroy: - mutex_lock(&dev->struct_mutex); - uo = drm_lookup_user_object(priv, arg.handle); - if (!uo || (uo->type != drm_fence_type) || uo->owner != priv) { - mutex_unlock(&dev->struct_mutex); - return -EINVAL; - } - ret = drm_remove_user_object(priv, uo); - mutex_unlock(&dev->struct_mutex); - return ret; - case drm_fence_reference: - ret = - drm_user_object_ref(priv, arg.handle, drm_fence_type, &uo); - if (ret) - return ret; - fence = drm_lookup_fence_object(priv, arg.handle); - break; - case drm_fence_unreference: - ret = drm_user_object_unref(priv, arg.handle, drm_fence_type); - return ret; - case drm_fence_signaled: - fence = drm_lookup_fence_object(priv, arg.handle); - if (!fence) - return -EINVAL; - break; - case drm_fence_flush: - fence = drm_lookup_fence_object(priv, arg.handle); - if (!fence) - return -EINVAL; - ret = drm_fence_object_flush(dev, fence, arg.type); - break; - case drm_fence_wait: - fence = drm_lookup_fence_object(priv, arg.handle); - if (!fence) - return -EINVAL; - ret = - drm_fence_object_wait(dev, fence, - arg.flags & DRM_FENCE_FLAG_WAIT_LAZY, - 0, arg.type); - break; - case drm_fence_emit: - LOCK_TEST_WITH_RETURN(dev, filp); - fence = drm_lookup_fence_object(priv, arg.handle); - if (!fence) - return -EINVAL; - ret = drm_fence_object_emit(dev, fence, arg.flags, arg.class, - arg.type); - break; - case drm_fence_buffers: - if (!dev->bm.initialized) { - DRM_ERROR("Buffer object manager is not initialized\n"); - return -EINVAL; - } - LOCK_TEST_WITH_RETURN(dev, filp); - ret = drm_fence_buffer_objects(priv, NULL, arg.flags, - NULL, &fence); - if (ret) - return ret; - ret = drm_fence_add_user_object(priv, fence, - arg.flags & - DRM_FENCE_FLAG_SHAREABLE); - if (ret) - return ret; - atomic_inc(&fence->usage); - arg.handle = fence->base.hash.key; - break; - default: - return -EINVAL; - } - read_lock_irqsave(&fm->lock, flags); - arg.class = fence->class; - arg.type = fence->type; - arg.signaled = fence->signaled; - read_unlock_irqrestore(&fm->lock, flags); - drm_fence_usage_deref_unlocked(dev, fence); - - DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); - return ret; -} - int drm_fence_create_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; From b2a875ba8955cfbf3df2dc1ecb25915a252eef9f Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 8 May 2007 18:25:15 +1000 Subject: [PATCH 005/437] ttm: complete drm buffer object ioctl split retain the op operation for validate/fence operations --- linux-core/drm_bo.c | 293 +++++++++++++++++++++++---------------- linux-core/drm_drv.c | 14 +- linux-core/drm_objects.h | 12 +- shared-core/drm.h | 1 + 4 files changed, 202 insertions(+), 118 deletions(-) diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 43be21a8..be5fd6a8 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1068,7 +1068,7 @@ static int drm_bo_wait_unfenced(drm_buffer_object_t * bo, int no_wait, */ static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo, - drm_bo_arg_reply_t * rep) + struct drm_bo_info_rep *rep) { rep->handle = bo->base.hash.key; rep->flags = bo->mem.flags; @@ -1096,7 +1096,7 @@ static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo, static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle, uint32_t map_flags, unsigned hint, - drm_bo_arg_reply_t * rep) + struct drm_bo_info_rep *rep) { drm_buffer_object_t *bo; drm_device_t *dev = priv->head->dev; @@ -1459,7 +1459,7 @@ static int 
drm_buffer_object_validate(drm_buffer_object_t * bo, static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle, uint32_t flags, uint32_t mask, uint32_t hint, - drm_bo_arg_reply_t * rep) + struct drm_bo_info_rep *rep) { drm_buffer_object_t *bo; int ret; @@ -1494,8 +1494,8 @@ static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle, return ret; } -static int drm_bo_handle_info(drm_file_t * priv, uint32_t handle, - drm_bo_arg_reply_t * rep) +static int drm_bo_handle_info(drm_file_t *priv, uint32_t handle, + struct drm_bo_info_rep *rep) { drm_buffer_object_t *bo; @@ -1512,8 +1512,9 @@ static int drm_bo_handle_info(drm_file_t * priv, uint32_t handle, return 0; } -static int drm_bo_handle_wait(drm_file_t * priv, uint32_t handle, - uint32_t hint, drm_bo_arg_reply_t * rep) +static int drm_bo_handle_wait(drm_file_t *priv, uint32_t handle, + uint32_t hint, + struct drm_bo_info_rep *rep) { drm_buffer_object_t *bo; int no_wait = hint & DRM_BO_HINT_DONT_BLOCK; @@ -1652,15 +1653,14 @@ static int drm_bo_lock_test(drm_device_t * dev, struct file *filp) return 0; } -int drm_bo_ioctl(DRM_IOCTL_ARGS) +int drm_bo_op_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; - drm_bo_arg_t arg; - drm_bo_arg_request_t *req = &arg.d.req; - drm_bo_arg_reply_t rep; + struct drm_bo_op_arg arg; + struct drm_bo_op_req *req = &arg.d.req; + struct drm_bo_info_rep rep; unsigned long next; - drm_user_object_t *uo; - drm_buffer_object_t *entry; + int ret; if (!dev->bm.initialized) { DRM_ERROR("Buffer object manager is not initialized.\n"); @@ -1675,97 +1675,28 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS) continue; } - rep.ret = 0; + ret = 0; switch (req->op) { - case drm_bo_create: - rep.ret = drm_bo_lock_test(dev, filp); - if (rep.ret) - break; - rep.ret = - drm_buffer_object_create(priv->head->dev, - req->size, - req->type, - req->mask, - req->hint, - req->page_alignment, - req->buffer_start, &entry); - if (rep.ret) - break; - - rep.ret = - drm_bo_add_user_object(priv, entry, - req-> - mask & - DRM_BO_FLAG_SHAREABLE); - if (rep.ret) - drm_bo_usage_deref_unlocked(entry); - - if (rep.ret) - break; - - mutex_lock(&entry->mutex); - drm_bo_fill_rep_arg(entry, &rep); - mutex_unlock(&entry->mutex); - break; - case drm_bo_unmap: - rep.ret = drm_buffer_object_unmap(priv, req->handle); - break; - case drm_bo_map: - rep.ret = drm_buffer_object_map(priv, req->handle, - req->mask, - req->hint, &rep); - break; - case drm_bo_destroy: - mutex_lock(&dev->struct_mutex); - uo = drm_lookup_user_object(priv, req->handle); - if (!uo || (uo->type != drm_buffer_type) - || uo->owner != priv) { - mutex_unlock(&dev->struct_mutex); - rep.ret = -EINVAL; - break; - } - rep.ret = drm_remove_user_object(priv, uo); - mutex_unlock(&dev->struct_mutex); - break; - case drm_bo_reference: - rep.ret = drm_user_object_ref(priv, req->handle, - drm_buffer_type, &uo); - if (rep.ret) - break; - - rep.ret = drm_bo_handle_info(priv, req->handle, &rep); - break; - case drm_bo_unreference: - rep.ret = drm_user_object_unref(priv, req->handle, - drm_buffer_type); - break; case drm_bo_validate: - rep.ret = drm_bo_lock_test(dev, filp); - - if (rep.ret) + ret = drm_bo_lock_test(dev, filp); + if (ret) break; - rep.ret = - drm_bo_handle_validate(priv, req->handle, req->mask, - req->arg_handle, req->hint, - &rep); + ret = drm_bo_handle_validate(priv, req->bo_req.handle, + req->bo_req.mask, + req->arg_handle, + req->bo_req.hint, + &rep); break; case drm_bo_fence: - rep.ret = drm_bo_lock_test(dev, filp); - if (rep.ret) + ret = drm_bo_lock_test(dev, filp); + if (ret) break; - /**/ 
break; - case drm_bo_info: - rep.ret = drm_bo_handle_info(priv, req->handle, &rep); - break; - case drm_bo_wait_idle: - rep.ret = drm_bo_handle_wait(priv, req->handle, - req->hint, &rep); break; case drm_bo_ref_fence: - rep.ret = -EINVAL; + ret = -EINVAL; DRM_ERROR("Function is not implemented yet.\n"); default: - rep.ret = -EINVAL; + ret = -EINVAL; } next = arg.next; @@ -1773,11 +1704,12 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS) * A signal interrupted us. Make sure the ioctl is restartable. */ - if (rep.ret == -EAGAIN) + if (ret == -EAGAIN) return -EAGAIN; arg.handled = 1; - arg.d.rep = rep; + arg.d.rep.ret = ret; + arg.d.rep.bo_info = rep; DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); data = next; } while (data); @@ -1787,9 +1719,9 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS) int drm_bo_create_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; - drm_bo_create_arg_t arg; - unsigned long next; - drm_user_object_t *uo; + struct drm_bo_create_arg arg; + struct drm_bo_create_req *req = &arg.d.req; + struct drm_bo_info_rep *rep = &arg.d.rep; drm_buffer_object_t *entry; int ret = 0; @@ -1819,23 +1751,21 @@ int drm_bo_create_ioctl(DRM_IOCTL_ARGS) } mutex_lock(&entry->mutex); - drm_bo_fill_rep_arg(entry, &rep); + drm_bo_fill_rep_arg(entry, rep); mutex_unlock(&entry->mutex); DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); out: - return 0; + return ret; } -int drm_bo_ioctl(DRM_IOCTL_ARGS) + +int drm_bo_destroy_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; - drm_bo_arg_t arg; - drm_bo_arg_request_t *req = &arg.d.req; - drm_bo_arg_reply_t rep; - unsigned long next; + struct drm_bo_handle_arg arg; drm_user_object_t *uo; - drm_buffer_object_t *entry; + int ret = 0; if (!dev->bm.initialized) { DRM_ERROR("Buffer object manager is not initialized.\n"); @@ -1844,20 +1774,151 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS) DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - rep.ret = 0; - - rep.ret = drm_buffer_object_unmap(priv, req->handle); - - - if (rep.ret == -EAGAIN) - return -EAGAIN; - + mutex_lock(&dev->struct_mutex); + uo = drm_lookup_user_object(priv, arg.handle); + if (!uo || (uo->type != drm_buffer_type) || uo->owner != priv) { + mutex_unlock(&dev->struct_mutex); + return -EINVAL; + } + ret = drm_remove_user_object(priv, uo); + mutex_unlock(&dev->struct_mutex); - DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); + return ret; +} +int drm_bo_map_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + struct drm_bo_map_wait_idle_arg arg; + struct drm_bo_info_req *req = &arg.d.req; + struct drm_bo_info_rep *rep = &arg.d.rep; + int ret; + if (!dev->bm.initialized) { + DRM_ERROR("Buffer object manager is not initialized.\n"); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + + ret = drm_buffer_object_map(priv, req->handle, req->mask, + req->hint, rep); + if (ret) + return ret; + + DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); return 0; } +int drm_bo_unmap_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + struct drm_bo_handle_arg arg; + int ret; + if (!dev->bm.initialized) { + DRM_ERROR("Buffer object manager is not initialized.\n"); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + + ret = drm_buffer_object_unmap(priv, arg.handle); + return ret; +} + + +int drm_bo_reference_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + struct drm_bo_reference_info_arg arg; + struct drm_bo_handle_arg *req = &arg.d.req; + struct drm_bo_info_rep *rep = &arg.d.rep; + drm_user_object_t *uo; + int ret; + + if (!dev->bm.initialized) { + 
DRM_ERROR("Buffer object manager is not initialized.\n"); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + + ret = drm_user_object_ref(priv, req->handle, + drm_buffer_type, &uo); + if (ret) + return ret; + + ret = drm_bo_handle_info(priv, req->handle, rep); + if (ret) + return ret; + + DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); + return 0; +} + +int drm_bo_unreference_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + struct drm_bo_handle_arg arg; + int ret = 0; + + if (!dev->bm.initialized) { + DRM_ERROR("Buffer object manager is not initialized.\n"); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + + ret = drm_user_object_unref(priv, arg.handle, drm_buffer_type); + return ret; +} + +int drm_bo_info_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + struct drm_bo_reference_info_arg arg; + struct drm_bo_handle_arg *req = &arg.d.req; + struct drm_bo_info_rep *rep = &arg.d.rep; + int ret; + + if (!dev->bm.initialized) { + DRM_ERROR("Buffer object manager is not initialized.\n"); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + + ret = drm_bo_handle_info(priv, req->handle, rep); + if (ret) + return ret; + DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); + return 0; +} + +int drm_bo_wait_idle_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + struct drm_bo_map_wait_idle_arg arg; + struct drm_bo_info_req *req = &arg.d.req; + struct drm_bo_info_rep *rep = &arg.d.rep; + int ret; + if (!dev->bm.initialized) { + DRM_ERROR("Buffer object manager is not initialized.\n"); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + + ret = drm_bo_handle_wait(priv, req->handle, + req->hint, rep); + if (ret) + return ret; + + DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); + return 0; +} + + + /** *Clean the unfenced list and put on regular LRU. 
*This is part of the memory manager cleanup and should only be diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index 6b98f2c1..b931ce2f 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -118,7 +118,7 @@ static drm_ioctl_desc_t drm_ioctls[] = { [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = {drm_wait_vblank, 0}, - [DRM_IOCTL_NR(DRM_IOCTL_BUFOBJ)] = {drm_bo_ioctl, DRM_AUTH}, + // [DRM_IOCTL_NR(DRM_IOCTL_BUFOBJ)] = {drm_bo_ioctl, DRM_AUTH}, [DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW)] = {drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, @@ -142,6 +142,18 @@ static drm_ioctl_desc_t drm_ioctls[] = { [DRM_IOCTL_NR(DRM_IOCTL_FENCE_EMIT)] = {drm_fence_emit_ioctl, DRM_AUTH}, [DRM_IOCTL_NR(DRM_IOCTL_FENCE_BUFFERS)] = {drm_fence_buffers_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_BO_CREATE)] = {drm_bo_create_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_BO_DESTROY)] = {drm_bo_destroy_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_BO_MAP)] = {drm_bo_map_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_BO_UNMAP)] = {drm_bo_unmap_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_BO_REFERENCE)] = {drm_bo_reference_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_BO_UNREFERENCE)] = {drm_bo_unreference_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_BO_OP)] = {drm_bo_op_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_BO_INFO)] = {drm_bo_info_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_BO_WAIT_IDLE)] = {drm_bo_wait_idle_ioctl, DRM_AUTH}, + + + }; #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index 17338da5..61059a05 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -436,7 +436,17 @@ typedef struct drm_bo_driver { * buffer objects (drm_bo.c) */ -extern int drm_bo_ioctl(DRM_IOCTL_ARGS); +extern int drm_bo_create_ioctl(DRM_IOCTL_ARGS); +extern int drm_bo_destroy_ioctl(DRM_IOCTL_ARGS); +extern int drm_bo_map_ioctl(DRM_IOCTL_ARGS); +extern int drm_bo_unmap_ioctl(DRM_IOCTL_ARGS); +extern int drm_bo_reference_ioctl(DRM_IOCTL_ARGS); +extern int drm_bo_unreference_ioctl(DRM_IOCTL_ARGS); +extern int drm_bo_wait_idle_ioctl(DRM_IOCTL_ARGS); +extern int drm_bo_info_ioctl(DRM_IOCTL_ARGS); +extern int drm_bo_op_ioctl(DRM_IOCTL_ARGS); + + extern int drm_mm_init_ioctl(DRM_IOCTL_ARGS); extern int drm_mm_takedown_ioctl(DRM_IOCTL_ARGS); extern int drm_mm_lock_ioctl(DRM_IOCTL_ARGS); diff --git a/shared-core/drm.h b/shared-core/drm.h index 7b3ee153..ae308be6 100644 --- a/shared-core/drm.h +++ b/shared-core/drm.h @@ -803,6 +803,7 @@ struct drm_bo_op_req { enum { drm_bo_validate, drm_bo_fence, + drm_bo_ref_fence, } op; }; From b6b5df24b962c94433afe4d8665b5f145bfa1ad3 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Tue, 12 Jun 2007 12:21:38 +0200 Subject: [PATCH 006/437] Try to make buffer object / fence object ioctl args 64-bit safe. Introduce tile members for future tiled buffer support. Allow user-space to explicitly define a fence-class. Remove the implicit fence-class mechanism. 64-bit wide buffer object flag member. 
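The widened interface below changes what user space passes in: buffer flags and masks become drm_u64_t, and the fence class is named explicitly rather than inferred. A minimal caller sketch, assuming the drmBOCreate()/drmBOValidate() prototypes introduced in this patch; the buffer size, flag combination and hints are illustrative only, not taken from any real driver:

    #include <xf86drm.h>
    #include <xf86mm.h>

    static int create_and_bind(int fd, drmBO *buf)
    {
        /* 64-bit mask: driver-private bits above bit 31 now fit. */
        drm_u64_t mask = DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
                         DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MEM_VRAM;
        int ret;

        ret = drmBOCreate(fd, 0, 64 * 1024, 0, NULL, drm_bo_type_dc,
                          mask, DRM_BO_HINT_DONT_BLOCK, buf);
        if (ret)
            return ret;

        /* Ask for TT placement only; drmBOValidate() still passes
         * fence_class = 0 to the kernel for backwards compatibility. */
        return drmBOValidate(fd, buf, DRM_BO_FLAG_MEM_TT,
                             DRM_BO_MASK_MEM, DRM_BO_HINT_DONT_BLOCK);
    }
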
--- libdrm/xf86drm.c | 37 ++++++++------ libdrm/xf86mm.h | 19 +++++--- linux-core/drm_bo.c | 79 ++++++++++++++++++++++++------ linux-core/drm_compat.c | 6 +-- linux-core/drm_objects.h | 8 +-- linux-core/i915_buffer.c | 5 +- linux-core/via_buffer.c | 5 +- shared-core/drm.h | 103 +++++++++++++++++++++++---------------- shared-core/i915_drv.h | 4 +- shared-core/via_drv.h | 4 +- 10 files changed, 173 insertions(+), 97 deletions(-) diff --git a/libdrm/xf86drm.c b/libdrm/xf86drm.c index 63b9354f..4f12bd0b 100644 --- a/libdrm/xf86drm.c +++ b/libdrm/xf86drm.c @@ -2611,13 +2611,16 @@ static void drmBOCopyReply(struct drm_bo_info_rep *rep, buf->fenceFlags = rep->fence_flags; buf->replyFlags = rep->rep_flags; buf->pageAlignment = rep->page_alignment; + buf->tileInfo = rep->tile_info; + buf->hwTileStride = rep->hw_tile_stride; + buf->desiredTileStride = rep->desired_tile_stride; } - - -int drmBOCreate(int fd, unsigned long start, unsigned long size, - unsigned pageAlignment, void *user_buffer, drm_bo_type_t type, - unsigned mask, + + +int drmBOCreate(int fd, unsigned long start, unsigned long size, + unsigned pageAlignment, void *user_buffer, drm_bo_type_t type, + drm_u64_t mask, unsigned hint, drmBO *buf) { struct drm_bo_create_arg arg; @@ -2798,8 +2801,9 @@ int drmBOUnmap(int fd, drmBO *buf) } return 0; } - -int drmBOValidate(int fd, drmBO *buf, unsigned flags, unsigned mask, + +int drmBOValidate(int fd, drmBO *buf, + drm_u64_t flags, drm_u64_t mask, unsigned hint) { struct drm_bo_op_arg arg; @@ -2809,9 +2813,10 @@ int drmBOValidate(int fd, drmBO *buf, unsigned flags, unsigned mask, memset(&arg, 0, sizeof(arg)); req->bo_req.handle = buf->handle; - req->bo_req.mask = flags; + req->bo_req.flags = flags; + req->bo_req.mask = mask; req->bo_req.hint = hint; - req->arg_handle = mask; /* Encode mask in the arg_handle field :/ */ + req->bo_req.fence_class = 0; /* Backwards compatibility. */ req->op = drm_bo_validate; do{ @@ -2839,7 +2844,7 @@ int drmBOFence(int fd, drmBO *buf, unsigned flags, unsigned fenceHandle) memset(&arg, 0, sizeof(arg)); req->bo_req.handle = buf->handle; - req->bo_req.mask = flags; + req->bo_req.flags = flags; req->arg_handle = fenceHandle; req->op = drm_bo_fence; @@ -2999,12 +3004,13 @@ int drmBOValidateList(int fd, drmBOList *list) prevNext = &arg->next; req->bo_req.handle = node->buf->handle; req->op = drm_bo_validate; - req->bo_req.mask = node->arg0; + req->bo_req.flags = node->arg0; req->bo_req.hint = 0; - req->arg_handle = node->arg1; + req->bo_req.mask = node->arg1; + req->bo_req.fence_class = 0; /* Backwards compat. 
*/ } - - if (!first) + + if (!first) return 0; do{ @@ -3101,6 +3107,9 @@ int drmMMInit(int fd, unsigned long pOffset, unsigned long pSize, memset(&arg, 0, sizeof(arg)); + arg.magic = DRM_BO_INIT_MAGIC; + arg.major = DRM_BO_INIT_MAJOR; + arg.minor = DRM_BO_INIT_MINOR; arg.p_offset = pOffset; arg.p_size = pSize; arg.mem_type = memType; diff --git a/libdrm/xf86mm.h b/libdrm/xf86mm.h index 0b284cc0..68cd7cdd 100644 --- a/libdrm/xf86mm.h +++ b/libdrm/xf86mm.h @@ -106,8 +106,8 @@ typedef struct _drmBO{ drm_bo_type_t type; unsigned handle; drm_u64_t mapHandle; - unsigned flags; - unsigned mask; + drm_u64_t flags; + drm_u64_t mask; unsigned mapFlags; unsigned long size; unsigned long offset; @@ -115,6 +115,9 @@ typedef struct _drmBO{ unsigned replyFlags; unsigned fenceFlags; unsigned pageAlignment; + unsigned tileInfo; + unsigned hwTileStride; + unsigned desiredTileStride; void *virtual; void *mapVirtual; int mapCount; @@ -172,8 +175,8 @@ extern int drmBOCreateList(int numTarget, drmBOList *list); */ extern int drmBOCreate(int fd, unsigned long start, unsigned long size, - unsigned pageAlignment,void *user_buffer, - drm_bo_type_t type, unsigned mask, + unsigned pageAlignment,void *user_buffer, + drm_bo_type_t type, drm_u64_t mask, unsigned hint, drmBO *buf); extern int drmBODestroy(int fd, drmBO *buf); extern int drmBOReference(int fd, unsigned handle, drmBO *buf); @@ -181,14 +184,14 @@ extern int drmBOUnReference(int fd, drmBO *buf); extern int drmBOMap(int fd, drmBO *buf, unsigned mapFlags, unsigned mapHint, void **address); extern int drmBOUnmap(int fd, drmBO *buf); -extern int drmBOValidate(int fd, drmBO *buf, unsigned flags, unsigned mask, - unsigned hint); +extern int drmBOValidate(int fd, drmBO *buf, drm_u64_t flags, + drm_u64_t mask, unsigned hint); + extern int drmBOFence(int fd, drmBO *buf, unsigned flags, unsigned fenceHandle); extern int drmBOInfo(int fd, drmBO *buf); extern int drmBOBusy(int fd, drmBO *buf, int *busy); - -extern int drmAddValidateItem(drmBOList *list, drmBO *buf, unsigned flags, +extern int drmAddValidateItem(drmBOList *list, drmBO *buf, unsigned flags, unsigned mask, int *newItem); extern int drmBOValidateList(int fd, drmBOList *list); diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index be5fd6a8..b6a31943 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -195,8 +195,8 @@ static int drm_bo_handle_move_mem(drm_buffer_object_t * bo, if ((bo->mem.mem_type == DRM_BO_MEM_LOCAL) && bo->ttm == NULL) { drm_bo_mem_reg_t *old_mem = &bo->mem; - uint32_t save_flags = old_mem->flags; - uint32_t save_mask = old_mem->mask; + uint64_t save_flags = old_mem->flags; + uint64_t save_mask = old_mem->mask; *old_mem = *mem; mem->mm_node = NULL; @@ -871,7 +871,7 @@ int drm_bo_mem_space(drm_buffer_object_t * bo, EXPORT_SYMBOL(drm_bo_mem_space); static int drm_bo_new_mask(drm_buffer_object_t * bo, - uint32_t new_mask, uint32_t hint) + uint64_t new_mask, uint32_t hint) { uint32_t new_props; @@ -1343,7 +1343,8 @@ static int drm_bo_check_fake(drm_device_t * dev, drm_bo_mem_reg_t * mem) return 0; } - DRM_ERROR("Illegal fake buffer flags 0x%08x\n", mem->mask); + DRM_ERROR("Illegal fake buffer flags 0x%016llx\n", + (unsigned long long) mem->mask); return -EINVAL; } @@ -1352,22 +1353,45 @@ static int drm_bo_check_fake(drm_device_t * dev, drm_bo_mem_reg_t * mem) */ static int drm_buffer_object_validate(drm_buffer_object_t * bo, + uint32_t fence_class, int move_unfenced, int no_wait) { drm_device_t *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; drm_bo_driver_t *driver = 
dev->driver->bo_driver; + uint32_t ftype; int ret; - DRM_DEBUG("New flags 0x%08x, Old flags 0x%08x\n", bo->mem.mask, - bo->mem.flags); - ret = - driver->fence_type(bo, &bo->fence_class, &bo->fence_type); + DRM_DEBUG("New flags 0x%016llx, Old flags 0x%016llx\n", + (unsigned long long) bo->mem.mask, + (unsigned long long) bo->mem.flags); + + ret = driver->fence_type(bo, &ftype); + if (ret) { DRM_ERROR("Driver did not support given buffer permissions\n"); return ret; } + /* + * We're switching command submission mechanism, + * or cannot simply rely on the hardware serializing for us. + * + * Wait for buffer idle. + */ + + if ((fence_class != bo->fence_class) || + ((ftype ^ bo->fence_type) & bo->fence_type)) { + + ret = drm_bo_wait(bo, 0, 0, no_wait); + + if (ret) + return ret; + + } + + bo->fence_class = fence_class; + bo->fence_type = ftype; ret = drm_bo_wait_unmapped(bo, no_wait); if (ret) return ret; @@ -1457,8 +1481,10 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, return 0; } -static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle, - uint32_t flags, uint32_t mask, uint32_t hint, +static int drm_bo_handle_validate(drm_file_t * priv, + uint32_t handle, + uint32_t fence_class, + uint64_t flags, uint64_t mask, uint32_t hint, struct drm_bo_info_rep *rep) { drm_buffer_object_t *bo; @@ -1482,7 +1508,8 @@ static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle, goto out; ret = - drm_buffer_object_validate(bo, !(hint & DRM_BO_HINT_DONT_FENCE), + drm_buffer_object_validate(bo, fence_class, + !(hint & DRM_BO_HINT_DONT_FENCE), no_wait); drm_bo_fill_rep_arg(bo, rep); @@ -1544,7 +1571,7 @@ static int drm_bo_handle_wait(drm_file_t *priv, uint32_t handle, int drm_buffer_object_create(drm_device_t *dev, unsigned long size, drm_bo_type_t type, - uint32_t mask, + uint64_t mask, uint32_t hint, uint32_t page_alignment, unsigned long buffer_start, @@ -1596,8 +1623,8 @@ int drm_buffer_object_create(drm_device_t *dev, bo->buffer_start = buffer_start; } bo->priv_flags = 0; - bo->mem.flags = 0; - bo->mem.mask = 0; + bo->mem.flags = 0ULL; + bo->mem.mask = 0ULL; atomic_inc(&bm->count); ret = drm_bo_new_mask(bo, mask, hint); @@ -1611,7 +1638,7 @@ int drm_buffer_object_create(drm_device_t *dev, if (ret) goto out_err; } - ret = drm_buffer_object_validate(bo, 0, hint & DRM_BO_HINT_DONT_BLOCK); + ret = drm_buffer_object_validate(bo, 0, 0, hint & DRM_BO_HINT_DONT_BLOCK); if (ret) goto out_err; @@ -1682,8 +1709,9 @@ int drm_bo_op_ioctl(DRM_IOCTL_ARGS) if (ret) break; ret = drm_bo_handle_validate(priv, req->bo_req.handle, + req->bo_req.fence_class, + req->bo_req.flags, req->bo_req.mask, - req->arg_handle, req->bo_req.hint, &rep); break; @@ -2305,6 +2333,25 @@ int drm_mm_init_ioctl(DRM_IOCTL_ARGS) DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); ret = -EINVAL; + if (arg.magic != DRM_BO_INIT_MAGIC) { + DRM_ERROR("You are using an old libdrm that is not compatible with\n" + "\tthe kernel DRM module. Please upgrade your libdrm.\n"); + return -EINVAL; + } + if (arg.major != DRM_BO_INIT_MAJOR) { + DRM_ERROR("libdrm and kernel DRM buffer object interface major\n" + "\tversion don't match. 
Got %d, expected %d,\n", + arg.major, DRM_BO_INIT_MAJOR); + return -EINVAL; + } + if (arg.minor > DRM_BO_INIT_MINOR) { + DRM_ERROR("libdrm expects a newer DRM buffer object interface.\n" + "\tlibdrm buffer object interface version is %d.%d.\n" + "\tkernel DRM buffer object interface version is %d.%d\n", + arg.major, arg.minor, DRM_BO_INIT_MAJOR, DRM_BO_INIT_MINOR); + return -EINVAL; + } + mutex_lock(&dev->bm.init_mutex); mutex_lock(&dev->struct_mutex); if (!bm->initialized) { diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c index 867cee85..d47b92e5 100644 --- a/linux-core/drm_compat.c +++ b/linux-core/drm_compat.c @@ -184,7 +184,7 @@ static int drm_pte_is_clear(struct vm_area_struct *vma, spin_unlock(&mm->page_table_lock); return ret; } -#if 0 + static int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn) { @@ -195,9 +195,9 @@ static int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, ret = io_remap_pfn_range(vma, addr, pfn, PAGE_SIZE, vma->vm_page_prot); return ret; } -#endif -static struct page *drm_bo_vm_fault(struct vm_area_struct *vma, + +static struct page *drm_bo_vm_fault(struct vm_area_struct *vma, struct fault_data *data) { unsigned long address = data->address; diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index 61059a05..b40320aa 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -321,8 +321,8 @@ typedef struct drm_bo_mem_reg { unsigned long num_pages; uint32_t page_alignment; uint32_t mem_type; - uint32_t flags; - uint32_t mask; + uint64_t flags; + uint64_t mask; } drm_bo_mem_reg_t; typedef struct drm_buffer_object { @@ -423,8 +423,8 @@ typedef struct drm_bo_driver { uint32_t num_mem_busy_prio; drm_ttm_backend_t *(*create_ttm_backend_entry) (struct drm_device * dev); - int (*fence_type) (struct drm_buffer_object *bo, uint32_t * class, uint32_t * type); - int (*invalidate_caches) (struct drm_device * dev, uint32_t flags); + int (*fence_type) (struct drm_buffer_object *bo, uint32_t * type); + int (*invalidate_caches) (struct drm_device * dev, uint64_t flags); int (*init_mem_type) (struct drm_device * dev, uint32_t type, drm_mem_type_manager_t * man); uint32_t(*evict_mask) (struct drm_buffer_object *bo); diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c index 8589f467..2850fb94 100644 --- a/linux-core/i915_buffer.c +++ b/linux-core/i915_buffer.c @@ -38,9 +38,8 @@ drm_ttm_backend_t *i915_create_ttm_backend_entry(drm_device_t * dev) return drm_agp_init_ttm(dev); } -int i915_fence_types(drm_buffer_object_t *bo, uint32_t * class, uint32_t * type) +int i915_fence_types(drm_buffer_object_t *bo, uint32_t * type) { - *class = 0; if (bo->mem.flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE)) *type = 3; else @@ -48,7 +47,7 @@ int i915_fence_types(drm_buffer_object_t *bo, uint32_t * class, uint32_t * type) return 0; } -int i915_invalidate_caches(drm_device_t * dev, uint32_t flags) +int i915_invalidate_caches(drm_device_t * dev, uint64_t flags) { /* * FIXME: Only emit once per batchbuffer submission. 
diff --git a/linux-core/via_buffer.c b/linux-core/via_buffer.c index ebc8c371..86883998 100644 --- a/linux-core/via_buffer.c +++ b/linux-core/via_buffer.c @@ -37,14 +37,13 @@ drm_ttm_backend_t *via_create_ttm_backend_entry(drm_device_t * dev) return drm_agp_init_ttm(dev); } -int via_fence_types(drm_buffer_object_t *bo, uint32_t * class, uint32_t * type) +int via_fence_types(drm_buffer_object_t *bo, uint32_t * type) { - *class = 0; *type = 3; return 0; } -int via_invalidate_caches(drm_device_t * dev, uint32_t flags) +int via_invalidate_caches(drm_device_t * dev, uint64_t flags) { /* * FIXME: Invalidate texture caches here. diff --git a/shared-core/drm.h b/shared-core/drm.h index ae308be6..bc2e718c 100644 --- a/shared-core/drm.h +++ b/shared-core/drm.h @@ -671,12 +671,13 @@ typedef struct drm_set_version { #define DRM_FENCE_TYPE_EXE 0x00000001 typedef struct drm_fence_arg { - unsigned handle; - int class; - unsigned type; - unsigned flags; - unsigned signaled; - unsigned expand_pad[4]; /*Future expansion */ + unsigned int handle; + unsigned int class; + unsigned int type; + unsigned int flags; + unsigned int signaled; + unsigned int pad_64; + drm_u64_t expand_pad[3]; /*Future expansion */ } drm_fence_arg_t; /* Buffer permissions, referring to how the GPU uses the buffers. @@ -685,9 +686,9 @@ typedef struct drm_fence_arg { * a command (batch-) buffer is exe. Can be or-ed together. */ -#define DRM_BO_FLAG_READ 0x00000001 -#define DRM_BO_FLAG_WRITE 0x00000002 -#define DRM_BO_FLAG_EXE 0x00000004 +#define DRM_BO_FLAG_READ (1ULL << 0) +#define DRM_BO_FLAG_WRITE (1ULL << 1) +#define DRM_BO_FLAG_EXE (1ULL << 2) /* * Status flags. Can be read to determine the actual state of a buffer. @@ -700,25 +701,25 @@ typedef struct drm_fence_arg { * or lock. * Flags: Acknowledge */ -#define DRM_BO_FLAG_NO_EVICT 0x00000010 +#define DRM_BO_FLAG_NO_EVICT (1ULL << 4) /* * Mask: Require that the buffer is placed in mappable memory when validated. * If not set the buffer may or may not be in mappable memory when validated. * Flags: If set, the buffer is in mappable memory. */ -#define DRM_BO_FLAG_MAPPABLE 0x00000020 +#define DRM_BO_FLAG_MAPPABLE (1ULL << 5) /* Mask: The buffer should be shareable with other processes. * Flags: The buffer is shareable with other processes. */ -#define DRM_BO_FLAG_SHAREABLE 0x00000040 +#define DRM_BO_FLAG_SHAREABLE (1ULL << 6) /* Mask: If set, place the buffer in cache-coherent memory if available. * If clear, never place the buffer in cache coherent memory if validated. * Flags: The buffer is currently in cache-coherent memory. */ -#define DRM_BO_FLAG_CACHED 0x00000080 +#define DRM_BO_FLAG_CACHED (1ULL << 7) /* Mask: Make sure that every time this buffer is validated, * it ends up on the same location provided that the memory mask is the same. @@ -727,23 +728,23 @@ typedef struct drm_fence_arg { * part of buffer manager shutdown or locking. * Flags: Acknowledge. */ -#define DRM_BO_FLAG_NO_MOVE 0x00000100 +#define DRM_BO_FLAG_NO_MOVE (1ULL << 8) /* Mask: Make sure the buffer is in cached memory when mapped for reading. * Flags: Acknowledge. */ -#define DRM_BO_FLAG_READ_CACHED 0x00080000 +#define DRM_BO_FLAG_READ_CACHED (1ULL << 16) /* Mask: Force DRM_BO_FLAG_CACHED flag strictly also if it is set. * Flags: Acknowledge. */ -#define DRM_BO_FLAG_FORCE_CACHING 0x00002000 +#define DRM_BO_FLAG_FORCE_CACHING (1ULL << 13) /* * Mask: Force DRM_BO_FLAG_MAPPABLE flag strictly also if it is clear. * Flags: Acknowledge. 
*/ -#define DRM_BO_FLAG_FORCE_MAPPABLE 0x00004000 +#define DRM_BO_FLAG_FORCE_MAPPABLE (1ULL << 14) /* * Memory type flags that can be or'ed together in the mask, but only @@ -751,21 +752,25 @@ typedef struct drm_fence_arg { */ /* System memory */ -#define DRM_BO_FLAG_MEM_LOCAL 0x01000000 +#define DRM_BO_FLAG_MEM_LOCAL (1ULL << 24) /* Translation table memory */ -#define DRM_BO_FLAG_MEM_TT 0x02000000 +#define DRM_BO_FLAG_MEM_TT (1ULL << 25) /* Vram memory */ -#define DRM_BO_FLAG_MEM_VRAM 0x04000000 +#define DRM_BO_FLAG_MEM_VRAM (1ULL << 26) /* Up to the driver to define. */ -#define DRM_BO_FLAG_MEM_PRIV0 0x08000000 -#define DRM_BO_FLAG_MEM_PRIV1 0x10000000 -#define DRM_BO_FLAG_MEM_PRIV2 0x20000000 -#define DRM_BO_FLAG_MEM_PRIV3 0x40000000 -#define DRM_BO_FLAG_MEM_PRIV4 0x80000000 +#define DRM_BO_FLAG_MEM_PRIV0 (1ULL << 27) +#define DRM_BO_FLAG_MEM_PRIV1 (1ULL << 28) +#define DRM_BO_FLAG_MEM_PRIV2 (1ULL << 29) +#define DRM_BO_FLAG_MEM_PRIV3 (1ULL << 30) +#define DRM_BO_FLAG_MEM_PRIV4 (1ULL << 31) +/* We can add more of these now with a 64-bit flag type */ /* Memory flag mask */ -#define DRM_BO_MASK_MEM 0xFF000000 -#define DRM_BO_MASK_MEMTYPE 0xFF0000A0 +#define DRM_BO_MASK_MEM 0x00000000FF000000ULL +#define DRM_BO_MASK_MEMTYPE 0x00000000FF0000A0ULL + +/* Driver-private flags */ +#define DRM_BO_MASK_DRIVER 0xFFFF000000000000ULL /* Don't block on validate and map */ #define DRM_BO_HINT_DONT_BLOCK 0x00000002 @@ -774,6 +779,10 @@ typedef struct drm_fence_arg { #define DRM_BO_HINT_WAIT_LAZY 0x00000008 #define DRM_BO_HINT_ALLOW_UNFENCED_MAP 0x00000010 +#define DRM_BO_INIT_MAGIC 0xfe769812 +#define DRM_BO_INIT_MAJOR 0 +#define DRM_BO_INIT_MINOR 1 + typedef enum { drm_bo_type_dc, @@ -783,28 +792,30 @@ typedef enum { }drm_bo_type_t; struct drm_bo_info_req { - unsigned int handle; - unsigned int mask; + drm_u64_t mask; + drm_u64_t flags; + unsigned int handle; unsigned int hint; + unsigned int fence_class; }; struct drm_bo_create_req { - unsigned int mask; - unsigned int hint; - unsigned page_alignment; + drm_u64_t mask; drm_u64_t size; - drm_bo_type_t type; drm_u64_t buffer_start; + unsigned int hint; + unsigned int page_alignment; + drm_bo_type_t type; }; struct drm_bo_op_req { - struct drm_bo_info_req bo_req; - unsigned int arg_handle; enum { drm_bo_validate, drm_bo_fence, drm_bo_ref_fence, } op; + unsigned int arg_handle; + struct drm_bo_info_req bo_req; }; /* @@ -814,22 +825,26 @@ struct drm_bo_op_req { #define DRM_BO_REP_BUSY 0x00000001 struct drm_bo_info_rep { - unsigned int handle; - unsigned int flags; + drm_u64_t flags; + drm_u64_t mask; drm_u64_t size; drm_u64_t offset; drm_u64_t arg_handle; - unsigned int mask; drm_u64_t buffer_start; + unsigned int handle; unsigned int fence_flags; unsigned int rep_flags; unsigned int page_alignment; - unsigned int expand_pad[4]; /*Future expansion */ + unsigned int desired_tile_stride; + unsigned int hw_tile_stride; + unsigned int tile_info; + unsigned int pad64; + drm_u64_t expand_pad[4]; /*Future expansion */ }; struct drm_bo_arg_rep { - int ret; struct drm_bo_info_rep bo_info; + int ret; }; struct drm_bo_create_arg { @@ -859,6 +874,7 @@ struct drm_bo_map_wait_idle_arg { struct drm_bo_op_arg { int handled; + unsigned int pad_64; drm_u64_t next; union { struct drm_bo_op_req req; @@ -879,12 +895,15 @@ struct drm_bo_op_arg { typedef struct drm_mm_type_arg { unsigned int mem_type; -} drm_mm_type_arg_t; +} drm_mm_type_arg_t; typedef struct drm_mm_init_arg { + unsigned int magic; + unsigned int major; + unsigned int minor; + unsigned int mem_type; drm_u64_t 
p_offset; drm_u64_t p_size; - unsigned int mem_type; } drm_mm_init_arg_t; /** diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h index e8a7be29..52a958d9 100644 --- a/shared-core/i915_drv.h +++ b/shared-core/i915_drv.h @@ -196,8 +196,8 @@ extern int i915_fence_has_irq(drm_device_t *dev, uint32_t class, uint32_t flags) #ifdef I915_HAVE_BUFFER /* i915_buffer.c */ extern drm_ttm_backend_t *i915_create_ttm_backend_entry(drm_device_t *dev); -extern int i915_fence_types(drm_buffer_object_t *bo, uint32_t *class, uint32_t *type); -extern int i915_invalidate_caches(drm_device_t *dev, uint32_t buffer_flags); +extern int i915_fence_types(drm_buffer_object_t *bo, uint32_t *type); +extern int i915_invalidate_caches(drm_device_t *dev, uint64_t buffer_flags); extern int i915_init_mem_type(drm_device_t *dev, uint32_t type, drm_mem_type_manager_t *man); extern uint32_t i915_evict_mask(drm_buffer_object_t *bo); diff --git a/shared-core/via_drv.h b/shared-core/via_drv.h index baafbbff..b6dbf6c1 100644 --- a/shared-core/via_drv.h +++ b/shared-core/via_drv.h @@ -205,8 +205,8 @@ extern int via_fence_has_irq(struct drm_device * dev, uint32_t class, #ifdef VIA_HAVE_BUFFER extern drm_ttm_backend_t *via_create_ttm_backend_entry(drm_device_t *dev); -extern int via_fence_types(drm_buffer_object_t *bo, uint32_t *class, uint32_t *type); -extern int via_invalidate_caches(drm_device_t *dev, uint32_t buffer_flags); +extern int via_fence_types(drm_buffer_object_t *bo, uint32_t *type); +extern int via_invalidate_caches(drm_device_t *dev, uint64_t buffer_flags); extern int via_init_mem_type(drm_device_t *dev, uint32_t type, drm_mem_type_manager_t *man); extern uint32_t via_evict_mask(drm_buffer_object_t *bo); From f984b1b8d17f285dfacb593702178f1eb2fdb4ac Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Tue, 12 Jun 2007 12:30:33 +0200 Subject: [PATCH 007/437] Fix some obvious bugs. --- linux-core/drm_bo.c | 6 +++--- shared-core/drm.h | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index b6a31943..a1a27d1e 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1716,13 +1716,13 @@ int drm_bo_op_ioctl(DRM_IOCTL_ARGS) &rep); break; case drm_bo_fence: - ret = drm_bo_lock_test(dev, filp); - if (ret) - break; + ret = -EINVAL; + DRM_ERROR("Function is not implemented yet.\n"); break; case drm_bo_ref_fence: ret = -EINVAL; DRM_ERROR("Function is not implemented yet.\n"); + break; default: ret = -EINVAL; } diff --git a/shared-core/drm.h b/shared-core/drm.h index bc2e718c..1b0e54e3 100644 --- a/shared-core/drm.h +++ b/shared-core/drm.h @@ -733,7 +733,7 @@ typedef struct drm_fence_arg { /* Mask: Make sure the buffer is in cached memory when mapped for reading. * Flags: Acknowledge. */ -#define DRM_BO_FLAG_READ_CACHED (1ULL << 16) +#define DRM_BO_FLAG_READ_CACHED (1ULL << 19) /* Mask: Force DRM_BO_FLAG_CACHED flag strictly also if it is set. * Flags: Acknowledge. From 5156f1c897142171e78d0ea2c45a3aecb581fffa Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Wed, 13 Jun 2007 15:19:30 +0200 Subject: [PATCH 008/437] Fix fence object deref race. 
--- linux-core/drm_bo.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 1c7013b3..bcb5c95d 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -338,7 +338,7 @@ static void drm_bo_cleanup_refs(drm_buffer_object_t * bo, int remove_all) DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); if (bo->fence && drm_fence_object_signaled(bo->fence, bo->fence_type)) { - drm_fence_usage_deref_locked(dev, bo->fence); + drm_fence_usage_deref_unlocked(dev, bo->fence); bo->fence = NULL; } From 62082ab3e63f6f474655da98b710e453b4124ed1 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Wed, 13 Jun 2007 15:38:59 +0200 Subject: [PATCH 009/437] Make sure we read fence->signaled while spinlocked. --- linux-core/drm_bo.c | 11 ++++++----- linux-core/drm_fence.c | 18 ++++++------------ linux-core/drm_objects.h | 4 +++- 3 files changed, 15 insertions(+), 18 deletions(-) diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index bcb5c95d..1c0eebd0 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -268,7 +268,7 @@ int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals, if (fence) { drm_device_t *dev = bo->dev; - if (drm_fence_object_signaled(fence, bo->fence_type)) { + if (drm_fence_object_signaled(dev, fence, bo->fence_type, 0)) { drm_fence_usage_deref_unlocked(dev, fence); bo->fence = NULL; return 0; @@ -337,7 +337,8 @@ static void drm_bo_cleanup_refs(drm_buffer_object_t * bo, int remove_all) DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); - if (bo->fence && drm_fence_object_signaled(bo->fence, bo->fence_type)) { + if (bo->fence && drm_fence_object_signaled(dev, bo->fence, + bo->fence_type, 0)) { drm_fence_usage_deref_unlocked(dev, bo->fence); bo->fence = NULL; } @@ -944,7 +945,7 @@ static int drm_bo_quick_busy(drm_buffer_object_t * bo) BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED); if (fence) { drm_device_t *dev = bo->dev; - if (drm_fence_object_signaled(fence, bo->fence_type)) { + if (drm_fence_object_signaled(dev, fence, bo->fence_type, 0)) { drm_fence_usage_deref_unlocked(dev, fence); bo->fence = NULL; return 0; @@ -966,13 +967,13 @@ static int drm_bo_busy(drm_buffer_object_t * bo) BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED); if (fence) { drm_device_t *dev = bo->dev; - if (drm_fence_object_signaled(fence, bo->fence_type)) { + if (drm_fence_object_signaled(dev, fence, bo->fence_type, 0)) { drm_fence_usage_deref_unlocked(dev, fence); bo->fence = NULL; return 0; } drm_fence_object_flush(dev, fence, DRM_FENCE_TYPE_EXE); - if (drm_fence_object_signaled(fence, bo->fence_type)) { + if (drm_fence_object_signaled(dev, fence, bo->fence_type, 0)) { drm_fence_usage_deref_unlocked(dev, fence); bo->fence = NULL; return 0; diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c index ce161dc3..70baad9f 100644 --- a/linux-core/drm_fence.c +++ b/linux-core/drm_fence.c @@ -164,7 +164,7 @@ static void drm_fence_object_destroy(drm_file_t * priv, drm_fence_usage_deref_locked(dev, fence); } -static int fence_signaled(drm_device_t * dev, +int drm_fence_object_signaled(drm_device_t * dev, drm_fence_object_t * fence, uint32_t mask, int poke_flush) { @@ -200,12 +200,6 @@ static void drm_fence_flush_exe(drm_fence_class_manager_t * fc, } } -int drm_fence_object_signaled(drm_fence_object_t * fence, - uint32_t type) -{ - return ((fence->signaled & type) == type); -} - int drm_fence_object_flush(drm_device_t * dev, drm_fence_object_t * fence, uint32_t type) @@ -298,13 +292,13 @@ static int 
drm_fence_lazy_wait(drm_device_t *dev, do { DRM_WAIT_ON(ret, fc->fence_queue, 3 * DRM_HZ, - (signaled = fence_signaled(dev, fence, mask, 1))); + (signaled = drm_fence_object_signaled(dev, fence, mask, 1))); if (signaled) return 0; if (time_after_eq(jiffies, _end)) break; } while (ret == -EINTR && ignore_signals); - if (fence_signaled(dev, fence, mask, 0)) + if (drm_fence_object_signaled(dev, fence, mask, 0)) return 0; if (time_after_eq(jiffies, _end)) ret = -EBUSY; @@ -334,7 +328,7 @@ int drm_fence_object_wait(drm_device_t * dev, return -EINVAL; } - if (fence_signaled(dev, fence, mask, 0)) + if (drm_fence_object_signaled(dev, fence, mask, 0)) return 0; _end = jiffies + 3 * DRM_HZ; @@ -365,7 +359,7 @@ int drm_fence_object_wait(drm_device_t * dev, return ret; } } - if (drm_fence_object_signaled(fence, mask)) + if (drm_fence_object_signaled(dev, fence, mask, 0)) return 0; /* @@ -377,7 +371,7 @@ int drm_fence_object_wait(drm_device_t * dev, #endif do { schedule(); - signaled = fence_signaled(dev, fence, mask, 1); + signaled = drm_fence_object_signaled(dev, fence, mask, 1); } while (!signaled && !time_after_eq(jiffies, _end)); if (!signaled) diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index 03ea927e..42c8e536 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -198,7 +198,9 @@ extern void drm_fence_flush_old(struct drm_device *dev, uint32_t class, uint32_t sequence); extern int drm_fence_object_flush(struct drm_device *dev, drm_fence_object_t * fence, uint32_t type); -extern int drm_fence_object_signaled(drm_fence_object_t * fence, uint32_t type); +extern int drm_fence_object_signaled(struct drm_device *dev, + drm_fence_object_t * fence, + uint32_t type, int flush); extern void drm_fence_usage_deref_locked(struct drm_device *dev, drm_fence_object_t * fence); extern void drm_fence_usage_deref_unlocked(struct drm_device *dev, From 2407ce57de36470e767ebc1800cbbec74cab0ae4 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Wed, 13 Jun 2007 15:59:28 +0200 Subject: [PATCH 010/437] Fix drmMMUnlock / drmMMLock return values. --- libdrm/xf86drm.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/libdrm/xf86drm.c b/libdrm/xf86drm.c index 1f242fe3..93185512 100644 --- a/libdrm/xf86drm.c +++ b/libdrm/xf86drm.c @@ -3260,7 +3260,8 @@ int drmMMLock(int fd, unsigned memType) do{ ret = ioctl(fd, DRM_IOCTL_MM_INIT, &arg); } while (ret && errno == EAGAIN); - return -errno; + + return (ret) ? -errno : 0; } int drmMMUnlock(int fd, unsigned memType) @@ -3275,7 +3276,8 @@ int drmMMUnlock(int fd, unsigned memType) do{ ret = ioctl(fd, DRM_IOCTL_MM_INIT, &arg); } while (ret && errno == EAGAIN); - return -errno; + + return (ret) ? -errno : 0; } #define DRM_MAX_FDS 16 From e1b8eabeee354822fc0a413dd097210b621eb73a Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Thu, 14 Jun 2007 11:52:38 +0200 Subject: [PATCH 011/437] Locking fixes and instrumentation. 
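The instrumentation part is a lock-held assertion spread through the buffer-object and user-object paths. A sketch of the contract it enforces, assuming the DRM_ASSERT_LOCKED() macro added at the end of this patch; the body mirrors drm_bo_add_to_pinned_lru() as instrumented here and is shown only to illustrate the pattern:

    /* Callers must already hold dev->struct_mutex and bo->mutex; with
     * CONFIG_DEBUG_MUTEXES the assertion BUGs if the mutex is unlocked or
     * owned by another thread, otherwise it compiles away to nothing. */
    static void add_to_pinned_lru(drm_buffer_object_t *bo)
    {
        drm_mem_type_manager_t *man;

        DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
        DRM_ASSERT_LOCKED(&bo->mutex);

        man = &bo->dev->bm.man[bo->pinned_mem_type];
        list_add_tail(&bo->pinned_lru, &man->pinned);
    }
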
--- linux-core/drm_bo.c | 31 +++++++++++++++++++++++++++++++ linux-core/drm_fence.c | 23 +++++++++++------------ linux-core/drm_object.c | 9 +++++++++ linux-core/drm_objects.h | 8 ++++++++ 4 files changed, 59 insertions(+), 12 deletions(-) diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 1c0eebd0..b9a261d5 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -67,6 +67,9 @@ void drm_bo_add_to_pinned_lru(drm_buffer_object_t * bo) { drm_mem_type_manager_t *man; + DRM_ASSERT_LOCKED(&bo->dev->struct_mutex); + DRM_ASSERT_LOCKED(&bo->mutex); + man = &bo->dev->bm.man[bo->pinned_mem_type]; list_add_tail(&bo->pinned_lru, &man->pinned); } @@ -75,6 +78,8 @@ void drm_bo_add_to_lru(drm_buffer_object_t * bo) { drm_mem_type_manager_t *man; + DRM_ASSERT_LOCKED(&bo->dev->struct_mutex); + if (!(bo->mem.mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT)) || bo->mem.mem_type != bo->pinned_mem_type) { man = &bo->dev->bm.man[bo->mem.mem_type]; @@ -134,6 +139,8 @@ static int drm_bo_add_ttm(drm_buffer_object_t * bo) int ret = 0; bo->ttm = NULL; + DRM_ASSERT_LOCKED(&bo->mutex); + switch (bo->type) { case drm_bo_type_dc: bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT); @@ -266,6 +273,8 @@ int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals, drm_fence_object_t *fence = bo->fence; int ret; + DRM_ASSERT_LOCKED(&bo->mutex); + if (fence) { drm_device_t *dev = bo->dev; if (drm_fence_object_signaled(dev, fence, bo->fence_type, 0)) { @@ -331,6 +340,8 @@ static void drm_bo_cleanup_refs(drm_buffer_object_t * bo, int remove_all) drm_device_t *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; + DRM_ASSERT_LOCKED(&dev->struct_mutex); + atomic_inc(&bo->usage); mutex_unlock(&dev->struct_mutex); mutex_lock(&bo->mutex); @@ -393,6 +404,8 @@ static void drm_bo_destroy_locked(drm_buffer_object_t * bo) drm_device_t *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; + DRM_ASSERT_LOCKED(&dev->struct_mutex); + if (list_empty(&bo->lru) && bo->mem.mm_node == NULL && list_empty(&bo->pinned_lru) && bo->pinned_node == NULL && list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) { @@ -415,6 +428,7 @@ static void drm_bo_destroy_locked(drm_buffer_object_t * bo) atomic_dec(&bm->count); + BUG_ON(!list_empty(&bo->base.list)); drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ); return; @@ -491,6 +505,8 @@ static void drm_bo_delayed_workqueue(struct work_struct *work) void drm_bo_usage_deref_locked(drm_buffer_object_t * bo) { + DRM_ASSERT_LOCKED(&bo->dev->struct_mutex); + if (atomic_dec_and_test(&bo->usage)) { drm_bo_destroy_locked(bo); } @@ -501,6 +517,8 @@ static void drm_bo_base_deref_locked(drm_file_t * priv, drm_user_object_t * uo) drm_buffer_object_t *bo = drm_user_object_entry(uo, drm_buffer_object_t, base); + DRM_ASSERT_LOCKED(&bo->dev->struct_mutex); + drm_bo_takedown_vm_locked(bo); drm_bo_usage_deref_locked(bo); } @@ -1462,11 +1480,14 @@ static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle, uint32_t flags, uint32_t mask, uint32_t hint, drm_bo_arg_reply_t * rep) { + struct drm_device *dev = priv->head->dev; drm_buffer_object_t *bo; int ret; int no_wait = hint & DRM_BO_HINT_DONT_BLOCK; + mutex_lock(&dev->struct_mutex); bo = drm_lookup_buffer_object(priv, handle, 1); + mutex_unlock(&dev->struct_mutex); if (!bo) { return -EINVAL; } @@ -1498,9 +1519,13 @@ static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle, static int drm_bo_handle_info(drm_file_t * priv, uint32_t handle, drm_bo_arg_reply_t * rep) { + struct drm_device *dev = priv->head->dev; 
drm_buffer_object_t *bo; + mutex_lock(&dev->struct_mutex); bo = drm_lookup_buffer_object(priv, handle, 1); + mutex_unlock(&dev->struct_mutex); + if (!bo) { return -EINVAL; } @@ -1520,7 +1545,11 @@ static int drm_bo_handle_wait(drm_file_t * priv, uint32_t handle, int no_wait = hint & DRM_BO_HINT_DONT_BLOCK; int ret; + struct drm_device *dev = priv->head->dev; + mutex_lock(&dev->struct_mutex); bo = drm_lookup_buffer_object(priv, handle, 1); + mutex_unlock(&dev->struct_mutex); + if (!bo) { return -EINVAL; } @@ -2319,6 +2348,7 @@ static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo) drm_local_map_t *map; drm_device_t *dev = bo->dev; + DRM_ASSERT_LOCKED(&dev->struct_mutex); if (list->user_token) { drm_ht_remove_item(&dev->map_hash, &list->hash); list->user_token = 0; @@ -2344,6 +2374,7 @@ static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo) drm_local_map_t *map; drm_device_t *dev = bo->dev; + DRM_ASSERT_LOCKED(&dev->struct_mutex); list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ); if (!list->map) return -ENOMEM; diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c index 70baad9f..b5fc2235 100644 --- a/linux-core/drm_fence.c +++ b/linux-core/drm_fence.c @@ -129,11 +129,14 @@ void drm_fence_usage_deref_locked(drm_device_t * dev, { drm_fence_manager_t *fm = &dev->fm; + DRM_ASSERT_LOCKED(&dev->struct_mutex); + if (atomic_dec_and_test(&fence->usage)) { drm_fence_unring(dev, &fence->ring); DRM_DEBUG("Destroyed a fence object 0x%08lx\n", fence->base.hash.key); atomic_dec(&fm->count); + BUG_ON(!list_empty(&fence->base.list)); drm_ctl_free(fence, sizeof(*fence), DRM_MEM_FENCE); } } @@ -148,6 +151,7 @@ void drm_fence_usage_deref_unlocked(drm_device_t * dev, if (atomic_read(&fence->usage) == 0) { drm_fence_unring(dev, &fence->ring); atomic_dec(&fm->count); + BUG_ON(!list_empty(&fence->base.list)); drm_ctl_free(fence, sizeof(*fence), DRM_MEM_FENCE); } mutex_unlock(&dev->struct_mutex); @@ -448,15 +452,16 @@ int drm_fence_add_user_object(drm_file_t * priv, drm_fence_object_t * fence, mutex_lock(&dev->struct_mutex); ret = drm_add_user_object(priv, &fence->base, shareable); - mutex_unlock(&dev->struct_mutex); if (ret) - return ret; + goto out; + atomic_inc(&fence->usage); fence->base.type = drm_fence_type; fence->base.remove = &drm_fence_object_destroy; DRM_DEBUG("Fence 0x%08lx created\n", fence->base.hash.key); - return 0; +out: + mutex_unlock(&dev->struct_mutex); + return ret; } - EXPORT_SYMBOL(drm_fence_add_user_object); int drm_fence_object_create(drm_device_t * dev, uint32_t class, uint32_t type, @@ -466,7 +471,7 @@ int drm_fence_object_create(drm_device_t * dev, uint32_t class, uint32_t type, int ret; drm_fence_manager_t *fm = &dev->fm; - fence = drm_ctl_alloc(sizeof(*fence), DRM_MEM_FENCE); + fence = drm_ctl_calloc(1, sizeof(*fence), DRM_MEM_FENCE); if (!fence) return -ENOMEM; ret = drm_fence_object_init(dev, class, type, flags, fence); @@ -566,13 +571,8 @@ int drm_fence_ioctl(DRM_IOCTL_ARGS) drm_fence_usage_deref_unlocked(dev, fence); return ret; } - - /* - * usage > 0. 
No need to lock dev->struct_mutex; - */ - - atomic_inc(&fence->usage); arg.handle = fence->base.hash.key; + break; case drm_fence_destroy: mutex_lock(&dev->struct_mutex); @@ -637,7 +637,6 @@ int drm_fence_ioctl(DRM_IOCTL_ARGS) DRM_FENCE_FLAG_SHAREABLE); if (ret) return ret; - atomic_inc(&fence->usage); arg.handle = fence->base.hash.key; break; default: diff --git a/linux-core/drm_object.c b/linux-core/drm_object.c index 03906034..567a7d2b 100644 --- a/linux-core/drm_object.c +++ b/linux-core/drm_object.c @@ -36,6 +36,8 @@ int drm_add_user_object(drm_file_t * priv, drm_user_object_t * item, drm_device_t *dev = priv->head->dev; int ret; + DRM_ASSERT_LOCKED(&dev->struct_mutex); + atomic_set(&item->refcount, 1); item->shareable = shareable; item->owner = priv; @@ -56,6 +58,8 @@ drm_user_object_t *drm_lookup_user_object(drm_file_t * priv, uint32_t key) int ret; drm_user_object_t *item; + DRM_ASSERT_LOCKED(&dev->struct_mutex); + ret = drm_ht_find_item(&dev->object_hash, key, &hash); if (ret) { return NULL; @@ -88,6 +92,8 @@ static void drm_deref_user_object(drm_file_t * priv, drm_user_object_t * item) int drm_remove_user_object(drm_file_t * priv, drm_user_object_t * item) { + DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex); + if (item->owner != priv) { DRM_ERROR("Cannot destroy object not owned by you.\n"); return -EINVAL; @@ -125,6 +131,7 @@ int drm_add_ref_object(drm_file_t * priv, drm_user_object_t * referenced_object, drm_ref_object_t *item; drm_open_hash_t *ht = &priv->refd_object_hash[ref_action]; + DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex); if (!referenced_object->shareable && priv != referenced_object->owner) { DRM_ERROR("Not allowed to reference this object\n"); return -EINVAL; @@ -181,6 +188,7 @@ drm_ref_object_t *drm_lookup_ref_object(drm_file_t * priv, drm_hash_item_t *hash; int ret; + DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex); ret = drm_ht_find_item(&priv->refd_object_hash[ref_action], (unsigned long)referenced_object, &hash); if (ret) @@ -213,6 +221,7 @@ void drm_remove_ref_object(drm_file_t * priv, drm_ref_object_t * item) drm_open_hash_t *ht = &priv->refd_object_hash[item->unref_action]; drm_ref_t unref_action; + DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex); unref_action = item->unref_action; if (atomic_dec_and_test(&item->refcount)) { ret = drm_ht_remove_item(ht, &item->hash); diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index 42c8e536..59c8902d 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -473,4 +473,12 @@ extern int drm_bo_move_accel_cleanup(drm_buffer_object_t * bo, uint32_t fence_flags, drm_bo_mem_reg_t * new_mem); +#ifdef CONFIG_DEBUG_MUTEXES +#define DRM_ASSERT_LOCKED(_mutex) \ + BUG_ON(!mutex_is_locked(_mutex) || \ + ((_mutex)->owner != current_thread_info())) +#else +#define DRM_ASSERT_LOCKED(_mutex) +#endif + #endif From d34b2c7b9e108766b1d67cd23b8f7ecc77835ac7 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Fri, 15 Jun 2007 10:21:31 +0200 Subject: [PATCH 012/437] Fix refcounting / lock race. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reported by Steve Wilkins / Michel Dänzer. 
--- linux-core/drm_bo.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index b9a261d5..2a16ba5a 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -618,6 +618,7 @@ int drm_fence_buffer_objects(drm_file_t * priv, if (entry->fence) drm_fence_usage_deref_locked(dev, entry->fence); entry->fence = fence; + atomic_inc(&fence->usage); DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); DRM_WAKEUP(&entry->event_queue); @@ -627,7 +628,6 @@ int drm_fence_buffer_objects(drm_file_t * priv, drm_bo_usage_deref_locked(entry); l = f_list.next; } - atomic_add(count, &fence->usage); DRM_DEBUG("Fenced %d buffers\n", count); out: mutex_unlock(&dev->struct_mutex); From 3ee31a1f356df4b81e3ba226a416627fd3b70e07 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Fri, 15 Jun 2007 10:31:32 +0200 Subject: [PATCH 013/437] Indentation fixes. --- linux-core/drm_bo.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 2a16ba5a..f1ca0b44 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1480,7 +1480,7 @@ static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle, uint32_t flags, uint32_t mask, uint32_t hint, drm_bo_arg_reply_t * rep) { - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = priv->head->dev; drm_buffer_object_t *bo; int ret; int no_wait = hint & DRM_BO_HINT_DONT_BLOCK; @@ -1519,7 +1519,7 @@ static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle, static int drm_bo_handle_info(drm_file_t * priv, uint32_t handle, drm_bo_arg_reply_t * rep) { - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = priv->head->dev; drm_buffer_object_t *bo; mutex_lock(&dev->struct_mutex); @@ -1541,11 +1541,11 @@ static int drm_bo_handle_info(drm_file_t * priv, uint32_t handle, static int drm_bo_handle_wait(drm_file_t * priv, uint32_t handle, uint32_t hint, drm_bo_arg_reply_t * rep) { + struct drm_device *dev = priv->head->dev; drm_buffer_object_t *bo; int no_wait = hint & DRM_BO_HINT_DONT_BLOCK; int ret; - struct drm_device *dev = priv->head->dev; mutex_lock(&dev->struct_mutex); bo = drm_lookup_buffer_object(priv, handle, 1); mutex_unlock(&dev->struct_mutex); From 84bea383538df83c049680497ba2179e50d07ca3 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Fri, 15 Jun 2007 10:35:52 +0200 Subject: [PATCH 014/437] Fix i915 sequence mask. --- linux-core/i915_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/linux-core/i915_drv.c b/linux-core/i915_drv.c index 7fdb0839..4c35b4c3 100644 --- a/linux-core/i915_drv.c +++ b/linux-core/i915_drv.c @@ -43,7 +43,7 @@ static drm_fence_driver_t i915_fence_driver = { .num_classes = 1, .wrap_diff = (1 << 30), .flush_diff = (1 << 29), - .sequence_mask = 0xffffffffU, + .sequence_mask = 0x7fffffffU, .lazy_capable = 1, .emit = i915_fence_emit_sequence, .poke_flush = i915_poke_flush, From 3d5d41fa9823cf44138c8f4bc954bca80539d74e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michel=20D=C3=A4nzer?= Date: Fri, 15 Jun 2007 17:13:11 +0200 Subject: [PATCH 015/437] i915: Fix handling of breadcrumb counter wraparounds. 
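The fix below treats breadcrumb differences as modular arithmetic in BREADCRUMB_BITS, so a small forward step still compares as "ahead" even after the raw counter wraps. A worked sketch of the comparison, using the BREADCRUMB_BITS/BREADCRUMB_MASK values this patch adds; WRAP_DIFF is a stand-in name for the driver's wrap_diff and the example numbers are invented:

    #include <stdint.h>

    #define BREADCRUMB_BITS 31
    #define BREADCRUMB_MASK ((1U << BREADCRUMB_BITS) - 1)
    #define WRAP_DIFF       (1U << (BREADCRUMB_BITS - 1))

    /* Nonzero when 'sequence' is ahead of 'last', wrap-safe. */
    static int breadcrumb_advanced(uint32_t sequence, uint32_t last)
    {
        uint32_t diff = (sequence - last) & BREADCRUMB_MASK;

        return diff != 0 && diff < WRAP_DIFF;
    }

    /* Example: last = 0x7ffffffe, then the counter wraps back to 1 and
     * later reaches sequence = 5.  Unmasked, 5 - 0x7ffffffe = 0x80000007,
     * which is >= WRAP_DIFF, so the flush would wrongly be skipped; with
     * the mask, diff = 7 and the new breadcrumb is handled. */
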
--- linux-core/i915_drv.c | 6 +++--- linux-core/i915_fence.c | 2 +- shared-core/i915_dma.c | 8 +++++--- shared-core/i915_drv.h | 3 +++ 4 files changed, 12 insertions(+), 7 deletions(-) diff --git a/linux-core/i915_drv.c b/linux-core/i915_drv.c index 4c35b4c3..49437066 100644 --- a/linux-core/i915_drv.c +++ b/linux-core/i915_drv.c @@ -41,9 +41,9 @@ static struct pci_device_id pciidlist[] = { #ifdef I915_HAVE_FENCE static drm_fence_driver_t i915_fence_driver = { .num_classes = 1, - .wrap_diff = (1 << 30), - .flush_diff = (1 << 29), - .sequence_mask = 0x7fffffffU, + .wrap_diff = (1U << (BREADCRUMB_BITS - 1)), + .flush_diff = (1U << (BREADCRUMB_BITS - 2)), + .sequence_mask = BREADCRUMB_MASK, .lazy_capable = 1, .emit = i915_fence_emit_sequence, .poke_flush = i915_poke_flush, diff --git a/linux-core/i915_fence.c b/linux-core/i915_fence.c index 88daa57c..00873485 100644 --- a/linux-core/i915_fence.c +++ b/linux-core/i915_fence.c @@ -61,7 +61,7 @@ static void i915_perform_flush(drm_device_t * dev) * First update fences with the current breadcrumb. */ - diff = sequence - fc->last_exe_flush; + diff = (sequence - fc->last_exe_flush) & BREADCRUMB_MASK; if (diff < driver->wrap_diff && diff != 0) { drm_fence_handler(dev, 0, sequence, DRM_FENCE_TYPE_EXE); } diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c index ebb184cc..dbc5f959 100644 --- a/shared-core/i915_dma.c +++ b/shared-core/i915_dma.c @@ -445,10 +445,12 @@ void i915_emit_breadcrumb(drm_device_t *dev) drm_i915_private_t *dev_priv = dev->dev_private; RING_LOCALS; - dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter; + if (++dev_priv->counter > BREADCRUMB_MASK) { + dev_priv->counter = 1; + DRM_DEBUG("Breadcrumb counter wrapped around\n"); + } - if (dev_priv->counter > 0x7FFFFFFFUL) - dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1; + dev_priv->sarea_priv->last_enqueue = dev_priv->counter; BEGIN_LP_RING(4); OUT_RING(CMD_STORE_DWORD_IDX); diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h index 9deee8ec..e0432996 100644 --- a/shared-core/i915_drv.h +++ b/shared-core/i915_drv.h @@ -363,6 +363,9 @@ extern int i915_wait_ring(drm_device_t * dev, int n, const char *caller); #define CMD_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1) +#define BREADCRUMB_BITS 31 +#define BREADCRUMB_MASK ((1U << BREADCRUMB_BITS) - 1) + #define READ_BREADCRUMB(dev_priv) (((volatile u32*)(dev_priv->hw_status_page))[5]) #define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg]) #endif From 638ebbab54a48004c2e1d9cc5498e1dec976911e Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Mon, 18 Jun 2007 12:45:20 +1000 Subject: [PATCH 016/437] fix radeon setparam on 32/64 systems, harder. Commit 9b01bd5b284bbf519b726b39f1352023cb5e9e69 introduced a compat_ioctl handler for RADEON_SETPARAM, the sole purpose of which was to handle the fact that on i386, alignof(uint64_t)==4. Unfortunately, this handler was installed for _all_ 64-bit architectures, instead of only x86_64 and ia64. And thus it breaks 32-bit compatibility on every other arch, where 64-bit integers are aligned to 8 bytes in 32-bit mode just the same as in 64-bit mode. Arnd has a cunning plan to use 'compat_u64' with appropriate alignment attributes according to the 32-bit ABI, but for now let's just make the compat_radeon_cp_setparam routine entirely disappear on 64-bit machines whose 32-bit compat support isn't for i386. It would be a no-op with compat_u64 anyway. 
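Concretely, the breakage is a structure-layout difference: only in i386 code does a u64 member pack directly against the preceding int. A layout illustration, assuming the usual ABI alignment rules; the struct names are invented and the offsets are not measured from this tree:

    /* What a 32-bit caller hands the kernel for RADEON_SETPARAM. */

    struct setparam_from_i386 {          /* 12 bytes                         */
        int param;                       /* offset 0                         */
        unsigned long long value;        /* offset 4: alignof(u64) == 4      */
    } __attribute__((packed));           /* packed mimics the i386 layout    */

    struct setparam_from_other_32bit {   /* e.g. 32-bit PowerPC/ARM: 16 bytes */
        int param;                       /* offset 0, then 4 bytes padding   */
        unsigned long long value;        /* offset 8: matches the kernel's   */
    };                                   /* native 64-bit layout already     */

With compat_u64 carrying each architecture's own 32-bit alignment, one compat struct would describe both cases, which is why the conversion would be a no-op everywhere except x86_64 and ia64.
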
Signed-off-by: David Woodhouse --- linux-core/radeon_ioc32.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/linux-core/radeon_ioc32.c b/linux-core/radeon_ioc32.c index 1be50bd9..bc8aa35a 100644 --- a/linux-core/radeon_ioc32.c +++ b/linux-core/radeon_ioc32.c @@ -349,6 +349,8 @@ static int compat_radeon_irq_emit(struct file *file, unsigned int cmd, DRM_IOCTL_RADEON_IRQ_EMIT, (unsigned long) request); } +/* The two 64-bit arches where alignof(u64)==4 in 32-bit code */ +#if defined (CONFIG_X86_64) || defined(CONFIG_IA64) typedef struct drm_radeon_setparam32 { int param; u64 value; @@ -373,7 +375,9 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd, return drm_ioctl(file->f_dentry->d_inode, file, DRM_IOCTL_RADEON_SETPARAM, (unsigned long) request); } - +#else +#define compat_radeon_cp_setparam NULL +#endif /* X86_64 || IA64 */ drm_ioctl_compat_t *radeon_compat_ioctls[] = { [DRM_RADEON_CP_INIT] = compat_radeon_cp_init, From 8038e7b60f62e51b7f134141fd58f334eec31a10 Mon Sep 17 00:00:00 2001 From: Oliver McFadden Date: Mon, 18 Jun 2007 08:36:50 +0000 Subject: [PATCH 017/437] r300: Synchronized the register defines file again. --- shared-core/r300_reg.h | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/shared-core/r300_reg.h b/shared-core/r300_reg.h index 0a31f0b9..3ce09c16 100644 --- a/shared-core/r300_reg.h +++ b/shared-core/r300_reg.h @@ -116,6 +116,8 @@ USE OR OTHER DEALINGS IN THE SOFTWARE. # define R300_VAP_OUTPUT_VTX_FMT_0__PT_SIZE_PRESENT (1<<16) /* GUESS */ #define R300_VAP_OUTPUT_VTX_FMT_1 0x2094 + /* each of the following is 3 bits wide, specifies number + of components */ # define R300_VAP_OUTPUT_VTX_FMT_1__TEX_0_COMP_CNT_SHIFT 0 # define R300_VAP_OUTPUT_VTX_FMT_1__TEX_1_COMP_CNT_SHIFT 3 # define R300_VAP_OUTPUT_VTX_FMT_1__TEX_2_COMP_CNT_SHIFT 6 @@ -299,6 +301,18 @@ USE OR OTHER DEALINGS IN THE SOFTWARE. # define R300_221C_NORMAL 0x00000000 # define R300_221C_CLEAR 0x0001C000 +/* These seem to be per-pixel and per-vertex X and Y clipping planes. The first + * plane is per-pixel and the second plane is per-vertex. + * + * This was determined by experimentation alone but I believe it is correct. + * + * These registers are called X_QUAD0_1_FL to X_QUAD0_4_FL by glxtest. + */ +#define R300_VAP_CLIP_X_0 0x2220 +#define R300_VAP_CLIP_X_1 0x2224 +#define R300_VAP_CLIP_Y_0 0x2228 +#define R300_VAP_CLIP_Y_1 0x2230 + /* gap */ /* Sometimes, END_OF_PKT and 0x2284=0 are the only commands sent between @@ -967,7 +981,6 @@ USE OR OTHER DEALINGS IN THE SOFTWARE. * first node is stored in NODE_2, the second node is stored in NODE_3. * * Offsets are relative to the master offset from PFS_CNTL_2. - * LAST_NODE is set for the last node, and only for the last node. */ #define R300_PFS_NODE_0 0x4610 #define R300_PFS_NODE_1 0x4614 @@ -981,7 +994,6 @@ USE OR OTHER DEALINGS IN THE SOFTWARE. # define R300_PFS_NODE_TEX_OFFSET_MASK (31 << 12) # define R300_PFS_NODE_TEX_END_SHIFT 17 # define R300_PFS_NODE_TEX_END_MASK (31 << 17) -/*# define R300_PFS_NODE_LAST_NODE (1 << 22) */ # define R300_PFS_NODE_OUTPUT_COLOR (1 << 22) # define R300_PFS_NODE_OUTPUT_DEPTH (1 << 23) @@ -1591,6 +1603,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE. 
# define R300_EB_UNK1_SHIFT 24 # define R300_EB_UNK1 (0x80<<24) # define R300_EB_UNK2 0x0810 +#define R300_PACKET3_3D_DRAW_VBUF_2 0x00003400 #define R300_PACKET3_3D_DRAW_INDX_2 0x00003600 /* END: Packet 3 commands */ From 215787e4297ed4f6364bcc98869a347fc4cad00d Mon Sep 17 00:00:00 2001 From: Oliver McFadden Date: Mon, 18 Jun 2007 08:42:46 +0000 Subject: [PATCH 018/437] r300: Registers 0x2220-0x2230 are known as R300_VAP_CLIP_X_0-R300_VAP_CLIP_Y_1. --- shared-core/r300_cmdbuf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/shared-core/r300_cmdbuf.c b/shared-core/r300_cmdbuf.c index d3c52d43..c02334a2 100644 --- a/shared-core/r300_cmdbuf.c +++ b/shared-core/r300_cmdbuf.c @@ -155,7 +155,7 @@ void r300_init_reg_flags(void) ADD_RANGE(R300_VAP_INPUT_CNTL_0, 2); ADD_RANGE(0x21DC, 1); ADD_RANGE(R300_VAP_UNKNOWN_221C, 1); - ADD_RANGE(0x2220, 4); + ADD_RANGE(R300_VAP_CLIP_X_0, 4); ADD_RANGE(R300_VAP_UNKNOWN_2288, 1); ADD_RANGE(R300_VAP_OUTPUT_VTX_FMT_0, 2); ADD_RANGE(R300_VAP_PVS_CNTL_1, 3); From 213732af4381819113756d6d920794cf0dc30dd6 Mon Sep 17 00:00:00 2001 From: Oliver McFadden Date: Thu, 21 Jun 2007 14:32:58 +0000 Subject: [PATCH 019/437] r300: Allow writes to R300_VAP_PVS_WAITIDLE. --- shared-core/r300_cmdbuf.c | 1 + 1 file changed, 1 insertion(+) diff --git a/shared-core/r300_cmdbuf.c b/shared-core/r300_cmdbuf.c index c02334a2..0cd5d7e2 100644 --- a/shared-core/r300_cmdbuf.c +++ b/shared-core/r300_cmdbuf.c @@ -156,6 +156,7 @@ void r300_init_reg_flags(void) ADD_RANGE(0x21DC, 1); ADD_RANGE(R300_VAP_UNKNOWN_221C, 1); ADD_RANGE(R300_VAP_CLIP_X_0, 4); + ADD_RANGE(R300_VAP_PVS_WAITIDLE, 1); ADD_RANGE(R300_VAP_UNKNOWN_2288, 1); ADD_RANGE(R300_VAP_OUTPUT_VTX_FMT_0, 2); ADD_RANGE(R300_VAP_PVS_CNTL_1, 3); From 40f6a696cb22ffa064f78198a7a241015d365967 Mon Sep 17 00:00:00 2001 From: Oliver McFadden Date: Thu, 21 Jun 2007 14:35:11 +0000 Subject: [PATCH 020/437] r300: Synchronized the register defines file; documentation changes. --- shared-core/r300_reg.h | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/shared-core/r300_reg.h b/shared-core/r300_reg.h index 3ce09c16..e59919be 100644 --- a/shared-core/r300_reg.h +++ b/shared-core/r300_reg.h @@ -336,13 +336,15 @@ USE OR OTHER DEALINGS IN THE SOFTWARE. * The meaning of the two UNKNOWN fields is obviously not known. However, * experiments so far have shown that both *must* point to an instruction * inside the vertex program, otherwise the GPU locks up. + * * fglrx usually sets CNTL_3_UNKNOWN to the end of the program and - * CNTL_1_UNKNOWN points to instruction where last write to position takes - * place. + * R300_PVS_CNTL_1_POS_END_SHIFT points to instruction where last write to + * position takes place. + * * Most likely this is used to ignore rest of the program in cases * where group of verts arent visible. For some reason this "section" * is sometimes accepted other instruction that have no relationship with - *position calculations. + * position calculations. */ #define R300_VAP_PVS_CNTL_1 0x22D0 # define R300_PVS_CNTL_1_PROGRAM_START_SHIFT 0 From 068ffc1e1bf5607f836839a1fc621a95547251e3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michel=20D=C3=A4nzer?= Date: Fri, 22 Jun 2007 11:55:26 +0200 Subject: [PATCH 021/437] radeon: Acknowledge all interrupts we're interested in. Failure to do so was probably the root cause of fd.o bug 11287. 
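The handler change below splits "acknowledge" from "handle": every source the DRM is interested in is acked, and only then is the status filtered against the interrupts currently enabled. A condensed sketch of that ordering, with the register bits taken from the patch (not a drop-in replacement for the handler):

    stat = radeon_acknowledge_irqs(dev_priv, RADEON_SW_INT_TEST_ACK |
                                             RADEON_CRTC_VBLANK_STAT |
                                             RADEON_CRTC2_VBLANK_STAT);
    if (!stat)
        return IRQ_NONE;

    stat &= dev_priv->irq_enable_reg;    /* act only on enabled sources */
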
--- shared-core/radeon_irq.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/shared-core/radeon_irq.c b/shared-core/radeon_irq.c index 5151b4d6..a4be86e3 100644 --- a/shared-core/radeon_irq.c +++ b/shared-core/radeon_irq.c @@ -72,10 +72,14 @@ irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS) /* Only consider the bits we're interested in - others could be used * outside the DRM */ - stat = radeon_acknowledge_irqs(dev_priv, dev_priv->irq_enable_reg); + stat = radeon_acknowledge_irqs(dev_priv, (RADEON_SW_INT_TEST_ACK | + RADEON_CRTC_VBLANK_STAT | + RADEON_CRTC2_VBLANK_STAT)); if (!stat) return IRQ_NONE; + stat &= dev_priv->irq_enable_reg; + /* SW interrupt */ if (stat & RADEON_SW_INT_TEST) { DRM_WAKEUP(&dev_priv->swi_queue); @@ -265,7 +269,8 @@ void radeon_driver_irq_preinstall(drm_device_t * dev) /* Clear bits if they're already high */ radeon_acknowledge_irqs(dev_priv, (RADEON_SW_INT_TEST_ACK | - RADEON_CRTC_VBLANK_STAT)); + RADEON_CRTC_VBLANK_STAT | + RADEON_CRTC2_VBLANK_STAT)); } void radeon_driver_irq_postinstall(drm_device_t * dev) From 5c7c07fd49b154623f9dfdab1fe1f2cda8508036 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Sun, 24 Jun 2007 18:54:36 +1000 Subject: [PATCH 022/437] nouveau: rename engtab functions --- shared-core/nouveau_drv.h | 30 +++++----- shared-core/nouveau_state.c | 110 ++++++++++++++++++------------------ 2 files changed, 70 insertions(+), 70 deletions(-) diff --git a/shared-core/nouveau_drv.h b/shared-core/nouveau_drv.h index debee8e4..89b284c3 100644 --- a/shared-core/nouveau_drv.h +++ b/shared-core/nouveau_drv.h @@ -101,29 +101,29 @@ struct nouveau_config { struct nouveau_engine_func { struct { - int (*Init)(drm_device_t *dev); - void (*Takedown)(drm_device_t *dev); - } Mc; + int (*init)(drm_device_t *dev); + void (*takedown)(drm_device_t *dev); + } mc; struct { - int (*Init)(drm_device_t *dev); - void (*Takedown)(drm_device_t *dev); - } Timer; + int (*init)(drm_device_t *dev); + void (*takedown)(drm_device_t *dev); + } timer; struct { - int (*Init)(drm_device_t *dev); - void (*Takedown)(drm_device_t *dev); - } Fb; + int (*init)(drm_device_t *dev); + void (*takedown)(drm_device_t *dev); + } fb; struct { - int (*Init)(drm_device_t *dev); - void (*Takedown)(drm_device_t *dev); - } Graph; + int (*init)(drm_device_t *dev); + void (*takedown)(drm_device_t *dev); + } graph; struct { - int (*Init)(drm_device_t *dev); - void (*Takedown)(drm_device_t *dev); - } Fifo; + int (*init)(drm_device_t *dev); + void (*takedown)(drm_device_t *dev); + } fifo; }; typedef struct drm_nouveau_private { diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c index e7930b9e..592797c3 100644 --- a/shared-core/nouveau_state.c +++ b/shared-core/nouveau_state.c @@ -80,64 +80,64 @@ static int nouveau_init_engine_ptrs(drm_device_t *dev) switch (dev_priv->chipset & 0xf0) { case 0x00: - engine->Mc.Init = nv04_mc_init; - engine->Mc.Takedown = nv04_mc_takedown; - engine->Timer.Init = nv04_timer_init; - engine->Timer.Takedown = nv04_timer_takedown; - engine->Fb.Init = nv04_fb_init; - engine->Fb.Takedown = nv04_fb_takedown; - engine->Graph.Init = nv04_graph_init; - engine->Graph.Takedown = nv04_graph_takedown; - engine->Fifo.Init = nouveau_fifo_init; - engine->Fifo.Takedown = nouveau_stub_takedown; + engine->mc.init = nv04_mc_init; + engine->mc.takedown = nv04_mc_takedown; + engine->timer.init = nv04_timer_init; + engine->timer.takedown = nv04_timer_takedown; + engine->fb.init = nv04_fb_init; + engine->fb.takedown = nv04_fb_takedown; + engine->graph.init = 
nv04_graph_init; + engine->graph.takedown = nv04_graph_takedown; + engine->fifo.init = nouveau_fifo_init; + engine->fifo.takedown = nouveau_stub_takedown; break; case 0x10: - engine->Mc.Init = nv04_mc_init; - engine->Mc.Takedown = nv04_mc_takedown; - engine->Timer.Init = nv04_timer_init; - engine->Timer.Takedown = nv04_timer_takedown; - engine->Fb.Init = nv10_fb_init; - engine->Fb.Takedown = nv10_fb_takedown; - engine->Graph.Init = nv10_graph_init; - engine->Graph.Takedown = nv10_graph_takedown; - engine->Fifo.Init = nouveau_fifo_init; - engine->Fifo.Takedown = nouveau_stub_takedown; + engine->mc.init = nv04_mc_init; + engine->mc.takedown = nv04_mc_takedown; + engine->timer.init = nv04_timer_init; + engine->timer.takedown = nv04_timer_takedown; + engine->fb.init = nv10_fb_init; + engine->fb.takedown = nv10_fb_takedown; + engine->graph.init = nv10_graph_init; + engine->graph.takedown = nv10_graph_takedown; + engine->fifo.init = nouveau_fifo_init; + engine->fifo.takedown = nouveau_stub_takedown; break; case 0x20: - engine->Mc.Init = nv04_mc_init; - engine->Mc.Takedown = nv04_mc_takedown; - engine->Timer.Init = nv04_timer_init; - engine->Timer.Takedown = nv04_timer_takedown; - engine->Fb.Init = nv10_fb_init; - engine->Fb.Takedown = nv10_fb_takedown; - engine->Graph.Init = nv20_graph_init; - engine->Graph.Takedown = nv20_graph_takedown; - engine->Fifo.Init = nouveau_fifo_init; - engine->Fifo.Takedown = nouveau_stub_takedown; + engine->mc.init = nv04_mc_init; + engine->mc.takedown = nv04_mc_takedown; + engine->timer.init = nv04_timer_init; + engine->timer.takedown = nv04_timer_takedown; + engine->fb.init = nv10_fb_init; + engine->fb.takedown = nv10_fb_takedown; + engine->graph.init = nv20_graph_init; + engine->graph.takedown = nv20_graph_takedown; + engine->fifo.init = nouveau_fifo_init; + engine->fifo.takedown = nouveau_stub_takedown; break; case 0x30: - engine->Mc.Init = nv04_mc_init; - engine->Mc.Takedown = nv04_mc_takedown; - engine->Timer.Init = nv04_timer_init; - engine->Timer.Takedown = nv04_timer_takedown; - engine->Fb.Init = nv10_fb_init; - engine->Fb.Takedown = nv10_fb_takedown; - engine->Graph.Init = nv30_graph_init; - engine->Graph.Takedown = nv30_graph_takedown; - engine->Fifo.Init = nouveau_fifo_init; - engine->Fifo.Takedown = nouveau_stub_takedown; + engine->mc.init = nv04_mc_init; + engine->mc.takedown = nv04_mc_takedown; + engine->timer.init = nv04_timer_init; + engine->timer.takedown = nv04_timer_takedown; + engine->fb.init = nv10_fb_init; + engine->fb.takedown = nv10_fb_takedown; + engine->graph.init = nv30_graph_init; + engine->graph.takedown = nv30_graph_takedown; + engine->fifo.init = nouveau_fifo_init; + engine->fifo.takedown = nouveau_stub_takedown; break; case 0x40: - engine->Mc.Init = nv40_mc_init; - engine->Mc.Takedown = nv40_mc_takedown; - engine->Timer.Init = nv04_timer_init; - engine->Timer.Takedown = nv04_timer_takedown; - engine->Fb.Init = nv40_fb_init; - engine->Fb.Takedown = nv40_fb_takedown; - engine->Graph.Init = nv40_graph_init; - engine->Graph.Takedown = nv40_graph_takedown; - engine->Fifo.Init = nouveau_fifo_init; - engine->Fifo.Takedown = nouveau_stub_takedown; + engine->mc.init = nv40_mc_init; + engine->mc.takedown = nv40_mc_takedown; + engine->timer.init = nv04_timer_init; + engine->timer.takedown = nv04_timer_takedown; + engine->fb.init = nv40_fb_init; + engine->fb.takedown = nv40_fb_takedown; + engine->graph.init = nv40_graph_init; + engine->graph.takedown = nv40_graph_takedown; + engine->fifo.init = nouveau_fifo_init; + engine->fifo.takedown = 
nouveau_stub_takedown; break; case 0x50: default: @@ -184,23 +184,23 @@ static int nouveau_card_init(drm_device_t *dev) /* Parse BIOS tables / Run init tables? */ /* PMC */ - ret = engine->Mc.Init(dev); + ret = engine->mc.init(dev); if (ret) return ret; /* PTIMER */ - ret = engine->Timer.Init(dev); + ret = engine->timer.init(dev); if (ret) return ret; /* PFB */ - ret = engine->Fb.Init(dev); + ret = engine->fb.init(dev); if (ret) return ret; /* PGRAPH */ - ret = engine->Graph.Init(dev); + ret = engine->graph.init(dev); if (ret) return ret; /* PFIFO */ - ret = engine->Fifo.Init(dev); + ret = engine->fifo.init(dev); if (ret) return ret; /* what about PVIDEO/PCRTC/PRAMDAC etc? */ From 24b71c318a00dfbb18b2bbf6652e3b781175c430 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Sun, 24 Jun 2007 18:54:51 +1000 Subject: [PATCH 023/437] nouveau: prototype PFIFO/PGRAPH engtab API --- shared-core/nouveau_drv.h | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/shared-core/nouveau_drv.h b/shared-core/nouveau_drv.h index 89b284c3..093a93c5 100644 --- a/shared-core/nouveau_drv.h +++ b/shared-core/nouveau_drv.h @@ -116,13 +116,23 @@ struct nouveau_engine_func { } fb; struct { - int (*init)(drm_device_t *dev); - void (*takedown)(drm_device_t *dev); + int (*init)(drm_device_t *); + void (*takedown)(drm_device_t *); + + int (*create_context)(drm_device_t *, int channel); + void (*destroy_context)(drm_device_t *, int channel); + int (*load_context)(drm_device_t *, int channel); + int (*save_context)(drm_device_t *, int channel); } graph; struct { - int (*init)(drm_device_t *dev); - void (*takedown)(drm_device_t *dev); + int (*init)(drm_device_t *); + void (*takedown)(drm_device_t *); + + int (*create_context)(drm_device_t *, int channel); + void (*destroy_context)(drm_device_t *, int channel); + int (*load_context)(drm_device_t *, int channel); + int (*save_context)(drm_device_t *, int channel); } fifo; }; From 9dbf322d26642f9e671f144b34e7cd7d295e9b8e Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Sun, 24 Jun 2007 18:55:06 +1000 Subject: [PATCH 024/437] nouveau: (mostly) hook up put_base again --- shared-core/nouveau_drv.h | 1 + shared-core/nouveau_fifo.c | 21 ++++++++++----------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/shared-core/nouveau_drv.h b/shared-core/nouveau_drv.h index 093a93c5..da604d37 100644 --- a/shared-core/nouveau_drv.h +++ b/shared-core/nouveau_drv.h @@ -83,6 +83,7 @@ struct nouveau_fifo /* dma object for the command buffer itself */ struct mem_block *cmdbuf_mem; struct nouveau_object *cmdbuf_obj; + uint32_t pushbuf_base; /* PGRAPH context, for cards that keep it in RAMIN */ struct mem_block *ramin_grctx; /* objects belonging to this fifo */ diff --git a/shared-core/nouveau_fifo.c b/shared-core/nouveau_fifo.c index 8e66ca2e..cc4ff127 100644 --- a/shared-core/nouveau_fifo.c +++ b/shared-core/nouveau_fifo.c @@ -232,6 +232,7 @@ nouveau_fifo_cmdbuf_alloc(struct drm_device *dev, int channel) return DRM_ERR(ENOMEM); } + dev_priv->fifos[channel].pushbuf_base = 0; dev_priv->fifos[channel].cmdbuf_mem = cb; dev_priv->fifos[channel].cmdbuf_obj = cb_dma; return 0; @@ -460,7 +461,7 @@ static int nouveau_fifo_alloc(drm_device_t* dev, int *chan_ret, DRMFILE filp) { int ret; drm_nouveau_private_t *dev_priv = dev->dev_private; - struct nouveau_object *cb_obj; + struct nouveau_fifo *chan; int channel; /* @@ -478,14 +479,16 @@ static int nouveau_fifo_alloc(drm_device_t* dev, int *chan_ret, DRMFILE filp) if (channel==nouveau_fifo_number(dev)) return 
DRM_ERR(EINVAL); (*chan_ret) = channel; + chan = &dev_priv->fifos[channel]; DRM_INFO("Allocating FIFO number %d\n", channel); /* that fifo is used */ - dev_priv->fifos[channel].used = 1; - dev_priv->fifos[channel].filp = filp; + chan->used = 1; + chan->filp = filp; + /* FIFO has no objects yet */ - dev_priv->fifos[channel].objs = NULL; + chan->objs = NULL; /* allocate a command buffer, and create a dma object for the gpu */ ret = nouveau_fifo_cmdbuf_alloc(dev, channel); @@ -493,7 +496,6 @@ static int nouveau_fifo_alloc(drm_device_t* dev, int *chan_ret, DRMFILE filp) nouveau_fifo_free(dev, channel); return ret; } - cb_obj = dev_priv->fifos[channel].cmdbuf_obj; nouveau_wait_for_idle(dev); @@ -548,8 +550,8 @@ static int nouveau_fifo_alloc(drm_device_t* dev, int *chan_ret, DRMFILE filp) NV_WRITE(NV04_PFIFO_MODE,NV_READ(NV04_PFIFO_MODE)|(1<pushbuf_base); + NV_WRITE(NV03_FIFO_REGS_DMAGET(channel), chan->pushbuf_base); /* If this is the first channel, setup PFIFO ourselves. For any * other case, the GPU will handle this when it switches contexts. @@ -557,10 +559,8 @@ static int nouveau_fifo_alloc(drm_device_t* dev, int *chan_ret, DRMFILE filp) if (dev_priv->fifo_alloc_count == 0) { nouveau_fifo_context_restore(dev, channel); if (dev_priv->card_type >= NV_30) { - struct nouveau_fifo *chan; uint32_t inst; - chan = &dev_priv->fifos[channel]; inst = nouveau_chip_instance_get(dev, chan->ramin_grctx); @@ -679,8 +679,7 @@ static int nouveau_ioctl_fifo_alloc(DRM_IOCTL_ARGS) if (res) return res; - /* this should probably disappear in the next abi break? */ - init.put_base = 0; + init.put_base = dev_priv->fifos[init.channel].pushbuf_base; /* make the fifo available to user space */ /* first, the fifo control regs */ From 0afb3b518e1ece820b01f3eea64b25cff01c97bc Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Sun, 24 Jun 2007 18:55:23 +1000 Subject: [PATCH 025/437] nouveau: split PFIFO/PGRAPH context creation --- shared-core/nouveau_fifo.c | 38 +++++++++++++++++++++++++++++++------- 1 file changed, 31 insertions(+), 7 deletions(-) diff --git a/shared-core/nouveau_fifo.c b/shared-core/nouveau_fifo.c index cc4ff127..5bbd1c02 100644 --- a/shared-core/nouveau_fifo.c +++ b/shared-core/nouveau_fifo.c @@ -505,18 +505,16 @@ static int nouveau_fifo_alloc(drm_device_t* dev, int *chan_ret, DRMFILE filp) NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000000); NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000); - /* Construct inital RAMFC for new channel */ + /* Create a graphics context for new channel */ switch(dev_priv->card_type) { case NV_04: case NV_05: nv04_graph_context_create(dev, channel); - nouveau_nv04_context_init(dev, channel); break; case NV_10: case NV_17: nv10_graph_context_create(dev, channel); - nouveau_nv10_context_init(dev, channel); break; case NV_20: ret = nv20_graph_context_create(dev, channel); @@ -524,7 +522,6 @@ static int nouveau_fifo_alloc(drm_device_t* dev, int *chan_ret, DRMFILE filp) nouveau_fifo_free(dev, channel); return ret; } - nouveau_nv10_context_init(dev, channel); break; case NV_30: ret = nv30_graph_context_create(dev, channel); @@ -532,18 +529,45 @@ static int nouveau_fifo_alloc(drm_device_t* dev, int *chan_ret, DRMFILE filp) nouveau_fifo_free(dev, channel); return ret; } - nouveau_nv30_context_init(dev, channel); break; case NV_40: case NV_44: - case NV_50: ret = nv40_graph_context_create(dev, channel); if (ret) { nouveau_fifo_free(dev, channel); return ret; } - nouveau_nv40_context_init(dev, channel); break; + default: + DRM_ERROR("grctx: unknown card type\n"); + nouveau_fifo_free(dev, 
channel); + return DRM_ERR(EINVAL); + } + + /* Construct inital RAMFC for new channel */ + switch (dev_priv->card_type) { + case NV_04: + case NV_05: + nouveau_nv04_context_init(dev, channel); + break; + case NV_10: + case NV_17: + nouveau_nv10_context_init(dev, channel); + break; + case NV_20: + nouveau_nv10_context_init(dev, channel); + break; + case NV_30: + nouveau_nv30_context_init(dev, channel); + break; + case NV_40: + case NV_44: + nouveau_nv40_context_init(dev, channel); + break; + default: + DRM_ERROR("fifoctx: unknown card type\n"); + nouveau_fifo_free(dev, channel); + return DRM_ERR(EINVAL); } /* enable the fifo dma operation */ From f2e64d527699751d6b64698495ae1d48eeee6cf7 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Sun, 24 Jun 2007 18:56:01 +1000 Subject: [PATCH 026/437] nouveau: NV4X PFIFO engtab functions --- linux-core/Makefile.kernel | 1 + linux-core/nv40_fifo.c | 1 + shared-core/nouveau_drv.h | 10 +- shared-core/nouveau_fifo.c | 100 ++++++------------- shared-core/nouveau_reg.h | 4 +- shared-core/nouveau_state.c | 4 + shared-core/nv40_fifo.c | 193 ++++++++++++++++++++++++++++++++++++ 7 files changed, 237 insertions(+), 76 deletions(-) create mode 120000 linux-core/nv40_fifo.c create mode 100644 shared-core/nv40_fifo.c diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel index 6f5b021b..3e78b6d7 100644 --- a/linux-core/Makefile.kernel +++ b/linux-core/Makefile.kernel @@ -25,6 +25,7 @@ nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \ nv04_timer.o \ nv04_mc.o nv40_mc.o \ nv04_fb.o nv10_fb.o nv40_fb.o \ + nv40_fifo.o \ nv04_graph.o nv10_graph.o nv20_graph.o nv30_graph.o \ nv40_graph.o radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o diff --git a/linux-core/nv40_fifo.c b/linux-core/nv40_fifo.c new file mode 120000 index 00000000..cc71e7a4 --- /dev/null +++ b/linux-core/nv40_fifo.c @@ -0,0 +1 @@ +../shared-core/nv40_fifo.c \ No newline at end of file diff --git a/shared-core/nouveau_drv.h b/shared-core/nouveau_drv.h index da604d37..7c29a882 100644 --- a/shared-core/nouveau_drv.h +++ b/shared-core/nouveau_drv.h @@ -100,7 +100,7 @@ struct nouveau_config { } cmdbuf; }; -struct nouveau_engine_func { +typedef struct nouveau_engine_func { struct { int (*init)(drm_device_t *dev); void (*takedown)(drm_device_t *dev); @@ -135,7 +135,7 @@ struct nouveau_engine_func { int (*load_context)(drm_device_t *, int channel); int (*save_context)(drm_device_t *, int channel); } fifo; -}; +} nouveau_engine_func_t; typedef struct drm_nouveau_private { /* the card type, takes NV_* as values */ @@ -255,6 +255,12 @@ extern void nv10_fb_takedown(drm_device_t *dev); extern int nv40_fb_init(drm_device_t *dev); extern void nv40_fb_takedown(drm_device_t *dev); +/* nv40_fifo.c */ +extern int nv40_fifo_create_context(drm_device_t *, int channel); +extern void nv40_fifo_destroy_context(drm_device_t *, int channel); +extern int nv40_fifo_load_context(drm_device_t *, int channel); +extern int nv40_fifo_save_context(drm_device_t *, int channel); + /* nv04_graph.c */ extern void nouveau_nv04_context_switch(drm_device_t *dev); extern int nv04_graph_init(drm_device_t *dev); diff --git a/shared-core/nouveau_fifo.c b/shared-core/nouveau_fifo.c index 5bbd1c02..50f094b9 100644 --- a/shared-core/nouveau_fifo.c +++ b/shared-core/nouveau_fifo.c @@ -358,64 +358,6 @@ static void nouveau_nv10_context_save(drm_device_t *dev) #endif #undef RAMFC_WR -#define RAMFC_WR(offset, val) NV_WRITE(fifoctx + NV40_RAMFC_##offset, (val)) -static 
void nouveau_nv40_context_init(drm_device_t *dev, int channel) -{ - drm_nouveau_private_t *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = &dev_priv->fifos[channel]; - uint32_t fifoctx, cb_inst, grctx_inst; - int i; - - cb_inst = nouveau_chip_instance_get(dev, chan->cmdbuf_obj->instance); - grctx_inst = nouveau_chip_instance_get(dev, chan->ramin_grctx); - fifoctx = NV_RAMIN + dev_priv->ramfc_offset + channel*128; - for (i=0;i<128;i+=4) - NV_WRITE(fifoctx + i, 0); - - /* Fill entries that are seen filled in dumps of nvidia driver just - * after channel's is put into DMA mode - */ - RAMFC_WR(DMA_INSTANCE , cb_inst); - RAMFC_WR(DMA_FETCH , NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | - NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | - NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 | -#ifdef __BIG_ENDIAN - NV_PFIFO_CACHE1_BIG_ENDIAN | -#endif - 0x30000000 /* no idea.. */); - RAMFC_WR(GRCTX_INSTANCE, grctx_inst); - RAMFC_WR(DMA_TIMESLICE , 0x0001FFFF); -} - -static void nouveau_nv40_context_save(drm_device_t *dev) -{ - drm_nouveau_private_t *dev_priv = dev->dev_private; - uint32_t fifoctx; - int channel; - - channel = NV_READ(NV03_PFIFO_CACHE1_PUSH1) & (nouveau_fifo_number(dev)-1); - fifoctx = NV_RAMIN + dev_priv->ramfc_offset + channel*128; - - RAMFC_WR(DMA_PUT , NV_READ(NV04_PFIFO_CACHE1_DMA_PUT)); - RAMFC_WR(DMA_GET , NV_READ(NV04_PFIFO_CACHE1_DMA_GET)); - RAMFC_WR(REF_CNT , NV_READ(NV10_PFIFO_CACHE1_REF_CNT)); - RAMFC_WR(DMA_INSTANCE , NV_READ(NV04_PFIFO_CACHE1_DMA_INSTANCE)); - RAMFC_WR(DMA_DCOUNT , NV_READ(NV10_PFIFO_CACHE1_DMA_DCOUNT)); - RAMFC_WR(DMA_STATE , NV_READ(NV04_PFIFO_CACHE1_DMA_STATE)); - RAMFC_WR(DMA_FETCH , NV_READ(NV04_PFIFO_CACHE1_DMA_FETCH)); - RAMFC_WR(ENGINE , NV_READ(NV04_PFIFO_CACHE1_ENGINE)); - RAMFC_WR(PULL1_ENGINE , NV_READ(NV04_PFIFO_CACHE1_PULL1)); - RAMFC_WR(ACQUIRE_VALUE , NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_VALUE)); - RAMFC_WR(ACQUIRE_TIMESTAMP, NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP)); - RAMFC_WR(ACQUIRE_TIMEOUT , NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT)); - RAMFC_WR(SEMAPHORE , NV_READ(NV10_PFIFO_CACHE1_SEMAPHORE)); - RAMFC_WR(DMA_SUBROUTINE , NV_READ(NV04_PFIFO_CACHE1_DMA_GET)); - RAMFC_WR(GRCTX_INSTANCE , NV_READ(NV40_PFIFO_GRCTX_INSTANCE)); - RAMFC_WR(DMA_TIMESLICE , NV_READ(NV04_PFIFO_DMA_TIMESLICE) & 0x1FFFF); - RAMFC_WR(UNK_40 , NV_READ(NV40_PFIFO_UNK32E4)); -} -#undef RAMFC_WR - /* This function should load values from RAMFC into PFIFO, but for now * it just clobbers PFIFO with what nouveau_fifo_alloc used to setup * unconditionally. 
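The hunks that follow, together with the engine-table patches above, replace per-card-type switch statements with calls through a table of per-generation hooks. A rough sketch of that dispatch pattern is below; the type and function names are invented for the example and do not match the driver's identifiers.

#include <errno.h>
#include <stddef.h>

struct device;

struct fifo_hooks {
	int  (*create_context)(struct device *, int channel);
	void (*destroy_context)(struct device *, int channel);
	int  (*load_context)(struct device *, int channel);
	int  (*save_context)(struct device *, int channel);
};

struct device {
	struct fifo_hooks fifo;      /* filled in once, per chipset generation */
};

/* Callers stay chipset-agnostic and simply dispatch through the table. */
static int fifo_alloc_context(struct device *dev, int channel)
{
	if (!dev->fifo.create_context)
		return -EINVAL;      /* this generation registered no hook */
	return dev->fifo.create_context(dev, channel);
}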
@@ -461,6 +403,7 @@ static int nouveau_fifo_alloc(drm_device_t* dev, int *chan_ret, DRMFILE filp) { int ret; drm_nouveau_private_t *dev_priv = dev->dev_private; + nouveau_engine_func_t *engine = &dev_priv->Engine; struct nouveau_fifo *chan; int channel; @@ -560,14 +503,17 @@ static int nouveau_fifo_alloc(drm_device_t* dev, int *chan_ret, DRMFILE filp) case NV_30: nouveau_nv30_context_init(dev, channel); break; - case NV_40: - case NV_44: - nouveau_nv40_context_init(dev, channel); - break; default: - DRM_ERROR("fifoctx: unknown card type\n"); - nouveau_fifo_free(dev, channel); - return DRM_ERR(EINVAL); + if (!engine->fifo.create_context) { + DRM_ERROR("fifo.create_context == NULL\n"); + return DRM_ERR(EINVAL); + } + + ret = engine->fifo.create_context(dev, channel); + if (ret) { + nouveau_fifo_free(dev, channel); + return ret; + } } /* enable the fifo dma operation */ @@ -581,7 +527,11 @@ static int nouveau_fifo_alloc(drm_device_t* dev, int *chan_ret, DRMFILE filp) * other case, the GPU will handle this when it switches contexts. */ if (dev_priv->fifo_alloc_count == 0) { - nouveau_fifo_context_restore(dev, channel); + if (engine->fifo.load_context) + engine->fifo.load_context(dev, channel); + else + nouveau_fifo_context_restore(dev, channel); + if (dev_priv->card_type >= NV_30) { uint32_t inst; @@ -615,6 +565,7 @@ static int nouveau_fifo_alloc(drm_device_t* dev, int *chan_ret, DRMFILE filp) void nouveau_fifo_free(drm_device_t* dev, int channel) { drm_nouveau_private_t *dev_priv = dev->dev_private; + nouveau_engine_func_t *engine = &dev_priv->Engine; struct nouveau_fifo *chan = &dev_priv->fifos[channel]; int i; int ctx_size = nouveau_fifo_ctx_size(dev); @@ -629,12 +580,17 @@ void nouveau_fifo_free(drm_device_t* dev, int channel) // FIXME XXX needs more code /* Clean RAMFC */ - for (i=0;iramfc_offset + - channel*ctx_size + i)); - NV_WRITE(NV_RAMIN + dev_priv->ramfc_offset + - channel*ctx_size + i, 0); + if (engine->fifo.destroy_context) + engine->fifo.destroy_context(dev, channel); + else { + for (i=0;iramfc_offset + + channel*ctx_size + i)); + NV_WRITE(NV_RAMIN + dev_priv->ramfc_offset + + channel*ctx_size + i, 0); + } } /* Cleanup PGRAPH state */ diff --git a/shared-core/nouveau_reg.h b/shared-core/nouveau_reg.h index ea4a2f6b..07c54a92 100644 --- a/shared-core/nouveau_reg.h +++ b/shared-core/nouveau_reg.h @@ -462,6 +462,6 @@ #define NV40_RAMFC_UNK_40 0x40 #define NV40_RAMFC_UNK_44 0x44 #define NV40_RAMFC_UNK_48 0x48 -#define NV40_RAMFC_2088 0x4C -#define NV40_RAMFC_3300 0x50 +#define NV40_RAMFC_UNK_4C 0x4C +#define NV40_RAMFC_UNK_50 0x50 diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c index 592797c3..42860e9a 100644 --- a/shared-core/nouveau_state.c +++ b/shared-core/nouveau_state.c @@ -138,6 +138,10 @@ static int nouveau_init_engine_ptrs(drm_device_t *dev) engine->graph.takedown = nv40_graph_takedown; engine->fifo.init = nouveau_fifo_init; engine->fifo.takedown = nouveau_stub_takedown; + engine->fifo.create_context = nv40_fifo_create_context; + engine->fifo.destroy_context = nv40_fifo_destroy_context; + engine->fifo.load_context = nv40_fifo_load_context; + engine->fifo.save_context = nv40_fifo_save_context; break; case 0x50: default: diff --git a/shared-core/nv40_fifo.c b/shared-core/nv40_fifo.c new file mode 100644 index 00000000..b67a7e58 --- /dev/null +++ b/shared-core/nv40_fifo.c @@ -0,0 +1,193 @@ +/* + * Copyright (C) 2007 Ben Skeggs. + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "drmP.h" +#include "nouveau_drv.h" +#include "nouveau_drm.h" + +#define RAMFC_WR(offset, val) NV_WRITE(fifoctx + NV40_RAMFC_##offset, (val)) +#define RAMFC_RD(offset) NV_READ (fifoctx + NV40_RAMFC_##offset) + +int +nv40_fifo_create_context(drm_device_t *dev, int channel) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + uint32_t fifoctx, grctx, pushbuf; + int i; + + fifoctx = NV_RAMIN + dev_priv->ramfc_offset + channel*128; + for (i=0;i<128;i+=4) + NV_WRITE(fifoctx + i, 0); + + grctx = nouveau_chip_instance_get(dev, chan->ramin_grctx); + pushbuf = nouveau_chip_instance_get(dev, chan->cmdbuf_obj->instance); + + /* Fill entries that are seen filled in dumps of nvidia driver just + * after channel's is put into DMA mode + */ + RAMFC_WR(DMA_PUT , chan->pushbuf_base); + RAMFC_WR(DMA_GET , chan->pushbuf_base); + RAMFC_WR(DMA_INSTANCE , pushbuf); + RAMFC_WR(DMA_FETCH , NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | + NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | + NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 | +#ifdef __BIG_ENDIAN + NV_PFIFO_CACHE1_BIG_ENDIAN | +#endif + 0x30000000 /* no idea.. */); + RAMFC_WR(DMA_SUBROUTINE, 0); + RAMFC_WR(GRCTX_INSTANCE, grctx); + RAMFC_WR(DMA_TIMESLICE , 0x0001FFFF); + + return 0; +} + +void +nv40_fifo_destroy_context(drm_device_t *dev, int channel) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + uint32_t fifoctx; + int i; + + fifoctx = NV_RAMIN + dev_priv->ramfc_offset + channel*128; + for (i=0;i<128;i+=4) + NV_WRITE(fifoctx + i, 0); +} + +int +nv40_fifo_load_context(drm_device_t *dev, int channel) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + uint32_t fifoctx; + uint32_t tmp, tmp2; + + fifoctx = NV_RAMIN + dev_priv->ramfc_offset + channel*128; + + NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET , RAMFC_RD(DMA_GET)); + NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT , RAMFC_RD(DMA_PUT)); + NV_WRITE(NV10_PFIFO_CACHE1_REF_CNT , RAMFC_RD(REF_CNT)); + NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE , RAMFC_RD(DMA_INSTANCE)); + NV_WRITE(NV10_PFIFO_CACHE1_DMA_DCOUNT , RAMFC_RD(DMA_DCOUNT)); + NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE , RAMFC_RD(DMA_STATE)); + + /* No idea what 0x2058 is.. 
*/ + tmp = RAMFC_RD(DMA_FETCH); + tmp2 = NV_READ(0x2058) & 0xFFF; + tmp2 |= (tmp & 0x30000000); + NV_WRITE(0x2058, tmp2); + tmp &= ~0x30000000; + NV_WRITE(NV04_PFIFO_CACHE1_DMA_FETCH , tmp); + + NV_WRITE(NV04_PFIFO_CACHE1_ENGINE , RAMFC_RD(ENGINE)); + NV_WRITE(NV04_PFIFO_CACHE1_PULL1 , RAMFC_RD(PULL1_ENGINE)); + NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_VALUE , RAMFC_RD(ACQUIRE_VALUE)); + NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP, RAMFC_RD(ACQUIRE_TIMESTAMP)); + NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT , RAMFC_RD(ACQUIRE_TIMEOUT)); + NV_WRITE(NV10_PFIFO_CACHE1_SEMAPHORE , RAMFC_RD(SEMAPHORE)); + NV_WRITE(NV10_PFIFO_CACHE1_DMA_SUBROUTINE , RAMFC_RD(DMA_SUBROUTINE)); + NV_WRITE(NV40_PFIFO_GRCTX_INSTANCE , RAMFC_RD(GRCTX_INSTANCE)); + NV_WRITE(0x32e4, RAMFC_RD(UNK_40)); + /* NVIDIA does this next line twice... */ + NV_WRITE(0x32e8, RAMFC_RD(UNK_44)); + NV_WRITE(0x2088, RAMFC_RD(UNK_4C)); + NV_WRITE(0x3300, RAMFC_RD(UNK_50)); + + /* not sure what part is PUT, and which is GET.. never seen a non-zero + * value appear in a mmio-trace yet.. + */ +#if 0 + tmp = NV_READ(UNK_84); + NV_WRITE(NV_PFIFO_CACHE1_GET, tmp ???); + NV_WRITE(NV_PFIFO_CACHE1_PUT, tmp ???); +#endif + + /* Don't clobber the TIMEOUT_ENABLED flag when restoring from RAMFC */ + tmp = NV_READ(NV04_PFIFO_DMA_TIMESLICE) & ~0x1FFFF; + tmp |= RAMFC_RD(DMA_TIMESLICE) & 0x1FFFF; + NV_WRITE(NV04_PFIFO_DMA_TIMESLICE, tmp); + + /* Set channel active, and in DMA mode */ + NV_WRITE(NV03_PFIFO_CACHE1_PUSH1 , 0x00010000 | channel); + /* Reset DMA_CTL_AT_INFO to INVALID */ + tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_CTL) & ~(1<<31); + NV_WRITE(NV04_PFIFO_CACHE1_DMA_CTL, tmp); + + return 0; +} + +int +nv40_fifo_save_context(drm_device_t *dev, int channel) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + uint32_t fifoctx; + uint32_t tmp; + + fifoctx = NV_RAMIN + dev_priv->ramfc_offset + channel*128; + + RAMFC_WR(DMA_PUT , NV_READ(NV04_PFIFO_CACHE1_DMA_PUT)); + RAMFC_WR(DMA_GET , NV_READ(NV04_PFIFO_CACHE1_DMA_GET)); + RAMFC_WR(REF_CNT , NV_READ(NV10_PFIFO_CACHE1_REF_CNT)); + RAMFC_WR(DMA_INSTANCE , NV_READ(NV04_PFIFO_CACHE1_DMA_INSTANCE)); + RAMFC_WR(DMA_DCOUNT , NV_READ(NV10_PFIFO_CACHE1_DMA_DCOUNT)); + RAMFC_WR(DMA_STATE , NV_READ(NV04_PFIFO_CACHE1_DMA_STATE)); + + tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_FETCH); + tmp |= NV_READ(0x2058) & 0x30000000; + RAMFC_WR(DMA_FETCH , tmp); + + RAMFC_WR(ENGINE , NV_READ(NV04_PFIFO_CACHE1_ENGINE)); + RAMFC_WR(PULL1_ENGINE , NV_READ(NV04_PFIFO_CACHE1_PULL1)); + RAMFC_WR(ACQUIRE_VALUE , NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_VALUE)); + tmp = NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP); + RAMFC_WR(ACQUIRE_TIMESTAMP, tmp); + RAMFC_WR(ACQUIRE_TIMEOUT , NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT)); + RAMFC_WR(SEMAPHORE , NV_READ(NV10_PFIFO_CACHE1_SEMAPHORE)); + + /* NVIDIA read 0x3228 first, then write DMA_GET here.. maybe something + * more involved depending on the value of 0x3228? + */ + RAMFC_WR(DMA_SUBROUTINE , NV_READ(NV04_PFIFO_CACHE1_DMA_GET)); + + RAMFC_WR(GRCTX_INSTANCE , NV_READ(NV40_PFIFO_GRCTX_INSTANCE)); + + /* No idea what the below is for exactly, ripped from a mmio-trace */ + RAMFC_WR(UNK_40 , NV_READ(NV40_PFIFO_UNK32E4)); + + /* NVIDIA do this next line twice.. bug? */ + RAMFC_WR(UNK_44 , NV_READ(0x32e8)); + RAMFC_WR(UNK_4C , NV_READ(0x2088)); + RAMFC_WR(UNK_50 , NV_READ(0x3300)); + +#if 0 /* no real idea which is PUT/GET in UNK_48.. 
*/ + tmp = NV_READ(NV04_PFIFO_CACHE1_GET); + tmp |= (NV_READ(NV04_PFIFO_CACHE1_PUT) << 16); + RAMFC_WR(UNK_48 , tmp); +#endif + + return 0; +} + From acb710d1a59788a0205cd0daf0859864e683fbd2 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Sun, 24 Jun 2007 18:56:40 +1000 Subject: [PATCH 027/437] nouveau: NV4X PGRAPH engtab functions --- shared-core/nouveau_drv.h | 11 +-- shared-core/nouveau_fifo.c | 24 +++--- shared-core/nouveau_state.c | 4 + shared-core/nv40_graph.c | 150 +++++++++++++++++++++--------------- 4 files changed, 115 insertions(+), 74 deletions(-) diff --git a/shared-core/nouveau_drv.h b/shared-core/nouveau_drv.h index 7c29a882..07c40107 100644 --- a/shared-core/nouveau_drv.h +++ b/shared-core/nouveau_drv.h @@ -285,11 +285,12 @@ extern void nv30_graph_takedown(drm_device_t *dev); extern int nv30_graph_context_create(drm_device_t *dev, int channel); /* nv40_graph.c */ -extern int nv40_graph_init(drm_device_t *dev); -extern void nv40_graph_takedown(drm_device_t *dev); -extern int nv40_graph_context_create(drm_device_t *dev, int channel); -extern void nv40_graph_context_save_current(drm_device_t *dev); -extern void nv40_graph_context_restore(drm_device_t *dev, int channel); +extern int nv40_graph_init(drm_device_t *); +extern void nv40_graph_takedown(drm_device_t *); +extern int nv40_graph_create_context(drm_device_t *, int channel); +extern void nv40_graph_destroy_context(drm_device_t *, int channel); +extern int nv40_graph_load_context(drm_device_t *, int channel); +extern int nv40_graph_save_context(drm_device_t *, int channel); /* nv04_mc.c */ extern int nv04_mc_init(drm_device_t *dev); diff --git a/shared-core/nouveau_fifo.c b/shared-core/nouveau_fifo.c index 50f094b9..527a71ae 100644 --- a/shared-core/nouveau_fifo.c +++ b/shared-core/nouveau_fifo.c @@ -473,18 +473,17 @@ static int nouveau_fifo_alloc(drm_device_t* dev, int *chan_ret, DRMFILE filp) return ret; } break; - case NV_40: - case NV_44: - ret = nv40_graph_context_create(dev, channel); + default: + if (!engine->graph.create_context) { + DRM_ERROR("graph.create_context == NULL\n"); + return DRM_ERR(EINVAL); + } + ret = engine->graph.create_context(dev, channel); if (ret) { nouveau_fifo_free(dev, channel); return ret; } break; - default: - DRM_ERROR("grctx: unknown card type\n"); - nouveau_fifo_free(dev, channel); - return DRM_ERR(EINVAL); } /* Construct inital RAMFC for new channel */ @@ -532,6 +531,13 @@ static int nouveau_fifo_alloc(drm_device_t* dev, int *chan_ret, DRMFILE filp) else nouveau_fifo_context_restore(dev, channel); + if (engine->graph.load_context) { + ret = engine->graph.load_context(dev, channel); + if (ret) { + nouveau_fifo_free(dev, channel); + return ret; + } + } else if (dev_priv->card_type >= NV_30) { uint32_t inst; @@ -594,8 +600,8 @@ void nouveau_fifo_free(drm_device_t* dev, int channel) } /* Cleanup PGRAPH state */ - if (dev_priv->card_type >= NV_40) - nouveau_instmem_free(dev, chan->ramin_grctx); + if (engine->graph.destroy_context) + engine->graph.destroy_context(dev, channel); else if (dev_priv->card_type >= NV_30) { } else if (dev_priv->card_type >= NV_20) { diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c index 42860e9a..d113f234 100644 --- a/shared-core/nouveau_state.c +++ b/shared-core/nouveau_state.c @@ -136,6 +136,10 @@ static int nouveau_init_engine_ptrs(drm_device_t *dev) engine->fb.takedown = nv40_fb_takedown; engine->graph.init = nv40_graph_init; engine->graph.takedown = nv40_graph_takedown; + engine->graph.create_context = nv40_graph_create_context; + 
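/* Aside on the nv40_fifo load/save routines that end just above: saving a
 * channel is a straight snapshot of the live PFIFO registers into that
 * channel's RAMFC slot, and loading is the mirror image.  The sketch below
 * only illustrates that symmetry; the slot size, struct and helper names are
 * invented and are not the driver's real ones. */
#include <stdint.h>

#define CTX_WORDS 8                              /* illustrative slot size */

struct fifo_regs { uint32_t reg[CTX_WORDS]; };   /* stand-in for live PFIFO state */

static void ramfc_save(const struct fifo_regs *hw, uint32_t *ramfc)
{
	for (int i = 0; i < CTX_WORDS; i++)
		ramfc[i] = hw->reg[i];           /* RAMFC_WR(...) per field in the real code */
}

static void ramfc_load(struct fifo_regs *hw, const uint32_t *ramfc)
{
	for (int i = 0; i < CTX_WORDS; i++)
		hw->reg[i] = ramfc[i];           /* NV_WRITE(reg, RAMFC_RD(...)) per field */
}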
engine->graph.destroy_context = nv40_graph_destroy_context; + engine->graph.load_context = nv40_graph_load_context; + engine->graph.save_context = nv40_graph_save_context; engine->fifo.init = nouveau_fifo_init; engine->fifo.takedown = nouveau_stub_takedown; engine->fifo.create_context = nv40_fifo_create_context; diff --git a/shared-core/nv40_graph.c b/shared-core/nv40_graph.c index 792734ed..245da54d 100644 --- a/shared-core/nv40_graph.c +++ b/shared-core/nv40_graph.c @@ -1,7 +1,32 @@ +/* + * Copyright (C) 2007 Ben Skeggs. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + #include "drmP.h" #include "drm.h" #include "nouveau_drv.h" -#include "nouveau_drm.h" /* The sizes are taken from the difference between the start of two * grctx addresses while running the nvidia driver. Probably slightly @@ -755,7 +780,7 @@ static void nv4e_graph_context_init(drm_device_t *dev, struct mem_block *ctx) } int -nv40_graph_context_create(drm_device_t *dev, int channel) +nv40_graph_create_context(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = (drm_nouveau_private_t *)dev->dev_private; @@ -808,89 +833,94 @@ nv40_graph_context_create(drm_device_t *dev, int channel) return 0; } +void +nv40_graph_destroy_context(drm_device_t *dev, int channel) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + + if (chan->ramin_grctx) { + nouveau_instmem_free(dev, chan->ramin_grctx); + chan->ramin_grctx = NULL; + } +} + +static int +nv40_graph_transfer_context(drm_device_t *dev, uint32_t inst, int save) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + uint32_t old_cp, tv = 1000; + int i; + + old_cp = NV_READ(0x400784); + NV_WRITE(0x400784, inst); + NV_WRITE(0x400310, save ? 0x20 : 0x40); + NV_WRITE(0x400304, 1); + + for (i = 0; i < tv; i++) { + if (NV_READ(0x40030c) == 0) + break; + } + NV_WRITE(0x400784, old_cp); + + if (i == tv) { + DRM_ERROR("failed: inst=0x%08x save=%d\n", inst, save); + DRM_ERROR("0x40030C = 0x%08x\n", NV_READ(0x40030c)); + return DRM_ERR(EBUSY); + } + + return 0; +} + /* Save current context (from PGRAPH) into the channel's context *XXX: fails sometimes, not sure why.. 
*/ -void -nv40_graph_context_save_current(drm_device_t *dev) +int +nv40_graph_save_context(drm_device_t *dev, int channel) { - drm_nouveau_private_t *dev_priv = - (drm_nouveau_private_t *)dev->dev_private; - uint32_t instance; - int i; + drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + uint32_t inst; - NV_WRITE(NV04_PGRAPH_FIFO, 0); + if (!chan->ramin_grctx) + return DRM_ERR(EINVAL); + inst = nouveau_chip_instance_get(dev, chan->ramin_grctx); - instance = NV_READ(0x40032C) & 0xFFFFF; - if (!instance) { - NV_WRITE(NV04_PGRAPH_FIFO, 1); - return; - } - - NV_WRITE(0x400784, instance); - NV_WRITE(0x400310, NV_READ(0x400310) | 0x20); - NV_WRITE(0x400304, 1); - /* just in case, we don't want to spin in-kernel forever */ - for (i=0; i<1000; i++) { - if (NV_READ(0x40030C) == 0) - break; - } - if (i==1000) { - DRM_ERROR("failed to save current grctx to ramin\n"); - DRM_ERROR("instance = 0x%08x\n", NV_READ(0x40032C)); - DRM_ERROR("0x40030C = 0x%08x\n", NV_READ(0x40030C)); - NV_WRITE(NV04_PGRAPH_FIFO, 1); - return; - } - - NV_WRITE(NV04_PGRAPH_FIFO, 1); + return nv40_graph_transfer_context(dev, inst, 1); } /* Restore the context for a specific channel into PGRAPH * XXX: fails sometimes.. not sure why */ -void -nv40_graph_context_restore(drm_device_t *dev, int channel) +int +nv40_graph_load_context(drm_device_t *dev, int channel) { - drm_nouveau_private_t *dev_priv = - (drm_nouveau_private_t *)dev->dev_private; + drm_nouveau_private_t *dev_priv = dev->dev_private; struct nouveau_fifo *chan = &dev_priv->fifos[channel]; - uint32_t instance; - int i; + uint32_t inst; + int ret; - instance = nouveau_chip_instance_get(dev, chan->ramin_grctx); - - NV_WRITE(NV04_PGRAPH_FIFO, 0); - NV_WRITE(0x400784, instance); - NV_WRITE(0x400310, NV_READ(0x400310) | 0x40); - NV_WRITE(0x400304, 1); - /* just in case, we don't want to spin in-kernel forever */ - for (i=0; i<1000; i++) { - if (NV_READ(0x40030C) == 0) - break; - } - if (i==1000) { - DRM_ERROR("failed to restore grctx for ch%d to PGRAPH\n", - channel); - DRM_ERROR("instance = 0x%08x\n", instance); - DRM_ERROR("0x40030C = 0x%08x\n", NV_READ(0x40030C)); - NV_WRITE(NV04_PGRAPH_FIFO, 1); - return; - } + if (!chan->ramin_grctx) + return DRM_ERR(EINVAL); + inst = nouveau_chip_instance_get(dev, chan->ramin_grctx); + ret = nv40_graph_transfer_context(dev, inst, 0); + if (ret) + return ret; /* 0x40032C, no idea of it's exact function. Could simply be a * record of the currently active PGRAPH context. It's currently * unknown as to what bit 24 does. The nv ddx has it set, so we will * set it here too. */ - NV_WRITE(0x40032C, instance | 0x01000000); + NV_WRITE(0x400784, inst); + NV_WRITE(0x40032C, inst | 0x01000000); /* 0x32E0 records the instance address of the active FIFO's PGRAPH * context. 
If at any time this doesn't match 0x40032C, you will * recieve PGRAPH_INTR_CONTEXT_SWITCH */ - NV_WRITE(NV40_PFIFO_GRCTX_INSTANCE, instance); - NV_WRITE(NV04_PGRAPH_FIFO, 1); + NV_WRITE(NV40_PFIFO_GRCTX_INSTANCE, inst); + return 0; } /* Some voodoo that makes context switching work without the binary driver From 05d86d950a10b77ffaa708e9d89b2a87c11fed01 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Sun, 24 Jun 2007 18:57:09 +1000 Subject: [PATCH 028/437] nouveau: NV04 PFIFO engtab functions --- linux-core/Makefile.kernel | 2 +- linux-core/nv04_fifo.c | 1 + shared-core/nouveau_drv.h | 6 ++ shared-core/nouveau_fifo.c | 32 --------- shared-core/nouveau_reg.h | 5 +- shared-core/nouveau_state.c | 4 ++ shared-core/nv04_fifo.c | 126 ++++++++++++++++++++++++++++++++++++ shared-core/nv40_fifo.c | 4 +- 8 files changed, 144 insertions(+), 36 deletions(-) create mode 120000 linux-core/nv04_fifo.c create mode 100644 shared-core/nv04_fifo.c diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel index 3e78b6d7..45d2dc46 100644 --- a/linux-core/Makefile.kernel +++ b/linux-core/Makefile.kernel @@ -25,7 +25,7 @@ nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \ nv04_timer.o \ nv04_mc.o nv40_mc.o \ nv04_fb.o nv10_fb.o nv40_fb.o \ - nv40_fifo.o \ + nv04_fifo.o nv40_fifo.o \ nv04_graph.o nv10_graph.o nv20_graph.o nv30_graph.o \ nv40_graph.o radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o diff --git a/linux-core/nv04_fifo.c b/linux-core/nv04_fifo.c new file mode 120000 index 00000000..d10beb19 --- /dev/null +++ b/linux-core/nv04_fifo.c @@ -0,0 +1 @@ +../shared-core/nv04_fifo.c \ No newline at end of file diff --git a/shared-core/nouveau_drv.h b/shared-core/nouveau_drv.h index 07c40107..3e32c2db 100644 --- a/shared-core/nouveau_drv.h +++ b/shared-core/nouveau_drv.h @@ -255,6 +255,12 @@ extern void nv10_fb_takedown(drm_device_t *dev); extern int nv40_fb_init(drm_device_t *dev); extern void nv40_fb_takedown(drm_device_t *dev); +/* nv04_fifo.c */ +extern int nv04_fifo_create_context(drm_device_t *dev, int channel); +extern void nv04_fifo_destroy_context(drm_device_t *dev, int channel); +extern int nv04_fifo_load_context(drm_device_t *dev, int channel); +extern int nv04_fifo_save_context(drm_device_t *dev, int channel); + /* nv40_fifo.c */ extern int nv40_fifo_create_context(drm_device_t *, int channel); extern void nv40_fifo_destroy_context(drm_device_t *, int channel); diff --git a/shared-core/nouveau_fifo.c b/shared-core/nouveau_fifo.c index 527a71ae..58408a1e 100644 --- a/shared-core/nouveau_fifo.c +++ b/shared-core/nouveau_fifo.c @@ -238,34 +238,6 @@ nouveau_fifo_cmdbuf_alloc(struct drm_device *dev, int channel) return 0; } -#define RAMFC_WR(offset, val) NV_WRITE(fifoctx + NV04_RAMFC_##offset, (val)) -static void nouveau_nv04_context_init(drm_device_t *dev, int channel) -{ - drm_nouveau_private_t *dev_priv = dev->dev_private; - struct nouveau_object *cb_obj; - uint32_t fifoctx, ctx_size = 32; - int i; - - cb_obj = dev_priv->fifos[channel].cmdbuf_obj; - - fifoctx=NV_RAMIN+dev_priv->ramfc_offset+channel*ctx_size; - - // clear the fifo context - for(i=0;iinstance)); - - RAMFC_WR(DMA_FETCH, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES | - NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | - NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4 | -#ifdef __BIG_ENDIAN - NV_PFIFO_CACHE1_BIG_ENDIAN | -#endif - 0x00000000); -} -#undef RAMFC_WR - #define RAMFC_WR(offset, val) NV_WRITE(fifoctx + NV10_RAMFC_##offset, (val)) static void 
nouveau_nv10_context_init(drm_device_t *dev, int channel) { @@ -488,10 +460,6 @@ static int nouveau_fifo_alloc(drm_device_t* dev, int *chan_ret, DRMFILE filp) /* Construct inital RAMFC for new channel */ switch (dev_priv->card_type) { - case NV_04: - case NV_05: - nouveau_nv04_context_init(dev, channel); - break; case NV_10: case NV_17: nouveau_nv10_context_init(dev, channel); diff --git a/shared-core/nouveau_reg.h b/shared-core/nouveau_reg.h index 07c54a92..ba61f997 100644 --- a/shared-core/nouveau_reg.h +++ b/shared-core/nouveau_reg.h @@ -404,7 +404,7 @@ #define NV10_PFIFO_CACHE1_SEMAPHORE 0x0000326C #define NV03_PFIFO_CACHE1_GET 0x00003270 #define NV04_PFIFO_CACHE1_ENGINE 0x00003280 -#define NV10_PFIFO_CACHE1_DMA_DCOUNT 0x000032A0 +#define NV04_PFIFO_CACHE1_DMA_DCOUNT 0x000032A0 #define NV40_PFIFO_GRCTX_INSTANCE 0x000032E0 #define NV40_PFIFO_UNK32E4 0x000032E4 #define NV04_PFIFO_CACHE1_METHOD(i) (0x00003800+(i*8)) @@ -427,7 +427,10 @@ #define NV04_RAMFC_DMA_PUT 0x00 #define NV04_RAMFC_DMA_GET 0x04 #define NV04_RAMFC_DMA_INSTANCE 0x08 +#define NV04_RAMFC_DMA_STATE 0x0C #define NV04_RAMFC_DMA_FETCH 0x10 +#define NV04_RAMFC_ENGINE 0x14 +#define NV04_RAMFC_PULL1_ENGINE 0x18 #define NV10_RAMFC_DMA_PUT 0x00 #define NV10_RAMFC_DMA_GET 0x04 diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c index d113f234..ed200e85 100644 --- a/shared-core/nouveau_state.c +++ b/shared-core/nouveau_state.c @@ -90,6 +90,10 @@ static int nouveau_init_engine_ptrs(drm_device_t *dev) engine->graph.takedown = nv04_graph_takedown; engine->fifo.init = nouveau_fifo_init; engine->fifo.takedown = nouveau_stub_takedown; + engine->fifo.create_context = nv04_fifo_create_context; + engine->fifo.destroy_context = nv04_fifo_destroy_context; + engine->fifo.load_context = nv04_fifo_load_context; + engine->fifo.save_context = nv04_fifo_save_context; break; case 0x10: engine->mc.init = nv04_mc_init; diff --git a/shared-core/nv04_fifo.c b/shared-core/nv04_fifo.c new file mode 100644 index 00000000..34a497b7 --- /dev/null +++ b/shared-core/nv04_fifo.c @@ -0,0 +1,126 @@ +/* + * Copyright (C) 2007 Ben Skeggs. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include "drmP.h" +#include "drm.h" +#include "nouveau_drv.h" + +#define NV04_RAMFC (NV_RAMIN + dev_priv->ramfc_offset) +#define RAMFC_WR(offset, val) NV_WRITE(fifoctx + NV04_RAMFC_##offset, (val)) +#define RAMFC_RD(offset) NV_READ(fifoctx + NV04_RAMFC_##offset) +#define NV04_FIFO_CONTEXT_SIZE 32 + +int +nv04_fifo_create_context(drm_device_t *dev, int channel) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + struct nouveau_object *pb = chan->cmdbuf_obj; + int fifoctx = NV04_RAMFC + (channel * NV04_FIFO_CONTEXT_SIZE); + int i; + + if (!pb || !pb->instance) + return DRM_ERR(EINVAL); + + /* Clear RAMFC */ + for (i=0; ipushbuf_base); + RAMFC_WR(DMA_GET, chan->pushbuf_base); + RAMFC_WR(DMA_INSTANCE, nouveau_chip_instance_get(dev, pb->instance)); + /* NOTE: nvidia use TRIG_128/SIZE_128/MAX_REQS_8 */ + RAMFC_WR(DMA_FETCH, (NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES | + NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | + NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4 | +#ifdef __BIG_ENDIAN + NV_PFIFO_CACHE1_BIG_ENDIAN | +#endif + 0)); + return 0; +} + +void +nv04_fifo_destroy_context(drm_device_t *dev, int channel) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + uint32_t fifoctx; + int i; + + fifoctx = NV04_RAMFC + (channel * NV04_FIFO_CONTEXT_SIZE); + for (i=0; idev_private; + int fifoctx = NV04_RAMFC + (channel * NV04_FIFO_CONTEXT_SIZE); + uint32_t tmp; + + NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT, RAMFC_RD(DMA_PUT)); + NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET, RAMFC_RD(DMA_GET)); + + tmp = RAMFC_RD(DMA_INSTANCE); + NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE, tmp & 0xFFFF); + NV_WRITE(NV04_PFIFO_CACHE1_DMA_DCOUNT, tmp >> 16); + + NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE, RAMFC_RD(DMA_STATE)); + NV_WRITE(NV04_PFIFO_CACHE1_DMA_FETCH, RAMFC_RD(DMA_FETCH)); + NV_WRITE(NV04_PFIFO_CACHE1_ENGINE, RAMFC_RD(ENGINE)); + NV_WRITE(NV04_PFIFO_CACHE1_PULL1, RAMFC_RD(PULL1_ENGINE)); + + /* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */ + tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_CTL) & ~(1<<31); + NV_WRITE(NV04_PFIFO_CACHE1_DMA_CTL, tmp); + + return 0; +} + +int +nv04_fifo_save_context(drm_device_t *dev, int channel) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + int fifoctx = NV04_RAMFC + (channel * NV04_FIFO_CONTEXT_SIZE); + uint32_t tmp; + + RAMFC_WR(DMA_PUT, NV04_PFIFO_CACHE1_DMA_PUT); + RAMFC_WR(DMA_GET, NV04_PFIFO_CACHE1_DMA_GET); + + tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16; + tmp |= NV_READ(NV04_PFIFO_CACHE1_DMA_INSTANCE); + RAMFC_WR(DMA_INSTANCE, tmp); + + RAMFC_WR(DMA_STATE, NV_READ(NV04_PFIFO_CACHE1_DMA_STATE)); + RAMFC_WR(DMA_FETCH, NV_READ(NV04_PFIFO_CACHE1_DMA_FETCH)); + RAMFC_WR(ENGINE, NV_READ(NV04_PFIFO_CACHE1_ENGINE)); + RAMFC_WR(PULL1_ENGINE, NV_READ(NV04_PFIFO_CACHE1_PULL1)); + + return 0; +} + diff --git a/shared-core/nv40_fifo.c b/shared-core/nv40_fifo.c index b67a7e58..9d7afbe5 100644 --- a/shared-core/nv40_fifo.c +++ b/shared-core/nv40_fifo.c @@ -91,7 +91,7 @@ nv40_fifo_load_context(drm_device_t *dev, int channel) NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT , RAMFC_RD(DMA_PUT)); NV_WRITE(NV10_PFIFO_CACHE1_REF_CNT , RAMFC_RD(REF_CNT)); NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE , RAMFC_RD(DMA_INSTANCE)); - NV_WRITE(NV10_PFIFO_CACHE1_DMA_DCOUNT , RAMFC_RD(DMA_DCOUNT)); + NV_WRITE(NV04_PFIFO_CACHE1_DMA_DCOUNT , RAMFC_RD(DMA_DCOUNT)); NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE , RAMFC_RD(DMA_STATE)); /* No idea what 0x2058 is.. 
*/ @@ -152,7 +152,7 @@ nv40_fifo_save_context(drm_device_t *dev, int channel) RAMFC_WR(DMA_GET , NV_READ(NV04_PFIFO_CACHE1_DMA_GET)); RAMFC_WR(REF_CNT , NV_READ(NV10_PFIFO_CACHE1_REF_CNT)); RAMFC_WR(DMA_INSTANCE , NV_READ(NV04_PFIFO_CACHE1_DMA_INSTANCE)); - RAMFC_WR(DMA_DCOUNT , NV_READ(NV10_PFIFO_CACHE1_DMA_DCOUNT)); + RAMFC_WR(DMA_DCOUNT , NV_READ(NV04_PFIFO_CACHE1_DMA_DCOUNT)); RAMFC_WR(DMA_STATE , NV_READ(NV04_PFIFO_CACHE1_DMA_STATE)); tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_FETCH); From 341bc7820749024e09275de6e689b10c2908689a Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Sun, 24 Jun 2007 18:58:14 +1000 Subject: [PATCH 029/437] nouveau: NV1X/2X/3X PFIFO engtab functions Earlier NV1X chips use the NV04 code, see previous commits about NV10 RAMFC entry size. --- linux-core/Makefile.kernel | 2 +- linux-core/nv10_fifo.c | 1 + shared-core/nouveau_drv.h | 6 ++ shared-core/nouveau_fifo.c | 177 ++---------------------------------- shared-core/nouveau_state.c | 19 ++++ shared-core/nv10_fifo.c | 143 +++++++++++++++++++++++++++++ 6 files changed, 176 insertions(+), 172 deletions(-) create mode 120000 linux-core/nv10_fifo.c create mode 100644 shared-core/nv10_fifo.c diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel index 45d2dc46..9427a04b 100644 --- a/linux-core/Makefile.kernel +++ b/linux-core/Makefile.kernel @@ -25,7 +25,7 @@ nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \ nv04_timer.o \ nv04_mc.o nv40_mc.o \ nv04_fb.o nv10_fb.o nv40_fb.o \ - nv04_fifo.o nv40_fifo.o \ + nv04_fifo.o nv10_fifo.o nv40_fifo.o \ nv04_graph.o nv10_graph.o nv20_graph.o nv30_graph.o \ nv40_graph.o radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o diff --git a/linux-core/nv10_fifo.c b/linux-core/nv10_fifo.c new file mode 120000 index 00000000..8630ad04 --- /dev/null +++ b/linux-core/nv10_fifo.c @@ -0,0 +1 @@ +../shared-core/nv10_fifo.c \ No newline at end of file diff --git a/shared-core/nouveau_drv.h b/shared-core/nouveau_drv.h index 3e32c2db..c7872c33 100644 --- a/shared-core/nouveau_drv.h +++ b/shared-core/nouveau_drv.h @@ -261,6 +261,12 @@ extern void nv04_fifo_destroy_context(drm_device_t *dev, int channel); extern int nv04_fifo_load_context(drm_device_t *dev, int channel); extern int nv04_fifo_save_context(drm_device_t *dev, int channel); +/* nv10_fifo.c */ +extern int nv10_fifo_create_context(drm_device_t *dev, int channel); +extern void nv10_fifo_destroy_context(drm_device_t *dev, int channel); +extern int nv10_fifo_load_context(drm_device_t *dev, int channel); +extern int nv10_fifo_save_context(drm_device_t *dev, int channel); + /* nv40_fifo.c */ extern int nv40_fifo_create_context(drm_device_t *, int channel); extern void nv40_fifo_destroy_context(drm_device_t *, int channel); diff --git a/shared-core/nouveau_fifo.c b/shared-core/nouveau_fifo.c index 58408a1e..0a883647 100644 --- a/shared-core/nouveau_fifo.c +++ b/shared-core/nouveau_fifo.c @@ -238,138 +238,6 @@ nouveau_fifo_cmdbuf_alloc(struct drm_device *dev, int channel) return 0; } -#define RAMFC_WR(offset, val) NV_WRITE(fifoctx + NV10_RAMFC_##offset, (val)) -static void nouveau_nv10_context_init(drm_device_t *dev, int channel) -{ - drm_nouveau_private_t *dev_priv = dev->dev_private; - struct nouveau_object *cb_obj; - uint32_t fifoctx; - int ctx_size = nouveau_fifo_ctx_size(dev); - int i; - cb_obj = dev_priv->fifos[channel].cmdbuf_obj; - fifoctx = NV_RAMIN + dev_priv->ramfc_offset + channel*ctx_size; - - for (i=0;iinstance)); - - RAMFC_WR(DMA_FETCH, 
NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES | - NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | - NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4 | -#ifdef __BIG_ENDIAN - NV_PFIFO_CACHE1_BIG_ENDIAN | -#endif - 0x00000000); -} - -static void nouveau_nv30_context_init(drm_device_t *dev, int channel) -{ - drm_nouveau_private_t *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = &dev_priv->fifos[channel]; - struct nouveau_object *cb_obj; - uint32_t fifoctx, grctx_inst, cb_inst, ctx_size = 64; - int i; - - cb_obj = dev_priv->fifos[channel].cmdbuf_obj; - cb_inst = nouveau_chip_instance_get(dev, chan->cmdbuf_obj->instance); - grctx_inst = nouveau_chip_instance_get(dev, chan->ramin_grctx); - fifoctx = NV_RAMIN + dev_priv->ramfc_offset + channel * ctx_size; - - for (i = 0; i < ctx_size; i += 4) - NV_WRITE(fifoctx + i, 0); - - RAMFC_WR(REF_CNT, NV_READ(NV10_PFIFO_CACHE1_REF_CNT)); - RAMFC_WR(DMA_INSTANCE, cb_inst); - RAMFC_WR(DMA_STATE, NV_READ(NV04_PFIFO_CACHE1_DMA_STATE)); - RAMFC_WR(DMA_FETCH, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | - NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | - NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 | -#ifdef __BIG_ENDIAN - NV_PFIFO_CACHE1_BIG_ENDIAN | -#endif - 0x00000000); - - RAMFC_WR(ENGINE, NV_READ(NV04_PFIFO_CACHE1_ENGINE)); - RAMFC_WR(PULL1_ENGINE, NV_READ(NV04_PFIFO_CACHE1_PULL1)); - RAMFC_WR(ACQUIRE_VALUE, NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_VALUE)); - RAMFC_WR(ACQUIRE_TIMESTAMP, NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP)); - RAMFC_WR(ACQUIRE_TIMEOUT, NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT)); - RAMFC_WR(SEMAPHORE, NV_READ(NV10_PFIFO_CACHE1_SEMAPHORE)); -} - -#if 0 -static void nouveau_nv10_context_save(drm_device_t *dev) -{ - drm_nouveau_private_t *dev_priv = dev->dev_private; - uint32_t fifoctx; - int channel; - - channel = NV_READ(NV03_PFIFO_CACHE1_PUSH1) & (nouveau_fifo_number(dev)-1); - fifoctx = NV_RAMIN + dev_priv->ramfc_offset + channel*64; - - RAMFC_WR(DMA_PUT , NV_READ(NV04_PFIFO_CACHE1_DMA_PUT)); - RAMFC_WR(DMA_GET , NV_READ(NV04_PFIFO_CACHE1_DMA_GET)); - RAMFC_WR(REF_CNT , NV_READ(NV10_PFIFO_CACHE1_REF_CNT)); - RAMFC_WR(DMA_INSTANCE , NV_READ(NV04_PFIFO_CACHE1_DMA_INSTANCE)); - RAMFC_WR(DMA_STATE , NV_READ(NV04_PFIFO_CACHE1_DMA_STATE)); - RAMFC_WR(DMA_FETCH , NV_READ(NV04_PFIFO_CACHE1_DMA_FETCH)); - RAMFC_WR(ENGINE , NV_READ(NV04_PFIFO_CACHE1_ENGINE)); - RAMFC_WR(PULL1_ENGINE , NV_READ(NV04_PFIFO_CACHE1_PULL1)); - RAMFC_WR(ACQUIRE_VALUE , NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_VALUE)); - RAMFC_WR(ACQUIRE_TIMESTAMP, NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP)); - RAMFC_WR(ACQUIRE_TIMEOUT , NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT)); - RAMFC_WR(SEMAPHORE , NV_READ(NV10_PFIFO_CACHE1_SEMAPHORE)); - RAMFC_WR(DMA_SUBROUTINE , NV_READ(NV10_PFIFO_CACHE1_DMA_SUBROUTINE)); -} -#endif -#undef RAMFC_WR - -/* This function should load values from RAMFC into PFIFO, but for now - * it just clobbers PFIFO with what nouveau_fifo_alloc used to setup - * unconditionally. 
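For context on the code being removed here and the per-generation files replacing it: each channel owns a fixed-size context slot (RAMFC) inside instance memory, located at a common base plus channel times the slot size, and the slot size grows with the chipset generation (32 bytes for the NV04-style layout, 64 bytes for NV10 and later, 128 bytes on NV40). A small sketch of that addressing follows; the helper and parameter names are invented, the sizes are taken from the files in this series.

#include <stdint.h>

enum { RAMFC_SIZE_NV04 = 32, RAMFC_SIZE_NV10 = 64, RAMFC_SIZE_NV40 = 128 };

/* Byte offset of a channel's RAMFC slot inside instance memory, mirroring
 * fifoctx = NV_RAMIN + ramfc_offset + channel * ctx_size in the driver. */
static inline uint32_t ramfc_offset_of(uint32_t ramin_base, uint32_t ramfc_base,
                                       int channel, uint32_t ctx_size)
{
	return ramin_base + ramfc_base + (uint32_t)channel * ctx_size;
}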
- */ -static void -nouveau_fifo_context_restore(drm_device_t *dev, int channel) -{ - drm_nouveau_private_t *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = &dev_priv->fifos[channel]; - uint32_t cb_inst; - - cb_inst = nouveau_chip_instance_get(dev, chan->cmdbuf_obj->instance); - - // FIXME check if we need to refill the time quota with something like NV_WRITE(0x204C, 0x0003FFFF); - - if (dev_priv->card_type >= NV_40) - NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, 0x00010000|channel); - else - NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, 0x00000100|channel); - - NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT, 0 /*RAMFC_DMA_PUT*/); - NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET, 0 /*RAMFC_DMA_GET*/); - NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE, cb_inst); - NV_WRITE(NV04_PFIFO_SIZE , 0x0000FFFF); - NV_WRITE(NV04_PFIFO_CACHE1_HASH, 0x0000FFFF); - - NV_WRITE(NV04_PFIFO_CACHE0_PULL1, 0x00000001); - NV_WRITE(NV04_PFIFO_CACHE1_DMA_CTL, 0x00000000); - NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE, 0x00000000); - NV_WRITE(NV04_PFIFO_CACHE1_ENGINE, 0x00000000); - - NV_WRITE(NV04_PFIFO_CACHE1_DMA_FETCH, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES | - NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | - NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4 | -#ifdef __BIG_ENDIAN - NV_PFIFO_CACHE1_BIG_ENDIAN | -#endif - 0x00000000); -} - /* allocates and initializes a fifo for user space consumption */ static int nouveau_fifo_alloc(drm_device_t* dev, int *chan_ret, DRMFILE filp) { @@ -459,28 +327,10 @@ static int nouveau_fifo_alloc(drm_device_t* dev, int *chan_ret, DRMFILE filp) } /* Construct inital RAMFC for new channel */ - switch (dev_priv->card_type) { - case NV_10: - case NV_17: - nouveau_nv10_context_init(dev, channel); - break; - case NV_20: - nouveau_nv10_context_init(dev, channel); - break; - case NV_30: - nouveau_nv30_context_init(dev, channel); - break; - default: - if (!engine->fifo.create_context) { - DRM_ERROR("fifo.create_context == NULL\n"); - return DRM_ERR(EINVAL); - } - - ret = engine->fifo.create_context(dev, channel); - if (ret) { - nouveau_fifo_free(dev, channel); - return ret; - } + ret = engine->fifo.create_context(dev, channel); + if (ret) { + nouveau_fifo_free(dev, channel); + return ret; } /* enable the fifo dma operation */ @@ -494,10 +344,7 @@ static int nouveau_fifo_alloc(drm_device_t* dev, int *chan_ret, DRMFILE filp) * other case, the GPU will handle this when it switches contexts. 
*/ if (dev_priv->fifo_alloc_count == 0) { - if (engine->fifo.load_context) - engine->fifo.load_context(dev, channel); - else - nouveau_fifo_context_restore(dev, channel); + engine->fifo.load_context(dev, channel); if (engine->graph.load_context) { ret = engine->graph.load_context(dev, channel); @@ -553,19 +400,7 @@ void nouveau_fifo_free(drm_device_t* dev, int channel) NV_WRITE(NV04_PFIFO_MODE, NV_READ(NV04_PFIFO_MODE)&~(1<fifo.destroy_context) - engine->fifo.destroy_context(dev, channel); - else { - for (i=0;iramfc_offset + - channel*ctx_size + i)); - NV_WRITE(NV_RAMIN + dev_priv->ramfc_offset + - channel*ctx_size + i, 0); - } - } + engine->fifo.destroy_context(dev, channel); /* Cleanup PGRAPH state */ if (engine->graph.destroy_context) diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c index ed200e85..55d10b8c 100644 --- a/shared-core/nouveau_state.c +++ b/shared-core/nouveau_state.c @@ -106,6 +106,17 @@ static int nouveau_init_engine_ptrs(drm_device_t *dev) engine->graph.takedown = nv10_graph_takedown; engine->fifo.init = nouveau_fifo_init; engine->fifo.takedown = nouveau_stub_takedown; + if (dev_priv->chipset < 0x17) { + engine->fifo.create_context = nv04_fifo_create_context; + engine->fifo.destroy_context = nv04_fifo_destroy_context; + engine->fifo.load_context = nv04_fifo_load_context; + engine->fifo.save_context = nv04_fifo_save_context; + } else { + engine->fifo.create_context = nv10_fifo_create_context; + engine->fifo.destroy_context = nv10_fifo_destroy_context; + engine->fifo.load_context = nv10_fifo_load_context; + engine->fifo.save_context = nv10_fifo_save_context; + } break; case 0x20: engine->mc.init = nv04_mc_init; @@ -118,6 +129,10 @@ static int nouveau_init_engine_ptrs(drm_device_t *dev) engine->graph.takedown = nv20_graph_takedown; engine->fifo.init = nouveau_fifo_init; engine->fifo.takedown = nouveau_stub_takedown; + engine->fifo.create_context = nv10_fifo_create_context; + engine->fifo.destroy_context = nv10_fifo_destroy_context; + engine->fifo.load_context = nv10_fifo_load_context; + engine->fifo.save_context = nv10_fifo_save_context; break; case 0x30: engine->mc.init = nv04_mc_init; @@ -130,6 +145,10 @@ static int nouveau_init_engine_ptrs(drm_device_t *dev) engine->graph.takedown = nv30_graph_takedown; engine->fifo.init = nouveau_fifo_init; engine->fifo.takedown = nouveau_stub_takedown; + engine->fifo.create_context = nv10_fifo_create_context; + engine->fifo.destroy_context = nv10_fifo_destroy_context; + engine->fifo.load_context = nv10_fifo_load_context; + engine->fifo.save_context = nv10_fifo_save_context; break; case 0x40: engine->mc.init = nv40_mc_init; diff --git a/shared-core/nv10_fifo.c b/shared-core/nv10_fifo.c new file mode 100644 index 00000000..8dad45aa --- /dev/null +++ b/shared-core/nv10_fifo.c @@ -0,0 +1,143 @@ +/* + * Copyright (C) 2007 Ben Skeggs. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "drmP.h" +#include "drm.h" +#include "nouveau_drv.h" + +#define RAMFC_WR(offset, val) NV_WRITE(fifoctx + NV10_RAMFC_##offset, (val)) +#define RAMFC_RD(offset) NV_READ (fifoctx + NV10_RAMFC_##offset) +#define NV10_FIFO_CONTEXT_SIZE 64 + +int +nv10_fifo_create_context(drm_device_t *dev, int channel) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + uint32_t fifoctx, pushbuf; + int i; + + pushbuf = nouveau_chip_instance_get(dev, chan->cmdbuf_obj->instance); + + fifoctx = NV_RAMIN + dev_priv->ramfc_offset + channel*64; + for (i=0; ipushbuf_base); + RAMFC_WR(DMA_GET , chan->pushbuf_base); + RAMFC_WR(DMA_INSTANCE , pushbuf); + RAMFC_WR(DMA_FETCH , NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | + NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | + NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 | +#ifdef __BIG_ENDIAN + NV_PFIFO_CACHE1_BIG_ENDIAN | +#endif + 0); + + return 0; +} + +void +nv10_fifo_destroy_context(drm_device_t *dev, int channel) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + uint32_t fifoctx; + int i; + + fifoctx = NV_RAMIN + dev_priv->ramfc_offset + channel*64; + for (i=0; idev_private; + uint32_t fifoctx; + uint32_t tmp; + + fifoctx = NV_RAMIN + dev_priv->ramfc_offset + channel*64; + + NV_WRITE(NV03_PFIFO_CACHE1_PUSH1 , 0x00000100 | channel); + + NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET , RAMFC_RD(DMA_GET)); + NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT , RAMFC_RD(DMA_PUT)); + NV_WRITE(NV10_PFIFO_CACHE1_REF_CNT , RAMFC_RD(REF_CNT)); + + tmp = RAMFC_RD(DMA_INSTANCE); + NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE , tmp & 0xFFFF); + NV_WRITE(NV04_PFIFO_CACHE1_DMA_DCOUNT , tmp >> 16); + + NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE , RAMFC_RD(DMA_STATE)); + NV_WRITE(NV04_PFIFO_CACHE1_DMA_FETCH , RAMFC_RD(DMA_FETCH)); + NV_WRITE(NV04_PFIFO_CACHE1_ENGINE , RAMFC_RD(ENGINE)); + NV_WRITE(NV04_PFIFO_CACHE1_PULL1 , RAMFC_RD(PULL1_ENGINE)); + NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_VALUE , RAMFC_RD(ACQUIRE_VALUE)); + NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP, RAMFC_RD(ACQUIRE_TIMESTAMP)); + NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT , RAMFC_RD(ACQUIRE_TIMEOUT)); + NV_WRITE(NV10_PFIFO_CACHE1_SEMAPHORE , RAMFC_RD(SEMAPHORE)); + NV_WRITE(NV10_PFIFO_CACHE1_DMA_SUBROUTINE , RAMFC_RD(DMA_SUBROUTINE)); + + /* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */ + tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_CTL) & ~(1<<31); + NV_WRITE(NV04_PFIFO_CACHE1_DMA_CTL, tmp); + + return 0; +} + +int +nv10_fifo_save_context(drm_device_t *dev, int channel) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + uint32_t fifoctx; + uint32_t tmp; + + fifoctx = NV_RAMIN + dev_priv->ramfc_offset + channel*64; + + RAMFC_WR(DMA_PUT , NV_READ(NV04_PFIFO_CACHE1_DMA_PUT)); + RAMFC_WR(DMA_GET , NV_READ(NV04_PFIFO_CACHE1_DMA_GET)); + RAMFC_WR(REF_CNT , NV_READ(NV10_PFIFO_CACHE1_REF_CNT)); + + tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_INSTANCE) & 0xFFFF; + tmp |= (NV_READ(NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16); + RAMFC_WR(DMA_INSTANCE , tmp); + + RAMFC_WR(DMA_STATE , 
NV_READ(NV04_PFIFO_CACHE1_DMA_STATE)); + RAMFC_WR(DMA_FETCH , NV_READ(NV04_PFIFO_CACHE1_DMA_FETCH)); + RAMFC_WR(ENGINE , NV_READ(NV04_PFIFO_CACHE1_ENGINE)); + RAMFC_WR(PULL1_ENGINE , NV_READ(NV04_PFIFO_CACHE1_PULL1)); + RAMFC_WR(ACQUIRE_VALUE , NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_VALUE)); + RAMFC_WR(ACQUIRE_TIMESTAMP, NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP)); + RAMFC_WR(ACQUIRE_TIMEOUT , NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT)); + RAMFC_WR(SEMAPHORE , NV_READ(NV10_PFIFO_CACHE1_SEMAPHORE)); + RAMFC_WR(DMA_SUBROUTINE , NV_READ(NV04_PFIFO_CACHE1_DMA_GET)); + + return 0; +} + From 5d55b0655cb480b7d6ab4cf2467dac6dc6d8df25 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Sun, 24 Jun 2007 18:58:38 +1000 Subject: [PATCH 030/437] nouveau: NV3X PGRAPH engtab functions --- shared-core/nouveau_drv.h | 7 ++-- shared-core/nouveau_fifo.c | 9 ----- shared-core/nouveau_state.c | 4 +++ shared-core/nv30_graph.c | 66 ++++++++++++++++++++++++++++++++++++- 4 files changed, 74 insertions(+), 12 deletions(-) diff --git a/shared-core/nouveau_drv.h b/shared-core/nouveau_drv.h index c7872c33..a4a37648 100644 --- a/shared-core/nouveau_drv.h +++ b/shared-core/nouveau_drv.h @@ -292,9 +292,12 @@ extern void nv20_graph_takedown(drm_device_t *dev); extern int nv20_graph_context_create(drm_device_t *dev, int channel); /* nv30_graph.c */ -extern int nv30_graph_init(drm_device_t *dev); +extern int nv30_graph_init(drm_device_t *dev); extern void nv30_graph_takedown(drm_device_t *dev); -extern int nv30_graph_context_create(drm_device_t *dev, int channel); +extern int nv30_graph_create_context(drm_device_t *, int channel); +extern void nv30_graph_destroy_context(drm_device_t *, int channel); +extern int nv30_graph_load_context(drm_device_t *, int channel); +extern int nv30_graph_save_context(drm_device_t *, int channel); /* nv40_graph.c */ extern int nv40_graph_init(drm_device_t *); diff --git a/shared-core/nouveau_fifo.c b/shared-core/nouveau_fifo.c index 0a883647..1ef5a425 100644 --- a/shared-core/nouveau_fifo.c +++ b/shared-core/nouveau_fifo.c @@ -306,13 +306,6 @@ static int nouveau_fifo_alloc(drm_device_t* dev, int *chan_ret, DRMFILE filp) return ret; } break; - case NV_30: - ret = nv30_graph_context_create(dev, channel); - if (ret) { - nouveau_fifo_free(dev, channel); - return ret; - } - break; default: if (!engine->graph.create_context) { DRM_ERROR("graph.create_context == NULL\n"); @@ -388,8 +381,6 @@ void nouveau_fifo_free(drm_device_t* dev, int channel) drm_nouveau_private_t *dev_priv = dev->dev_private; nouveau_engine_func_t *engine = &dev_priv->Engine; struct nouveau_fifo *chan = &dev_priv->fifos[channel]; - int i; - int ctx_size = nouveau_fifo_ctx_size(dev); chan->used = 0; DRM_INFO("%s: freeing fifo %d\n", __func__, channel); diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c index 55d10b8c..a997b075 100644 --- a/shared-core/nouveau_state.c +++ b/shared-core/nouveau_state.c @@ -143,6 +143,10 @@ static int nouveau_init_engine_ptrs(drm_device_t *dev) engine->fb.takedown = nv10_fb_takedown; engine->graph.init = nv30_graph_init; engine->graph.takedown = nv30_graph_takedown; + engine->graph.create_context = nv30_graph_create_context; + engine->graph.destroy_context = nv30_graph_destroy_context; + engine->graph.load_context = nv30_graph_load_context; + engine->graph.save_context = nv30_graph_save_context; engine->fifo.init = nouveau_fifo_init; engine->fifo.takedown = nouveau_stub_takedown; engine->fifo.create_context = nv10_fifo_create_context; diff --git a/shared-core/nv30_graph.c 
b/shared-core/nv30_graph.c index f4faadd8..9f064a0a 100644 --- a/shared-core/nv30_graph.c +++ b/shared-core/nv30_graph.c @@ -100,7 +100,7 @@ static void nv30_graph_context_init(drm_device_t *dev, struct mem_block *ctx) } -int nv30_graph_context_create(drm_device_t *dev, int channel) +int nv30_graph_create_context(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = (drm_nouveau_private_t *)dev->dev_private; @@ -132,6 +132,70 @@ int nv30_graph_context_create(drm_device_t *dev, int channel) return 0; } +void nv30_graph_destroy_context(drm_device_t *dev, int channel) +{ + drm_nouveau_private_t *dev_priv = + (drm_nouveau_private_t *)dev->dev_private; + struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + + if (chan->ramin_grctx) { + nouveau_instmem_free(dev, chan->ramin_grctx); + chan->ramin_grctx = NULL; + } + + INSTANCE_WR(dev_priv->ctx_table, channel, 0); +} + +static int +nouveau_graph_wait_idle(drm_device_t *dev) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + int tv = 1000; + + while (tv--) { + if (NV_READ(0x400700) == 0) + break; + } + + if (NV_READ(0x400700)) { + DRM_ERROR("timeout!\n"); + return DRM_ERR(EBUSY); + } + return 0; +} + +int nv30_graph_load_context(drm_device_t *dev, int channel) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + uint32_t inst; + + if (!chan->ramin_grctx) + return DRM_ERR(EINVAL); + inst = nouveau_chip_instance_get(dev, chan->ramin_grctx); + + NV_WRITE(0x400784, inst); + NV_WRITE(0x400788, 1); + + return nouveau_graph_wait_idle(dev); +} + +int nv30_graph_save_context(drm_device_t *dev, int channel) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + uint32_t inst; + + if (!chan->ramin_grctx) + return DRM_ERR(EINVAL); + inst = nouveau_chip_instance_get(dev, chan->ramin_grctx); + + NV_WRITE(0x400784, inst); + NV_WRITE(0x400788, 2); + + return nouveau_graph_wait_idle(dev); +} + int nv30_graph_init(drm_device_t *dev) { drm_nouveau_private_t *dev_priv = From 5f05cd7086c54bccf1c2f0b003b78a08dc55472a Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Sun, 24 Jun 2007 19:00:26 +1000 Subject: [PATCH 031/437] nouveau: NV04/NV10/NV20 PGRAPH engtab functions NV04/NV10 load_context()/save_context() are stubs. I don't know enough about how they work to implement them sanely. The "old" context_switch() code remains hooked up, so it shouldn't break anything. NV20 will probably break if load_context() works. No inital context values are filled in, so when the first channel is created PGRAPH will probably end up having its state zeroed. Some setup from nv20_graph_init() will probably need to be moved to the per-channel context setup. 
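For reference, the "engtab" pattern introduced across these patches amounts to a per-chipset
table of entry points that the common FIFO/PGRAPH code dispatches through, instead of
switch (dev_priv->card_type) blocks at every call site. The sketch below is illustrative
only and not part of any patch: struct graph_engine and graph_ctx_create() are simplified
stand-ins for the engine table inside drm_nouveau_private_t, and the four pointers
correspond to the nvXX_graph_* functions wired up in nouveau_state.c by
nouveau_init_engine_ptrs().

	/* Illustrative sketch of the engtab dispatch (not from the tree).
	 * The real table lives in drm_nouveau_private_t and is filled in
	 * per chipset by nouveau_init_engine_ptrs(). */
	struct drm_device;			/* opaque here; drm_device_t in the tree */

	struct graph_engine {
		int  (*create_context)(struct drm_device *, int channel);
		void (*destroy_context)(struct drm_device *, int channel);
		int  (*load_context)(struct drm_device *, int channel);
		int  (*save_context)(struct drm_device *, int channel);
	};

	/* Common code calls through the table; no chipset checks needed here. */
	static int graph_ctx_create(struct drm_device *dev,
				    struct graph_engine *graph, int channel)
	{
		return graph->create_context(dev, channel);
	}

With this shape, supporting a new chipset only means filling in the four pointers, which is
exactly what the nouveau_state.c hunks in this series do for NV04/NV10/NV20/NV30.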
--- shared-core/nouveau_drv.h | 21 +++++++++++++++------ shared-core/nouveau_fifo.c | 32 ++++---------------------------- shared-core/nouveau_state.c | 12 ++++++++++++ shared-core/nv04_graph.c | 17 ++++++++++++++++- shared-core/nv10_graph.c | 17 ++++++++++++++++- shared-core/nv20_graph.c | 33 +++++++++++++++++++++++---------- 6 files changed, 86 insertions(+), 46 deletions(-) diff --git a/shared-core/nouveau_drv.h b/shared-core/nouveau_drv.h index a4a37648..b3122d8a 100644 --- a/shared-core/nouveau_drv.h +++ b/shared-core/nouveau_drv.h @@ -275,21 +275,30 @@ extern int nv40_fifo_save_context(drm_device_t *, int channel); /* nv04_graph.c */ extern void nouveau_nv04_context_switch(drm_device_t *dev); -extern int nv04_graph_init(drm_device_t *dev); +extern int nv04_graph_init(drm_device_t *dev); extern void nv04_graph_takedown(drm_device_t *dev); -extern int nv04_graph_context_create(drm_device_t *dev, int channel); +extern int nv04_graph_create_context(drm_device_t *dev, int channel); +extern void nv04_graph_destroy_context(drm_device_t *dev, int channel); +extern int nv04_graph_load_context(drm_device_t *dev, int channel); +extern int nv04_graph_save_context(drm_device_t *dev, int channel); /* nv10_graph.c */ extern void nouveau_nv10_context_switch(drm_device_t *dev); -extern int nv10_graph_init(drm_device_t *dev); +extern int nv10_graph_init(drm_device_t *dev); extern void nv10_graph_takedown(drm_device_t *dev); -extern int nv10_graph_context_create(drm_device_t *dev, int channel); +extern int nv10_graph_create_context(drm_device_t *dev, int channel); +extern void nv10_graph_destroy_context(drm_device_t *dev, int channel); +extern int nv10_graph_load_context(drm_device_t *dev, int channel); +extern int nv10_graph_save_context(drm_device_t *dev, int channel); /* nv20_graph.c */ extern void nouveau_nv20_context_switch(drm_device_t *dev); -extern int nv20_graph_init(drm_device_t *dev); +extern int nv20_graph_init(drm_device_t *dev); extern void nv20_graph_takedown(drm_device_t *dev); -extern int nv20_graph_context_create(drm_device_t *dev, int channel); +extern int nv20_graph_create_context(drm_device_t *dev, int channel); +extern void nv20_graph_destroy_context(drm_device_t *dev, int channel); +extern int nv20_graph_load_context(drm_device_t *dev, int channel); +extern int nv20_graph_save_context(drm_device_t *dev, int channel); /* nv30_graph.c */ extern int nv30_graph_init(drm_device_t *dev); diff --git a/shared-core/nouveau_fifo.c b/shared-core/nouveau_fifo.c index 1ef5a425..b47d4e0c 100644 --- a/shared-core/nouveau_fifo.c +++ b/shared-core/nouveau_fifo.c @@ -289,34 +289,10 @@ static int nouveau_fifo_alloc(drm_device_t* dev, int *chan_ret, DRMFILE filp) NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000); /* Create a graphics context for new channel */ - switch(dev_priv->card_type) - { - case NV_04: - case NV_05: - nv04_graph_context_create(dev, channel); - break; - case NV_10: - case NV_17: - nv10_graph_context_create(dev, channel); - break; - case NV_20: - ret = nv20_graph_context_create(dev, channel); - if (ret) { - nouveau_fifo_free(dev, channel); - return ret; - } - break; - default: - if (!engine->graph.create_context) { - DRM_ERROR("graph.create_context == NULL\n"); - return DRM_ERR(EINVAL); - } - ret = engine->graph.create_context(dev, channel); - if (ret) { - nouveau_fifo_free(dev, channel); - return ret; - } - break; + ret = engine->graph.create_context(dev, channel); + if (ret) { + nouveau_fifo_free(dev, channel); + return ret; } /* Construct inital RAMFC for new channel */ diff --git 
a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c index a997b075..b3562e2f 100644 --- a/shared-core/nouveau_state.c +++ b/shared-core/nouveau_state.c @@ -88,6 +88,10 @@ static int nouveau_init_engine_ptrs(drm_device_t *dev) engine->fb.takedown = nv04_fb_takedown; engine->graph.init = nv04_graph_init; engine->graph.takedown = nv04_graph_takedown; + engine->graph.create_context = nv04_graph_create_context; + engine->graph.destroy_context = nv04_graph_destroy_context; + engine->graph.load_context = nv04_graph_load_context; + engine->graph.save_context = nv04_graph_save_context; engine->fifo.init = nouveau_fifo_init; engine->fifo.takedown = nouveau_stub_takedown; engine->fifo.create_context = nv04_fifo_create_context; @@ -104,6 +108,10 @@ static int nouveau_init_engine_ptrs(drm_device_t *dev) engine->fb.takedown = nv10_fb_takedown; engine->graph.init = nv10_graph_init; engine->graph.takedown = nv10_graph_takedown; + engine->graph.create_context = nv10_graph_create_context; + engine->graph.destroy_context = nv10_graph_destroy_context; + engine->graph.load_context = nv10_graph_load_context; + engine->graph.save_context = nv10_graph_save_context; engine->fifo.init = nouveau_fifo_init; engine->fifo.takedown = nouveau_stub_takedown; if (dev_priv->chipset < 0x17) { @@ -127,6 +135,10 @@ static int nouveau_init_engine_ptrs(drm_device_t *dev) engine->fb.takedown = nv10_fb_takedown; engine->graph.init = nv20_graph_init; engine->graph.takedown = nv20_graph_takedown; + engine->graph.create_context = nv20_graph_create_context; + engine->graph.destroy_context = nv20_graph_destroy_context; + engine->graph.load_context = nv20_graph_load_context; + engine->graph.save_context = nv20_graph_save_context; engine->fifo.init = nouveau_fifo_init; engine->fifo.takedown = nouveau_stub_takedown; engine->fifo.create_context = nv10_fifo_create_context; diff --git a/shared-core/nv04_graph.c b/shared-core/nv04_graph.c index 0cd4d3b8..1aaae33c 100644 --- a/shared-core/nv04_graph.c +++ b/shared-core/nv04_graph.c @@ -336,7 +336,7 @@ void nouveau_nv04_context_switch(drm_device_t *dev) NV_WRITE(NV04_PGRAPH_FIFO,0x1); } -int nv04_graph_context_create(drm_device_t *dev, int channel) { +int nv04_graph_create_context(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = dev->dev_private; DRM_DEBUG("nv04_graph_context_create %d\n", channel); @@ -351,6 +351,21 @@ int nv04_graph_context_create(drm_device_t *dev, int channel) { return 0; } +void nv04_graph_destroy_context(drm_device_t *dev, int channel) +{ +} + +int nv04_graph_load_context(drm_device_t *dev, int channel) +{ + DRM_ERROR("stub!\n"); + return 0; +} + +int nv04_graph_save_context(drm_device_t *dev, int channel) +{ + DRM_ERROR("stub!\n"); + return 0; +} int nv04_graph_init(drm_device_t *dev) { drm_nouveau_private_t *dev_priv = dev->dev_private; diff --git a/shared-core/nv10_graph.c b/shared-core/nv10_graph.c index fb189709..d1fe0a54 100644 --- a/shared-core/nv10_graph.c +++ b/shared-core/nv10_graph.c @@ -611,7 +611,7 @@ void nouveau_nv10_context_switch(drm_device_t *dev) if (offset > 0) \ fifo->pgraph_ctx[offset] = val; \ } while (0) -int nv10_graph_context_create(drm_device_t *dev, int channel) { +int nv10_graph_create_context(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = dev->dev_private; struct nouveau_fifo *fifo = &dev_priv->fifos[channel]; uint32_t tmp, vramsz; @@ -663,6 +663,21 @@ int nv10_graph_context_create(drm_device_t *dev, int channel) { return 0; } +void nv10_graph_destroy_context(drm_device_t *dev, int channel) +{ 
+} + +int nv10_graph_load_context(drm_device_t *dev, int channel) +{ + DRM_ERROR("stub!\n"); + return 0; +} + +int nv10_graph_save_context(drm_device_t *dev, int channel) +{ + DRM_ERROR("stub!\n"); + return 0; +} int nv10_graph_init(drm_device_t *dev) { drm_nouveau_private_t *dev_priv = dev->dev_private; diff --git a/shared-core/nv20_graph.c b/shared-core/nv20_graph.c index 7190fc84..1b8a6727 100644 --- a/shared-core/nv20_graph.c +++ b/shared-core/nv20_graph.c @@ -29,7 +29,7 @@ #define NV20_GRCTX_SIZE (3529*4) -int nv20_graph_context_create(drm_device_t *dev, int channel) { +int nv20_graph_create_context(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = (drm_nouveau_private_t *)dev->dev_private; struct nouveau_fifo *chan = &dev_priv->fifos[channel]; @@ -47,10 +47,21 @@ int nv20_graph_context_create(drm_device_t *dev, int channel) { INSTANCE_WR(chan->ramin_grctx, 10, channel << 24); /* CTX_USER */ INSTANCE_WR(dev_priv->ctx_table, channel, nouveau_chip_instance_get(dev, chan->ramin_grctx)); - return 0; } +void nv20_graph_destroy_context(drm_device_t *dev, int channel) { + drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + + if (chan->ramin_grctx) { + nouveau_instmem_free(dev, chan->ramin_grctx); + chan->ramin_grctx = NULL; + } + + INSTANCE_WR(dev_priv->ctx_table, channel, 0); +} + static void nv20_graph_rdi(drm_device_t *dev) { drm_nouveau_private_t *dev_priv = (drm_nouveau_private_t *)dev->dev_private; @@ -65,40 +76,42 @@ static void nv20_graph_rdi(drm_device_t *dev) { /* Save current context (from PGRAPH) into the channel's context */ -static void nv20_graph_context_save_current(drm_device_t *dev, int channel) { +int nv20_graph_save_context(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = (drm_nouveau_private_t *)dev->dev_private; uint32_t instance; instance = INSTANCE_RD(dev_priv->ctx_table, channel); if (!instance) { - return; + return DRM_ERR(EINVAL); } if (instance != nouveau_chip_instance_get(dev, dev_priv->fifos[channel].ramin_grctx)) - DRM_ERROR("nv20_graph_context_save_current : bad instance\n"); + DRM_ERROR("nv20_graph_save_context : bad instance\n"); NV_WRITE(NV10_PGRAPH_CHANNEL_CTX_SIZE, instance); NV_WRITE(NV10_PGRAPH_CHANNEL_CTX_POINTER, 2 /* save ctx */); + return 0; } /* Restore the context for a specific channel into PGRAPH */ -static void nv20_graph_context_restore(drm_device_t *dev, int channel) { +int nv20_graph_load_context(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = (drm_nouveau_private_t *)dev->dev_private; uint32_t instance; instance = INSTANCE_RD(dev_priv->ctx_table, channel); if (!instance) { - return; + return DRM_ERR(EINVAL); } if (instance != nouveau_chip_instance_get(dev, dev_priv->fifos[channel].ramin_grctx)) - DRM_ERROR("nv20_graph_context_restore_current : bad instance\n"); + DRM_ERROR("nv20_graph_load_context_current : bad instance\n"); NV_WRITE(NV10_PGRAPH_CTX_USER, channel << 24); NV_WRITE(NV10_PGRAPH_CHANNEL_CTX_SIZE, instance); NV_WRITE(NV10_PGRAPH_CHANNEL_CTX_POINTER, 1 /* restore ctx */); + return 0; } void nouveau_nv20_context_switch(drm_device_t *dev) @@ -113,13 +126,13 @@ void nouveau_nv20_context_switch(drm_device_t *dev) NV_WRITE(NV04_PGRAPH_FIFO,0x0); - nv20_graph_context_save_current(dev, channel_old); + nv20_graph_save_context(dev, channel_old); nouveau_wait_for_idle(dev); NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10000000); - nv20_graph_context_restore(dev, channel); + nv20_graph_load_context(dev, channel); 
nouveau_wait_for_idle(dev); From 3dfc13e2da10e86051c7106feb5683542907acdc Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Sun, 24 Jun 2007 19:00:44 +1000 Subject: [PATCH 032/437] nouveau: kill some dead code --- shared-core/nouveau_fifo.c | 39 ++++++++++---------------------------- 1 file changed, 10 insertions(+), 29 deletions(-) diff --git a/shared-core/nouveau_fifo.c b/shared-core/nouveau_fifo.c index b47d4e0c..3c07b0da 100644 --- a/shared-core/nouveau_fifo.c +++ b/shared-core/nouveau_fifo.c @@ -313,27 +313,16 @@ static int nouveau_fifo_alloc(drm_device_t* dev, int *chan_ret, DRMFILE filp) * other case, the GPU will handle this when it switches contexts. */ if (dev_priv->fifo_alloc_count == 0) { - engine->fifo.load_context(dev, channel); + ret = engine->fifo.load_context(dev, channel); + if (ret) { + nouveau_fifo_free(dev, channel); + return ret; + } - if (engine->graph.load_context) { - ret = engine->graph.load_context(dev, channel); - if (ret) { - nouveau_fifo_free(dev, channel); - return ret; - } - } else - if (dev_priv->card_type >= NV_30) { - uint32_t inst; - - inst = nouveau_chip_instance_get(dev, - chan->ramin_grctx); - - /* see comments in nv40_graph_context_restore() */ - NV_WRITE(NV10_PGRAPH_CHANNEL_CTX_SIZE, inst); - if (dev_priv->card_type >= NV_40) { - NV_WRITE(0x40032C, inst | 0x01000000); - NV_WRITE(NV40_PFIFO_GRCTX_INSTANCE, inst); - } + ret = engine->graph.load_context(dev, channel); + if (ret) { + nouveau_fifo_free(dev, channel); + return ret; } } @@ -370,15 +359,7 @@ void nouveau_fifo_free(drm_device_t* dev, int channel) engine->fifo.destroy_context(dev, channel); /* Cleanup PGRAPH state */ - if (engine->graph.destroy_context) - engine->graph.destroy_context(dev, channel); - else if (dev_priv->card_type >= NV_30) { - } - else if (dev_priv->card_type >= NV_20) { - /* clear ctx table */ - INSTANCE_WR(dev_priv->ctx_table, channel, 0); - nouveau_instmem_free(dev, chan->ramin_grctx); - } + engine->graph.destroy_context(dev, channel); /* reenable the fifo caches */ NV_WRITE(NV03_PFIFO_CACHES, 0x00000001); From 9f617522d9cb8cd33e588d12a13f427dbe5171c2 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 25 Jun 2007 01:57:57 +1000 Subject: [PATCH 033/437] nouveau: NV49/NV4B PGRAPH setup from jb17bsome and stephan_2303 --- shared-core/nouveau_fifo.c | 2 + shared-core/nv40_graph.c | 497 ++++++++++++++++++++++++++++++++++++- 2 files changed, 494 insertions(+), 5 deletions(-) diff --git a/shared-core/nouveau_fifo.c b/shared-core/nouveau_fifo.c index 3c07b0da..1a06f913 100644 --- a/shared-core/nouveau_fifo.c +++ b/shared-core/nouveau_fifo.c @@ -83,6 +83,8 @@ static int nouveau_fifo_instmem_configure(drm_device_t *dev) case NV_50: case NV_40: NV_WRITE(NV40_PFIFO_RAMFC, 0x30002); + if((dev_priv->chipset == 0x49) || (dev_priv->chipset == 0x4b)) + NV_WRITE(0x2230,0x00000001); break; case NV_44: NV_WRITE(NV40_PFIFO_RAMFC, ((nouveau_mem_fb_amount(dev)-512*1024+dev_priv->ramfc_offset)>>16) | diff --git a/shared-core/nv40_graph.c b/shared-core/nv40_graph.c index 245da54d..acd0cb0f 100644 --- a/shared-core/nv40_graph.c +++ b/shared-core/nv40_graph.c @@ -36,7 +36,9 @@ #define NV40_GRCTX_SIZE (175*1024) #define NV43_GRCTX_SIZE (70*1024) #define NV46_GRCTX_SIZE (70*1024) /* probably ~64KiB */ +#define NV49_GRCTX_SIZE (164640) #define NV4A_GRCTX_SIZE (64*1024) +#define NV4B_GRCTX_SIZE (164640) #define NV4C_GRCTX_SIZE (25*1024) #define NV4E_GRCTX_SIZE (25*1024) @@ -44,7 +46,8 @@ * contexts are taken from dumps just after the 3D object is * created. 
*/ -static void nv40_graph_context_init(drm_device_t *dev, struct mem_block *ctx) +static void +nv40_graph_context_init(drm_device_t *dev, struct mem_block *ctx) { drm_nouveau_private_t *dev_priv = dev->dev_private; int i; @@ -300,7 +303,8 @@ nv43_graph_context_init(drm_device_t *dev, struct mem_block *ctx) INSTANCE_WR(ctx, i/4, 0x3f800000); }; -static void nv46_graph_context_init(drm_device_t *dev, struct mem_block *ctx) +static void +nv46_graph_context_init(drm_device_t *dev, struct mem_block *ctx) { drm_nouveau_private_t *dev_priv = dev->dev_private; int i; @@ -450,7 +454,231 @@ static void nv46_graph_context_init(drm_device_t *dev, struct mem_block *ctx) INSTANCE_WR(ctx, i/4, 0x3f800000); } -static void nv4a_graph_context_init(drm_device_t *dev, struct mem_block *ctx) +static void +nv49_graph_context_init(drm_device_t *dev, struct mem_block *ctx) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + int i; + + INSTANCE_WR(ctx, 0x00000/4, nouveau_chip_instance_get(dev, ctx)); + INSTANCE_WR(ctx, 0x00004/4, 0x0000c040); + INSTANCE_WR(ctx, 0x00008/4, 0x0000c040); + INSTANCE_WR(ctx, 0x0000c/4, 0x0000c040); + INSTANCE_WR(ctx, 0x00010/4, 0x0000c040); + INSTANCE_WR(ctx, 0x00014/4, 0x0000c040); + INSTANCE_WR(ctx, 0x00018/4, 0x0000c040); + INSTANCE_WR(ctx, 0x0001c/4, 0x0000c040); + INSTANCE_WR(ctx, 0x00020/4, 0x0000c040); + INSTANCE_WR(ctx, 0x000c4/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x000c8/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x000d0/4, 0x00000001); + INSTANCE_WR(ctx, 0x001bc/4, 0x20010001); + INSTANCE_WR(ctx, 0x001c0/4, 0x0f73ef00); + INSTANCE_WR(ctx, 0x001c8/4, 0x02008821); + INSTANCE_WR(ctx, 0x00218/4, 0x00000040); + INSTANCE_WR(ctx, 0x0021c/4, 0x00000040); + INSTANCE_WR(ctx, 0x00220/4, 0x00000040); + INSTANCE_WR(ctx, 0x00228/4, 0x00000040); + INSTANCE_WR(ctx, 0x00234/4, 0x80000000); + INSTANCE_WR(ctx, 0x00238/4, 0x80000000); + INSTANCE_WR(ctx, 0x0023c/4, 0x80000000); + INSTANCE_WR(ctx, 0x00240/4, 0x80000000); + INSTANCE_WR(ctx, 0x00244/4, 0x80000000); + INSTANCE_WR(ctx, 0x00248/4, 0x80000000); + INSTANCE_WR(ctx, 0x0024c/4, 0x80000000); + INSTANCE_WR(ctx, 0x00250/4, 0x80000000); + INSTANCE_WR(ctx, 0x00270/4, 0x0b0b0b0c); + INSTANCE_WR(ctx, 0x003e0/4, 0x00040000); + INSTANCE_WR(ctx, 0x003f0/4, 0x55555555); + INSTANCE_WR(ctx, 0x003f4/4, 0x55555555); + INSTANCE_WR(ctx, 0x003f8/4, 0x55555555); + INSTANCE_WR(ctx, 0x003fc/4, 0x55555555); + INSTANCE_WR(ctx, 0x00428/4, 0x00000008); + INSTANCE_WR(ctx, 0x0043c/4, 0x00001010); + INSTANCE_WR(ctx, 0x00460/4, 0x00000111); + INSTANCE_WR(ctx, 0x00464/4, 0x00000111); + INSTANCE_WR(ctx, 0x00468/4, 0x00000111); + INSTANCE_WR(ctx, 0x0046c/4, 0x00000111); + INSTANCE_WR(ctx, 0x00470/4, 0x00000111); + INSTANCE_WR(ctx, 0x00474/4, 0x00000111); + INSTANCE_WR(ctx, 0x00478/4, 0x00000111); + INSTANCE_WR(ctx, 0x0047c/4, 0x00000111); + INSTANCE_WR(ctx, 0x00480/4, 0x00000111); + INSTANCE_WR(ctx, 0x00484/4, 0x00000111); + INSTANCE_WR(ctx, 0x00488/4, 0x00000111); + INSTANCE_WR(ctx, 0x0048c/4, 0x00000111); + INSTANCE_WR(ctx, 0x00490/4, 0x00000111); + INSTANCE_WR(ctx, 0x00494/4, 0x00000111); + INSTANCE_WR(ctx, 0x00498/4, 0x00000111); + INSTANCE_WR(ctx, 0x0049c/4, 0x00000111); + INSTANCE_WR(ctx, 0x004f4/4, 0x00000111); + INSTANCE_WR(ctx, 0x004f8/4, 0x00080060); + INSTANCE_WR(ctx, 0x00514/4, 0x00000080); + INSTANCE_WR(ctx, 0x00518/4, 0xffff0000); + INSTANCE_WR(ctx, 0x0051c/4, 0x00000001); + INSTANCE_WR(ctx, 0x00530/4, 0x46400000); + INSTANCE_WR(ctx, 0x00540/4, 0xffff0000); + INSTANCE_WR(ctx, 0x00544/4, 0x88888888); + INSTANCE_WR(ctx, 0x00548/4, 0x88888888); + INSTANCE_WR(ctx, 
0x0054c/4, 0x88888888); + INSTANCE_WR(ctx, 0x00550/4, 0x88888888); + INSTANCE_WR(ctx, 0x00554/4, 0x88888888); + INSTANCE_WR(ctx, 0x00558/4, 0x88888888); + INSTANCE_WR(ctx, 0x0055c/4, 0x88888888); + INSTANCE_WR(ctx, 0x00560/4, 0x88888888); + INSTANCE_WR(ctx, 0x00564/4, 0x88888888); + INSTANCE_WR(ctx, 0x00568/4, 0x88888888); + INSTANCE_WR(ctx, 0x0056c/4, 0x88888888); + INSTANCE_WR(ctx, 0x00570/4, 0x88888888); + INSTANCE_WR(ctx, 0x00574/4, 0x88888888); + INSTANCE_WR(ctx, 0x00578/4, 0x88888888); + INSTANCE_WR(ctx, 0x0057c/4, 0x88888888); + INSTANCE_WR(ctx, 0x00580/4, 0x88888888); + INSTANCE_WR(ctx, 0x00594/4, 0x0fff0000); + INSTANCE_WR(ctx, 0x00598/4, 0x0fff0000); + INSTANCE_WR(ctx, 0x005a0/4, 0x00011100); + INSTANCE_WR(ctx, 0x005bc/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005c0/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005c4/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005c8/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005cc/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005d0/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005d4/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005d8/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005dc/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005e0/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005e4/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005e8/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005ec/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005f0/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005f4/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005f8/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x00604/4, 0x4b7fffff); + INSTANCE_WR(ctx, 0x0062c/4, 0x30201000); + INSTANCE_WR(ctx, 0x00630/4, 0x70605040); + INSTANCE_WR(ctx, 0x00634/4, 0xb8a89888); + INSTANCE_WR(ctx, 0x00638/4, 0xf8e8d8c8); + INSTANCE_WR(ctx, 0x0064c/4, 0x40100000); + INSTANCE_WR(ctx, 0x00668/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x0069c/4, 0x435185d6); + INSTANCE_WR(ctx, 0x006a0/4, 0x2155b699); + INSTANCE_WR(ctx, 0x006a4/4, 0xfedcba98); + INSTANCE_WR(ctx, 0x006a8/4, 0x00000098); + INSTANCE_WR(ctx, 0x006b8/4, 0xffffffff); + INSTANCE_WR(ctx, 0x006bc/4, 0x00ff7000); + INSTANCE_WR(ctx, 0x006c0/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x006d0/4, 0x00ff0000); + INSTANCE_WR(ctx, 0x0070c/4, 0x00ffff00); + for (i=0x00750; i<=0x0078c; i+=4) + INSTANCE_WR(ctx, i/4, 0x00018488); + for (i=0x00790; i<=0x007cc; i+=4) + INSTANCE_WR(ctx, i/4, 0x00028202); + for (i=0x00810; i<=0x0084c; i+=4) + INSTANCE_WR(ctx, i/4, 0x0000aae4); + for (i=0x00850; i<=0x0088c; i+=4) + INSTANCE_WR(ctx, i/4, 0x01012000); + for (i=0x00890; i<=0x008cc; i+=4) + INSTANCE_WR(ctx, i/4, 0x00080008); + for (i=0x00910; i<=0x0094c; i+=4) + INSTANCE_WR(ctx, i/4, 0x00100008); + for (i=0x009a0; i<=0x009ac; i+=4) + INSTANCE_WR(ctx, i/4, 0x0001bc80); + for (i=0x009b0; i<=0x009bc; i+=4) + INSTANCE_WR(ctx, i/4, 0x00000202); + for (i=0x009d0; i<=0x009dc; i+=4) + INSTANCE_WR(ctx, i/4, 0x00000008); + for (i=0x009f0; i<=0x009fc; i+=4) + INSTANCE_WR(ctx, i/4, 0x00080008); + INSTANCE_WR(ctx, 0x00a10/4, 0x00000002); + INSTANCE_WR(ctx, 0x00a44/4, 0x00000421); + INSTANCE_WR(ctx, 0x00a48/4, 0x030c30c3); + INSTANCE_WR(ctx, 0x00a54/4, 0x3e020200); + INSTANCE_WR(ctx, 0x00a58/4, 0x00ffffff); + INSTANCE_WR(ctx, 0x00a5c/4, 0x20103f00); + INSTANCE_WR(ctx, 0x00a68/4, 0x00040000); + INSTANCE_WR(ctx, 0x00aa0/4, 0x00008100); + INSTANCE_WR(ctx, 0x00b2c/4, 0x00000001); + INSTANCE_WR(ctx, 0x00b70/4, 0x00001001); + INSTANCE_WR(ctx, 0x00b7c/4, 0x00000003); + INSTANCE_WR(ctx, 0x00b80/4, 0x00888001); + INSTANCE_WR(ctx, 0x00bb0/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bb4/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bb8/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bbc/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bc0/4, 0xffffffff); + 
INSTANCE_WR(ctx, 0x00bc4/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bc8/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bcc/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bd0/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bd4/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bd8/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bdc/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00be0/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00be4/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00be8/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bec/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bf0/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bf4/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bf8/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bfc/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00c00/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00c04/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00c08/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00c0c/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00c10/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00c14/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00c18/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00c1c/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00c20/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00c24/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00c28/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00c2c/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00c54/4, 0x00000005); + INSTANCE_WR(ctx, 0x00c60/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x00c7c/4, 0x00005555); + INSTANCE_WR(ctx, 0x00c80/4, 0x00005555); + INSTANCE_WR(ctx, 0x00c84/4, 0x00005555); + INSTANCE_WR(ctx, 0x00c88/4, 0x00005555); + INSTANCE_WR(ctx, 0x00c8c/4, 0x00005555); + INSTANCE_WR(ctx, 0x00c90/4, 0x00005555); + INSTANCE_WR(ctx, 0x00c94/4, 0x00005555); + INSTANCE_WR(ctx, 0x00c98/4, 0x00005555); + INSTANCE_WR(ctx, 0x00c9c/4, 0x00000001); + INSTANCE_WR(ctx, 0x00cd4/4, 0x00000001); + INSTANCE_WR(ctx, 0x00cd8/4, 0x08e00001); + INSTANCE_WR(ctx, 0x00cdc/4, 0x000e3000); + for(i=0x030a0; i<=0x03118; i+=8) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for(i=0x098a0; i<=0x0ba90; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for(i=0x0baa0; i<=0x0be90; i+=16) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for(i=0x0e2e0; i<=0x0fff0; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for(i=0x10008; i<=0x104d0; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for(i=0x104e0; i<=0x108d0; i+=16) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for(i=0x12d20; i<=0x14f10; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for(i=0x14f20; i<=0x15310; i+=16) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for(i=0x17760; i<=0x19950; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for(i=0x19960; i<=0x19d50; i+=16) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for(i=0x1c1a0; i<=0x1e390; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for(i=0x1e3a0; i<=0x1e790; i+=16) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for(i=0x20be0; i<=0x22dd0; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for(i=0x22de0; i<=0x231d0; i+=16) + INSTANCE_WR(ctx, i/4, 0x3f800000); +} + +static void +nv4a_graph_context_init(drm_device_t *dev, struct mem_block *ctx) { drm_nouveau_private_t *dev_priv = dev->dev_private; int i; @@ -566,8 +794,223 @@ static void nv4a_graph_context_init(drm_device_t *dev, struct mem_block *ctx) INSTANCE_WR(ctx, i/4, 0x3f800000); } +static void +nv4b_graph_context_init(drm_device_t *dev, struct mem_block *ctx) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + int i; -static void nv4c_graph_context_init(drm_device_t *dev, struct mem_block *ctx) + INSTANCE_WR(ctx, 0x00000/4, nouveau_chip_instance_get(dev, ctx)); + INSTANCE_WR(ctx, 0x00004/4, 0x0000c040); + INSTANCE_WR(ctx, 0x00008/4, 0x0000c040); + INSTANCE_WR(ctx, 0x0000c/4, 0x0000c040); + INSTANCE_WR(ctx, 0x00010/4, 0x0000c040); + INSTANCE_WR(ctx, 
0x00014/4, 0x0000c040); + INSTANCE_WR(ctx, 0x00018/4, 0x0000c040); + INSTANCE_WR(ctx, 0x0001c/4, 0x0000c040); + INSTANCE_WR(ctx, 0x00020/4, 0x0000c040); + INSTANCE_WR(ctx, 0x000c4/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x000c8/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x000d0/4, 0x00000001); + INSTANCE_WR(ctx, 0x001bc/4, 0x20010001); + INSTANCE_WR(ctx, 0x001c0/4, 0x0f73ef00); + INSTANCE_WR(ctx, 0x001c8/4, 0x02008821); + INSTANCE_WR(ctx, 0x00218/4, 0x00000040); + INSTANCE_WR(ctx, 0x0021c/4, 0x00000040); + INSTANCE_WR(ctx, 0x00220/4, 0x00000040); + INSTANCE_WR(ctx, 0x00228/4, 0x00000040); + INSTANCE_WR(ctx, 0x00234/4, 0x80000000); + INSTANCE_WR(ctx, 0x00238/4, 0x80000000); + INSTANCE_WR(ctx, 0x0023c/4, 0x80000000); + INSTANCE_WR(ctx, 0x00240/4, 0x80000000); + INSTANCE_WR(ctx, 0x00244/4, 0x80000000); + INSTANCE_WR(ctx, 0x00248/4, 0x80000000); + INSTANCE_WR(ctx, 0x0024c/4, 0x80000000); + INSTANCE_WR(ctx, 0x00250/4, 0x80000000); + INSTANCE_WR(ctx, 0x00270/4, 0x0b0b0b0c); + INSTANCE_WR(ctx, 0x003e0/4, 0x00040000); + INSTANCE_WR(ctx, 0x003f0/4, 0x55555555); + INSTANCE_WR(ctx, 0x003f4/4, 0x55555555); + INSTANCE_WR(ctx, 0x003f8/4, 0x55555555); + INSTANCE_WR(ctx, 0x003fc/4, 0x55555555); + INSTANCE_WR(ctx, 0x00428/4, 0x00000008); + INSTANCE_WR(ctx, 0x0043c/4, 0x00001010); + INSTANCE_WR(ctx, 0x00460/4, 0x00000111); + INSTANCE_WR(ctx, 0x00464/4, 0x00000111); + INSTANCE_WR(ctx, 0x00468/4, 0x00000111); + INSTANCE_WR(ctx, 0x0046c/4, 0x00000111); + INSTANCE_WR(ctx, 0x00470/4, 0x00000111); + INSTANCE_WR(ctx, 0x00474/4, 0x00000111); + INSTANCE_WR(ctx, 0x00478/4, 0x00000111); + INSTANCE_WR(ctx, 0x0047c/4, 0x00000111); + INSTANCE_WR(ctx, 0x00480/4, 0x00000111); + INSTANCE_WR(ctx, 0x00484/4, 0x00000111); + INSTANCE_WR(ctx, 0x00488/4, 0x00000111); + INSTANCE_WR(ctx, 0x0048c/4, 0x00000111); + INSTANCE_WR(ctx, 0x00490/4, 0x00000111); + INSTANCE_WR(ctx, 0x00494/4, 0x00000111); + INSTANCE_WR(ctx, 0x00498/4, 0x00000111); + INSTANCE_WR(ctx, 0x0049c/4, 0x00000111); + INSTANCE_WR(ctx, 0x004f4/4, 0x00000111); + INSTANCE_WR(ctx, 0x004f8/4, 0x00080060); + INSTANCE_WR(ctx, 0x00514/4, 0x00000080); + INSTANCE_WR(ctx, 0x00518/4, 0xffff0000); + INSTANCE_WR(ctx, 0x0051c/4, 0x00000001); + INSTANCE_WR(ctx, 0x00530/4, 0x46400000); + INSTANCE_WR(ctx, 0x00540/4, 0xffff0000); + INSTANCE_WR(ctx, 0x00544/4, 0x88888888); + INSTANCE_WR(ctx, 0x00548/4, 0x88888888); + INSTANCE_WR(ctx, 0x0054c/4, 0x88888888); + INSTANCE_WR(ctx, 0x00550/4, 0x88888888); + INSTANCE_WR(ctx, 0x00554/4, 0x88888888); + INSTANCE_WR(ctx, 0x00558/4, 0x88888888); + INSTANCE_WR(ctx, 0x0055c/4, 0x88888888); + INSTANCE_WR(ctx, 0x00560/4, 0x88888888); + INSTANCE_WR(ctx, 0x00564/4, 0x88888888); + INSTANCE_WR(ctx, 0x00568/4, 0x88888888); + INSTANCE_WR(ctx, 0x0056c/4, 0x88888888); + INSTANCE_WR(ctx, 0x00570/4, 0x88888888); + INSTANCE_WR(ctx, 0x00574/4, 0x88888888); + INSTANCE_WR(ctx, 0x00578/4, 0x88888888); + INSTANCE_WR(ctx, 0x0057c/4, 0x88888888); + INSTANCE_WR(ctx, 0x00580/4, 0x88888888); + INSTANCE_WR(ctx, 0x00594/4, 0x0fff0000); + INSTANCE_WR(ctx, 0x00598/4, 0x0fff0000); + INSTANCE_WR(ctx, 0x005a0/4, 0x00011100); + INSTANCE_WR(ctx, 0x005bc/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005c0/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005c4/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005c8/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005cc/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005d0/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005d4/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005d8/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005dc/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005e0/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005e4/4, 0x07ff0000); + 
INSTANCE_WR(ctx, 0x005e8/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005ec/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005f0/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005f4/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005f8/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x00604/4, 0x4b7fffff); + INSTANCE_WR(ctx, 0x0062c/4, 0x30201000); + INSTANCE_WR(ctx, 0x00630/4, 0x70605040); + INSTANCE_WR(ctx, 0x00634/4, 0xb8a89888); + INSTANCE_WR(ctx, 0x00638/4, 0xf8e8d8c8); + INSTANCE_WR(ctx, 0x0064c/4, 0x40100000); + INSTANCE_WR(ctx, 0x00668/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x0069c/4, 0x435185d6); + INSTANCE_WR(ctx, 0x006a0/4, 0x2155b699); + INSTANCE_WR(ctx, 0x006a4/4, 0xfedcba98); + INSTANCE_WR(ctx, 0x006a8/4, 0x00000098); + INSTANCE_WR(ctx, 0x006b8/4, 0xffffffff); + INSTANCE_WR(ctx, 0x006bc/4, 0x00ff7000); + INSTANCE_WR(ctx, 0x006c0/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x006d0/4, 0x00ff0000); + INSTANCE_WR(ctx, 0x0070c/4, 0x00ffff00); + for (i=0x00750; i<=0x0078c; i+=4) + INSTANCE_WR(ctx, i/4, 0x00018488); + for (i=0x00790; i<=0x007cc; i+=4) + INSTANCE_WR(ctx, i/4, 0x00028202); + for (i=0x00810; i<=0x0084c; i+=4) + INSTANCE_WR(ctx, i/4, 0x0000aae4); + for (i=0x00850; i<=0x0088c; i+=4) + INSTANCE_WR(ctx, i/4, 0x01012000); + for (i=0x00890; i<=0x008cc; i+=4) + INSTANCE_WR(ctx, i/4, 0x00080008); + for (i=0x00910; i<=0x0094c; i+=4) + INSTANCE_WR(ctx, i/4, 0x00100008); + for (i=0x009a0; i<=0x009ac; i+=4) + INSTANCE_WR(ctx, i/4, 0x0001bc80); + for (i=0x009b0; i<=0x009bc; i+=4) + INSTANCE_WR(ctx, i/4, 0x00000202); + for (i=0x009d0; i<=0x009dc; i+=4) + INSTANCE_WR(ctx, i/4, 0x00000008); + for (i=0x009f0; i<=0x009fc; i+=4) + INSTANCE_WR(ctx, i/4, 0x00080008); + INSTANCE_WR(ctx, 0x00a10/4, 0x00000002); + INSTANCE_WR(ctx, 0x00a44/4, 0x00000421); + INSTANCE_WR(ctx, 0x00a48/4, 0x030c30c3); + INSTANCE_WR(ctx, 0x00a54/4, 0x3e020200); + INSTANCE_WR(ctx, 0x00a58/4, 0x00ffffff); + INSTANCE_WR(ctx, 0x00a5c/4, 0x20103f00); + INSTANCE_WR(ctx, 0x00a68/4, 0x00040000); + INSTANCE_WR(ctx, 0x00aa0/4, 0x00008100); + INSTANCE_WR(ctx, 0x00b2c/4, 0x00000001); + INSTANCE_WR(ctx, 0x00b70/4, 0x00001001); + INSTANCE_WR(ctx, 0x00b7c/4, 0x00000003); + INSTANCE_WR(ctx, 0x00b80/4, 0x00888001); + INSTANCE_WR(ctx, 0x00bb0/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bb4/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bb8/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bbc/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bc0/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bc4/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bc8/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bcc/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bd0/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bd4/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bd8/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bdc/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00be0/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00be4/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00be8/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bec/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bf0/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bf4/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bf8/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bfc/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00c00/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00c04/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00c08/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00c0c/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00c10/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00c14/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00c18/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00c1c/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00c20/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00c24/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00c28/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00c2c/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00c54/4, 
0x00000005); + INSTANCE_WR(ctx, 0x00c60/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x00c7c/4, 0x00005555); + INSTANCE_WR(ctx, 0x00c80/4, 0x00005555); + INSTANCE_WR(ctx, 0x00c84/4, 0x00005555); + INSTANCE_WR(ctx, 0x00c88/4, 0x00005555); + INSTANCE_WR(ctx, 0x00c8c/4, 0x00005555); + INSTANCE_WR(ctx, 0x00c90/4, 0x00005555); + INSTANCE_WR(ctx, 0x00c94/4, 0x00005555); + INSTANCE_WR(ctx, 0x00c98/4, 0x00005555); + INSTANCE_WR(ctx, 0x00c9c/4, 0x00000001); + INSTANCE_WR(ctx, 0x00cd4/4, 0x00000001); + INSTANCE_WR(ctx, 0x00cd8/4, 0x08e00001); + INSTANCE_WR(ctx, 0x00cdc/4, 0x000e3000); + for(i=0x030a0; i<=0x03118; i+=8) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for(i=0x098a0; i<=0x0ba90; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for(i=0x0baa0; i<=0x0be90; i+=16) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for(i=0x0e2e0; i<=0x0fff0; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for(i=0x10008; i<=0x104d0; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for(i=0x104e0; i<=0x108d0; i+=16) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for(i=0x12d20; i<=0x14f10; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for(i=0x14f20; i<=0x15310; i+=16) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for(i=0x17760; i<=0x19950; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for(i=0x19960; i<=0x19d50; i+=16) + INSTANCE_WR(ctx, i/4, 0x3f800000); +} + +static void +nv4c_graph_context_init(drm_device_t *dev, struct mem_block *ctx) { drm_nouveau_private_t *dev_priv = dev->dev_private; int i; @@ -673,7 +1116,8 @@ static void nv4c_graph_context_init(drm_device_t *dev, struct mem_block *ctx) INSTANCE_WR(ctx, i/4, 0x3f800000); } -static void nv4e_graph_context_init(drm_device_t *dev, struct mem_block *ctx) +static void +nv4e_graph_context_init(drm_device_t *dev, struct mem_block *ctx) { drm_nouveau_private_t *dev_priv = dev->dev_private; int i; @@ -802,10 +1246,18 @@ nv40_graph_create_context(drm_device_t *dev, int channel) ctx_size = NV46_GRCTX_SIZE; ctx_init = nv46_graph_context_init; break; + case 0x49: + ctx_size = NV49_GRCTX_SIZE; + ctx_init = nv49_graph_context_init; + break; case 0x4a: ctx_size = NV4A_GRCTX_SIZE; ctx_init = nv4a_graph_context_init; break; + case 0x4b: + ctx_size = NV4B_GRCTX_SIZE; + ctx_init = nv4b_graph_context_init; + break; case 0x4c: ctx_size = NV4C_GRCTX_SIZE; ctx_init = nv4c_graph_context_init; @@ -1037,6 +1489,39 @@ static uint32_t nv46_ctx_voodoo[] = { 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0 }; +//this is used for nv49 and nv4b +static uint32_t nv49_4b_ctx_voodoo[] ={ + 0x00400564, 0x00400505, 0x00408165, 0x00408206, 0x00409e68, 0x00200020, + 0x0060000a, 0x00700080, 0x00104042, 0x00200020, 0x0060000a, 0x00700000, + 0x001040c5, 0x00400f26, 0x00401068, 0x0060000d, 0x0070008f, 0x0070000e, + 0x00408d68, 0x004015e6, 0x007000a0, 0x00700080, 0x0040180f, 0x00700000, + 0x00200029, 0x0060000a, 0x0011814d, 0x00110158, 0x00105401, 0x0020003a, + 0x00100051, 0x001040c5, 0x0010c1c4, 0x001041c9, 0x0010c1dc, 0x00150210, + 0x0012c225, 0x00108238, 0x0010823e, 0x001242c0, 0x00200040, 0x00100280, + 0x00128100, 0x00128120, 0x00128143, 0x0011415f, 0x0010815c, 0x0010c140, + 0x00104029, 0x00110400, 0x00104d12, 0x00500060, 0x004071e6, 0x00200118, + 0x0060000a, 0x00200020, 0x00100620, 0x00154650, 0x00104668, 0x0017466d, + 0x0011068b, 0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, + 0x001146c6, 0x00200022, 0x001006cc, 0x001246f0, 0x002000c0, 0x00100700, + 0x0010c3d7, 0x001043e1, 0x00500060, 0x00200290, 0x0060000a, 0x00104800, + 0x00108901, 0x00124920, 0x0020001f, 0x00100940, 0x00140965, 0x00144a00, + 0x00104a19, 0x0010ca1c, 
0x00110b00, 0x00200028, 0x00100b08, 0x00134c2e, + 0x0010cd00, 0x0010cd04, 0x00120d08, 0x00104d80, 0x00104e00, 0x0012d600, + 0x00105c00, 0x00104f06, 0x00105406, 0x00105709, 0x00200340, 0x0060000a, + 0x00300000, 0x00200680, 0x00406a0f, 0x00200684, 0x00800001, 0x00200b88, + 0x0060000a, 0x00209540, 0x0040708a, 0x00201350, 0x00800041, 0x00407c0f, + 0x00600006, 0x00407ce6, 0x00700080, 0x002000a2, 0x0060000a, 0x00104280, + 0x00200340, 0x0060000a, 0x00200004, 0x00800001, 0x0070008e, 0x00408d68, + 0x0040020f, 0x00600006, 0x00409e68, 0x00600007, 0x0070000f, 0x0070000e, + 0x00408d68, 0x0091a880, 0x00901ffe, 0x10940000, 0x00200020, 0x0060000b, + 0x00500069, 0x0060000c, 0x00401568, 0x00700000, 0x00200001, 0x0040910e, + 0x00200021, 0x0060000a, 0x00409b0d, 0x00104a40, 0x00104a50, 0x00104a60, + 0x00104a70, 0x00104a80, 0x00104a90, 0x00104aa0, 0x00104ab0, 0x00407e0e, + 0x0040130f, 0x00408568, 0x0040a006, 0x0040a105, 0x00600009, 0x00700005, + 0x00700006, 0x0060000e, ~0 +}; + + static uint32_t nv4a_ctx_voodoo[] = { 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409965, 0x00409e06, @@ -1130,7 +1615,9 @@ nv40_graph_init(drm_device_t *dev) case 0x40: ctx_voodoo = nv40_ctx_voodoo; break; case 0x43: ctx_voodoo = nv43_ctx_voodoo; break; case 0x46: ctx_voodoo = nv46_ctx_voodoo; break; + case 0x49: ctx_voodoo = nv49_4b_ctx_voodoo; break; case 0x4a: ctx_voodoo = nv4a_ctx_voodoo; break; + case 0x4b: ctx_voodoo = nv49_4b_ctx_voodoo; break; case 0x4e: ctx_voodoo = nv4e_ctx_voodoo; break; default: DRM_ERROR("Unknown ctx_voodoo for chipset 0x%02x\n", From 5c27f8a70e6e3684d8d58661a9cc918a3514fd14 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Tue, 26 Jun 2007 09:51:55 -0700 Subject: [PATCH 034/437] Add support SiS based XGI chips to SiS DRM. --- shared-core/drm_pciids.txt | 2 ++ shared-core/sis_drv.h | 8 ++++---- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/shared-core/drm_pciids.txt b/shared-core/drm_pciids.txt index ba02aa89..ad9994ec 100644 --- a/shared-core/drm_pciids.txt +++ b/shared-core/drm_pciids.txt @@ -211,6 +211,8 @@ 0x1039 0x6300 0 "SiS 630" 0x1039 0x6330 SIS_CHIP_315 "SiS 661" 0x1039 0x7300 0 "SiS 730" +0x18CA 0x0040 SIS_CHIP_315 "Volari V3XT/V5/V8" +0x18CA 0x0042 SIS_CHIP_315 "Volari Unknown" [tdfx] 0x121a 0x0003 0 "3dfx Voodoo Banshee" diff --git a/shared-core/sis_drv.h b/shared-core/sis_drv.h index 006d148c..ec572ad4 100644 --- a/shared-core/sis_drv.h +++ b/shared-core/sis_drv.h @@ -33,11 +33,11 @@ #define DRIVER_AUTHOR "SIS, Tungsten Graphics" #define DRIVER_NAME "sis" -#define DRIVER_DESC "SIS 300/630/540" -#define DRIVER_DATE "20060619" +#define DRIVER_DESC "SIS 300/630/540 and XGI V3XE/V5/V8" +#define DRIVER_DATE "20070626" #define DRIVER_MAJOR 1 -#define DRIVER_MINOR 2 -#define DRIVER_PATCHLEVEL 1 +#define DRIVER_MINOR 3 +#define DRIVER_PATCHLEVEL 0 enum sis_family { SIS_OTHER = 0, From 7af9d670371de868f0642148fe2d594bc9a7dea3 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Tue, 26 Jun 2007 13:05:29 -0700 Subject: [PATCH 035/437] Initial XP10 code drop from XGI. 
See attachment 10246 on https://bugs.freedesktop.org/show_bug.cgi?id=5921 --- linux-core/xgi_cmdlist.c | 348 ++++++++ linux-core/xgi_cmdlist.h | 79 ++ linux-core/xgi_drv.c | 1610 ++++++++++++++++++++++++++++++++++++++ linux-core/xgi_drv.h | 364 +++++++++ linux-core/xgi_fb.c | 528 +++++++++++++ linux-core/xgi_fb.h | 71 ++ linux-core/xgi_linux.h | 596 ++++++++++++++ linux-core/xgi_misc.c | 657 ++++++++++++++++ linux-core/xgi_misc.h | 49 ++ linux-core/xgi_pcie.c | 1060 +++++++++++++++++++++++++ linux-core/xgi_pcie.h | 73 ++ linux-core/xgi_regs.h | 410 ++++++++++ linux-core/xgi_types.h | 68 ++ 13 files changed, 5913 insertions(+) create mode 100644 linux-core/xgi_cmdlist.c create mode 100644 linux-core/xgi_cmdlist.h create mode 100644 linux-core/xgi_drv.c create mode 100644 linux-core/xgi_drv.h create mode 100644 linux-core/xgi_fb.c create mode 100644 linux-core/xgi_fb.h create mode 100644 linux-core/xgi_linux.h create mode 100644 linux-core/xgi_misc.c create mode 100644 linux-core/xgi_misc.h create mode 100644 linux-core/xgi_pcie.c create mode 100644 linux-core/xgi_pcie.h create mode 100644 linux-core/xgi_regs.h create mode 100644 linux-core/xgi_types.h diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c new file mode 100644 index 00000000..024b021c --- /dev/null +++ b/linux-core/xgi_cmdlist.c @@ -0,0 +1,348 @@ + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * * + * All Rights Reserved. * + * * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ ***************************************************************************/ + + +#include "xgi_types.h" +#include "xgi_linux.h" +#include "xgi_drv.h" +#include "xgi_regs.h" +#include "xgi_misc.h" +#include "xgi_cmdlist.h" + + + +U32 s_emptyBegin[AGPCMDLIST_BEGIN_SIZE] = +{ + 0x10000000, // 3D Type Begin, Invalid + 0x80000004, // Length = 4; + 0x00000000, + 0x00000000 +}; + +U32 s_flush2D[AGPCMDLIST_FLUSH_CMD_LEN] = +{ + FLUSH_2D, + FLUSH_2D, + FLUSH_2D, + FLUSH_2D +}; + +xgi_cmdring_info_t s_cmdring; + +static void addFlush2D(xgi_info_t *info); +static U32 getCurBatchBeginPort(xgi_cmd_info_t *pCmdInfo); +static void triggerHWCommandList(xgi_info_t *info, U32 triggerCounter); +static void xgi_cmdlist_reset(void); + +int xgi_cmdlist_initialize(xgi_info_t *info, U32 size) +{ + //xgi_mem_req_t mem_req; + xgi_mem_alloc_t mem_alloc; + + //mem_req.size = size; + + xgi_pcie_alloc(info, size, PCIE_2D, &mem_alloc); + + if ((mem_alloc.size == 0) && (mem_alloc.hw_addr == 0)) + { + return -1; + } + + s_cmdring._cmdRingSize = mem_alloc.size; + s_cmdring._cmdRingBuffer = mem_alloc.hw_addr; + s_cmdring._cmdRingBusAddr = mem_alloc.bus_addr; + s_cmdring._lastBatchStartAddr = 0; + s_cmdring._cmdRingOffset = 0; + + return 1; +} + +void xgi_submit_cmdlist(xgi_info_t *info, xgi_cmd_info_t *pCmdInfo) +{ + U32 beginPort; + /** XGI_INFO("Jong-xgi_submit_cmdlist-Begin \n"); **/ + + /* Jong 05/25/2006 */ + /* return; */ + + beginPort = getCurBatchBeginPort(pCmdInfo); + XGI_INFO("Jong-xgi_submit_cmdlist-After getCurBatchBeginPort() \n"); + + /* Jong 05/25/2006 */ + /* return; */ + + if (s_cmdring._lastBatchStartAddr == 0) + { + U32 portOffset; + + /* Jong 06/13/2006; remove marked for system hang test */ + /* xgi_waitfor_pci_idle(info); */ + + /* Jong 06132006; BASE_3D_ENG=0x2800 */ + /* beginPort: 2D: 0x30 */ + portOffset = BASE_3D_ENG + beginPort; + + // Enable PCI Trigger Mode + XGI_INFO("Jong-xgi_submit_cmdlist-Enable PCI Trigger Mode \n"); + + /* Jong 05/25/2006 */ + /* return; */ + + /* Jong 06/13/2006; M2REG_AUTO_LINK_SETTING_ADDRESS=0x10 */ + XGI_INFO("Jong-M2REG_AUTO_LINK_SETTING_ADDRESS=0x%lx \n", M2REG_AUTO_LINK_SETTING_ADDRESS); + XGI_INFO("Jong-M2REG_CLEAR_COUNTERS_MASK=0x%lx \n", M2REG_CLEAR_COUNTERS_MASK); + XGI_INFO("Jong-(M2REG_AUTO_LINK_SETTING_ADDRESS << 22)=0x%lx \n", (M2REG_AUTO_LINK_SETTING_ADDRESS << 22)); + XGI_INFO("Jong-M2REG_PCI_TRIGGER_MODE_MASK=0x%lx \n\n", M2REG_PCI_TRIGGER_MODE_MASK); + + /* Jong 06/14/2006; 0x400001a */ + XGI_INFO("Jong-(M2REG_AUTO_LINK_SETTING_ADDRESS << 22)|M2REG_CLEAR_COUNTERS_MASK|0x08|M2REG_PCI_TRIGGER_MODE_MASK=0x%lx \n", + (M2REG_AUTO_LINK_SETTING_ADDRESS << 22)|M2REG_CLEAR_COUNTERS_MASK|0x08|M2REG_PCI_TRIGGER_MODE_MASK); + dwWriteReg(BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS, + (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | + M2REG_CLEAR_COUNTERS_MASK | + 0x08 | + M2REG_PCI_TRIGGER_MODE_MASK); + + /* Jong 05/25/2006 */ + XGI_INFO("Jong-xgi_submit_cmdlist-After dwWriteReg() \n"); + /* return; */ /* OK */ + + /* Jong 06/14/2006; 0x400000a */ + XGI_INFO("Jong-(M2REG_AUTO_LINK_SETTING_ADDRESS << 22)|0x08|M2REG_PCI_TRIGGER_MODE_MASK=0x%lx \n", + (M2REG_AUTO_LINK_SETTING_ADDRESS << 22)|0x08|M2REG_PCI_TRIGGER_MODE_MASK); + dwWriteReg(BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS, + (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | + 0x08 | + M2REG_PCI_TRIGGER_MODE_MASK); + + // Send PCI begin command + XGI_INFO("Jong-xgi_submit_cmdlist-Send PCI begin command \n"); + /* return; */ + + XGI_INFO("Jong-xgi_submit_cmdlist-portOffset=%d \n", portOffset); + 
XGI_INFO("Jong-xgi_submit_cmdlist-beginPort=%d \n", beginPort); + + /* beginPort = 48; */ + /* 0xc100000 */ + dwWriteReg(portOffset, (beginPort<<22) + (BEGIN_VALID_MASK) + pCmdInfo->_curDebugID); + XGI_INFO("Jong-(beginPort<<22)=0x%lx \n", (beginPort<<22)); + XGI_INFO("Jong-(BEGIN_VALID_MASK)=0x%lx \n", BEGIN_VALID_MASK); + XGI_INFO("Jong- pCmdInfo->_curDebugID=0x%lx \n", pCmdInfo->_curDebugID); + XGI_INFO("Jong- (beginPort<<22) + (BEGIN_VALID_MASK) + pCmdInfo->_curDebugID=0x%lx \n", (beginPort<<22) + (BEGIN_VALID_MASK) + pCmdInfo->_curDebugID); + XGI_INFO("Jong-xgi_submit_cmdlist-Send PCI begin command- After \n"); + /* return; */ /* OK */ + + /* 0x80000024 */ + dwWriteReg(portOffset+4, BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize); + XGI_INFO("Jong- BEGIN_LINK_ENABLE_MASK=0x%lx \n", BEGIN_LINK_ENABLE_MASK); + XGI_INFO("Jong- pCmdInfo->_firstSize=0x%lx \n", pCmdInfo->_firstSize); + XGI_INFO("Jong- BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize=0x%lx \n", BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize); + XGI_INFO("Jong-xgi_submit_cmdlist-dwWriteReg-1 \n"); + + /* 0x1010000 */ + dwWriteReg(portOffset+8, (pCmdInfo->_firstBeginAddr >> 4)); + XGI_INFO("Jong- pCmdInfo->_firstBeginAddr=0x%lx \n", pCmdInfo->_firstBeginAddr); + XGI_INFO("Jong- (pCmdInfo->_firstBeginAddr >> 4)=0x%lx \n", (pCmdInfo->_firstBeginAddr >> 4)); + XGI_INFO("Jong-xgi_submit_cmdlist-dwWriteReg-2 \n"); + + /* Jong 06/13/2006 */ + xgi_dump_register(info); + + /* Jong 06/12/2006; system hang; marked for test */ + dwWriteReg(portOffset+12, 0); + XGI_INFO("Jong-xgi_submit_cmdlist-dwWriteReg-3 \n"); + + /* Jong 06/13/2006; remove marked for system hang test */ + /* xgi_waitfor_pci_idle(info); */ + } + else + { + XGI_INFO("Jong-xgi_submit_cmdlist-s_cmdring._lastBatchStartAddr != 0 \n"); + U32 *lastBatchVirtAddr; + + /* Jong 05/25/2006 */ + /* return; */ + + if (pCmdInfo->_firstBeginType == BTYPE_3D) + { + addFlush2D(info); + } + + lastBatchVirtAddr = (U32*) xgi_find_pcie_virt(info, s_cmdring._lastBatchStartAddr); + + lastBatchVirtAddr[1] = BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize; + lastBatchVirtAddr[2] = pCmdInfo->_firstBeginAddr >> 4; + lastBatchVirtAddr[3] = 0; + //barrier(); + lastBatchVirtAddr[0] = (beginPort<<22) + (BEGIN_VALID_MASK) + (0xffff & pCmdInfo->_curDebugID); + + /* Jong 06/12/2006; system hang; marked for test */ + triggerHWCommandList(info, pCmdInfo->_beginCount); + + XGI_INFO("Jong-xgi_submit_cmdlist-s_cmdring._lastBatchStartAddr != 0 - End\n"); + } + + s_cmdring._lastBatchStartAddr = pCmdInfo->_lastBeginAddr; + XGI_INFO("Jong-xgi_submit_cmdlist-End \n"); +} + + +/* + state: 0 - console + 1 - graphic + 2 - fb + 3 - logout +*/ +void xgi_state_change(xgi_info_t *info, xgi_state_info_t *pStateInfo) +{ +#define STATE_CONSOLE 0 +#define STATE_GRAPHIC 1 +#define STATE_FBTERM 2 +#define STATE_LOGOUT 3 +#define STATE_REBOOT 4 +#define STATE_SHUTDOWN 5 + + if ((pStateInfo->_fromState == STATE_GRAPHIC) + && (pStateInfo->_toState == STATE_CONSOLE)) + { + XGI_INFO("[kd] I see, now is to leaveVT\n"); + // stop to received batch + } + else if ((pStateInfo->_fromState == STATE_CONSOLE) + && (pStateInfo->_toState == STATE_GRAPHIC)) + { + XGI_INFO("[kd] I see, now is to enterVT\n"); + xgi_cmdlist_reset(); + } + else if ((pStateInfo->_fromState == STATE_GRAPHIC) + && ( (pStateInfo->_toState == STATE_LOGOUT) + ||(pStateInfo->_toState == STATE_REBOOT) + ||(pStateInfo->_toState == STATE_SHUTDOWN))) + { + XGI_INFO("[kd] I see, not is to exit from X\n"); + // stop to received batch + } + else + { + XGI_ERROR("[kd] Should not 
happen\n"); + } + +} + +void xgi_cmdlist_reset(void) +{ + s_cmdring._lastBatchStartAddr = 0; + s_cmdring._cmdRingOffset = 0; +} + +void xgi_cmdlist_cleanup(xgi_info_t *info) +{ + if (s_cmdring._cmdRingBuffer != 0) + { + xgi_pcie_free(info, s_cmdring._cmdRingBusAddr); + s_cmdring._cmdRingBuffer = 0; + s_cmdring._cmdRingOffset = 0; + s_cmdring._cmdRingSize = 0; + } +} + +static void triggerHWCommandList(xgi_info_t *info, U32 triggerCounter) +{ + static U32 s_triggerID = 1; + + //Fix me, currently we just trigger one time + while (triggerCounter--) + { + dwWriteReg(BASE_3D_ENG + M2REG_PCI_TRIGGER_REGISTER_ADDRESS, + 0x05000000 + (0xffff & s_triggerID++)); + // xgi_waitfor_pci_idle(info); + } +} + +static U32 getCurBatchBeginPort(xgi_cmd_info_t *pCmdInfo) +{ + // Convert the batch type to begin port ID + switch(pCmdInfo->_firstBeginType) + { + case BTYPE_2D: + return 0x30; + case BTYPE_3D: + return 0x40; + case BTYPE_FLIP: + return 0x50; + case BTYPE_CTRL: + return 0x20; + default: + //ASSERT(0); + return 0xff; + } +} + +static void addFlush2D(xgi_info_t *info) +{ + U32 *flushBatchVirtAddr; + U32 flushBatchHWAddr; + + U32 *lastBatchVirtAddr; + + /* check buf is large enough to contain a new flush batch */ + if ((s_cmdring._cmdRingOffset + 0x20) >= s_cmdring._cmdRingSize) + { + s_cmdring._cmdRingOffset = 0; + } + + flushBatchHWAddr = s_cmdring._cmdRingBuffer + s_cmdring._cmdRingOffset; + flushBatchVirtAddr = (U32*) xgi_find_pcie_virt(info, flushBatchHWAddr); + + /* not using memcpy for I assume the address is discrete */ + *(flushBatchVirtAddr + 0) = 0x10000000; + *(flushBatchVirtAddr + 1) = 0x80000004; /* size = 0x04 dwords */ + *(flushBatchVirtAddr + 2) = 0x00000000; + *(flushBatchVirtAddr + 3) = 0x00000000; + *(flushBatchVirtAddr + 4) = FLUSH_2D; + *(flushBatchVirtAddr + 5) = FLUSH_2D; + *(flushBatchVirtAddr + 6) = FLUSH_2D; + *(flushBatchVirtAddr + 7) = FLUSH_2D; + + // ASSERT(s_cmdring._lastBatchStartAddr != NULL); + lastBatchVirtAddr = (U32*) xgi_find_pcie_virt(info, s_cmdring._lastBatchStartAddr); + + lastBatchVirtAddr[1] = BEGIN_LINK_ENABLE_MASK + 0x08; + lastBatchVirtAddr[2] = flushBatchHWAddr >> 4; + lastBatchVirtAddr[3] = 0; + + //barrier(); + + // BTYPE_CTRL & NO debugID + lastBatchVirtAddr[0] = (0x20<<22) + (BEGIN_VALID_MASK); + + triggerHWCommandList(info, 1); + + s_cmdring._cmdRingOffset += 0x20; + s_cmdring._lastBatchStartAddr = flushBatchHWAddr; +} diff --git a/linux-core/xgi_cmdlist.h b/linux-core/xgi_cmdlist.h new file mode 100644 index 00000000..1b0c4965 --- /dev/null +++ b/linux-core/xgi_cmdlist.h @@ -0,0 +1,79 @@ + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * * + * All Rights Reserved. * + * * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. 
+ * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + ***************************************************************************/ + +#ifndef _XGI_CMDLIST_H_ +#define _XGI_CMDLIST_H_ + +#define ONE_BIT_MASK 0x1 +#define TWENTY_BIT_MASK 0xfffff +#define M2REG_FLUSH_2D_ENGINE_MASK (ONE_BIT_MASK<<20) +#define M2REG_FLUSH_3D_ENGINE_MASK TWENTY_BIT_MASK +#define M2REG_FLUSH_FLIP_ENGINE_MASK (ONE_BIT_MASK<<21) +#define BASE_3D_ENG 0x2800 +#define M2REG_AUTO_LINK_SETTING_ADDRESS 0x10 +#define M2REG_CLEAR_COUNTERS_MASK (ONE_BIT_MASK<<4) +#define M2REG_PCI_TRIGGER_MODE_MASK (ONE_BIT_MASK<<1) +#define BEGIN_VALID_MASK (ONE_BIT_MASK<<20) +#define BEGIN_LINK_ENABLE_MASK (ONE_BIT_MASK<<31) +#define M2REG_PCI_TRIGGER_REGISTER_ADDRESS 0x14 + +typedef enum +{ + FLUSH_2D = M2REG_FLUSH_2D_ENGINE_MASK, + FLUSH_3D = M2REG_FLUSH_3D_ENGINE_MASK, + FLUSH_FLIP = M2REG_FLUSH_FLIP_ENGINE_MASK +}FLUSH_CODE; + +typedef enum +{ + AGPCMDLIST_SCRATCH_SIZE = 0x100, + AGPCMDLIST_BEGIN_SIZE = 0x004, + AGPCMDLIST_3D_SCRATCH_CMD_SIZE = 0x004, + AGPCMDLIST_2D_SCRATCH_CMD_SIZE = 0x00c, + AGPCMDLIST_FLUSH_CMD_LEN = 0x004, + AGPCMDLIST_DUMY_END_BATCH_LEN = AGPCMDLIST_BEGIN_SIZE +}CMD_SIZE; + +typedef struct xgi_cmdring_info_s +{ + U32 _cmdRingSize; + U32 _cmdRingBuffer; + U32 _cmdRingBusAddr; + U32 _lastBatchStartAddr; + U32 _cmdRingOffset; +}xgi_cmdring_info_t; + +extern int xgi_cmdlist_initialize(xgi_info_t *info, U32 size); + +extern void xgi_submit_cmdlist(xgi_info_t *info, xgi_cmd_info_t * pCmdInfo); + +extern void xgi_state_change(xgi_info_t *info, xgi_state_info_t * pStateInfo); + +extern void xgi_cmdlist_cleanup(xgi_info_t *info); + +#endif /* _XGI_CMDLIST_H_ */ diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c new file mode 100644 index 00000000..5e80d417 --- /dev/null +++ b/linux-core/xgi_drv.c @@ -0,0 +1,1610 @@ + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * * + * All Rights Reserved. * + * * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. 
IN NO EVENT SHALL XGI AND/OR + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + ***************************************************************************/ +#include "xgi_types.h" +#include "xgi_linux.h" +#include "xgi_drv.h" +#include "xgi_regs.h" +#include "xgi_pcie.h" +#include "xgi_misc.h" +#include "xgi_cmdlist.h" + +/* for debug */ +static int xgi_temp = 1; +/* + * global parameters + */ +static struct xgi_dev { + u16 vendor; + u16 device; + const char *name; +} xgidev_list[] = { + {PCI_VENDOR_ID_XGI, PCI_DEVICE_ID_XP5, "XP5"}, + {PCI_VENDOR_ID_XGI, PCI_DEVICE_ID_XG47, "XG47"}, + {0, 0, NULL} +}; + +int xgi_major = XGI_DEV_MAJOR; /* xgi reserved major device number. */ + +static int xgi_num_devices = 0; + +xgi_info_t xgi_devices[XGI_MAX_DEVICES]; + +#if defined(XGI_PM_SUPPORT_APM) +static struct pm_dev *apm_xgi_dev[XGI_MAX_DEVICES] = { 0 }; +#endif + +/* add one for the control device */ +xgi_info_t xgi_ctl_device; +wait_queue_head_t xgi_ctl_waitqueue; + +#ifdef CONFIG_PROC_FS +struct proc_dir_entry *proc_xgi; +#endif + +#ifdef CONFIG_DEVFS_FS +devfs_handle_t xgi_devfs_handles[XGI_MAX_DEVICES]; +#endif + +struct list_head xgi_mempid_list; + +/* xgi_ functions.. do not take a state device parameter */ +static int xgi_post_vbios(xgi_ioctl_post_vbios_t *info); +static void xgi_proc_create(void); +static void xgi_proc_remove_all(struct proc_dir_entry *); +static void xgi_proc_remove(void); + +/* xgi_kern_ functions, interfaces used by linux kernel */ +int xgi_kern_probe(struct pci_dev *, const struct pci_device_id *); + +unsigned int xgi_kern_poll(struct file *, poll_table *); +int xgi_kern_ioctl(struct inode *, struct file *, unsigned int, unsigned long); +int xgi_kern_mmap(struct file *, struct vm_area_struct *); +int xgi_kern_open(struct inode *, struct file *); +int xgi_kern_release(struct inode *inode, struct file *filp); + +void xgi_kern_vma_open(struct vm_area_struct *vma); +void xgi_kern_vma_release(struct vm_area_struct *vma); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 1)) +struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma, + unsigned long address, int *type); +#else +struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma, + unsigned long address, int write_access); +#endif + +int xgi_kern_read_card_info(char *, char **, off_t off, int, int *, void *); +int xgi_kern_read_status(char *, char **, off_t off, int, int *, void *); +int xgi_kern_read_pcie_info(char *, char **, off_t off, int, int *, void *); +int xgi_kern_read_version(char *, char **, off_t off, int, int *, void *); + +int xgi_kern_ctl_open(struct inode *, struct file *); +int xgi_kern_ctl_close(struct inode *, struct file *); +unsigned int xgi_kern_ctl_poll(struct file *, poll_table *); + +void xgi_kern_isr_bh(unsigned long); +irqreturn_t xgi_kern_isr(int, void *, struct pt_regs *); + +static void xgi_lock_init(xgi_info_t *info); + +#if defined(XGI_PM_SUPPORT_ACPI) +int xgi_kern_acpi_standby(struct pci_dev *, u32); +int xgi_kern_acpi_resume(struct pci_dev *); +#endif + +/* + * verify access to pci config space wasn't disabled behind our back + * unfortunately, XFree86 enables/disables memory access in pci config space at + * various times (such as restoring initial pci config space settings during vt + * switches or when doing mulicard). As a result, all of our register accesses + * are garbage at this point. 
add a check to see if access was disabled and + * reenable any such access. + */ +#define XGI_CHECK_PCI_CONFIG(xgi) \ + xgi_check_pci_config(xgi, __LINE__) + +static inline void xgi_check_pci_config(xgi_info_t *info, int line) +{ + unsigned short cmd, flag = 0; + + // don't do this on the control device, only the actual devices + if (info->flags & XGI_FLAG_CONTROL) + return; + + pci_read_config_word(info->dev, PCI_COMMAND, &cmd); + if (!(cmd & PCI_COMMAND_MASTER)) + { + XGI_INFO("restoring bus mastering! (%d)\n", line); + cmd |= PCI_COMMAND_MASTER; + flag = 1; + } + + if (!(cmd & PCI_COMMAND_MEMORY)) + { + XGI_INFO("restoring MEM access! (%d)\n", line); + cmd |= PCI_COMMAND_MEMORY; + flag = 1; + } + + if (flag) + pci_write_config_word(info->dev, PCI_COMMAND, cmd); +} + +static int xgi_post_vbios(xgi_ioctl_post_vbios_t *info) +{ + return 1; +} + +/* + * struct pci_device_id { + * unsigned int vendor, device; // Vendor and device ID or PCI_ANY_ID + * unsigned int subvendor, subdevice; // Subsystem ID's or PCI_ANY_ID + * unsigned int class, class_mask; // (class,subclass,prog-if) triplet + * unsigned long driver_data; // Data private to the driver + * }; + */ + +static struct pci_device_id xgi_dev_table[] = { + { + .vendor = PCI_VENDOR_ID_XGI, + .device = PCI_ANY_ID, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .class = (PCI_CLASS_DISPLAY_VGA << 8), + .class_mask = ~0, + }, + { } +}; + +/* + * #define MODULE_DEVICE_TABLE(type,name) \ + * MODULE_GENERIC_TABLE(type##_device,name) + */ + MODULE_DEVICE_TABLE(pci, xgi_dev_table); + +/* + * struct pci_driver { + * struct list_head node; + * char *name; + * const struct pci_device_id *id_table; // NULL if wants all devices + * int (*probe)(struct pci_dev *dev, const struct pci_device_id *id); // New device inserted + * void (*remove)(struct pci_dev *dev); // Device removed (NULL if not a hot-plug capable driver) + * int (*save_state)(struct pci_dev *dev, u32 state); // Save Device Context + * int (*suspend)(struct pci_dev *dev, u32 state); // Device suspended + * int (*resume)(struct pci_dev *dev); // Device woken up + * int (*enable_wake)(struct pci_dev *dev, u32 state, int enable); // Enable wake event + * }; + */ +static struct pci_driver xgi_pci_driver = { + .name = "xgi", + .id_table = xgi_dev_table, + .probe = xgi_kern_probe, +#if defined(XGI_SUPPORT_ACPI) + .suspend = xgi_kern_acpi_standby, + .resume = xgi_kern_acpi_resume, +#endif +}; + +/* + * find xgi devices and set initial state + */ +int xgi_kern_probe(struct pci_dev *dev, const struct pci_device_id *id_table) +{ + xgi_info_t *info; + + if ((dev->vendor != PCI_VENDOR_ID_XGI) + || (dev->class != (PCI_CLASS_DISPLAY_VGA << 8))) + { + return -1; + } + + if (xgi_num_devices == XGI_MAX_DEVICES) + { + XGI_INFO("maximum device number (%d) reached!\n", xgi_num_devices); + return -1; + } + + /* enable io, mem, and bus-mastering in pci config space */ + if (pci_enable_device(dev) != 0) + { + XGI_INFO("pci_enable_device failed, aborting\n"); + return -1; + } + + XGI_INFO("maximum device number (%d) reached \n", xgi_num_devices); + + pci_set_master(dev); + + info = &xgi_devices[xgi_num_devices]; + info->dev = dev; + info->vendor_id = dev->vendor; + info->device_id = dev->device; + info->bus = dev->bus->number; + info->slot = PCI_SLOT((dev)->devfn); + + xgi_lock_init(info); + + info->mmio.base = XGI_PCI_RESOURCE_START(dev, 1); + info->mmio.size = XGI_PCI_RESOURCE_SIZE(dev, 1); + + /* check IO region */ + if (!request_mem_region(info->mmio.base, info->mmio.size, "xgi")) + { + 
XGI_ERROR("cannot reserve MMIO memory\n"); + goto error_disable_dev; + } + + XGI_INFO("info->mmio.base: 0x%lx \n", info->mmio.base); + XGI_INFO("info->mmio.size: 0x%lx \n", info->mmio.size); + + info->mmio.vbase = (unsigned char *)ioremap_nocache(info->mmio.base, + info->mmio.size); + if (!info->mmio.vbase) + { + release_mem_region(info->mmio.base, info->mmio.size); + XGI_ERROR("info->mmio.vbase failed\n"); + goto error_disable_dev; + } + xgi_enable_mmio(info); + + //xgi_enable_ge(info); + + XGI_INFO("info->mmio.vbase: 0x%p \n", info->mmio.vbase); + + info->fb.base = XGI_PCI_RESOURCE_START(dev, 0); + info->fb.size = XGI_PCI_RESOURCE_SIZE(dev, 0); + + XGI_INFO("info->fb.base: 0x%lx \n", info->fb.base); + XGI_INFO("info->fb.size: 0x%lx \n", info->fb.size); + + info->fb.size = bIn3cf(0x54) * 8 * 1024 * 1024; + XGI_INFO("info->fb.size: 0x%lx \n", info->fb.size); + + /* check frame buffer region + if (!request_mem_region(info->fb.base, info->fb.size, "xgi")) + { + release_mem_region(info->mmio.base, info->mmio.size); + XGI_ERROR("cannot reserve frame buffer memory\n"); + goto error_disable_dev; + } + + + info->fb.vbase = (unsigned char *)ioremap_nocache(info->fb.base, + info->fb.size); + + if (!info->fb.vbase) + { + release_mem_region(info->mmio.base, info->mmio.size); + release_mem_region(info->fb.base, info->fb.size); + XGI_ERROR("info->fb.vbase failed\n"); + goto error_disable_dev; + } + */ + info->fb.vbase = NULL; + XGI_INFO("info->fb.vbase: 0x%p \n", info->fb.vbase); + + info->irq = dev->irq; + + /* check common error condition */ + if (info->irq == 0) + { + XGI_ERROR("Can't find an IRQ for your XGI card! \n"); + goto error_zero_dev; + } + XGI_INFO("info->irq: %lx \n", info->irq); + + //xgi_enable_dvi_interrupt(info); + + /* sanity check the IO apertures */ + if ((info->mmio.base == 0) || (info->mmio.size == 0) + || (info->fb.base == 0) || (info->fb.size == 0)) + { + XGI_ERROR("The IO regions for your XGI card are invalid.\n"); + + if ((info->mmio.base == 0) || (info->mmio.size == 0)) + { + XGI_ERROR("mmio appears to be wrong: 0x%lx 0x%lx\n", + info->mmio.base, + info->mmio.size); + } + + if ((info->fb.base == 0) || (info->fb.size == 0)) + { + XGI_ERROR("frame buffer appears to be wrong: 0x%lx 0x%lx\n", + info->fb.base, + info->fb.size); + } + + goto error_zero_dev; + } + + //xgi_num_devices++; + + return 0; + +error_zero_dev: + release_mem_region(info->fb.base, info->fb.size); + release_mem_region(info->mmio.base, info->mmio.size); + +error_disable_dev: + pci_disable_device(dev); + return -1; + +} + +/* + * vma operations... + * this is only called when the vmas are duplicated. this + * appears to only happen when the process is cloned to create + * a new process, and not when the process is threaded. + * + * increment the usage count for the physical pages, so when + * this clone unmaps the mappings, the pages are not + * deallocated under the original process. 
+ */ +struct vm_operations_struct xgi_vm_ops = { + .open = xgi_kern_vma_open, + .close = xgi_kern_vma_release, + .nopage = xgi_kern_vma_nopage, +}; + +void xgi_kern_vma_open(struct vm_area_struct *vma) +{ + XGI_INFO("VM: vma_open for 0x%lx - 0x%lx, offset 0x%lx\n", + vma->vm_start, + vma->vm_end, + XGI_VMA_OFFSET(vma)); + + if (XGI_VMA_PRIVATE(vma)) + { + xgi_pcie_block_t *block = (xgi_pcie_block_t *)XGI_VMA_PRIVATE(vma); + XGI_ATOMIC_INC(block->use_count); + } +} + +void xgi_kern_vma_release(struct vm_area_struct *vma) +{ + XGI_INFO("VM: vma_release for 0x%lx - 0x%lx, offset 0x%lx\n", + vma->vm_start, + vma->vm_end, + XGI_VMA_OFFSET(vma)); + + if (XGI_VMA_PRIVATE(vma)) + { + xgi_pcie_block_t *block = (xgi_pcie_block_t *)XGI_VMA_PRIVATE(vma); + XGI_ATOMIC_DEC(block->use_count); + + /* + * if use_count is down to 0, the kernel virtual mapping was freed + * but the underlying physical pages were not, we need to clear the + * bit and free the physical pages. + */ + if (XGI_ATOMIC_READ(block->use_count) == 0) + { + // Need TO Finish + XGI_VMA_PRIVATE(vma) = NULL; + } + } +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 1)) +struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma, + unsigned long address, int *type) +{ + xgi_pcie_block_t *block = (xgi_pcie_block_t *)XGI_VMA_PRIVATE(vma); + struct page *page = NOPAGE_SIGBUS; + unsigned long offset = 0; + unsigned long page_addr = 0; +/* + XGI_INFO("VM: mmap([0x%lx-0x%lx] off=0x%lx) address: 0x%lx \n", + vma->vm_start, + vma->vm_end, + XGI_VMA_OFFSET(vma), + address); +*/ + offset = (address - vma->vm_start) + XGI_VMA_OFFSET(vma); + + offset = offset - block->bus_addr; + + offset >>= PAGE_SHIFT; + + page_addr = block->page_table[offset].virt_addr; + + if (xgi_temp) + { + XGI_INFO("block->bus_addr: 0x%lx block->hw_addr: 0x%lx" + "block->page_count: 0x%lx block->page_order: 0x%lx" + "block->page_table[0x%lx].virt_addr: 0x%lx\n", + block->bus_addr, block->hw_addr, + block->page_count, block->page_order, + offset, + block->page_table[offset].virt_addr); + xgi_temp = 0; + } + + if (!page_addr) goto out; /* hole or end-of-file */ + page = virt_to_page(page_addr); + + /* got it, now increment the count */ + get_page(page); +out: + return page; + +} +#else +struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma, + unsigned long address, int write_access) +{ + xgi_pcie_block_t *block = (xgi_pcie_block_t *)XGI_VMA_PRIVATE(vma); + struct page *page = NOPAGE_SIGBUS; + unsigned long offset = 0; + unsigned long page_addr = 0; +/* + XGI_INFO("VM: mmap([0x%lx-0x%lx] off=0x%lx) address: 0x%lx \n", + vma->vm_start, + vma->vm_end, + XGI_VMA_OFFSET(vma), + address); +*/ + offset = (address - vma->vm_start) + XGI_VMA_OFFSET(vma); + + offset = offset - block->bus_addr; + + offset >>= PAGE_SHIFT; + + page_addr = block->page_table[offset].virt_addr; + + if (xgi_temp) + { + XGI_INFO("block->bus_addr: 0x%lx block->hw_addr: 0x%lx" + "block->page_count: 0x%lx block->page_order: 0x%lx" + "block->page_table[0x%lx].virt_addr: 0x%lx\n", + block->bus_addr, block->hw_addr, + block->page_count, block->page_order, + offset, + block->page_table[offset].virt_addr); + xgi_temp = 0; + } + + if (!page_addr) goto out; /* hole or end-of-file */ + page = virt_to_page(page_addr); + + /* got it, now increment the count */ + get_page(page); +out: + return page; +} +#endif + +#if 0 +static struct file_operations xgi_fops = { + /* owner: THIS_MODULE, */ + poll: xgi_kern_poll, + ioctl: xgi_kern_ioctl, + mmap: xgi_kern_mmap, + open: xgi_kern_open, + release: xgi_kern_release, +}; 
+#endif + +static struct file_operations xgi_fops = { + .owner = THIS_MODULE, + .poll = xgi_kern_poll, + .ioctl = xgi_kern_ioctl, + .mmap = xgi_kern_mmap, + .open = xgi_kern_open, + .release = xgi_kern_release, +}; + +static xgi_file_private_t * xgi_alloc_file_private(void) +{ + xgi_file_private_t *fp; + + XGI_KMALLOC(fp, sizeof(xgi_file_private_t)); + if (!fp) + return NULL; + + memset(fp, 0, sizeof(xgi_file_private_t)); + + /* initialize this file's event queue */ + init_waitqueue_head(&fp->wait_queue); + + xgi_init_lock(fp->fp_lock); + + return fp; +} + +static void xgi_free_file_private(xgi_file_private_t *fp) +{ + if (fp == NULL) + return; + + XGI_KFREE(fp, sizeof(xgi_file_private_t)); +} + +int xgi_kern_open(struct inode *inode, struct file *filp) +{ + xgi_info_t *info = NULL; + int dev_num; + int result = 0, status; + + /* + * the type and num values are only valid if we are not using devfs. + * However, since we use them to retrieve the device pointer, we + * don't need them with devfs as filp->private_data is already + * initialized + */ + filp->private_data = xgi_alloc_file_private(); + if (filp->private_data == NULL) + return -ENOMEM; + + XGI_INFO("filp->private_data %p\n", filp->private_data); + /* + * for control device, just jump to its open routine + * after setting up the private data + */ + if (XGI_IS_CONTROL_DEVICE(inode)) + return xgi_kern_ctl_open(inode, filp); + + /* what device are we talking about? */ + dev_num = XGI_DEVICE_NUMBER(inode); + if (dev_num >= XGI_MAX_DEVICES) + { + xgi_free_file_private(filp->private_data); + filp->private_data = NULL; + return -ENODEV; + } + + info = &xgi_devices[dev_num]; + + XGI_INFO("Jong-xgi_kern_open on device %d\n", dev_num); + + xgi_down(info->info_sem); + XGI_CHECK_PCI_CONFIG(info); + + XGI_INFO_FROM_FP(filp) = info; + + /* + * map the memory and allocate isr on first open + */ + + if (!(info->flags & XGI_FLAG_OPEN)) + { + XGI_INFO("info->flags & XGI_FLAG_OPEN \n"); + + if (info->device_id == 0) + { + XGI_INFO("open of nonexistent device %d\n", dev_num); + result = -ENXIO; + goto failed; + } + + /* initialize struct irqaction */ + status = request_irq(info->irq, xgi_kern_isr, + SA_INTERRUPT | SA_SHIRQ, "xgi", + (void *) info); + if (status != 0) + { + if (info->irq && (status == -EBUSY)) + { + XGI_ERROR("Tried to get irq %d, but another driver", + (unsigned int) info->irq); + XGI_ERROR("has it and is not sharing it.\n"); + } + XGI_ERROR("isr request failed 0x%x\n", status); + result = -EIO; + goto failed; + } + + /* + * #define DECLARE_TASKLET(name, func, data) \ + * struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data } + */ + info->tasklet.func = xgi_kern_isr_bh; + info->tasklet.data = (unsigned long) info; + tasklet_enable(&info->tasklet); + + /* Alloc 1M bytes for cmdbuffer which is flush2D batch array */ + xgi_cmdlist_initialize(info, 0x100000); + + info->flags |= XGI_FLAG_OPEN; + } + + XGI_ATOMIC_INC(info->use_count); + +failed: + xgi_up(info->info_sem); + + if ((result) && filp->private_data) + { + xgi_free_file_private(filp->private_data); + filp->private_data = NULL; + } + + return result; +} + +int xgi_kern_release(struct inode *inode, struct file *filp) +{ + xgi_info_t *info = XGI_INFO_FROM_FP(filp); + + XGI_CHECK_PCI_CONFIG(info); + + /* + * for control device, just jump to its open routine + * after setting up the private data + */ + if (XGI_IS_CONTROL_DEVICE(inode)) + return xgi_kern_ctl_close(inode, filp); + + XGI_INFO("Jong-xgi_kern_release on device %d\n", XGI_DEVICE_NUMBER(inode)); + + 
xgi_down(info->info_sem); + if (XGI_ATOMIC_DEC_AND_TEST(info->use_count)) + { + + /* + * The usage count for this device has dropped to zero, it can be shut + * down safely; disable its interrupts. + */ + + /* + * Disable this device's tasklet to make sure that no bottom half will + * run with undefined device state. + */ + tasklet_disable(&info->tasklet); + + /* + * Free the IRQ, which may block until all pending interrupt processing + * has completed. + */ + free_irq(info->irq, (void *)info); + + xgi_cmdlist_cleanup(info); + + /* leave INIT flag alone so we don't reinit every time */ + info->flags &= ~XGI_FLAG_OPEN; + } + + xgi_up(info->info_sem); + + if (FILE_PRIVATE(filp)) + { + xgi_free_file_private(FILE_PRIVATE(filp)); + FILE_PRIVATE(filp) = NULL; + } + + return 0; +} + +int xgi_kern_mmap(struct file *filp, struct vm_area_struct *vma) +{ + //struct inode *inode = INODE_FROM_FP(filp); + xgi_info_t *info = XGI_INFO_FROM_FP(filp); + xgi_pcie_block_t *block; + int pages = 0; + unsigned long prot; + + XGI_INFO("Jong-VM: mmap([0x%lx-0x%lx] off=0x%lx)\n", + vma->vm_start, + vma->vm_end, + XGI_VMA_OFFSET(vma)); + + XGI_CHECK_PCI_CONFIG(info); + + if (XGI_MASK_OFFSET(vma->vm_start) + || XGI_MASK_OFFSET(vma->vm_end)) + { + XGI_ERROR("VM: bad mmap range: %lx - %lx\n", + vma->vm_start, vma->vm_end); + return -ENXIO; + } + + pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; + + vma->vm_ops = &xgi_vm_ops; + + /* XGI IO(reg) space */ + if (IS_IO_OFFSET(info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start)) + { + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + if (XGI_REMAP_PAGE_RANGE(vma->vm_start, + XGI_VMA_OFFSET(vma), + vma->vm_end - vma->vm_start, + vma->vm_page_prot)) + return -EAGAIN; + + /* mark it as IO so that we don't dump it on core dump */ + vma->vm_flags |= VM_IO; + XGI_INFO("VM: mmap io space \n"); + } + /* XGI fb space */ + /* Jong 06/14/2006; moved behind PCIE or modify IS_FB_OFFSET */ + else if (IS_FB_OFFSET(info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start)) + { + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + if (XGI_REMAP_PAGE_RANGE(vma->vm_start, + XGI_VMA_OFFSET(vma), + vma->vm_end - vma->vm_start, + vma->vm_page_prot)) + return -EAGAIN; + + // mark it as IO so that we don't dump it on core dump + vma->vm_flags |= VM_IO; + XGI_INFO("VM: mmap fb space \n"); + } + /* PCIE allocator */ + /* XGI_VMA_OFFSET(vma) is offset based on pcie.base (HW address space) */ + else if (IS_PCIE_OFFSET(info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start)) + { + xgi_down(info->pcie_sem); + + block = (xgi_pcie_block_t *)xgi_find_pcie_block(info, XGI_VMA_OFFSET(vma)); + + if (block == NULL) + { + XGI_ERROR("couldn't find pre-allocated PCIE memory!\n"); + xgi_up(info->pcie_sem); + return -EAGAIN; + } + + if (block->page_count != pages) + { + XGI_ERROR("pre-allocated PCIE memory has wrong number of pages!\n"); + xgi_up(info->pcie_sem); + return -EAGAIN; + } + + vma->vm_private_data = block; + XGI_ATOMIC_INC(block->use_count); + xgi_up(info->pcie_sem); + + /* + * prevent the swapper from swapping it out + * mark the memory i/o so the buffers aren't + * dumped on core dumps */ + vma->vm_flags |= (VM_LOCKED | VM_IO); + + /* un-cached */ + prot = pgprot_val(vma->vm_page_prot); + /* + if (boot_cpu_data.x86 > 3) + prot |= _PAGE_PCD | _PAGE_PWT; + */ + vma->vm_page_prot = __pgprot(prot); + + XGI_INFO("VM: mmap pcie space \n"); + } +#if 0 + else if (IS_FB_OFFSET(info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start)) + { + vma->vm_page_prot = 
pgprot_noncached(vma->vm_page_prot); + if (XGI_REMAP_PAGE_RANGE(vma->vm_start, + XGI_VMA_OFFSET(vma), + vma->vm_end - vma->vm_start, + vma->vm_page_prot)) + return -EAGAIN; + + // mark it as IO so that we don't dump it on core dump + vma->vm_flags |= VM_IO; + XGI_INFO("VM: mmap fb space \n"); + } +#endif + else + { + vma->vm_flags |= (VM_IO | VM_LOCKED); + XGI_ERROR("VM: mmap wrong range \n"); + } + + vma->vm_file = filp; + + return 0; +} + +unsigned int xgi_kern_poll(struct file *filp, struct poll_table_struct *wait) +{ + xgi_file_private_t *fp; + xgi_info_t *info; + unsigned int mask = 0; + unsigned long eflags; + + info = XGI_INFO_FROM_FP(filp); + + if (info->device_number == XGI_CONTROL_DEVICE_NUMBER) + return xgi_kern_ctl_poll(filp, wait); + + fp = XGI_GET_FP(filp); + + if (!(filp->f_flags & O_NONBLOCK)) + { + /* add us to the list */ + poll_wait(filp, &fp->wait_queue, wait); + } + + xgi_lock_irqsave(fp->fp_lock, eflags); + + /* wake the user on any event */ + if (fp->num_events) + { + XGI_INFO("Hey, an event occured!\n"); + /* + * trigger the client, when they grab the event, + * we'll decrement the event count + */ + mask |= (POLLPRI|POLLIN); + } + xgi_unlock_irqsave(fp->fp_lock, eflags); + + return mask; +} + +int xgi_kern_ioctl(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + xgi_info_t *info; + xgi_mem_alloc_t *alloc = NULL; + + int status = 0; + void *arg_copy; + int arg_size; + int err = 0; + + info = XGI_INFO_FROM_FP(filp); + + XGI_INFO("Jong-ioctl(0x%x, 0x%x, 0x%lx, 0x%x)\n", _IOC_TYPE(cmd), _IOC_NR(cmd), arg, _IOC_SIZE(cmd)); + /* + * extract the type and number bitfields, and don't decode + * wrong cmds: return ENOTTY (inappropriate ioctl) before access_ok() + */ + if (_IOC_TYPE(cmd) != XGI_IOCTL_MAGIC) return -ENOTTY; + if (_IOC_NR(cmd) > XGI_IOCTL_MAXNR) return -ENOTTY; + + /* + * the direction is a bitmask, and VERIFY_WRITE catches R/W + * transfers. `Type' is user-oriented, while + * access_ok is kernel-oriented, so the concept of "read" and + * "write" is reversed + */ + if (_IOC_DIR(cmd) & _IOC_READ) + { + err = !access_ok(VERIFY_WRITE, (void *)arg, _IOC_SIZE(cmd)); + } + else if (_IOC_DIR(cmd) & _IOC_WRITE) + { + err = !access_ok(VERIFY_READ, (void *)arg, _IOC_SIZE(cmd)); + } + if (err) return -EFAULT; + + XGI_CHECK_PCI_CONFIG(info); + + arg_size = _IOC_SIZE(cmd); + XGI_KMALLOC(arg_copy, arg_size); + if (arg_copy == NULL) + { + XGI_ERROR("failed to allocate ioctl memory\n"); + return -ENOMEM; + } + + /* Jong 05/25/2006 */ + /* copy_from_user(arg_copy, (void *)arg, arg_size); */ + if(copy_from_user(arg_copy, (void *)arg, arg_size)) + { + XGI_ERROR("failed to copyin ioctl data\n"); + XGI_INFO("Jong-copy_from_user-fail! \n"); + } + else + XGI_INFO("Jong-copy_from_user-OK! 
\n"); + + alloc = (xgi_mem_alloc_t *)arg_copy; + XGI_INFO("Jong-succeeded in copy_from_user 0x%lx, 0x%x bytes.\n", arg, arg_size); + + switch (_IOC_NR(cmd)) + { + case XGI_ESC_DEVICE_INFO: + XGI_INFO("Jong-xgi_ioctl_get_device_info \n"); + xgi_get_device_info(info, (struct xgi_chip_info_s *) arg_copy); + break; + case XGI_ESC_POST_VBIOS: + XGI_INFO("Jong-xgi_ioctl_post_vbios \n"); + break; + case XGI_ESC_FB_ALLOC: + XGI_INFO("Jong-xgi_ioctl_fb_alloc \n"); + xgi_fb_alloc(info, (struct xgi_mem_req_s *)arg_copy, alloc); + break; + case XGI_ESC_FB_FREE: + XGI_INFO("Jong-xgi_ioctl_fb_free \n"); + xgi_fb_free(info, *(unsigned long *) arg_copy); + break; + case XGI_ESC_MEM_COLLECT: + XGI_INFO("Jong-xgi_ioctl_mem_collect \n"); + xgi_mem_collect(info, (unsigned int *) arg_copy); + break; + case XGI_ESC_PCIE_ALLOC: + XGI_INFO("Jong-xgi_ioctl_pcie_alloc \n"); + xgi_pcie_alloc(info, ((xgi_mem_req_t *)arg_copy)->size, + ((xgi_mem_req_t *)arg_copy)->owner, alloc); + break; + case XGI_ESC_PCIE_FREE: + XGI_INFO("Jong-xgi_ioctl_pcie_free: bus_addr = 0x%lx \n", *((unsigned long *) arg_copy)); + xgi_pcie_free(info, *((unsigned long *) arg_copy)); + break; + case XGI_ESC_PCIE_CHECK: + XGI_INFO("Jong-xgi_pcie_heap_check \n"); + xgi_pcie_heap_check(); + break; + case XGI_ESC_GET_SCREEN_INFO: + XGI_INFO("Jong-xgi_get_screen_info \n"); + xgi_get_screen_info(info, (struct xgi_screen_info_s *) arg_copy); + break; + case XGI_ESC_PUT_SCREEN_INFO: + XGI_INFO("Jong-xgi_put_screen_info \n"); + xgi_put_screen_info(info, (struct xgi_screen_info_s *) arg_copy); + break; + case XGI_ESC_MMIO_INFO: + XGI_INFO("Jong-xgi_ioctl_get_mmio_info \n"); + xgi_get_mmio_info(info, (struct xgi_mmio_info_s *) arg_copy); + break; + case XGI_ESC_GE_RESET: + XGI_INFO("Jong-xgi_ioctl_ge_reset \n"); + xgi_ge_reset(info); + break; + case XGI_ESC_SAREA_INFO: + XGI_INFO("Jong-xgi_ioctl_sarea_info \n"); + xgi_sarea_info(info, (struct xgi_sarea_info_s *) arg_copy); + break; + case XGI_ESC_DUMP_REGISTER: + XGI_INFO("Jong-xgi_ioctl_dump_register \n"); + xgi_dump_register(info); + break; + case XGI_ESC_DEBUG_INFO: + XGI_INFO("Jong-xgi_ioctl_restore_registers \n"); + xgi_restore_registers(info); + //xgi_write_pcie_mem(info, (struct xgi_mem_req_s *) arg_copy); + //xgi_read_pcie_mem(info, (struct xgi_mem_req_s *) arg_copy); + break; + case XGI_ESC_SUBMIT_CMDLIST: + XGI_INFO("Jong-xgi_ioctl_submit_cmdlist \n"); + xgi_submit_cmdlist(info, (xgi_cmd_info_t *) arg_copy); + break; + case XGI_ESC_TEST_RWINKERNEL: + XGI_INFO("Jong-xgi_test_rwinkernel \n"); + xgi_test_rwinkernel(info, *(unsigned long*) arg_copy); + break; + case XGI_ESC_STATE_CHANGE: + XGI_INFO("Jong-xgi_state_change \n"); + xgi_state_change(info, (xgi_state_info_t *) arg_copy); + break; + case XGI_ESC_CPUID: + XGI_INFO("Jong-XGI_ESC_CPUID \n"); + xgi_get_cpu_id((struct cpu_info_s*) arg_copy); + break; + default: + XGI_INFO("Jong-xgi_ioctl_default \n"); + status = -EINVAL; + break; + } + + if (copy_to_user((void *)arg, arg_copy, arg_size)) + { + XGI_ERROR("failed to copyout ioctl data\n"); + XGI_INFO("Jong-copy_to_user-fail! \n"); + } + else + XGI_INFO("Jong-copy_to_user-OK! 
\n"); + + XGI_KFREE(arg_copy, arg_size); + return status; +} + + +/* + * xgi control driver operations defined here + */ +int xgi_kern_ctl_open(struct inode *inode, struct file *filp) +{ + xgi_info_t *info = &xgi_ctl_device; + + int rc = 0; + + XGI_INFO("Jong-xgi_kern_ctl_open\n"); + + xgi_down(info->info_sem); + info->device_number = XGI_CONTROL_DEVICE_NUMBER; + + /* save the xgi info in file->private_data */ + filp->private_data = info; + + if (XGI_ATOMIC_READ(info->use_count) == 0) + { + init_waitqueue_head(&xgi_ctl_waitqueue); + } + + info->flags |= XGI_FLAG_OPEN + XGI_FLAG_CONTROL; + + XGI_ATOMIC_INC(info->use_count); + xgi_up(info->info_sem); + + return rc; +} + +int xgi_kern_ctl_close(struct inode *inode, struct file *filp) +{ + xgi_info_t *info = XGI_INFO_FROM_FP(filp); + + XGI_INFO("Jong-xgi_kern_ctl_close\n"); + + xgi_down(info->info_sem); + if (XGI_ATOMIC_DEC_AND_TEST(info->use_count)) + { + info->flags = 0; + } + xgi_up(info->info_sem); + + if (FILE_PRIVATE(filp)) + { + xgi_free_file_private(FILE_PRIVATE(filp)); + FILE_PRIVATE(filp) = NULL; + } + + return 0; +} + +unsigned int xgi_kern_ctl_poll(struct file *filp, poll_table *wait) +{ + //xgi_info_t *info = XGI_INFO_FROM_FP(filp);; + unsigned int ret = 0; + + if (!(filp->f_flags & O_NONBLOCK)) + { + poll_wait(filp, &xgi_ctl_waitqueue, wait); + } + + return ret; +} + +/* + * xgi proc system + */ +static u8 xgi_find_pcie_capability(struct pci_dev *dev) +{ + u16 status; + u8 cap_ptr, cap_id; + + pci_read_config_word(dev, PCI_STATUS, &status); + status &= PCI_STATUS_CAP_LIST; + if (!status) + return 0; + + switch (dev->hdr_type) + { + case PCI_HEADER_TYPE_NORMAL: + case PCI_HEADER_TYPE_BRIDGE: + pci_read_config_byte(dev, PCI_CAPABILITY_LIST, &cap_ptr); + break; + default: + return 0; + } + + do + { + cap_ptr &= 0xFC; + pci_read_config_byte(dev, cap_ptr + PCI_CAP_LIST_ID, &cap_id); + pci_read_config_byte(dev, cap_ptr + PCI_CAP_LIST_NEXT, &cap_ptr); + } while (cap_ptr && cap_id != 0xFF); + + return 0; +} + +static struct pci_dev* xgi_get_pci_device(xgi_info_t *info) +{ + struct pci_dev *dev; + + dev = XGI_PCI_GET_DEVICE(info->vendor_id, info->device_id, NULL); + while (dev) + { + if (XGI_PCI_SLOT_NUMBER(dev) == info->slot + && XGI_PCI_BUS_NUMBER(dev) == info->bus) + return dev; + dev = XGI_PCI_GET_DEVICE(info->vendor_id, info->device_id, dev); + } + + return NULL; +} + +int xgi_kern_read_card_info(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + struct pci_dev *dev; + char *type; + int len = 0; + + xgi_info_t *info; + info = (xgi_info_t *) data; + + dev = xgi_get_pci_device(info); + if (!dev) + return 0; + + type = xgi_find_pcie_capability(dev) ? 
"PCIE" : "PCI"; + len += sprintf(page+len, "Card Type: \t %s\n", type); + + XGI_PCI_DEV_PUT(dev); + return len; +} + +int xgi_kern_read_version(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + int len = 0; + + len += sprintf(page+len, "XGI version: %s\n", "1.0"); + len += sprintf(page+len, "GCC version: %s\n", "3.0"); + + return len; +} + +int xgi_kern_read_pcie_info(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + return 0; +} + +int xgi_kern_read_status(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + return 0; +} + + +static void xgi_proc_create(void) +{ +#ifdef CONFIG_PROC_FS + + struct pci_dev *dev; + int i = 0; + char name[6]; + + struct proc_dir_entry *entry; + struct proc_dir_entry *proc_xgi_pcie, *proc_xgi_cards; + + xgi_info_t *info; + xgi_info_t *xgi_max_devices; + + /* world readable directory */ + int flags = S_IFDIR | S_IRUGO | S_IXUGO; + + proc_xgi = create_proc_entry("xgi", flags, proc_root_driver); + if (!proc_xgi) + goto failed; + + proc_xgi_cards = create_proc_entry("cards", flags, proc_xgi); + if (!proc_xgi_cards) + goto failed; + + proc_xgi_pcie = create_proc_entry("pcie", flags, proc_xgi); + if (!proc_xgi_pcie) + goto failed; + + /* + * Set the module owner to ensure that the reference + * count reflects accesses to the proc files. + */ + proc_xgi->owner = THIS_MODULE; + proc_xgi_cards->owner = THIS_MODULE; + proc_xgi_pcie->owner = THIS_MODULE; + + xgi_max_devices = xgi_devices + XGI_MAX_DEVICES; + for (info = xgi_devices; info < xgi_max_devices; info++) + { + if (info->device_id == 0) + break; + + /* world readable file */ + flags = S_IFREG | S_IRUGO; + + dev = xgi_get_pci_device(info); + if (!dev) + break; + + sprintf(name, "%d", i++); + entry = create_proc_entry(name, flags, proc_xgi_cards); + if (!entry) + { + XGI_PCI_DEV_PUT(dev); + goto failed; + } + + entry->data = info; + entry->read_proc = xgi_kern_read_card_info; + entry->owner = THIS_MODULE; + + if (xgi_find_pcie_capability(dev)) + { + entry = create_proc_entry("status", flags, proc_xgi_pcie); + if (!entry) + { + XGI_PCI_DEV_PUT(dev); + goto failed; + } + + entry->data = info; + entry->read_proc = xgi_kern_read_status; + entry->owner = THIS_MODULE; + + entry = create_proc_entry("card", flags, proc_xgi_pcie); + if (!entry) + { + XGI_PCI_DEV_PUT(dev); + goto failed; + } + + entry->data = info; + entry->read_proc = xgi_kern_read_pcie_info; + entry->owner = THIS_MODULE; + } + + XGI_PCI_DEV_PUT(dev); + } + + entry = create_proc_entry("version", flags, proc_xgi); + if (!entry) + goto failed; + + entry->read_proc = xgi_kern_read_version; + entry->owner = THIS_MODULE; + + entry = create_proc_entry("host-bridge", flags, proc_xgi_pcie); + if (!entry) + goto failed; + + entry->data = NULL; + entry->read_proc = xgi_kern_read_pcie_info; + entry->owner = THIS_MODULE; + + return; + +failed: + XGI_ERROR("failed to create /proc entries!\n"); + xgi_proc_remove_all(proc_xgi); +#endif +} + +#ifdef CONFIG_PROC_FS +static void xgi_proc_remove_all(struct proc_dir_entry *entry) +{ + while (entry) + { + struct proc_dir_entry *next = entry->next; + if (entry->subdir) + xgi_proc_remove_all(entry->subdir); + remove_proc_entry(entry->name, entry->parent); + if (entry == proc_xgi) + break; + entry = next; + } +} +#endif + +static void xgi_proc_remove(void) +{ +#ifdef CONFIG_PROC_FS + xgi_proc_remove_all(proc_xgi); +#endif +} + +/* + * driver receives an interrupt if someone waiting, then hand it off. 
+ */ +irqreturn_t xgi_kern_isr(int irq, void *dev_id, struct pt_regs *regs) +{ + xgi_info_t *info = (xgi_info_t *) dev_id; + u32 need_to_run_bottom_half = 0; + + //XGI_INFO("xgi_kern_isr \n"); + + //XGI_CHECK_PCI_CONFIG(info); + + //xgi_dvi_irq_handler(info); + + if (need_to_run_bottom_half) + { + tasklet_schedule(&info->tasklet); + } + + return IRQ_HANDLED; +} + +void xgi_kern_isr_bh(unsigned long data) +{ + xgi_info_t *info = (xgi_info_t *) data; + + XGI_INFO("xgi_kern_isr_bh \n"); + + //xgi_dvi_irq_handler(info); + + XGI_CHECK_PCI_CONFIG(info); +} + +static void xgi_lock_init(xgi_info_t *info) +{ + if (info == NULL) return; + + spin_lock_init(&info->info_lock); + + sema_init(&info->info_sem, 1); + sema_init(&info->fb_sem, 1); + sema_init(&info->pcie_sem, 1); + + XGI_ATOMIC_SET(info->use_count, 0); +} + +static void xgi_dev_init(xgi_info_t *info) +{ + struct pci_dev *pdev = NULL; + struct xgi_dev *dev; + int found = 0; + u16 pci_cmd; + + XGI_INFO("Enter xgi_dev_init \n"); + + //XGI_PCI_FOR_EACH_DEV(pdev) + { + for (dev = xgidev_list; dev->vendor; dev++) + { + if ((dev->vendor == pdev->vendor) && (dev->device == pdev->device)) + { + XGI_INFO("dev->vendor = pdev->vendor= %x \n", dev->vendor); + XGI_INFO("dev->device = pdev->device= %x \n", dev->device); + + xgi_devices[found].device_id = pdev->device; + + pci_read_config_byte(pdev, PCI_REVISION_ID, &xgi_devices[found].revision_id); + + XGI_INFO("PCI_REVISION_ID= %x \n", xgi_devices[found].revision_id); + + pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); + + XGI_INFO("PCI_COMMAND = %x \n", pci_cmd); + + break; + } + } + } +} +/* + * Export to Linux Kernel + */ + +static int __init xgi_init_module(void) +{ + xgi_info_t *info = &xgi_devices[xgi_num_devices]; + int i, result; + + XGI_INFO("Jong-xgi kernel driver %s initializing\n", XGI_DRV_VERSION); + //SET_MODULE_OWNER(&xgi_fops); + + memset(xgi_devices, 0, sizeof(xgi_devices)); + + if (pci_register_driver(&xgi_pci_driver) < 0) + { + pci_unregister_driver(&xgi_pci_driver); + XGI_ERROR("no XGI graphics adapter found\n"); + return -ENODEV; + } + + XGI_INFO("Jong-xgi_devices[%d].fb.base.: 0x%lx \n", xgi_num_devices, xgi_devices[xgi_num_devices].fb.base); + XGI_INFO("Jong-xgi_devices[%d].fb.size.: 0x%lx \n", xgi_num_devices, xgi_devices[xgi_num_devices].fb.size); + +/* Jong 07/27/2006; test for ubuntu */ +/* +#ifdef CONFIG_DEVFS_FS + + XGI_INFO("Jong-Use devfs \n"); + do + { + xgi_devfs_handles[0] = XGI_DEVFS_REGISTER("xgi", 0); + if (xgi_devfs_handles[0] == NULL) + { + result = -ENOMEM; + XGI_ERROR("devfs register failed\n"); + goto failed; + } + } while(0); +#else */ /* no devfs, do it the "classic" way */ + + + XGI_INFO("Jong-Use non-devfs \n"); + /* + * Register your major, and accept a dynamic number. This is the + * first thing to do, in order to avoid releasing other module's + * fops in scull_cleanup_module() + */ + result = XGI_REGISTER_CHRDEV(xgi_major, "xgi", &xgi_fops); + if (result < 0) + { + XGI_ERROR("register chrdev failed\n"); + pci_unregister_driver(&xgi_pci_driver); + return result; + } + if (xgi_major == 0) xgi_major = result; /* dynamic */ + +/* #endif */ /* CONFIG_DEVFS_FS */ + + XGI_INFO("Jong-major number %d\n", xgi_major); + + /* instantiate tasklets */ + for (i = 0; i < XGI_MAX_DEVICES; i++) + { + /* + * We keep one tasklet per card to avoid latency issues with more + * than one device; no two instances of a single tasklet are ever + * executed concurrently. 
+ */ + XGI_ATOMIC_SET(xgi_devices[i].tasklet.count, 1); + } + + /* init the xgi control device */ + { + xgi_info_t *info_ctl = &xgi_ctl_device; + xgi_lock_init(info_ctl); + } + + /* Init the resource manager */ + INIT_LIST_HEAD(&xgi_mempid_list); + if (!xgi_fb_heap_init(info)) + { + XGI_ERROR("xgi_fb_heap_init() failed\n"); + result = -EIO; + goto failed; + } + + /* Init the resource manager */ + if (!xgi_pcie_heap_init(info)) + { + XGI_ERROR("xgi_pcie_heap_init() failed\n"); + result = -EIO; + goto failed; + } + + /* create /proc/driver/xgi */ + xgi_proc_create(); + +#if defined(DEBUG) + inter_module_register("xgi_devices", THIS_MODULE, xgi_devices); +#endif + + return 0; + +failed: +#ifdef CONFIG_DEVFS_FS + XGI_DEVFS_REMOVE_CONTROL(); + XGI_DEVFS_REMOVE_DEVICE(xgi_num_devices); +#endif + + if (XGI_UNREGISTER_CHRDEV(xgi_major, "xgi") < 0) + XGI_ERROR("unregister xgi chrdev failed\n"); + + for (i = 0; i < xgi_num_devices; i++) + { + if (xgi_devices[i].dev) + { + release_mem_region(xgi_devices[i].fb.base, xgi_devices[i].fb.size); + release_mem_region(xgi_devices[i].mmio.base, xgi_devices[i].mmio.size); + } + } + + pci_unregister_driver(&xgi_pci_driver); + return result; + + return 1; +} + +void __exit xgi_exit_module(void) +{ + int i; + xgi_info_t *info, *max_devices; + +#ifdef CONFIG_DEVFS_FS + /* + XGI_DEVFS_REMOVE_CONTROL(); + for (i = 0; i < XGI_MAX_DEVICES; i++) + XGI_DEVFS_REMOVE_DEVICE(i); + */ + XGI_DEVFS_REMOVE_DEVICE(xgi_num_devices); +#endif + + if (XGI_UNREGISTER_CHRDEV(xgi_major, "xgi") < 0) + XGI_ERROR("unregister xgi chrdev failed\n"); + + XGI_INFO("Jong-unregister xgi chrdev scceeded\n"); + for (i = 0; i < XGI_MAX_DEVICES; i++) + { + if (xgi_devices[i].dev) + { + /* clean up the flush2D batch array */ + xgi_cmdlist_cleanup(&xgi_devices[i]); + + if(xgi_devices[i].fb.vbase != NULL) + { + iounmap((void *)xgi_devices[i].fb.vbase); + xgi_devices[i].fb.vbase = NULL; + } + if(xgi_devices[i].mmio.vbase != NULL) + { + iounmap((void *)xgi_devices[i].mmio.vbase); + xgi_devices[i].mmio.vbase = NULL; + } + + //release_mem_region(xgi_devices[i].fb.base, xgi_devices[i].fb.size); + //XGI_INFO("release frame buffer mem region scceeded\n"); + + release_mem_region(xgi_devices[i].mmio.base, xgi_devices[i].mmio.size); + XGI_INFO("release MMIO mem region scceeded\n"); + + xgi_fb_heap_cleanup(&xgi_devices[i]); + XGI_INFO("xgi_fb_heap_cleanup scceeded\n"); + + xgi_pcie_heap_cleanup(&xgi_devices[i]); + XGI_INFO("xgi_pcie_heap_cleanup scceeded\n"); + + XGI_PCI_DISABLE_DEVICE(xgi_devices[i].dev); + } + } + + pci_unregister_driver(&xgi_pci_driver); + + /* remove /proc/driver/xgi */ + xgi_proc_remove(); + +#if defined(DEBUG) + inter_module_unregister("xgi_devices"); +#endif +} + +module_init(xgi_init_module); +module_exit(xgi_exit_module); + +#if defined(XGI_PM_SUPPORT_ACPI) +int xgi_acpi_event(struct pci_dev *dev, u32 state) +{ + return 1; +} + +int xgi_kern_acpi_standby(struct pci_dev *dev, u32 state) +{ + return 1; +} + +int xgi_kern_acpi_resume(struct pci_dev *dev) +{ + return 1; +} +#endif + +MODULE_AUTHOR("Andrea Zhang "); +MODULE_DESCRIPTION("xgi kernel driver for xgi cards"); +MODULE_LICENSE("GPL"); diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h new file mode 100644 index 00000000..568a7af1 --- /dev/null +++ b/linux-core/xgi_drv.h @@ -0,0 +1,364 @@ + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * * + * All Rights Reserved. 
* + * * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + ***************************************************************************/ + +#ifndef _XGI_DRV_H_ +#define _XGI_DRV_H_ + +#define XGI_MAJOR_VERSION 0 +#define XGI_MINOR_VERSION 7 +#define XGI_PATCHLEVEL 5 + +#define XGI_DRV_VERSION "0.7.5" + +#ifndef XGI_DRV_NAME +#define XGI_DRV_NAME "xgi" +#endif + +/* + * xgi reserved major device number, Set this to 0 to + * request dynamic major number allocation. + */ +#ifndef XGI_DEV_MAJOR +#define XGI_DEV_MAJOR 0 +#endif + +#ifndef XGI_MAX_DEVICES +#define XGI_MAX_DEVICES 1 +#endif + +/* Jong 06/06/2006 */ +/* #define XGI_DEBUG */ + +#ifndef PCI_VENDOR_ID_XGI +/* +#define PCI_VENDOR_ID_XGI 0x1023 +*/ +#define PCI_VENDOR_ID_XGI 0x18CA + +#endif + +#ifndef PCI_DEVICE_ID_XP5 +#define PCI_DEVICE_ID_XP5 0x2200 +#endif + +#ifndef PCI_DEVICE_ID_XG47 +#define PCI_DEVICE_ID_XG47 0x0047 +#endif + +/* Macros to make printk easier */ +#define XGI_ERROR(fmt, arg...) \ + printk(KERN_ERR "[" XGI_DRV_NAME ":%s] *ERROR* " fmt, __FUNCTION__, ##arg) + +#define XGI_MEM_ERROR(area, fmt, arg...) \ + printk(KERN_ERR "[" XGI_DRV_NAME ":%s] *ERROR* " fmt, __FUNCTION__, ##arg) + +/* #define XGI_DEBUG */ + +#ifdef XGI_DEBUG +#define XGI_INFO(fmt, arg...) \ + printk(KERN_ALERT "[" XGI_DRV_NAME ":%s] " fmt, __FUNCTION__, ##arg) +/* printk(KERN_INFO "[" XGI_DRV_NAME ":%s] " fmt, __FUNCTION__, ##arg) */ +#else +#define XGI_INFO(fmt, arg...) 
do { } while (0) +#endif + +/* device name length; must be atleast 8 */ +#define XGI_DEVICE_NAME_LENGTH 40 + +/* need a fake device number for control device; just to flag it for msgs */ +#define XGI_CONTROL_DEVICE_NUMBER 100 + +typedef struct { + U32 base; // pcie base is different from fb base + U32 size; + U8 *vbase; +} xgi_aperture_t; + +typedef struct xgi_screen_info_s { + U32 scrn_start; + U32 scrn_xres; + U32 scrn_yres; + U32 scrn_bpp; + U32 scrn_pitch; +} xgi_screen_info_t; + +typedef struct xgi_sarea_info_s { + U32 bus_addr; + U32 size; +} xgi_sarea_info_t; + +typedef struct xgi_info_s { + struct pci_dev *dev; + int flags; + int device_number; + int bus; /* PCI config info */ + int slot; + int vendor_id; + U32 device_id; + U8 revision_id; + + /* physical characteristics */ + xgi_aperture_t mmio; + xgi_aperture_t fb; + xgi_aperture_t pcie; + xgi_screen_info_t scrn_info; + xgi_sarea_info_t sarea_info; + + /* look up table parameters */ + U32 *lut_base; + U32 lutPageSize; + U32 lutPageOrder; + U32 isLUTInLFB; + U32 sdfbPageSize; + + U32 pcie_config; + U32 pcie_status; + U32 irq; + + atomic_t use_count; + + /* keep track of any pending bottom halfes */ + struct tasklet_struct tasklet; + + spinlock_t info_lock; + + struct semaphore info_sem; + struct semaphore fb_sem; + struct semaphore pcie_sem; +} xgi_info_t; + +typedef struct xgi_ioctl_post_vbios { + U32 bus; + U32 slot; +} xgi_ioctl_post_vbios_t; + +typedef enum xgi_mem_location_s +{ + NON_LOCAL = 0, + LOCAL = 1, + INVALID = 0x7fffffff +} xgi_mem_location_t; + +enum PcieOwner +{ + PCIE_2D = 0, + /* + PCIE_3D should not begin with 1, + 2D alloc pcie memory will use owner 1. + */ + PCIE_3D = 11,/*vetex buf*/ + PCIE_3D_CMDLIST = 12, + PCIE_3D_SCRATCHPAD = 13, + PCIE_3D_TEXTURE = 14, + PCIE_INVALID = 0x7fffffff +}; + +typedef struct xgi_mem_req_s { + xgi_mem_location_t location; + unsigned long size; + unsigned long is_front; + enum PcieOwner owner; + unsigned long pid; +} xgi_mem_req_t; + +typedef struct xgi_mem_alloc_s { + xgi_mem_location_t location; + unsigned long size; + unsigned long bus_addr; + unsigned long hw_addr; + unsigned long pid; +} xgi_mem_alloc_t; + +typedef struct xgi_chip_info_s { + U32 device_id; + char device_name[32]; + U32 vendor_id; + U32 curr_display_mode; //Singe, DualView(Contained), MHS + U32 fb_size; + U32 sarea_bus_addr; + U32 sarea_size; +} xgi_chip_info_t; + +typedef struct xgi_opengl_cmd_s { + U32 cmd; +} xgi_opengl_cmd_t; + +typedef struct xgi_mmio_info_s { + xgi_opengl_cmd_t cmd_head; + void *mmioBase; + int size; +} xgi_mmio_info_t; + +typedef enum { + BTYPE_2D = 0, + BTYPE_3D = 1, + BTYPE_FLIP = 2, + BTYPE_CTRL = 3, + BTYPE_NONE = 0x7fffffff +}BATCH_TYPE; + +typedef struct xgi_cmd_info_s { + BATCH_TYPE _firstBeginType; + U32 _firstBeginAddr; + U32 _firstSize; + U32 _curDebugID; + U32 _lastBeginAddr; + U32 _beginCount; +} xgi_cmd_info_t; + +typedef struct xgi_state_info_s { + U32 _fromState; + U32 _toState; +} xgi_state_info_t; + +typedef struct cpu_info_s { + U32 _eax; + U32 _ebx; + U32 _ecx; + U32 _edx; +} cpu_info_t; + +typedef struct xgi_mem_pid_s { + struct list_head list; + xgi_mem_location_t location; + unsigned long bus_addr; + unsigned long pid; +} xgi_mem_pid_t; + +/* + * Ioctl definitions + */ + +#define XGI_IOCTL_MAGIC 'x' /* use 'x' as magic number */ + +#define XGI_IOCTL_BASE 0 +#define XGI_ESC_DEVICE_INFO (XGI_IOCTL_BASE + 0) +#define XGI_ESC_POST_VBIOS (XGI_IOCTL_BASE + 1) + +#define XGI_ESC_FB_INIT (XGI_IOCTL_BASE + 2) +#define XGI_ESC_FB_ALLOC (XGI_IOCTL_BASE + 3) +#define 
XGI_ESC_FB_FREE (XGI_IOCTL_BASE + 4) +#define XGI_ESC_PCIE_INIT (XGI_IOCTL_BASE + 5) +#define XGI_ESC_PCIE_ALLOC (XGI_IOCTL_BASE + 6) +#define XGI_ESC_PCIE_FREE (XGI_IOCTL_BASE + 7) +#define XGI_ESC_SUBMIT_CMDLIST (XGI_IOCTL_BASE + 8) +#define XGI_ESC_PUT_SCREEN_INFO (XGI_IOCTL_BASE + 9) +#define XGI_ESC_GET_SCREEN_INFO (XGI_IOCTL_BASE + 10) +#define XGI_ESC_GE_RESET (XGI_IOCTL_BASE + 11) +#define XGI_ESC_SAREA_INFO (XGI_IOCTL_BASE + 12) +#define XGI_ESC_DUMP_REGISTER (XGI_IOCTL_BASE + 13) +#define XGI_ESC_DEBUG_INFO (XGI_IOCTL_BASE + 14) +#define XGI_ESC_TEST_RWINKERNEL (XGI_IOCTL_BASE + 16) +#define XGI_ESC_STATE_CHANGE (XGI_IOCTL_BASE + 17) +#define XGI_ESC_MMIO_INFO (XGI_IOCTL_BASE + 18) +#define XGI_ESC_PCIE_CHECK (XGI_IOCTL_BASE + 19) +#define XGI_ESC_CPUID (XGI_IOCTL_BASE + 20) +#define XGI_ESC_MEM_COLLECT (XGI_IOCTL_BASE + 21) + +#define XGI_IOCTL_DEVICE_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_DEVICE_INFO, xgi_chip_info_t) +#define XGI_IOCTL_POST_VBIOS _IO(XGI_IOCTL_MAGIC, XGI_ESC_POST_VBIOS) + +#define XGI_IOCTL_FB_INIT _IO(XGI_IOCTL_MAGIC, XGI_ESC_FB_INIT) +#define XGI_IOCTL_FB_ALLOC _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_FB_ALLOC, xgi_mem_req_t) +#define XGI_IOCTL_FB_FREE _IOW(XGI_IOCTL_MAGIC, XGI_ESC_FB_FREE, unsigned long) + +#define XGI_IOCTL_PCIE_INIT _IO(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_INIT) +#define XGI_IOCTL_PCIE_ALLOC _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_ALLOC, xgi_mem_req_t) +#define XGI_IOCTL_PCIE_FREE _IOW(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_FREE, unsigned long) + +#define XGI_IOCTL_PUT_SCREEN_INFO _IOW(XGI_IOCTL_MAGIC, XGI_ESC_PUT_SCREEN_INFO, xgi_screen_info_t) +#define XGI_IOCTL_GET_SCREEN_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_GET_SCREEN_INFO, xgi_screen_info_t) + +#define XGI_IOCTL_GE_RESET _IO(XGI_IOCTL_MAGIC, XGI_ESC_GE_RESET) +#define XGI_IOCTL_SAREA_INFO _IOW(XGI_IOCTL_MAGIC, XGI_ESC_SAREA_INFO, xgi_sarea_info_t) +#define XGI_IOCTL_DUMP_REGISTER _IO(XGI_IOCTL_MAGIC, XGI_ESC_DUMP_REGISTER) +#define XGI_IOCTL_DEBUG_INFO _IO(XGI_IOCTL_MAGIC, XGI_ESC_DEBUG_INFO) +#define XGI_IOCTL_MMIO_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_MMIO_INFO, xgi_mmio_info_t) + +#define XGI_IOCTL_SUBMIT_CMDLIST _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_SUBMIT_CMDLIST, xgi_cmd_info_t) +#define XGI_IOCTL_TEST_RWINKERNEL _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_TEST_RWINKERNEL, unsigned long) +#define XGI_IOCTL_STATE_CHANGE _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_STATE_CHANGE, xgi_state_info_t) + +#define XGI_IOCTL_PCIE_CHECK _IO(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_CHECK) +#define XGI_IOCTL_CPUID _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_CPUID, cpu_info_t) +#define XGI_IOCTL_MAXNR 30 + +/* + * flags + */ +#define XGI_FLAG_OPEN 0x0001 +#define XGI_FLAG_NEEDS_POSTING 0x0002 +#define XGI_FLAG_WAS_POSTED 0x0004 +#define XGI_FLAG_CONTROL 0x0010 +#define XGI_FLAG_MAP_REGS_EARLY 0x0200 + +/* mmap(2) offsets */ + +#define IS_IO_OFFSET(info, offset, length) \ + (((offset) >= (info)->mmio.base) \ + && (((offset) + (length)) <= (info)->mmio.base + (info)->mmio.size)) + +/* Jong 06/14/2006 */ +/* (info)->fb.base is a base address for physical (bus) address space */ +/* what's the definition of offest? 
on physical (bus) address space or HW address space */ +/* Jong 06/15/2006; use HW address space */ +#define IS_FB_OFFSET(info, offset, length) \ + (((offset) >= 0) \ + && (((offset) + (length)) <= (info)->fb.size)) +#if 0 +#define IS_FB_OFFSET(info, offset, length) \ + (((offset) >= (info)->fb.base) \ + && (((offset) + (length)) <= (info)->fb.base + (info)->fb.size)) +#endif + +#define IS_PCIE_OFFSET(info, offset, length) \ + (((offset) >= (info)->pcie.base) \ + && (((offset) + (length)) <= (info)->pcie.base + (info)->pcie.size)) + +extern int xgi_fb_heap_init(xgi_info_t *info); +extern void xgi_fb_heap_cleanup(xgi_info_t *info); + +extern void xgi_fb_alloc(xgi_info_t *info, xgi_mem_req_t *req, xgi_mem_alloc_t *alloc); +extern void xgi_fb_free(xgi_info_t *info, unsigned long offset); +extern void xgi_mem_collect(xgi_info_t *info, unsigned int *pcnt); + +extern int xgi_pcie_heap_init(xgi_info_t *info); +extern void xgi_pcie_heap_cleanup(xgi_info_t *info); + +extern void xgi_pcie_alloc(xgi_info_t *info, unsigned long size, enum PcieOwner owner, xgi_mem_alloc_t *alloc); +extern void xgi_pcie_free(xgi_info_t *info, unsigned long offset); +extern void xgi_pcie_heap_check(void); +extern void *xgi_find_pcie_block(xgi_info_t *info, unsigned long address); +extern void *xgi_find_pcie_virt(xgi_info_t *info, unsigned long address); + +extern void xgi_read_pcie_mem(xgi_info_t *info, xgi_mem_req_t *req); +extern void xgi_write_pcie_mem(xgi_info_t *info, xgi_mem_req_t *req); + +extern void xgi_test_rwinkernel(xgi_info_t *info, unsigned long address); + +#endif diff --git a/linux-core/xgi_fb.c b/linux-core/xgi_fb.c new file mode 100644 index 00000000..67fdfe17 --- /dev/null +++ b/linux-core/xgi_fb.c @@ -0,0 +1,528 @@ + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * * + * All Rights Reserved. * + * * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ ***************************************************************************/ + +#include "xgi_types.h" +#include "xgi_linux.h" +#include "xgi_drv.h" +#include "xgi_fb.h" + +#define XGI_FB_HEAP_START 0x1000000 + +static xgi_mem_heap_t *xgi_fb_heap; +static kmem_cache_t *xgi_fb_cache_block = NULL; +extern struct list_head xgi_mempid_list; + +static xgi_mem_block_t *xgi_mem_new_node(void); +static xgi_mem_block_t *xgi_mem_alloc(xgi_info_t *info, unsigned long size); +static xgi_mem_block_t *xgi_mem_free(xgi_info_t *info, unsigned long offset); + +void xgi_fb_alloc(xgi_info_t *info, + xgi_mem_req_t *req, + xgi_mem_alloc_t *alloc) +{ + xgi_mem_block_t *block; + xgi_mem_pid_t *mempid_block; + + if (req->is_front) + { + alloc->location = LOCAL; + alloc->bus_addr = info->fb.base; + alloc->hw_addr = 0; + XGI_INFO("Video RAM allocation on front buffer successfully! \n"); + } + else + { + xgi_down(info->fb_sem); + block = xgi_mem_alloc(info, req->size); + xgi_up(info->fb_sem); + + if (block == NULL) + { + alloc->location = LOCAL; + alloc->size = 0; + alloc->bus_addr = 0; + alloc->hw_addr = 0; + XGI_ERROR("Video RAM allocation failed\n"); + } + else + { + XGI_INFO("Video RAM allocation succeeded: 0x%p\n", + (char *) block->offset); + alloc->location = LOCAL; + alloc->size = block->size; + alloc->bus_addr = info->fb.base + block->offset; + alloc->hw_addr = block->offset; + + /* manage mempid */ + mempid_block = kmalloc(sizeof(xgi_mem_pid_t), GFP_KERNEL); + mempid_block->location = LOCAL; + mempid_block->bus_addr = alloc->bus_addr; + mempid_block->pid = alloc->pid; + + if (!mempid_block) + XGI_ERROR("mempid_block alloc failed\n"); + + XGI_INFO("Memory ProcessID add one fb block pid:%ld successfully! \n", mempid_block->pid); + list_add(&mempid_block->list, &xgi_mempid_list); + } + } +} + +void xgi_fb_free(xgi_info_t *info, unsigned long bus_addr) +{ + xgi_mem_block_t *block; + unsigned long offset = bus_addr - info->fb.base; + xgi_mem_pid_t *mempid_block; + xgi_mem_pid_t *mempid_freeblock = NULL; + struct list_head *mempid_list; + + if (offset < 0) + { + XGI_INFO("free onscreen frame buffer successfully !\n"); + } + else + { + xgi_down(info->fb_sem); + block = xgi_mem_free(info, offset); + xgi_up(info->fb_sem); + + if (block == NULL) + { + XGI_ERROR("xgi_mem_free() failed at base 0x%lx\n", offset); + } + + /* manage mempid */ + mempid_list = xgi_mempid_list.next; + while (mempid_list != &xgi_mempid_list) + { + mempid_block = list_entry(mempid_list, struct xgi_mem_pid_s, list); + if (mempid_block->location == LOCAL && mempid_block->bus_addr == bus_addr) + { + mempid_freeblock = mempid_block; + break; + } + mempid_list = mempid_list->next; + } + if (mempid_freeblock) + { + list_del(&mempid_freeblock->list); + XGI_INFO("Memory ProcessID delete one fb block pid:%ld successfully! 
\n", mempid_freeblock->pid); + kfree(mempid_freeblock); + } + } +} + +int xgi_fb_heap_init(xgi_info_t *info) +{ + xgi_mem_block_t *block; + + xgi_fb_heap = kmalloc(sizeof(xgi_mem_heap_t), GFP_KERNEL); + if (!xgi_fb_heap) + { + XGI_ERROR("xgi_fb_heap alloc failed\n"); + return 0; + } + + INIT_LIST_HEAD(&xgi_fb_heap->free_list); + INIT_LIST_HEAD(&xgi_fb_heap->used_list); + INIT_LIST_HEAD(&xgi_fb_heap->sort_list); + + xgi_fb_cache_block = kmem_cache_create("xgi_fb_block", sizeof(xgi_mem_block_t), + 0, SLAB_HWCACHE_ALIGN, NULL, NULL); + + if (NULL == xgi_fb_cache_block) + { + XGI_ERROR("Fail to creat xgi_fb_block\n"); + goto fail1; + } + + block = (xgi_mem_block_t *)kmem_cache_alloc(xgi_fb_cache_block, GFP_KERNEL); + if (!block) + { + XGI_ERROR("kmem_cache_alloc failed\n"); + goto fail2; + } + + block->offset = XGI_FB_HEAP_START; + block->size = info->fb.size - XGI_FB_HEAP_START; + + list_add(&block->list, &xgi_fb_heap->free_list); + + xgi_fb_heap->max_freesize = info->fb.size - XGI_FB_HEAP_START; + + XGI_INFO("fb start offset: 0x%lx, memory size : 0x%lx\n", block->offset, block->size); + XGI_INFO("xgi_fb_heap->max_freesize: 0x%lx \n", xgi_fb_heap->max_freesize); + + return 1; + +fail2: + if (xgi_fb_cache_block) + { + kmem_cache_destroy(xgi_fb_cache_block); + xgi_fb_cache_block = NULL; + } +fail1: + if(xgi_fb_heap) + { + kfree(xgi_fb_heap); + xgi_fb_heap = NULL; + } + return 0; +} + +void xgi_fb_heap_cleanup(xgi_info_t *info) +{ + struct list_head *free_list, *temp; + xgi_mem_block_t *block; + int i; + + if (xgi_fb_heap) + { + free_list = &xgi_fb_heap->free_list; + for (i = 0; i < 3; i++, free_list++) + { + temp = free_list->next; + while (temp != free_list) + { + block = list_entry(temp, struct xgi_mem_block_s, list); + temp = temp->next; + + XGI_INFO("No. %d block->offset: 0x%lx block->size: 0x%lx \n", + i, block->offset, block->size); + //XGI_INFO("No. 
%d free block: 0x%p \n", i, block); + kmem_cache_free(xgi_fb_cache_block, block); + block = NULL; + } + } + XGI_INFO("xgi_fb_heap: 0x%p \n", xgi_fb_heap); + kfree(xgi_fb_heap); + xgi_fb_heap = NULL; + } + + if (xgi_fb_cache_block) + { + kmem_cache_destroy(xgi_fb_cache_block); + xgi_fb_cache_block = NULL; + } +} + +static xgi_mem_block_t * xgi_mem_new_node(void) +{ + xgi_mem_block_t *block; + + block = (xgi_mem_block_t *)kmem_cache_alloc(xgi_fb_cache_block, GFP_KERNEL); + if (!block) + { + XGI_ERROR("kmem_cache_alloc failed\n"); + return NULL; + } + + return block; +} + +#if 0 +static void xgi_mem_insert_node_after(xgi_mem_list_t *list, + xgi_mem_block_t *current, + xgi_mem_block_t *block); +static void xgi_mem_insert_node_before(xgi_mem_list_t *list, + xgi_mem_block_t *current, + xgi_mem_block_t *block); +static void xgi_mem_insert_node_head(xgi_mem_list_t *list, + xgi_mem_block_t *block); +static void xgi_mem_insert_node_tail(xgi_mem_list_t *list, + xgi_mem_block_t *block); +static void xgi_mem_delete_node(xgi_mem_list_t *list, + xgi_mem_block_t *block); +/* + * insert node:block after node:current + */ +static void xgi_mem_insert_node_after(xgi_mem_list_t *list, + xgi_mem_block_t *current, + xgi_mem_block_t *block) +{ + block->prev = current; + block->next = current->next; + current->next = block; + + if (current == list->tail) + { + list->tail = block; + } + else + { + block->next->prev = block; + } +} + +/* + * insert node:block before node:current + */ +static void xgi_mem_insert_node_before(xgi_mem_list_t *list, + xgi_mem_block_t *current, + xgi_mem_block_t *block) +{ + block->prev = current->prev; + block->next = current; + current->prev = block; + if (current == list->head) + { + list->head = block; + } + else + { + block->prev->next = block; + } +} +void xgi_mem_insert_node_head(xgi_mem_list_t *list, + xgi_mem_block_t *block) +{ + block->next = list->head; + block->prev = NULL; + + if (NULL == list->head) + { + list->tail = block; + } + else + { + list->head->prev = block; + } + list->head = block; +} + +static void xgi_mem_insert_node_tail(xgi_mem_list_t *list, + xgi_mem_block_t *block) + +{ + block->next = NULL; + block->prev = list->tail; + if (NULL == list->tail) + { + list->head = block; + } + else + { + list->tail->next = block; + } + list->tail = block; +} + +static void xgi_mem_delete_node(xgi_mem_list_t *list, + xgi_mem_block_t *block) +{ + if (block == list->head) + { + list->head = block->next; + } + if (block == list->tail) + { + list->tail = block->prev; + } + + if (block->prev) + { + block->prev->next = block->next; + } + if (block->next) + { + block->next->prev = block->prev; + } + + block->next = block->prev = NULL; +} +#endif +static xgi_mem_block_t *xgi_mem_alloc(xgi_info_t *info, unsigned long originalSize) +{ + struct list_head *free_list; + xgi_mem_block_t *block, *free_block, *used_block; + + unsigned long size = (originalSize + PAGE_SIZE - 1) & PAGE_MASK; + + XGI_INFO("Original 0x%lx bytes requested, really 0x%lx allocated\n", originalSize, size); + + if (size == 0) + { + XGI_ERROR("size == 0\n"); + return (NULL); + } + XGI_INFO("max_freesize: 0x%lx \n", xgi_fb_heap->max_freesize); + if (size > xgi_fb_heap->max_freesize) + { + XGI_ERROR("size: 0x%lx is bigger than frame buffer total free size: 0x%lx !\n", + size, xgi_fb_heap->max_freesize); + return (NULL); + } + + free_list = xgi_fb_heap->free_list.next; + + while (free_list != &xgi_fb_heap->free_list) + { + XGI_INFO("free_list: 0x%px \n", free_list); + block = list_entry(free_list, struct xgi_mem_block_s, 
list); + if (size <= block->size) + { + break; + } + free_list = free_list->next; + } + + if (free_list == &xgi_fb_heap->free_list) + { + XGI_ERROR("Can't allocate %ldk size from frame buffer memory !\n", size/1024); + return (NULL); + } + + free_block = block; + XGI_INFO("alloc size: 0x%lx from offset: 0x%lx size: 0x%lx \n", + size, free_block->offset, free_block->size); + + if (size == free_block->size) + { + used_block = free_block; + XGI_INFO("size == free_block->size: free_block = 0x%p\n", free_block); + list_del(&free_block->list); + } + else + { + used_block = xgi_mem_new_node(); + + if (used_block == NULL) return (NULL); + + if (used_block == free_block) + { + XGI_ERROR("used_block == free_block = 0x%p\n", used_block); + } + + used_block->offset = free_block->offset; + used_block->size = size; + + free_block->offset += size; + free_block->size -= size; + } + + xgi_fb_heap->max_freesize -= size; + + list_add(&used_block->list, &xgi_fb_heap->used_list); + + return (used_block); +} + +static xgi_mem_block_t *xgi_mem_free(xgi_info_t *info, unsigned long offset) +{ + struct list_head *free_list, *used_list; + xgi_mem_block_t *used_block = NULL, *block = NULL; + xgi_mem_block_t *prev, *next; + + unsigned long upper; + unsigned long lower; + + used_list = xgi_fb_heap->used_list.next; + while (used_list != &xgi_fb_heap->used_list) + { + block = list_entry(used_list, struct xgi_mem_block_s, list); + if (block->offset == offset) + { + break; + } + used_list = used_list->next; + } + + if (used_list == &xgi_fb_heap->used_list) + { + XGI_ERROR("can't find block: 0x%lx to free!\n", offset); + return (NULL); + } + + used_block = block; + XGI_INFO("used_block: 0x%p, offset = 0x%lx, size = 0x%lx\n", + used_block, used_block->offset, used_block->size); + + xgi_fb_heap->max_freesize += used_block->size; + + prev = next = NULL; + upper = used_block->offset + used_block->size; + lower = used_block->offset; + + free_list = xgi_fb_heap->free_list.next; + while (free_list != &xgi_fb_heap->free_list) + { + block = list_entry(free_list, struct xgi_mem_block_s, list); + + if (block->offset == upper) + { + next = block; + } + else if ((block->offset + block->size) == lower) + { + prev = block; + } + free_list = free_list->next; + } + + XGI_INFO("next = 0x%p, prev = 0x%p\n", next, prev); + list_del(&used_block->list); + + if (prev && next) + { + prev->size += (used_block->size + next->size); + list_del(&next->list); + XGI_INFO("free node 0x%p\n", next); + kmem_cache_free(xgi_fb_cache_block, next); + kmem_cache_free(xgi_fb_cache_block, used_block); + + next = NULL; + used_block = NULL; + return (prev); + } + + if (prev) + { + prev->size += used_block->size; + XGI_INFO("free node 0x%p\n", used_block); + kmem_cache_free(xgi_fb_cache_block, used_block); + used_block = NULL; + return (prev); + } + + if (next) + { + next->size += used_block->size; + next->offset = used_block->offset; + XGI_INFO("free node 0x%p\n", used_block); + kmem_cache_free(xgi_fb_cache_block, used_block); + used_block = NULL; + return (next); + } + + list_add(&used_block->list, &xgi_fb_heap->free_list); + XGI_INFO("Recycled free node %p, offset = 0x%lx, size = 0x%lx\n", + used_block, used_block->offset, used_block->size); + + return (used_block); +} + diff --git a/linux-core/xgi_fb.h b/linux-core/xgi_fb.h new file mode 100644 index 00000000..4b7ec2f2 --- /dev/null +++ b/linux-core/xgi_fb.h @@ -0,0 +1,71 @@ + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. 
+ * * + * All Rights Reserved. * + * * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + ***************************************************************************/ + +#ifndef _XGI_FB_H_ +#define _XGI_FB_H_ + +typedef struct xgi_mem_block_s { + struct list_head list; + unsigned long offset; + unsigned long size; + atomic_t use_count; +} xgi_mem_block_t; + +typedef struct xgi_mem_heap_s { + struct list_head free_list; + struct list_head used_list; + struct list_head sort_list; + unsigned long max_freesize; + spinlock_t lock; +} xgi_mem_heap_t; + +#if 0 +typedef struct xgi_mem_block_s { + struct xgi_mem_block_s *next; + struct xgi_mem_block_s *prev; + unsigned long offset; + unsigned long size; + atomic_t use_count; +} xgi_mem_block_t; + +typedef struct xgi_mem_list_s { + xgi_mem_block_t *head; + xgi_mem_block_t *tail; +} xgi_mem_list_t; + +typedef struct xgi_mem_heap_s { + xgi_mem_list_t *free_list; + xgi_mem_list_t *used_list; + xgi_mem_list_t *sort_list; + unsigned long max_freesize; + spinlock_t lock; +} xgi_mem_heap_t; +#endif + +#endif + diff --git a/linux-core/xgi_linux.h b/linux-core/xgi_linux.h new file mode 100644 index 00000000..f207a4f6 --- /dev/null +++ b/linux-core/xgi_linux.h @@ -0,0 +1,596 @@ + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * * + * All Rights Reserved. * + * * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. 
IN NO EVENT SHALL XGI AND/OR + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + ***************************************************************************/ + + +#ifndef _XGI_LINUX_H_ +#define _XGI_LINUX_H_ + +#include + +#ifndef LINUX_VERSION_CODE +#include +#endif + +#ifndef KERNEL_VERSION /* pre-2.1.90 didn't have it */ +#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c)) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0) +# error "This driver does not support pre-2.4 kernels!" +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0) +#define KERNEL_2_4 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) +# error "This driver does not support 2.5 kernels!" +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 7, 0) +#define KERNEL_2_6 +#else +# error "This driver does not support development kernels!" +#endif + +#if defined (CONFIG_SMP) && !defined (__SMP__) +#define __SMP__ +#endif + +#if defined (CONFIG_MODVERSIONS) && !defined (MODVERSIONS) +#define MODVERSIONS +#endif + +#if defined (MODVERSIONS) && !defined (KERNEL_2_6) +#include +#endif + +#include /* printk */ +#include + +#include /* module_init, module_exit */ +#include /* pic_t, size_t, __u32, etc */ +#include /* error codes */ +#include /* circular linked list */ +#include /* NULL, offsetof */ +#include /* wait queues */ + +#include /* kmalloc, kfree, etc */ +#include /* vmalloc, vfree, etc */ + +#include /* poll_wait */ +#include /* mdelay, udelay */ +#include /* rdtsc rdtscl */ + +#include /* suser(), capable() replacement + for_each_task, for_each_process */ +#ifdef for_each_process +#define XGI_SCAN_PROCESS(p) for_each_process(p) +#else +#define XGI_SCAN_PROCESS(p) for_each_task(p) +#endif + +#ifdef KERNEL_2_6 +#include /* module_param() */ +#include /* kernel_locked */ +#include /* flush_tlb(), flush_tlb_all() */ +#include /* page table entry lookup */ +#endif + +#include /* pci_find_class, etc */ +#include /* tasklets, interrupt helpers */ +#include + +#include /* cli, sli, save_flags */ +#include /* ioremap, virt_to_phys */ +#include /* access_ok */ +#include /* PAGE_OFFSET */ +#include /* pte bit definitions */ + +#include +#include +#include + +#ifdef CONFIG_PROC_FS +#include +#endif + +#ifdef CONFIG_DEVFS_FS +#include +#endif + +#ifdef CONFIG_KMOD +#include +#endif + +#ifdef CONFIG_PM +#include +#endif + +#ifdef CONFIG_MTRR +#include +#endif + +#ifdef CONFIG_KDB +#include +#include +#endif + +#if defined (CONFIG_AGP) || defined (CONFIG_AGP_MODULE) +#define AGPGART +#include +#include +#endif + +#ifndef MAX_ORDER +#ifdef KERNEL_2_4 +#define MAX_ORDER 10 +#endif +#ifdef KERNEL_2_6 +#define MAX_ORDER 11 +#endif +#endif + +#ifndef module_init +#define module_init(x) int init_module(void) { return x(); } +#define module_exit(x) void cleanup_module(void) { x(); } +#endif + +#ifndef minor +#define minor(x) MINOR(x) +#endif + +#ifndef IRQ_HANDLED +typedef void irqreturn_t; +#define IRQ_NONE +#define IRQ_HANDLED +#define IRQ_RETVAL(x) +#endif + +#if !defined (list_for_each) +#define list_for_each(pos, head) \ + for (pos = (head)->next, prefetch(pos->next); pos != (head); \ + pos = pos->next, prefetch(pos->next)) +#endif + +#ifdef KERNEL_2_4 +#define XGI_PCI_FOR_EACH_DEV(dev) pci_for_each_dev(dev) +#endif +#ifdef KERNEL_2_6 +extern struct list_head pci_devices; /* list of all devices */ +#define XGI_PCI_FOR_EACH_DEV(dev) \ + for(dev 
= pci_dev_g(pci_devices.next); dev != pci_dev_g(&pci_devices); dev = pci_dev_g(dev->global_list.next)) +#endif + +/* + * the following macro causes problems when used in the same module + * as module_param(); undef it so we don't accidentally mix the two + */ +#if defined (KERNEL_2_6) +#undef MODULE_PARM +#endif + +#ifdef EXPORT_NO_SYMBOLS +EXPORT_NO_SYMBOLS; +#endif + +#if defined (KERNEL_2_4) +#define XGI_IS_SUSER() suser() +#define XGI_PCI_DEVICE_NAME(dev) ((dev)->name) +#define XGI_NUM_CPUS() smp_num_cpus +#define XGI_CLI() __cli() +#define XGI_SAVE_FLAGS(eflags) __save_flags(eflags) +#define XGI_RESTORE_FLAGS(eflags) __restore_flags(eflags) +#define XGI_MAY_SLEEP() (!in_interrupt()) +#define XGI_MODULE_PARAMETER(x) MODULE_PARM(x, "i") +#endif + +#if defined (KERNEL_2_6) +#define XGI_IS_SUSER() capable(CAP_SYS_ADMIN) +#define XGI_PCI_DEVICE_NAME(dev) ((dev)->pretty_name) +#define XGI_NUM_CPUS() num_online_cpus() +#define XGI_CLI() local_irq_disable() +#define XGI_SAVE_FLAGS(eflags) local_save_flags(eflags) +#define XGI_RESTORE_FLAGS(eflags) local_irq_restore(eflags) +#define XGI_MAY_SLEEP() (!in_interrupt() && !in_atomic()) +#define XGI_MODULE_PARAMETER(x) module_param(x, int, 0) +#endif + +/* Earlier 2.4.x kernels don't have pci_disable_device() */ +#ifdef XGI_PCI_DISABLE_DEVICE_PRESENT +#define XGI_PCI_DISABLE_DEVICE(dev) pci_disable_device(dev) +#else +#define XGI_PCI_DISABLE_DEVICE(dev) +#endif + +/* common defines */ +#define GET_MODULE_SYMBOL(mod,sym) (const void *) inter_module_get(sym) +#define PUT_MODULE_SYMBOL(sym) inter_module_put((char *) sym) + +#define XGI_GET_PAGE_STRUCT(phys_page) virt_to_page(__va(phys_page)) +#define XGI_VMA_OFFSET(vma) (((vma)->vm_pgoff) << PAGE_SHIFT) +#define XGI_VMA_PRIVATE(vma) ((vma)->vm_private_data) + +#define XGI_DEVICE_NUMBER(x) minor((x)->i_rdev) +#define XGI_IS_CONTROL_DEVICE(x) (minor((x)->i_rdev) == 255) + +#define XGI_PCI_RESOURCE_START(dev, bar) ((dev)->resource[bar].start) +#define XGI_PCI_RESOURCE_SIZE(dev, bar) ((dev)->resource[bar].end - (dev)->resource[bar].start + 1) + +#define XGI_PCI_BUS_NUMBER(dev) (dev)->bus->number +#define XGI_PCI_SLOT_NUMBER(dev) PCI_SLOT((dev)->devfn) + +#ifdef XGI_PCI_GET_CLASS_PRESENT +#define XGI_PCI_DEV_PUT(dev) pci_dev_put(dev) +#define XGI_PCI_GET_DEVICE(vendor,device,from) pci_get_device(vendor,device,from) +#define XGI_PCI_GET_SLOT(bus,devfn) pci_get_slot(pci_find_bus(0,bus),devfn) +#define XGI_PCI_GET_CLASS(class,from) pci_get_class(class,from) +#else +#define XGI_PCI_DEV_PUT(dev) +#define XGI_PCI_GET_DEVICE(vendor,device,from) pci_find_device(vendor,device,from) +#define XGI_PCI_GET_SLOT(bus,devfn) pci_find_slot(bus,devfn) +#define XGI_PCI_GET_CLASS(class,from) pci_find_class(class,from) +#endif + +/* + * acpi support has been back-ported to the 2.4 kernel, but the 2.4 driver + * model is not sufficient for full acpi support. it may work in some cases, + * but not enough for us to officially support this configuration. + */ +#if defined(CONFIG_ACPI) && defined(KERNEL_2_6) +#define XGI_PM_SUPPORT_ACPI +#endif + +#if defined(CONFIG_APM) || defined(CONFIG_APM_MODULE) +#define XGI_PM_SUPPORT_APM +#endif + + +#if defined(CONFIG_DEVFS_FS) +#if defined(KERNEL_2_6) +typedef void* devfs_handle_t; +#define XGI_DEVFS_REGISTER(_name, _minor) \ + ({ \ + devfs_handle_t __handle = NULL; \ + if (devfs_mk_cdev(MKDEV(XGI_DEV_MAJOR, _minor), \ + S_IFCHR | S_IRUGO | S_IWUGO, _name) == 0) \ + { \ + __handle = (void *) 1; /* XXX Fix me! 
(boolean) */ \ + } \ + __handle; \ + }) +/* +#define XGI_DEVFS_REMOVE_DEVICE(i) devfs_remove("xgi%d", i) +*/ +#define XGI_DEVFS_REMOVE_CONTROL() devfs_remove("xgi_ctl") +#define XGI_DEVFS_REMOVE_DEVICE(i) devfs_remove("xgi") +#else // defined(KERNEL_2_4) +#define XGI_DEVFS_REGISTER(_name, _minor) \ + ({ \ + devfs_handle_t __handle = devfs_register(NULL, _name, DEVFS_FL_AUTO_DEVNUM, \ + XGI_DEV_MAJOR, _minor, \ + S_IFCHR | S_IRUGO | S_IWUGO, &xgi_fops, NULL); \ + __handle; \ + }) + +#define XGI_DEVFS_REMOVE_DEVICE(i) \ + ({ \ + if (xgi_devfs_handles[i] != NULL) \ + { \ + devfs_unregister(xgi_devfs_handles[i]); \ + } \ + }) +#define XGI_DEVFS_REMOVE_CONTROL() \ + ({ \ + if (xgi_devfs_handles[0] != NULL) \ + { \ + devfs_unregister(xgi_devfs_handles[0]); \ + } \ + }) +#endif /* defined(KERNEL_2_4) */ +#endif /* defined(CONFIG_DEVFS_FS) */ + +#if defined(CONFIG_DEVFS_FS) && !defined(KERNEL_2_6) +#define XGI_REGISTER_CHRDEV(x...) devfs_register_chrdev(x) +#define XGI_UNREGISTER_CHRDEV(x...) devfs_unregister_chrdev(x) +#else +#define XGI_REGISTER_CHRDEV(x...) register_chrdev(x) +#define XGI_UNREGISTER_CHRDEV(x...) unregister_chrdev(x) +#endif + +#if defined(XGI_REMAP_PFN_RANGE_PRESENT) +#define XGI_REMAP_PAGE_RANGE(from, offset, x...) \ + remap_pfn_range(vma, from, ((offset) >> PAGE_SHIFT), x) +#elif defined(XGI_REMAP_PAGE_RANGE_5) +#define XGI_REMAP_PAGE_RANGE(x...) remap_page_range(vma, x) +#elif defined(XGI_REMAP_PAGE_RANGE_4) +#define XGI_REMAP_PAGE_RANGE(x...) remap_page_range(x) +#else +#warning "xgi_configure.sh failed, assuming remap_page_range(5)!" +#define XGI_REMAP_PAGE_RANGE(x...) remap_page_range(vma, x) +#endif + +#if defined(pmd_offset_map) +#define XGI_PMD_OFFSET(addres, pg_dir, pg_mid_dir) \ + { \ + pg_mid_dir = pmd_offset_map(pg_dir, address); \ + } +#define XGI_PMD_UNMAP(pg_mid_dir) \ + { \ + pmd_unmap(pg_mid_dir); \ + } +#else +#define XGI_PMD_OFFSET(addres, pg_dir, pg_mid_dir) \ + { \ + pg_mid_dir = pmd_offset(pg_dir, address); \ + } +#define XGI_PMD_UNMAP(pg_mid_dir) +#endif + +#define XGI_PMD_PRESENT(pg_mid_dir) \ + ({ \ + if ((pg_mid_dir) && (pmd_none(*pg_mid_dir))) \ + { \ + XGI_PMD_UNMAP(pg_mid_dir); \ + pg_mid_dir = NULL; \ + } \ + pg_mid_dir != NULL; \ + }) + +#if defined(pte_offset_atomic) +#define XGI_PTE_OFFSET(addres, pg_mid_dir, pte) \ + { \ + pte = pte_offset_atomic(pg_mid_dir, address); \ + XGI_PMD_UNMAP(pg_mid_dir); \ + } +#define XGI_PTE_UNMAP(pte) \ + { \ + pte_kunmap(pte); \ + } +#elif defined(pte_offset) +#define XGI_PTE_OFFSET(addres, pg_mid_dir, pte) \ + { \ + pte = pte_offset(pg_mid_dir, address); \ + XGI_PMD_UNMAP(pg_mid_dir); \ + } +#define XGI_PTE_UNMAP(pte) +#else +#define XGI_PTE_OFFSET(addres, pg_mid_dir, pte) \ + { \ + pte = pte_offset_map(pg_mid_dir, address); \ + XGI_PMD_UNMAP(pg_mid_dir); \ + } +#define XGI_PTE_UNMAP(pte) \ + { \ + pte_unmap(pte); \ + } +#endif + +#define XGI_PTE_PRESENT(pte) \ + ({ \ + if (pte) \ + { \ + if (!pte_present(*pte)) \ + { \ + XGI_PTE_UNMAP(pte); pte = NULL; \ + } \ + } \ + pte != NULL; \ + }) + +#define XGI_PTE_VALUE(pte) \ + ({ \ + unsigned long __pte_value = pte_val(*pte); \ + XGI_PTE_UNMAP(pte); \ + __pte_value; \ + }) + +#define XGI_PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) / PAGE_SIZE) +#define XGI_MASK_OFFSET(addr) ((addr) & (PAGE_SIZE - 1)) + +#if !defined (pgprot_noncached) +static inline pgprot_t pgprot_noncached(pgprot_t old_prot) + { + pgprot_t new_prot = old_prot; + if (boot_cpu_data.x86 > 3) + new_prot = __pgprot(pgprot_val(old_prot) | _PAGE_PCD); + return new_prot; + } +#endif + +#if 
defined(XGI_BUILD_XGI_PAT_SUPPORT) && !defined (pgprot_writecombined) +/* Added define for write combining page, only valid if pat enabled. */ +#define _PAGE_WRTCOMB _PAGE_PWT +#define __PAGE_KERNEL_WRTCOMB \ + (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_WRTCOMB | _PAGE_ACCESSED) +#define PAGE_KERNEL_WRTCOMB MAKE_GLOBAL(__PAGE_KERNEL_WRTCOMB) + +static inline pgprot_t pgprot_writecombined(pgprot_t old_prot) + { + pgprot_t new_prot = old_prot; + if (boot_cpu_data.x86 > 3) + { + pgprot_val(old_prot) &= ~(_PAGE_PCD | _PAGE_PWT); + new_prot = __pgprot(pgprot_val(old_prot) | _PAGE_WRTCOMB); + } + return new_prot; + } +#endif + +#if !defined(page_to_pfn) +#define page_to_pfn(page) ((page) - mem_map) +#endif + +#define XGI_VMALLOC(ptr, size) \ + { \ + (ptr) = vmalloc_32(size); \ + } + +#define XGI_VFREE(ptr, size) \ + { \ + vfree((void *) (ptr)); \ + } + +#define XGI_IOREMAP(ptr, physaddr, size) \ + { \ + (ptr) = ioremap(physaddr, size); \ + } + +#define XGI_IOREMAP_NOCACHE(ptr, physaddr, size) \ + { \ + (ptr) = ioremap_nocache(physaddr, size); \ + } + +#define XGI_IOUNMAP(ptr, size) \ + { \ + iounmap(ptr); \ + } + +/* + * only use this because GFP_KERNEL may sleep.. + * GFP_ATOMIC is ok, it won't sleep + */ +#define XGI_KMALLOC(ptr, size) \ + { \ + (ptr) = kmalloc(size, GFP_KERNEL); \ + } + +#define XGI_KMALLOC_ATOMIC(ptr, size) \ + { \ + (ptr) = kmalloc(size, GFP_ATOMIC); \ + } + +#define XGI_KFREE(ptr, size) \ + { \ + kfree((void *) (ptr)); \ + } + +#define XGI_GET_FREE_PAGES(ptr, order) \ + { \ + (ptr) = __get_free_pages(GFP_KERNEL, order); \ + } + +#define XGI_FREE_PAGES(ptr, order) \ + { \ + free_pages(ptr, order); \ + } + +typedef struct xgi_pte_s { + unsigned long phys_addr; + unsigned long virt_addr; +} xgi_pte_t; + +/* + * AMD Athlon processors expose a subtle bug in the Linux + * kernel, that may lead to AGP memory corruption. Recent + * kernel versions had a workaround for this problem, but + * 2.4.20 is the first kernel to address it properly. The + * page_attr API provides the means to solve the problem. 
+ */ +#if defined(XGI_CHANGE_PAGE_ATTR_PRESENT) +static inline void XGI_SET_PAGE_ATTRIB_UNCACHED(xgi_pte_t *page_ptr) + { + struct page *page = virt_to_page(__va(page_ptr->phys_addr)); + change_page_attr(page, 1, PAGE_KERNEL_NOCACHE); + } +static inline void XGI_SET_PAGE_ATTRIB_CACHED(xgi_pte_t *page_ptr) + { + struct page *page = virt_to_page(__va(page_ptr->phys_addr)); + change_page_attr(page, 1, PAGE_KERNEL); + } +#else +#define XGI_SET_PAGE_ATTRIB_UNCACHED(page_list) +#define XGI_SET_PAGE_ATTRIB_CACHED(page_list) +#endif + +#ifdef KERNEL_2_4 +#define XGI_INC_PAGE_COUNT(page) atomic_inc(&(page)->count) +#define XGI_DEC_PAGE_COUNT(page) atomic_dec(&(page)->count) +#define XGI_PAGE_COUNT(page) atomic_read(&(page)->count) +#define XGI_SET_PAGE_COUNT(page,v) atomic_set(&(page)->count, v) + +#define XGILockPage(page) set_bit(PG_locked, &(page)->flags) +#define XGIUnlockPage(page) clear_bit(PG_locked, &(page)->flags) +#endif + +#ifdef KERNEL_2_6 +/* add for SUSE 9, Jill*/ +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 4) +#define XGI_INC_PAGE_COUNT(page) atomic_inc(&(page)->count) +#define XGI_DEC_PAGE_COUNT(page) atomic_dec(&(page)->count) +#define XGI_PAGE_COUNT(page) atomic_read(&(page)->count) +#define XGI_SET_PAGE_COUNT(page,v) atomic_set(&(page)->count, v) +#else +#define XGI_INC_PAGE_COUNT(page) atomic_inc(&(page)->_count) +#define XGI_DEC_PAGE_COUNT(page) atomic_dec(&(page)->_count) +#define XGI_PAGE_COUNT(page) atomic_read(&(page)->_count) +#define XGI_SET_PAGE_COUNT(page,v) atomic_set(&(page)->_count, v) +#endif +#define XGILockPage(page) SetPageLocked(page) +#define XGIUnlockPage(page) ClearPageLocked(page) +#endif + + +/* + * hide a pointer to struct xgi_info_t in a file-private info + */ + +typedef struct +{ + void *info; + U32 num_events; + spinlock_t fp_lock; + wait_queue_head_t wait_queue; +} xgi_file_private_t; + +#define FILE_PRIVATE(filp) ((filp)->private_data) + +#define XGI_GET_FP(filp) ((xgi_file_private_t *) FILE_PRIVATE(filp)) + +/* for the card devices */ +#define XGI_INFO_FROM_FP(filp) (XGI_GET_FP(filp)->info) + +#ifdef KERNEL_2_0 +#define INODE_FROM_FP(filp) ((filp)->f_inode) +#else +#define INODE_FROM_FP(filp) ((filp)->f_dentry->d_inode) +#endif + +#define XGI_ATOMIC_SET(data,val) atomic_set(&(data), (val)) +#define XGI_ATOMIC_INC(data) atomic_inc(&(data)) +#define XGI_ATOMIC_DEC(data) atomic_dec(&(data)) +#define XGI_ATOMIC_DEC_AND_TEST(data) atomic_dec_and_test(&(data)) +#define XGI_ATOMIC_READ(data) atomic_read(&(data)) + +/* + * lock-related functions that should only be called from this file + */ +#define xgi_init_lock(lock) spin_lock_init(&lock) +#define xgi_lock(lock) spin_lock(&lock) +#define xgi_unlock(lock) spin_unlock(&lock) +#define xgi_down(lock) down(&lock) +#define xgi_up(lock) up(&lock) + +#define xgi_lock_irqsave(lock,flags) spin_lock_irqsave(&lock,flags) +#define xgi_unlock_irqsave(lock,flags) spin_unlock_irqrestore(&lock,flags) + +#endif diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c new file mode 100644 index 00000000..b15c7ecf --- /dev/null +++ b/linux-core/xgi_misc.c @@ -0,0 +1,657 @@ + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * * + * All Rights Reserved. 
* + * * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + ***************************************************************************/ + +#include "xgi_types.h" +#include "xgi_linux.h" +#include "xgi_drv.h" +#include "xgi_regs.h" +#include "xgi_pcie.h" + +void xgi_get_device_info(xgi_info_t *info, xgi_chip_info_t *req) +{ + req->device_id = info->device_id; + req->device_name[0] = 'x'; + req->device_name[1] = 'g'; + req->device_name[2] = '4'; + req->device_name[3] = '7'; + req->vendor_id = info->vendor_id; + req->curr_display_mode = 0; + req->fb_size = info->fb.size; + req->sarea_bus_addr = info->sarea_info.bus_addr; + req->sarea_size = info->sarea_info.size; +} + +void xgi_get_mmio_info(xgi_info_t *info, xgi_mmio_info_t *req) +{ + req->mmioBase = (void *)info->mmio.base; + req->size = info->mmio.size; +} + +void xgi_put_screen_info(xgi_info_t *info, xgi_screen_info_t *req) +{ + info->scrn_info.scrn_start = req->scrn_start; + info->scrn_info.scrn_xres = req->scrn_xres; + info->scrn_info.scrn_yres = req->scrn_yres; + info->scrn_info.scrn_bpp = req->scrn_bpp; + info->scrn_info.scrn_pitch = req->scrn_pitch; + + XGI_INFO("info->scrn_info.scrn_start: 0x%lx" + "info->scrn_info.scrn_xres: 0x%lx" + "info->scrn_info.scrn_yres: 0x%lx" + "info->scrn_info.scrn_bpp: 0x%lx" + "info->scrn_info.scrn_pitch: 0x%lx\n", + info->scrn_info.scrn_start, + info->scrn_info.scrn_xres, + info->scrn_info.scrn_yres, + info->scrn_info.scrn_bpp, + info->scrn_info.scrn_pitch); +} + +void xgi_get_screen_info(xgi_info_t *info, xgi_screen_info_t *req) +{ + req->scrn_start = info->scrn_info.scrn_start; + req->scrn_xres = info->scrn_info.scrn_xres; + req->scrn_yres = info->scrn_info.scrn_yres; + req->scrn_bpp = info->scrn_info.scrn_bpp; + req->scrn_pitch = info->scrn_info.scrn_pitch; + + XGI_INFO("req->scrn_start: 0x%lx" + "req->scrn_xres: 0x%lx" + "req->scrn_yres: 0x%lx" + "req->scrn_bpp: 0x%lx" + "req->scrn_pitch: 0x%lx\n", + req->scrn_start, + req->scrn_xres, + req->scrn_yres, + req->scrn_bpp, + req->scrn_pitch); +} + +void xgi_ge_reset(xgi_info_t *info) +{ + xgi_disable_ge(info); + xgi_enable_ge(info); +} + +void xgi_sarea_info(xgi_info_t *info, xgi_sarea_info_t *req) +{ + info->sarea_info.bus_addr = req->bus_addr; + info->sarea_info.size = req->size; + XGI_INFO("info->sarea_info.bus_addr: 0x%lx" + "info->sarea_info.size: 0x%lx\n", + info->sarea_info.bus_addr, + info->sarea_info.size); +} + +/* + * irq functions + */ +#define STALL_INTERRUPT_RESET_THRESHOLD 
0xffff + +static U32 s_invalid_begin = 0; + +BOOL xgi_ge_irq_handler(xgi_info_t *info) +{ + volatile U8 *mmio_vbase = info->mmio.vbase; + volatile U32 *ge_3d_status = (volatile U32 *)(mmio_vbase + 0x2800); + U32 int_status = ge_3d_status[4]; // interrupt status + U32 auto_reset_count = 0; + BOOL is_support_auto_reset = FALSE; + + // Check GE on/off + if (0 == (0xffffc0f0 & int_status)) + { + U32 old_ge_status = ge_3d_status[0x00]; + U32 old_pcie_cmd_fetch_Addr = ge_3d_status[0x0a]; + if (0 != (0x1000 & int_status)) + { + // We got GE stall interrupt. + ge_3d_status[0x04] = int_status | 0x04000000; + + if (TRUE == is_support_auto_reset) + { + BOOL is_wrong_signal = FALSE; + static U32 last_int_tick_low, last_int_tick_high; + static U32 new_int_tick_low, new_int_tick_high; + static U32 continoue_int_count = 0; + // OE II is busy. + while (old_ge_status & 0x001c0000) + { + U16 check; + // Check Read back status + *(mmio_vbase + 0x235c) = 0x80; + check = *((volatile U16*)(mmio_vbase + 0x2360)); + if ((check & 0x3f) != ((check & 0x3f00) >> 8)) + { + is_wrong_signal = TRUE; + break; + } + // Check RO channel + *(mmio_vbase + 0x235c) = 0x83; + check = *((volatile U16*)(mmio_vbase + 0x2360)); + if ((check & 0x0f) != ((check & 0xf0) >> 4)) + { + is_wrong_signal = TRUE; + break; + } + // Check RW channel + *(mmio_vbase + 0x235c) = 0x88; + check = *((volatile U16*)(mmio_vbase + 0x2360)); + if ((check & 0x0f) != ((check & 0xf0) >> 4)) + { + is_wrong_signal = TRUE; + break; + } + // Check RO channel outstanding + *(mmio_vbase + 0x235c) = 0x8f; + check = *((volatile U16*)(mmio_vbase + 0x2360)); + if (0 != (check & 0x3ff)) + { + is_wrong_signal = TRUE; + break; + } + // Check RW channel outstanding + *(mmio_vbase + 0x235c) = 0x90; + check = *((volatile U16*)(mmio_vbase + 0x2360)); + if (0 != (check & 0x3ff)) + { + is_wrong_signal = TRUE; + break; + } + // No pending PCIE request. GE stall. + break; + } + + if (is_wrong_signal) + { + // Nothing but skip. + } + else if (0 == continoue_int_count++) + { + rdtsc(last_int_tick_low, last_int_tick_high); + } + else + { + rdtscl(new_int_tick_low); + if ((new_int_tick_low - last_int_tick_low) > STALL_INTERRUPT_RESET_THRESHOLD) + { + continoue_int_count = 0; + } + else if (continoue_int_count >= 3) + { + continoue_int_count = 0; + + // GE Hung up, need reset. + XGI_INFO("Reset GE!\n"); + + *(mmio_vbase + 0xb057) = 8; + int time_out = 0xffff; + while (0 != (ge_3d_status[0x00] & 0xf0000000)) + { + while (0 != ((--time_out) & 0xfff)); + if (0 == time_out) + { + XGI_INFO("Can not reset back 0x%lx!\n", ge_3d_status[0x00]); + *(mmio_vbase + 0xb057) = 0; + // Have to use 3x5.36 to reset. 
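                                    /*
                                     * Illustrative sketch (not part of the original patch): the fallback
                                     * reset below uses the standard VGA indexed-register protocol, where
                                     * an index byte is written to the address port (0x3CE for the GRxx
                                     * group, 0x3D4 for the CRxx group) and the data is then accessed
                                     * through the data port one byte higher (0x3CF / 0x3D5).  Assuming
                                     * the same mmio_vbase mapping used by this handler, the
                                     * read-modify-write of CR36 bit 4 (the GE reset bit toggled below)
                                     * amounts to:
                                     *
                                     *   *(mmio_vbase + 0x3d4) = 0x36;          // select CR36
                                     *   old_36 = *(mmio_vbase + 0x3d5);        // read current value
                                     *   *(mmio_vbase + 0x3d5) = old_36 | 0x10; // assert GE reset
                                     *   *(mmio_vbase + 0x3d5) = old_36;        // release it again
                                     */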
+ // Save and close dynamic gating + U8 old_3ce = *(mmio_vbase + 0x3ce); + *(mmio_vbase + 0x3ce) = 0x2a; + U8 old_3cf = *(mmio_vbase + 0x3cf); + *(mmio_vbase + 0x3cf) = old_3cf & 0xfe; + // Reset GE + U8 old_index = *(mmio_vbase + 0x3d4); + *(mmio_vbase + 0x3d4) = 0x36; + U8 old_36 = *(mmio_vbase + 0x3d5); + *(mmio_vbase + 0x3d5) = old_36 | 0x10; + while (0 != ((--time_out) & 0xfff)); + *(mmio_vbase + 0x3d5) = old_36; + *(mmio_vbase + 0x3d4) = old_index; + // Restore dynamic gating + *(mmio_vbase + 0x3cf) = old_3cf; + *(mmio_vbase + 0x3ce) = old_3ce; + break; + } + } + *(mmio_vbase + 0xb057) = 0; + + // Increase Reset counter + auto_reset_count++; + } + } + } + return TRUE; + } + else if (0 != (0x1 & int_status)) + { + s_invalid_begin++; + ge_3d_status[0x04] = (int_status & ~0x01) | 0x04000000; + return TRUE; + } + } + return FALSE; +} + +BOOL xgi_crt_irq_handler(xgi_info_t *info) +{ + BOOL ret = FALSE; + U8 *mmio_vbase = info->mmio.vbase; + U32 device_status = 0; + U32 hw_status = 0; + U8 save_3ce = bReadReg(0x3ce); + + + if (bIn3cf(0x37) & 0x01) // CRT1 interrupt just happened + { + U8 op3cf_3d; + U8 op3cf_37; + + // What happened? + op3cf_37 = bIn3cf(0x37); + +#if 0 + if (op3cf_37 & 0x04) + device_status |= GDEVST_CONNECT; + else + device_status &= ~GDEVST_CONNECT; + + device_status |= GDEVST_DEVICE_CHANGED; + hw_status |= HWST_DEVICE_CHANGED; +#endif + // Clear CRT interrupt + op3cf_3d = bIn3cf(0x3d); + bOut3cf(0x3d, (op3cf_3d | 0x04)); + bOut3cf(0x3d, (op3cf_3d & ~0x04)); + ret = TRUE; + } + bWriteReg(0x3ce, save_3ce); + + return (ret); +} + +BOOL xgi_dvi_irq_handler(xgi_info_t *info) +{ + BOOL ret = FALSE; + U8 *mmio_vbase = info->mmio.vbase; + U32 device_status = 0; + U32 hw_status = 0; + U8 save_3ce = bReadReg(0x3ce); + + if (bIn3cf(0x38) & 0x20) // DVI interrupt just happened + { + U8 op3cf_39; + U8 op3cf_37; + U8 op3x5_5a; + U8 save_3x4 = bReadReg(0x3d4);; + + // What happened? 
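        /*
         * Illustrative note (not part of the original patch): bIn3cf()/bOut3cf()
         * and bIn3x5()/bOut3x5() are indexed-register helpers from xgi_regs.h,
         * which is not shown in this hunk.  Assuming the usual VGA convention,
         * bIn3cf(0x37) below is roughly:
         *
         *   bWriteReg(0x3ce, 0x37);      // select register GR37
         *   status = bReadReg(0x3cf);    // read back the DVI status byte
         *
         * In this handler the value is only consulted by the currently disabled
         * (#if 0) block that distinguishes a plug from an unplug event.
         */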
+ op3cf_37 = bIn3cf(0x37); +#if 0 + //Also update our internal flag + if (op3cf_37 & 0x10) // Second Monitor plugged In + { + device_status |= GDEVST_CONNECT; + //Because currenly we cannot determine if DVI digital + //or DVI analog is connected according to DVI interrupt + //We should still call BIOS to check it when utility ask us + device_status &= ~GDEVST_CHECKED; + } + else + { + device_status &= ~GDEVST_CONNECT; + } +#endif + //Notify BIOS that DVI plug/unplug happened + op3x5_5a = bIn3x5(0x5a); + bOut3x5(0x5a, op3x5_5a & 0xf7); + + bWriteReg(0x3d4, save_3x4); + + //device_status |= GDEVST_DEVICE_CHANGED; + //hw_status |= HWST_DEVICE_CHANGED; + + // Clear DVI interrupt + op3cf_39 = bIn3cf(0x39); + bOut3c5(0x39, (op3cf_39 & ~0x01)); //Set 3cf.39 bit 0 to 0 + bOut3c5(0x39, (op3cf_39 | 0x01 )); //Set 3cf.39 bit 0 to 1 + + ret = TRUE; + } + bWriteReg(0x3ce, save_3ce); + + return (ret); +} + +void xgi_dump_register(xgi_info_t *info) +{ + int i, j; + unsigned char temp; + + // 0x3C5 + printk("\r\n=====xgi_dump_register========0x%x===============\r\n", 0x3C5); + + for(i=0; i<0x10; i++) + { + if(i == 0) + { + printk("%5x", i); + } + else + { + printk("%3x", i); + } + } + printk("\r\n"); + + for(i=0; i<0x10; i++) + { + printk("%1x ", i); + + for(j=0; j<0x10; j++) + { + temp = bIn3c5(i*0x10 + j); + printk("%3x", temp); + } + printk("\r\n"); + } + + // 0x3D5 + printk("\r\n====xgi_dump_register=========0x%x===============\r\n", 0x3D5); + for(i=0; i<0x10; i++) + { + if(i == 0) + { + printk("%5x", i); + } + else + { + printk("%3x", i); + } + } + printk("\r\n"); + + for(i=0; i<0x10; i++) + { + printk("%1x ", i); + + for(j=0; j<0x10; j++) + { + temp = bIn3x5(i*0x10 + j); + printk("%3x", temp); + } + printk("\r\n"); + } + + // 0x3CF + printk("\r\n=========xgi_dump_register====0x%x===============\r\n", 0x3CF); + for(i=0; i<0x10; i++) + { + if(i == 0) + { + printk("%5x", i); + } + else + { + printk("%3x", i); + } + } + printk("\r\n"); + + for(i=0; i<0x10; i++) + { + printk("%1x ", i); + + for(j=0; j<0x10; j++) + { + temp = bIn3cf(i*0x10 + j); + printk("%3x", temp); + } + printk("\r\n"); + } + + printk("\r\n=====xgi_dump_register======0x%x===============\r\n", 0xB000); + for(i=0; i<0x10; i++) + { + if(i == 0) + { + printk("%5x", i); + } + else + { + printk("%3x", i); + } + } + printk("\r\n"); + + for(i=0; i<0x5; i++) + { + printk("%1x ", i); + + for(j=0; j<0x10; j++) + { + temp = bReadReg(0xB000 + i*0x10 + j); + printk("%3x", temp); + } + printk("\r\n"); + } + + printk("\r\n==================0x%x===============\r\n", 0x2200); + for(i=0; i<0x10; i++) + { + if(i == 0) + { + printk("%5x", i); + } + else + { + printk("%3x", i); + } + } + printk("\r\n"); + + for(i=0; i<0xB; i++) + { + printk("%1x ", i); + + for(j=0; j<0x10; j++) + { + temp = bReadReg(0x2200 + i*0x10 + j); + printk("%3x", temp); + } + printk("\r\n"); + } + + printk("\r\n==================0x%x===============\r\n", 0x2300); + for(i=0; i<0x10; i++) + { + if(i == 0) + { + printk("%5x", i); + } + else + { + printk("%3x", i); + } + } + printk("\r\n"); + + for(i=0; i<0x7; i++) + { + printk("%1x ", i); + + for(j=0; j<0x10; j++) + { + temp = bReadReg(0x2300 + i*0x10 + j); + printk("%3x", temp); + } + printk("\r\n"); + } + + printk("\r\n==================0x%x===============\r\n", 0x2400); + for(i=0; i<0x10; i++) + { + if(i == 0) + { + printk("%5x", i); + } + else + { + printk("%3x", i); + } + } + printk("\r\n"); + + for(i=0; i<0x10; i++) + { + printk("%1x ", i); + + for(j=0; j<0x10; j++) + { + temp = bReadReg(0x2400 + i*0x10 + j); + printk("%3x", 
temp); + } + printk("\r\n"); + } + + printk("\r\n==================0x%x===============\r\n", 0x2800); + for(i=0; i<0x10; i++) + { + if(i == 0) + { + printk("%5x", i); + } + else + { + printk("%3x", i); + } + } + printk("\r\n"); + + for(i=0; i<0x10; i++) + { + printk("%1x ", i); + + for(j=0; j<0x10; j++) + { + temp = bReadReg(0x2800 + i*0x10 + j); + printk("%3x", temp); + } + printk("\r\n"); + } +} + +void xgi_restore_registers(xgi_info_t *info) +{ + bOut3x5(0x13, 0); + bOut3x5(0x8b, 2); +} + +void xgi_waitfor_pci_idle(xgi_info_t *info) +{ +#define WHOLD_GE_STATUS 0x2800 +#define IDLE_MASK ~0x90200000 + + int idleCount = 0; + while(idleCount < 5) + { + if (dwReadReg(WHOLD_GE_STATUS) & IDLE_MASK) + { + idleCount = 0; + } + else + { + idleCount ++; + } + } +} + +int xgi_get_cpu_id(struct cpu_info_s *arg) +{ + int op = arg->_eax; + __asm__("cpuid" + : "=a" (arg->_eax), + "=b" (arg->_ebx), + "=c" (arg->_ecx), + "=d" (arg->_edx) + : "0" (op)); + + XGI_INFO("opCode = 0x%x, eax = 0x%x, ebx = 0x%x, ecx = 0x%x, edx = 0x%x \n", + op, arg->_eax, arg->_ebx, arg->_ecx, arg->_edx); +} + +/*memory collect function*/ +extern struct list_head xgi_mempid_list; +void xgi_mem_collect(xgi_info_t *info, unsigned int *pcnt) +{ + xgi_mem_pid_t *mempid_block; + struct list_head *mempid_list; + struct task_struct *p,*find; + unsigned int cnt = 0; + + mempid_list = xgi_mempid_list.next; + + while (mempid_list != &xgi_mempid_list) + { + mempid_block = list_entry(mempid_list, struct xgi_mem_pid_s, list); + mempid_list = mempid_list->next; + + find = NULL; + XGI_SCAN_PROCESS(p) + { + if (p->pid == mempid_block->pid) + { + XGI_INFO("[!]Find active pid:%ld state:%ld location:%d addr:0x%lx! \n", mempid_block->pid, p->state, mempid_block->location, mempid_block->bus_addr); + find = p; + if (mempid_block->bus_addr == 0xFFFFFFFF) + ++cnt; + break; + } + } + if (!find) + { + if (mempid_block->location == LOCAL) + { + XGI_INFO("Memory ProcessID free fb and delete one block pid:%ld addr:0x%lx successfully! \n", mempid_block->pid, mempid_block->bus_addr); + xgi_fb_free(info, mempid_block->bus_addr); + } + else if (mempid_block->bus_addr != 0xFFFFFFFF) + { + XGI_INFO("Memory ProcessID free pcie and delete one block pid:%ld addr:0x%lx successfully! \n", mempid_block->pid, mempid_block->bus_addr); + xgi_pcie_free(info, mempid_block->bus_addr); + } + else + { + /*only delete the memory block*/ + list_del(&mempid_block->list); + XGI_INFO("Memory ProcessID delete one pcie block pid:%ld successfully! \n", mempid_block->pid); + kfree(mempid_block); + } + } + } + *pcnt = cnt; +} diff --git a/linux-core/xgi_misc.h b/linux-core/xgi_misc.h new file mode 100644 index 00000000..ac4daaa1 --- /dev/null +++ b/linux-core/xgi_misc.h @@ -0,0 +1,49 @@ + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * * + * All Rights Reserved. * + * * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. 
+ * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + ***************************************************************************/ + + +#ifndef _XGI_MISC_H_ +#define _XGI_MISC_H_ + +extern void xgi_dump_register(xgi_info_t *info); +extern void xgi_get_device_info(xgi_info_t *info, xgi_chip_info_t * req); +extern void xgi_get_mmio_info(xgi_info_t *info, xgi_mmio_info_t *req); +extern void xgi_get_screen_info(xgi_info_t *info, xgi_screen_info_t *req); +extern void xgi_put_screen_info(xgi_info_t *info, xgi_screen_info_t *req); +extern void xgi_ge_reset(xgi_info_t *info); +extern void xgi_sarea_info(xgi_info_t *info, xgi_sarea_info_t *req); +extern int xgi_get_cpu_id(struct cpu_info_s *arg); + +extern void xgi_restore_registers(xgi_info_t *info); +extern BOOL xgi_ge_irq_handler(xgi_info_t *info); +extern BOOL xgi_crt_irq_handler(xgi_info_t *info); +extern BOOL xgi_dvi_irq_handler(xgi_info_t *info); +extern void xgi_waitfor_pci_idle(xgi_info_t *info); + + +#endif diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c new file mode 100644 index 00000000..62e2323f --- /dev/null +++ b/linux-core/xgi_pcie.c @@ -0,0 +1,1060 @@ + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * * + * All Rights Reserved. * + * * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ ***************************************************************************/ + +#include "xgi_types.h" +#include "xgi_linux.h" +#include "xgi_drv.h" +#include "xgi_regs.h" +#include "xgi_pcie.h" +#include "xgi_misc.h" + +static xgi_pcie_heap_t *xgi_pcie_heap = NULL; +static kmem_cache_t *xgi_pcie_cache_block = NULL; +static xgi_pcie_block_t *xgi_pcie_vertex_block = NULL; +static xgi_pcie_block_t *xgi_pcie_cmdlist_block = NULL; +static xgi_pcie_block_t *xgi_pcie_scratchpad_block = NULL; +extern struct list_head xgi_mempid_list; + +static unsigned long xgi_pcie_lut_alloc(unsigned long page_order) +{ + struct page *page; + unsigned long page_addr = 0; + unsigned long page_count = 0; + int i; + + page_count = (1 << page_order); + page_addr = __get_free_pages(GFP_KERNEL, page_order); + + if (page_addr == 0UL) + { + XGI_ERROR("Can't get free pages: 0x%lx from system memory !\n", + page_count); + return 0; + } + + page = virt_to_page(page_addr); + + for (i = 0; i < page_count; i++, page++) + { + XGI_INC_PAGE_COUNT(page); + XGILockPage(page); + } + + XGI_INFO("page_count: 0x%lx page_order: 0x%lx page_addr: 0x%lx \n", + page_count, page_order, page_addr); + return page_addr; +} + +static void xgi_pcie_lut_free(unsigned long page_addr, unsigned long page_order) +{ + struct page *page; + unsigned long page_count = 0; + int i; + + page_count = (1 << page_order); + page = virt_to_page(page_addr); + + for (i = 0; i < page_count; i++, page++) + { + XGI_DEC_PAGE_COUNT(page); + XGIUnlockPage(page); + } + + free_pages(page_addr, page_order); +} + +static int xgi_pcie_lut_init(xgi_info_t *info) +{ + unsigned char *page_addr = NULL; + unsigned long pciePageCount, lutEntryNum, lutPageCount, lutPageOrder; + unsigned long count = 0; + u8 temp = 0; + + /* Jong 06/06/2006 */ + unsigned long pcie_aperture_size; + + info->pcie.size = 128 * 1024 * 1024; + + /* Get current FB aperture size */ + temp = In3x5(0x27); + XGI_INFO("In3x5(0x27): 0x%x \n", temp); + + if (temp & 0x01) /* 256MB; Jong 06/05/2006; 0x10000000 */ + { + /* Jong 06/06/2006; allocate memory */ + pcie_aperture_size=256 * 1024 * 1024; + /* info->pcie.base = 256 * 1024 * 1024; */ /* pcie base is different from fb base */ + } + else /* 128MB; Jong 06/05/2006; 0x08000000 */ + { + /* Jong 06/06/2006; allocate memory */ + pcie_aperture_size=128 * 1024 * 1024; + /* info->pcie.base = 128 * 1024 * 1024; */ + } + + /* Jong 06/06/2006; allocate memory; it can be used for build-in kernel modules */ + /* info->pcie.base=(unsigned long)alloc_bootmem(pcie_mem_size); */ + /* total 496 MB; need 256 MB (0x10000000); start from 240 MB (0x0F000000) */ + /* info->pcie.base=ioremap(0x0F000000, 0x10000000); */ /* Cause system hang */ + info->pcie.base=pcie_aperture_size; /* works */ + /* info->pcie.base=info->fb.base + info->fb.size; */ /* System hang */ + /* info->pcie.base=128 * 1024 * 1024;*/ /* System hang */ + + XGI_INFO("Jong06062006-info->pcie.base: 0x%lx \n", info->pcie.base); + + + /* Get current lookup table page size */ + temp = bReadReg(0xB00C); + if (temp & 0x04) /* 8KB */ + { + info->lutPageSize = 8 * 1024; + } + else /* 4KB */ + { + info->lutPageSize = 4 * 1024; + } + + XGI_INFO("info->lutPageSize: 0x%lx \n", info->lutPageSize); + +#if 0 + /* Get current lookup table location */ + temp = bReadReg(0xB00C); + if (temp & 0x02) /* LFB */ + { + info->isLUTInLFB = TRUE; + /* Current we only support lookup table in LFB */ + temp &= 0xFD; + bWriteReg(0xB00C, temp); + info->isLUTInLFB = FALSE; + } + else /* SFB */ + { + info->isLUTInLFB = FALSE; + } + + 
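    /*
     * Worked example (not part of the original patch): the GART sizing done a
     * little further below is straightforward arithmetic.  With the 128 MB PCIe
     * aperture set above and an assumed 4 KB PAGE_SIZE:
     *
     *   pciePageCount = 128 MB / 4 KB           = 32768 GART entries
     *   lutPageCount  = 32768 entries * 4 bytes = 128 KB = 32 pages
     *   lutPageOrder  = 5   (the counting loop first yields 6; the exact
     *                        power-of-two check then subtracts one)
     *
     * so xgi_pcie_lut_alloc() ends up requesting a single order-5, physically
     * contiguous allocation for the whole lookup table.
     */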
XGI_INFO("info->lutPageSize: 0x%lx \n", info->lutPageSize); + + /* Get current SDFB page size */ + temp = bReadReg(0xB00C); + if (temp & 0x08) /* 8MB */ + { + info->sdfbPageSize = 8 * 1024 * 1024; + } + else /* 4MB */ + { + info->sdfbPageSize = 4 * 1024 * 1024; + } +#endif + pciePageCount = (info->pcie.size + PAGE_SIZE - 1) / PAGE_SIZE; + + /* + * Allocate memory for PCIE GART table; + */ + lutEntryNum = pciePageCount; + lutPageCount = (lutEntryNum * 4 + PAGE_SIZE - 1) / PAGE_SIZE; + + /* get page_order base on page_count */ + count = lutPageCount; + for (lutPageOrder = 0; count; count >>= 1, ++lutPageOrder); + + if ((lutPageCount << 1) == (1 << lutPageOrder)) + { + lutPageOrder -= 1; + } + + XGI_INFO("lutEntryNum: 0x%lx lutPageCount: 0x%lx lutPageOrder 0x%lx\n", + lutEntryNum, lutPageCount, lutPageOrder); + + info->lutPageOrder = lutPageOrder; + page_addr = (unsigned char *)xgi_pcie_lut_alloc(lutPageOrder); + + if (!page_addr) + { + XGI_ERROR("cannot allocate PCIE lut page!\n"); + goto fail; + } + info->lut_base = (unsigned long *)page_addr; + + XGI_INFO("page_addr: 0x%p virt_to_phys(page_virtual): 0x%lx \n", + page_addr, virt_to_phys(page_addr)); + + XGI_INFO("info->lut_base: 0x%p __pa(info->lut_base): 0x%lx info->lutPageOrder 0x%lx\n", + info->lut_base, __pa(info->lut_base), info->lutPageOrder); + + /* + * clean all PCIE GART Entry + */ + memset(page_addr, 0, PAGE_SIZE << lutPageOrder); + +#if defined(__i386__) || defined(__x86_64__) + asm volatile ( "wbinvd" ::: "memory" ); +#else + mb(); +#endif + + /* Set GART in SFB */ + bWriteReg(0xB00C, bReadReg(0xB00C) & ~0x02); + /* Set GART base address to HW */ + dwWriteReg(0xB034, __pa(info->lut_base)); + + return 1; +fail: + return 0; +} + +static void xgi_pcie_lut_cleanup(xgi_info_t *info) +{ + if (info->lut_base) + { + XGI_INFO("info->lut_base: 0x%p info->lutPageOrder: 0x%lx \n", + info->lut_base, info->lutPageOrder); + xgi_pcie_lut_free((unsigned long)info->lut_base, info->lutPageOrder); + info->lut_base = NULL; + } +} + +static xgi_pcie_block_t *xgi_pcie_new_node(void) +{ + xgi_pcie_block_t *block = (xgi_pcie_block_t *)kmem_cache_alloc(xgi_pcie_cache_block, GFP_KERNEL); + if (block == NULL) + { + return NULL; + } + + block->offset = 0; /* block's offset in pcie memory, begin from 0 */ + block->size = 0; /* The block size. 
*/ + block->bus_addr = 0; /* CPU access address/bus address */ + block->hw_addr = 0; /* GE access address */ + block->page_count = 0; + block->page_order = 0; + block->page_block = NULL; + block->page_table = NULL; + block->owner = PCIE_INVALID; + + return block; +} + +static void xgi_pcie_block_stuff_free(xgi_pcie_block_t *block) +{ + struct page *page; + xgi_page_block_t *page_block = block->page_block; + xgi_page_block_t *free_block; + unsigned long page_count = 0; + int i; + + //XGI_INFO("block->page_block: 0x%p \n", block->page_block); + while (page_block) + { + page_count = page_block->page_count; + + page = virt_to_page(page_block->virt_addr); + for (i = 0; i < page_count; i++, page++) + { + XGI_DEC_PAGE_COUNT(page); + XGIUnlockPage(page); + } + free_pages(page_block->virt_addr, page_block->page_order); + + page_block->phys_addr = 0; + page_block->virt_addr = 0; + page_block->page_count = 0; + page_block->page_order = 0; + + free_block = page_block; + page_block = page_block->next; + //XGI_INFO("free free_block: 0x%p \n", free_block); + kfree(free_block); + free_block = NULL; + } + + if (block->page_table) + { + //XGI_INFO("free block->page_table: 0x%p \n", block->page_table); + kfree(block->page_table); + block->page_table = NULL; + } +} + +int xgi_pcie_heap_init(xgi_info_t *info) +{ + xgi_pcie_block_t *block; + + if (!xgi_pcie_lut_init(info)) + { + XGI_ERROR("xgi_pcie_lut_init failed\n"); + return 0; + } + + xgi_pcie_heap = (xgi_pcie_heap_t *)kmalloc(sizeof(xgi_pcie_heap_t), GFP_KERNEL); + if(!xgi_pcie_heap) + { + XGI_ERROR("xgi_pcie_heap alloc failed\n"); + goto fail1; + } + INIT_LIST_HEAD(&xgi_pcie_heap->free_list); + INIT_LIST_HEAD(&xgi_pcie_heap->used_list); + INIT_LIST_HEAD(&xgi_pcie_heap->sort_list); + + xgi_pcie_heap->max_freesize = info->pcie.size; + + xgi_pcie_cache_block = kmem_cache_create("xgi_pcie_block", sizeof(xgi_pcie_block_t), + 0, SLAB_HWCACHE_ALIGN, NULL, NULL); + + if (NULL == xgi_pcie_cache_block) + { + XGI_ERROR("Fail to creat xgi_pcie_block\n"); + goto fail2; + } + + block = (xgi_pcie_block_t *)xgi_pcie_new_node(); + if (!block) + { + XGI_ERROR("xgi_pcie_new_node failed\n"); + goto fail3; + } + + block->offset = 0; /* block's offset in pcie memory, begin from 0 */ + block->size = info->pcie.size; + + list_add(&block->list, &xgi_pcie_heap->free_list); + + XGI_INFO("PCIE start address: 0x%lx, memory size : 0x%lx\n", block->offset, block->size); + return 1; +fail3: + if (xgi_pcie_cache_block) + { + kmem_cache_destroy(xgi_pcie_cache_block); + xgi_pcie_cache_block = NULL; + } + +fail2: + if(xgi_pcie_heap) + { + kfree(xgi_pcie_heap); + xgi_pcie_heap = NULL; + } +fail1: + xgi_pcie_lut_cleanup(info); + return 0; +} + +void xgi_pcie_heap_check(void) +{ + struct list_head *useList, *temp; + xgi_pcie_block_t *block; + unsigned int ownerIndex; + char *ownerStr[6] = {"2D", "3D", "3D_CMD", "3D_SCR", "3D_TEX", "ELSE"}; + + if (xgi_pcie_heap) + { + useList = &xgi_pcie_heap->used_list; + temp = useList->next; + XGI_INFO("pcie freemax = 0x%lx\n", xgi_pcie_heap->max_freesize); + while (temp != useList) + { + block = list_entry(temp, struct xgi_pcie_block_s, list); + if (block->owner == PCIE_2D) + ownerIndex = 0; + else if (block->owner > PCIE_3D_TEXTURE || block->owner < PCIE_2D || block->owner < PCIE_3D) + ownerIndex = 5; + else + ownerIndex = block->owner - PCIE_3D + 1; + XGI_INFO("Allocated by %s, block->offset: 0x%lx block->size: 0x%lx \n", + ownerStr[ownerIndex], block->offset, block->size); + temp = temp->next; + } + + } +} + + +void xgi_pcie_heap_cleanup(xgi_info_t 
*info) +{ + struct list_head *free_list, *temp; + xgi_pcie_block_t *block; + int j; + + xgi_pcie_lut_cleanup(info); + XGI_INFO("xgi_pcie_lut_cleanup scceeded\n"); + + if (xgi_pcie_heap) + { + free_list = &xgi_pcie_heap->free_list; + for (j = 0; j < 3; j++, free_list++) + { + temp = free_list->next; + + while (temp != free_list) + { + block = list_entry(temp, struct xgi_pcie_block_s, list); + XGI_INFO("No. %d block->offset: 0x%lx block->size: 0x%lx \n", + j, block->offset, block->size); + xgi_pcie_block_stuff_free(block); + block->bus_addr = 0; + block->hw_addr = 0; + + temp = temp->next; + //XGI_INFO("No. %d free block: 0x%p \n", j, block); + kmem_cache_free(xgi_pcie_cache_block, block); + block = NULL; + } + } + + XGI_INFO("free xgi_pcie_heap: 0x%p \n", xgi_pcie_heap); + kfree(xgi_pcie_heap); + xgi_pcie_heap = NULL; + } + + if (xgi_pcie_cache_block) + { + kmem_cache_destroy(xgi_pcie_cache_block); + xgi_pcie_cache_block = NULL; + } +} + + +static xgi_pcie_block_t *xgi_pcie_mem_alloc(xgi_info_t *info, + unsigned long originalSize, + enum PcieOwner owner) +{ + struct list_head *free_list; + xgi_pcie_block_t *block, *used_block, *free_block; + xgi_page_block_t *page_block, *prev_page_block; + struct page *page; + unsigned long page_order = 0, count = 0, index =0; + unsigned long page_addr = 0; + unsigned long *lut_addr = NULL; + unsigned long lut_id = 0; + unsigned long size = (originalSize + PAGE_SIZE - 1) & PAGE_MASK; + int i, j, page_count = 0; + int temp = 0; + + XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-Begin\n"); + XGI_INFO("Original 0x%lx bytes requested, really 0x%lx allocated\n", originalSize, size); + + if (owner == PCIE_3D) + { + if (xgi_pcie_vertex_block) + { + XGI_INFO("PCIE Vertex has been created, return directly.\n"); + return xgi_pcie_vertex_block; + } + } + + if (owner == PCIE_3D_CMDLIST) + { + if (xgi_pcie_cmdlist_block) + { + XGI_INFO("PCIE Cmdlist has been created, return directly.\n"); + return xgi_pcie_cmdlist_block; + } + } + + if (owner == PCIE_3D_SCRATCHPAD) + { + if (xgi_pcie_scratchpad_block) + { + XGI_INFO("PCIE Scratchpad has been created, return directly.\n"); + return xgi_pcie_scratchpad_block; + } + } + + if (size == 0) + { + XGI_ERROR("size == 0 \n"); + return (NULL); + } + + XGI_INFO("max_freesize: 0x%lx \n", xgi_pcie_heap->max_freesize); + if (size > xgi_pcie_heap->max_freesize) + { + XGI_ERROR("size: 0x%lx bigger than PCIE total free size: 0x%lx.\n", + size, xgi_pcie_heap->max_freesize); + return (NULL); + } + + /* Jong 05/30/2006; find next free list which has enough space*/ + free_list = xgi_pcie_heap->free_list.next; + while (free_list != &xgi_pcie_heap->free_list) + { + //XGI_INFO("free_list: 0x%px \n", free_list); + block = list_entry(free_list, struct xgi_pcie_block_s, list); + if (size <= block->size) + { + break; + } + free_list = free_list->next; + } + + if (free_list == &xgi_pcie_heap->free_list) + { + XGI_ERROR("Can't allocate %ldk size from PCIE memory !\n", size/1024); + return (NULL); + } + + free_block = block; + XGI_INFO("alloc size: 0x%lx from offset: 0x%lx size: 0x%lx \n", + size, free_block->offset, free_block->size); + + if (size == free_block->size) + { + used_block = free_block; + XGI_INFO("size==free_block->size: free_block = 0x%p\n", free_block); + list_del(&free_block->list); + } + else + { + used_block = xgi_pcie_new_node(); + if (used_block == NULL) + { + return NULL; + } + + if (used_block == free_block) + { + XGI_ERROR("used_block == free_block = 0x%p\n", used_block); + } + + used_block->offset = free_block->offset; + 
used_block->size = size; + + free_block->offset += size; + free_block->size -= size; + } + + xgi_pcie_heap->max_freesize -= size; + + used_block->bus_addr = info->pcie.base + used_block->offset; + used_block->hw_addr = info->pcie.base + used_block->offset; + used_block->page_count = page_count = size / PAGE_SIZE; + + /* get page_order base on page_count */ + for (used_block->page_order = 0; page_count; page_count >>= 1) + { + ++used_block->page_order; + } + + if ((used_block->page_count << 1) == (1 << used_block->page_order)) + { + used_block->page_order--; + } + XGI_INFO("used_block->offset: 0x%lx, used_block->size: 0x%lx, used_block->bus_addr: 0x%lx, used_block->hw_addr: 0x%lx, used_block->page_count: 0x%lx used_block->page_order: 0x%lx\n", + used_block->offset, used_block->size, used_block->bus_addr, used_block->hw_addr, used_block->page_count, used_block->page_order); + + used_block->page_block = NULL; + //used_block->page_block = (xgi_pages_block_t *)kmalloc(sizeof(xgi_pages_block_t), GFP_KERNEL); + //if (!used_block->page_block) return NULL; + //used_block->page_block->next = NULL; + + used_block->page_table = (xgi_pte_t *)kmalloc(sizeof(xgi_pte_t) * used_block->page_count, GFP_KERNEL); + if (used_block->page_table == NULL) + { + goto fail; + } + + lut_id = (used_block->offset >> PAGE_SHIFT); + lut_addr = info->lut_base; + lut_addr += lut_id; + XGI_INFO("lutAddr: 0x%p lutID: 0x%lx \n", lut_addr, lut_id); + + /* alloc free pages from system */ + page_count = used_block->page_count; + page_block = used_block->page_block; + prev_page_block = used_block->page_block; + for (i = 0; page_count > 0; i++) + { + /* if size is bigger than 2M bytes, it should be split */ + if (page_count > (1 << XGI_PCIE_ALLOC_MAX_ORDER)) + { + page_order = XGI_PCIE_ALLOC_MAX_ORDER; + } + else + { + count = page_count; + for (page_order = 0; count; count >>= 1, ++page_order); + + if ((page_count << 1) == (1 << page_order)) + { + page_order -= 1; + } + } + + count = (1 << page_order); + page_addr = __get_free_pages(GFP_KERNEL, page_order); + XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-page_addr=0x%lx \n", page_addr); + + if (!page_addr) + { + XGI_ERROR("No: %d :Can't get free pages: 0x%lx from system memory !\n", + i, count); + goto fail; + } + + /* Jong 05/30/2006; test */ + memset((unsigned char *)page_addr, 0xFF, PAGE_SIZE << page_order); + /* memset((unsigned char *)page_addr, 0, PAGE_SIZE << page_order); */ + + if (page_block == NULL) + { + page_block = (xgi_page_block_t *)kmalloc(sizeof(xgi_page_block_t), GFP_KERNEL); + if (!page_block) + { + XGI_ERROR("Can't get memory for page_block! 
\n"); + goto fail; + } + } + + if (prev_page_block == NULL) + { + used_block->page_block = page_block; + prev_page_block = page_block; + } + else + { + prev_page_block->next = page_block; + prev_page_block = page_block; + } + + page_block->next = NULL; + page_block->phys_addr = __pa(page_addr); + page_block->virt_addr = page_addr; + page_block->page_count = count; + page_block->page_order = page_order; + + XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-page_block->phys_addr=0x%lx \n", page_block->phys_addr); + XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-page_block->virt_addr=0x%lx \n", page_block->virt_addr); + + page = virt_to_page(page_addr); + + //XGI_INFO("No: %d page_order: 0x%lx page_count: 0x%x count: 0x%lx index: 0x%lx lut_addr: 0x%p" + // "page_block->phys_addr: 0x%lx page_block->virt_addr: 0x%lx \n", + // i, page_order, page_count, count, index, lut_addr, page_block->phys_addr, page_block->virt_addr); + + for (j = 0 ; j < count; j++, page++, lut_addr++) + { + used_block->page_table[index + j].phys_addr = __pa(page_address(page)); + used_block->page_table[index + j].virt_addr = (unsigned long)page_address(page); + + XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-used_block->page_table[index + j].phys_addr=0x%lx \n", used_block->page_table[index + j].phys_addr); + XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-used_block->page_table[index + j].virt_addr=0x%lx \n", used_block->page_table[index + j].virt_addr); + + *lut_addr = __pa(page_address(page)); + XGI_INC_PAGE_COUNT(page); + XGILockPage(page); + + if (temp) + { + XGI_INFO("__pa(page_address(page)): 0x%lx lutAddr: 0x%p lutAddr No: 0x%x = 0x%lx \n", + __pa(page_address(page)), lut_addr, j, *lut_addr); + temp--; + } + } + + page_block = page_block->next; + page_count -= count; + index += count; + temp = 0; + } + + used_block->owner = owner; + list_add(&used_block->list, &xgi_pcie_heap->used_list); + +#if defined(__i386__) || defined(__x86_64__) + asm volatile ( "wbinvd" ::: "memory" ); +#else + mb(); +#endif + + /* Flush GART Table */ + bWriteReg(0xB03F, 0x40); + bWriteReg(0xB03F, 0x00); + + if (owner == PCIE_3D) + { + xgi_pcie_vertex_block = used_block; + } + + if (owner == PCIE_3D_CMDLIST) + { + xgi_pcie_cmdlist_block = used_block; + } + + if (owner == PCIE_3D_SCRATCHPAD) + { + xgi_pcie_scratchpad_block = used_block; + } + + XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-End \n"); + return (used_block); + +fail: + xgi_pcie_block_stuff_free(used_block); + kmem_cache_free(xgi_pcie_cache_block, used_block); + return NULL; +} + +static xgi_pcie_block_t *xgi_pcie_mem_free(xgi_info_t *info, unsigned long offset) +{ + struct list_head *free_list, *used_list; + xgi_pcie_block_t *used_block, *block = NULL; + xgi_pcie_block_t *prev, *next; + unsigned long upper, lower; + + used_list = xgi_pcie_heap->used_list.next; + while (used_list != &xgi_pcie_heap->used_list) + { + block = list_entry(used_list, struct xgi_pcie_block_s, list); + if (block->offset == offset) + { + break; + } + used_list = used_list->next; + } + + if (used_list == &xgi_pcie_heap->used_list) + { + XGI_ERROR("can't find block: 0x%lx to free!\n", offset); + return (NULL); + } + + used_block = block; + XGI_INFO("used_block: 0x%p, offset = 0x%lx, size = 0x%lx, bus_addr = 0x%lx, hw_addr = 0x%lx\n", + used_block, used_block->offset, used_block->size, used_block->bus_addr, used_block->hw_addr); + + xgi_pcie_block_stuff_free(used_block); + + /* update xgi_pcie_heap */ + xgi_pcie_heap->max_freesize += used_block->size; + + prev = next = NULL; + upper = used_block->offset + used_block->size; + lower = 
used_block->offset;
+
+ free_list = xgi_pcie_heap->free_list.next;
+
+ while (free_list != &xgi_pcie_heap->free_list)
+ {
+ block = list_entry(free_list, struct xgi_pcie_block_s, list);
+ if (block->offset == upper)
+ {
+ next = block;
+ }
+ else if ((block->offset + block->size) == lower)
+ {
+ prev = block;
+ }
+ free_list = free_list->next;
+ }
+
+ XGI_INFO("next = 0x%p, prev = 0x%p\n", next, prev);
+ list_del(&used_block->list);
+
+ if (prev && next)
+ {
+ prev->size += (used_block->size + next->size);
+ list_del(&next->list);
+ XGI_INFO("free node 0x%p\n", next);
+ kmem_cache_free(xgi_pcie_cache_block, next);
+ kmem_cache_free(xgi_pcie_cache_block, used_block);
+ next = NULL;
+ used_block = NULL;
+ return (prev);
+ }
+
+ if (prev)
+ {
+ prev->size += used_block->size;
+ XGI_INFO("free node 0x%p\n", used_block);
+ kmem_cache_free(xgi_pcie_cache_block, used_block);
+ used_block = NULL;
+ return (prev);
+ }
+
+ if (next)
+ {
+ next->size += used_block->size;
+ next->offset = used_block->offset;
+ XGI_INFO("free node 0x%p\n", used_block);
+ kmem_cache_free(xgi_pcie_cache_block, used_block);
+ used_block = NULL;
+ return (next);
+ }
+
+ used_block->bus_addr = 0;
+ used_block->hw_addr = 0;
+ used_block->page_count = 0;
+ used_block->page_order = 0;
+ list_add(&used_block->list, &xgi_pcie_heap->free_list);
+ XGI_INFO("Recycled free node %p, offset = 0x%lx, size = 0x%lx\n",
+ used_block, used_block->offset, used_block->size);
+ return (used_block);
+}
+
+void xgi_pcie_alloc(xgi_info_t *info, unsigned long size,
+ enum PcieOwner owner, xgi_mem_alloc_t *alloc)
+{
+ xgi_pcie_block_t *block;
+ xgi_mem_pid_t *mempid_block;
+
+ xgi_down(info->pcie_sem);
+ block = xgi_pcie_mem_alloc(info, size, owner);
+ xgi_up(info->pcie_sem);
+
+ if (block == NULL)
+ {
+ alloc->location = INVALID;
+ alloc->size = 0;
+ alloc->bus_addr = 0;
+ alloc->hw_addr = 0;
+ XGI_ERROR("PCIE RAM allocation failed\n");
+ }
+ else
+ {
+ XGI_INFO("PCIE RAM allocation succeeded: offset = 0x%lx, bus_addr = 0x%lx\n",
+ block->offset, block->bus_addr);
+ alloc->location = NON_LOCAL;
+ alloc->size = block->size;
+ alloc->bus_addr = block->bus_addr;
+ alloc->hw_addr = block->hw_addr;
+
+ /*
+ manage mempid, handle PCIE_3D, PCIE_3D_TEXTURE.
+ A PCIE_3D request means an OpenGL process was created.
+ A PCIE_3D_TEXTURE request means the texture could not be allocated from fb.
+ */
+ if (owner == PCIE_3D || owner == PCIE_3D_TEXTURE)
+ {
+ mempid_block = kmalloc(sizeof(xgi_mem_pid_t), GFP_KERNEL);
+ if (!mempid_block)
+ {
+ XGI_ERROR("mempid_block alloc failed\n");
+ return;
+ }
+ mempid_block->location = NON_LOCAL;
+ if (owner == PCIE_3D)
+ mempid_block->bus_addr = 0xFFFFFFFF;/*xgi_pcie_vertex_block has the address*/
+ else
+ mempid_block->bus_addr = alloc->bus_addr;
+ mempid_block->pid = alloc->pid;
+
+ XGI_INFO("Memory ProcessID add one pcie block pid:%ld successfully! \n", mempid_block->pid);
+ list_add(&mempid_block->list, &xgi_mempid_list);
+ }
+ }
+}
+
+void xgi_pcie_free(xgi_info_t *info, unsigned long bus_addr)
+{
+ xgi_pcie_block_t *block;
+ unsigned long offset = bus_addr - info->pcie.base;
+ xgi_mem_pid_t *mempid_block;
+ xgi_mem_pid_t *mempid_freeblock = NULL;
+ struct list_head *mempid_list;
+ char isvertex = 0;
+ int processcnt;
+
+ if (xgi_pcie_vertex_block && xgi_pcie_vertex_block->bus_addr == bus_addr)
+ isvertex = 1;
+
+ if (isvertex)
+ {
+ /* check whether any other process is still using the vertex buffer */
+ processcnt = 0;
+ mempid_list = xgi_mempid_list.next;
+ while (mempid_list != &xgi_mempid_list)
+ {
+ mempid_block = list_entry(mempid_list, struct xgi_mem_pid_s, list);
+ if (mempid_block->location == NON_LOCAL && mempid_block->bus_addr == 0xFFFFFFFF)
+ {
+ ++processcnt;
+ }
+ mempid_list = mempid_list->next;
+ }
+ if (processcnt > 1)
+ {
+ return;
+ }
+ }
+
+ xgi_down(info->pcie_sem);
+ block = xgi_pcie_mem_free(info, offset);
+ xgi_up(info->pcie_sem);
+
+ if (block == NULL)
+ {
+ XGI_ERROR("xgi_pcie_free() failed at offset 0x%lx\n", offset);
+ }
+
+ if (isvertex)
+ xgi_pcie_vertex_block = NULL;
+
+ /* manage mempid */
+ mempid_list = xgi_mempid_list.next;
+ while (mempid_list != &xgi_mempid_list)
+ {
+ mempid_block = list_entry(mempid_list, struct xgi_mem_pid_s, list);
+ if (mempid_block->location == NON_LOCAL && ((isvertex && mempid_block->bus_addr == 0xFFFFFFFF) || (!isvertex && mempid_block->bus_addr == bus_addr)))
+ {
+ mempid_freeblock = mempid_block;
+ break;
+ }
+ mempid_list = mempid_list->next;
+ }
+ if (mempid_freeblock)
+ {
+ list_del(&mempid_freeblock->list);
+ XGI_INFO("Memory ProcessID delete one pcie block pid:%ld successfully! \n", mempid_freeblock->pid);
+ kfree(mempid_freeblock);
+ }
+}
+
+/*
+ * given a bus address, find the pcie mem block
+ * uses the bus address as the key.
+ */
+void *xgi_find_pcie_block(xgi_info_t *info, unsigned long address)
+{
+ struct list_head *used_list;
+ xgi_pcie_block_t *block;
+ int i;
+
+ used_list = xgi_pcie_heap->used_list.next;
+
+ while (used_list != &xgi_pcie_heap->used_list)
+ {
+ block = list_entry(used_list, struct xgi_pcie_block_s, list);
+
+ if (block->bus_addr == address)
+ {
+ return block;
+ }
+
+ if (block->page_table)
+ {
+ for (i = 0; i < block->page_count; i++)
+ {
+ unsigned long offset = block->bus_addr;
+ if ( (address >= offset) && (address < (offset + PAGE_SIZE)))
+ {
+ return block;
+ }
+ }
+ }
+ used_list = used_list->next;
+ }
+
+ XGI_ERROR("could not find map for vm 0x%lx\n", address);
+
+ return NULL;
+}
+
+/*
+ address -- GE HW address
+ return -- CPU virtual address
+
+ the CPU virtual addresses of a block are not assumed to be contiguous;
+ the page table entry covering the given address is looked up instead
+*/
+void *xgi_find_pcie_virt(xgi_info_t *info, unsigned long address)
+{
+ struct list_head *used_list;
+ xgi_pcie_block_t *block;
+ unsigned long offset_in_page;
+ unsigned long loc_in_pagetable;
+ void * ret;
+
+ XGI_INFO("Jong_05292006-xgi_find_pcie_virt-Begin\n");
+
+ used_list = xgi_pcie_heap->used_list.next;
+ XGI_INFO("Jong_05292006-used_list=0x%p\n", used_list);
+
+ offset_in_page = address & (PAGE_SIZE-1);
+ XGI_INFO("Jong_05292006-address=0x%lx, PAGE_SIZE-1=0x%lx, offset_in_page=0x%lx\n", address, PAGE_SIZE-1, offset_in_page);
+
+ while (used_list != &xgi_pcie_heap->used_list)
+ {
+ block = list_entry(used_list, struct xgi_pcie_block_s, list);
+ XGI_INFO("Jong_05292006-block=0x%p\n", block);
+ XGI_INFO("Jong_05292006-block->hw_addr=0x%lx\n", block->hw_addr);
+ XGI_INFO("Jong_05292006- block->size=0x%lx\n", block->size);
+
+ if ((address >= block->hw_addr) && (address < (block->hw_addr + block->size)))
+ {
+ loc_in_pagetable = (address - block->hw_addr) >> PAGE_SHIFT;
+ ret = (void*)(block->page_table[loc_in_pagetable].virt_addr + offset_in_page);
+
+ XGI_INFO("Jong_05292006-PAGE_SHIFT=%d\n", PAGE_SHIFT);
+ XGI_INFO("Jong_05292006-loc_in_pagetable=0x%lx\n", loc_in_pagetable);
+ XGI_INFO("Jong_05292006-block->page_table[loc_in_pagetable].virt_addr=0x%lx\n", block->page_table[loc_in_pagetable].virt_addr);
+ XGI_INFO("Jong_05292006-offset_in_page=0x%lx\n", offset_in_page);
+ XGI_INFO("Jong_05292006-return(virt_addr)=0x%p\n", ret);
+
+ return ret;
+ }
+ else
+ {
+ XGI_INFO("Jong_05292006-used_list = used_list->next;\n");
+ used_list = used_list->next;
+ }
+ }
+
+ XGI_ERROR("could not find map for vm 0x%lx\n", address);
+ return NULL;
+}
+
+
+void xgi_read_pcie_mem(xgi_info_t *info, xgi_mem_req_t *req)
+{
+
+}
+
+void xgi_write_pcie_mem(xgi_info_t *info, xgi_mem_req_t *req)
+{
+}
+
+/*
+ address -- GE hw address
+*/
+void xgi_test_rwinkernel(xgi_info_t *info, unsigned long address)
+{
+ unsigned long * virtaddr = 0;
+ if (address == 0)
+ {
+ XGI_INFO("[Jong-kd] input GE HW addr is 0x00000000\n");
+ return;
+ }
+
+ virtaddr = (unsigned long *) xgi_find_pcie_virt(info, address);
+
+ XGI_INFO("[Jong-kd] input GE HW addr is 0x%lx\n", address);
+ XGI_INFO("[Jong-kd] convert to CPU virt addr 0x%p\n", virtaddr);
+ if (virtaddr != NULL)
+ {
+ XGI_INFO("[Jong-kd] origin [virtaddr] = 0x%lx\n", *virtaddr);
+ *virtaddr = 0x00f00fff;
+ XGI_INFO("[Jong-kd] modified [virtaddr] = 0x%lx\n", *virtaddr);
+ }
+}
+
diff --git a/linux-core/xgi_pcie.h b/linux-core/xgi_pcie.h
new file mode 100644
index 00000000..cd5f85b8
--- /dev/null
+++ b/linux-core/xgi_pcie.h
@@ -0,0 +1,73 @@
+
+/****************************************************************************
+ * Copyright (C) 2003-2006 by XGI 
Technology, Taiwan. + * * + * All Rights Reserved. * + * * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + ***************************************************************************/ + +#ifndef _XGI_PCIE_H_ +#define _XGI_PCIE_H_ + +#ifndef XGI_PCIE_ALLOC_MAX_ORDER +#define XGI_PCIE_ALLOC_MAX_ORDER 1 /* 8K in Kernel 2.4.* */ +#endif + +typedef struct xgi_page_block_s { + struct xgi_page_block_s *next; + unsigned long phys_addr; + unsigned long virt_addr; + unsigned long page_count; + unsigned long page_order; +} xgi_page_block_t; + +typedef struct xgi_pcie_block_s { + struct list_head list; + unsigned long offset; /* block's offset in pcie memory, begin from 0 */ + unsigned long size; /* The block size. */ + unsigned long bus_addr; /* CPU access address/bus address */ + unsigned long hw_addr; /* GE access address */ + + unsigned long page_count; + unsigned long page_order; + xgi_page_block_t *page_block; + xgi_pte_t *page_table; /* list of physical pages allocated */ + + atomic_t use_count; + enum PcieOwner owner; + unsigned long processID; +} xgi_pcie_block_t; + +typedef struct xgi_pcie_list_s { + xgi_pcie_block_t *head; + xgi_pcie_block_t *tail; +} xgi_pcie_list_t; + +typedef struct xgi_pcie_heap_s { + struct list_head free_list; + struct list_head used_list; + struct list_head sort_list; + unsigned long max_freesize; +} xgi_pcie_heap_t; + +#endif diff --git a/linux-core/xgi_regs.h b/linux-core/xgi_regs.h new file mode 100644 index 00000000..18448139 --- /dev/null +++ b/linux-core/xgi_regs.h @@ -0,0 +1,410 @@ + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * * + * All Rights Reserved. * + * * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. 
+ * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + ***************************************************************************/ + + +#ifndef _XGI_REGS_H_ +#define _XGI_REGS_H_ + +#ifndef XGI_MMIO + #define XGI_MMIO 1 +#endif + +#if XGI_MMIO +#define OUTB(port, value) writeb(value, info->mmio.vbase + port) +#define INB(port) readb(info->mmio.vbase + port) +#define OUTW(port, value) writew(value, info->mmio.vbase + port) +#define INW(port) readw(info->mmio.vbase + port) +#define OUTDW(port, value) writel(value, info->mmio.vbase + port) +#define INDW(port) readl(info->mmio.vbase + port) +#else +#define OUTB(port, value) outb(value, port) +#define INB(port) inb(port) +#define OUTW(port, value) outw(value, port) +#define INW(port) inw(port) +#define OUTDW(port, value) outl(value, port) +#define INDW(port) inl(port) +#endif + +/* Hardware access functions */ +static inline void OUT3C5B(xgi_info_t *info, u8 index, u8 data) +{ + OUTB(0x3C4, index); + OUTB(0x3C5, data); +} + +static inline void OUT3X5B(xgi_info_t *info, u8 index, u8 data) +{ + OUTB(0x3D4, index); + OUTB(0x3D5, data); +} + +static inline void OUT3CFB(xgi_info_t *info, u8 index, u8 data) +{ + OUTB(0x3CE, index); + OUTB(0x3CF, data); +} + +static inline u8 IN3C5B(xgi_info_t *info, u8 index) +{ + volatile u8 data=0; + OUTB(0x3C4, index); + data = INB(0x3C5); + return data; +} + +static inline u8 IN3X5B(xgi_info_t *info, u8 index) +{ + volatile u8 data=0; + OUTB(0x3D4, index); + data = INB(0x3D5); + return data; +} + +static inline u8 IN3CFB(xgi_info_t *info, u8 index) +{ + volatile u8 data=0; + OUTB(0x3CE, index); + data = INB(0x3CF); + return data; +} + +static inline void OUT3C5W(xgi_info_t *info, u8 index, u16 data) +{ + OUTB(0x3C4, index); + OUTB(0x3C5, data); +} + +static inline void OUT3X5W(xgi_info_t *info, u8 index, u16 data) +{ + OUTB(0x3D4, index); + OUTB(0x3D5, data); +} + +static inline void OUT3CFW(xgi_info_t *info, u8 index, u8 data) +{ + OUTB(0x3CE, index); + OUTB(0x3CF, data); +} + +static inline u8 IN3C5W(xgi_info_t *info, u8 index) +{ + volatile u8 data=0; + OUTB(0x3C4, index); + data = INB(0x3C5); + return data; +} + +static inline u8 IN3X5W(xgi_info_t *info, u8 index) +{ + volatile u8 data=0; + OUTB(0x3D4, index); + data = INB(0x3D5); + return data; +} + +static inline u8 IN3CFW(xgi_info_t *info, u8 index) +{ + volatile u8 data=0; + OUTB(0x3CE, index); + data = INB(0x3CF); + return data; +} + +static inline u8 readAttr(xgi_info_t *info, u8 index) +{ + INB(0x3DA); /* flip-flop to index */ + OUTB(0x3C0, index); + return INB(0x3C1); +} + +static inline void writeAttr(xgi_info_t *info, u8 index, u8 value) +{ + INB(0x3DA); /* flip-flop to index */ + OUTB(0x3C0, index); + OUTB(0x3C0, value); +} + +/* + * Graphic engine register (2d/3d) acessing interface + */ +static inline void WriteRegDWord(xgi_info_t *info, u32 addr, u32 data) +{ + /* Jong 05/25/2006 */ + XGI_INFO("Jong-WriteRegDWord()-Begin \n"); + XGI_INFO("Jong-WriteRegDWord()-info->mmio.vbase=0x%lx \n", info->mmio.vbase); + XGI_INFO("Jong-WriteRegDWord()-addr=0x%lx \n", addr); + XGI_INFO("Jong-WriteRegDWord()-data=0x%lx \n", 
data); + /* return; */ + + *(volatile u32*)(info->mmio.vbase + addr) = (data); + XGI_INFO("Jong-WriteRegDWord()-End \n"); +} + +static inline void WriteRegWord(xgi_info_t *info, u32 addr, u16 data) +{ + *(volatile u16*)(info->mmio.vbase + addr) = (data); +} + +static inline void WriteRegByte(xgi_info_t *info, u32 addr, u8 data) +{ + *(volatile u8*)(info->mmio.vbase + addr) = (data); +} + +static inline u32 ReadRegDWord(xgi_info_t *info, u32 addr) +{ + volatile u32 data; + data = *(volatile u32*)(info->mmio.vbase + addr); + return data; +} + +static inline u16 ReadRegWord(xgi_info_t *info, u32 addr) +{ + volatile u16 data; + data = *(volatile u16*)(info->mmio.vbase + addr); + return data; +} + +static inline u8 ReadRegByte(xgi_info_t *info, u32 addr) +{ + volatile u8 data; + data = *(volatile u8*)(info->mmio.vbase + addr); + return data; +} +#if 0 +extern void OUT3C5B(xgi_info_t *info, u8 index, u8 data); +extern void OUT3X5B(xgi_info_t *info, u8 index, u8 data); +extern void OUT3CFB(xgi_info_t *info, u8 index, u8 data); +extern u8 IN3C5B(xgi_info_t *info, u8 index); +extern u8 IN3X5B(xgi_info_t *info, u8 index); +extern u8 IN3CFB(xgi_info_t *info, u8 index); +extern void OUT3C5W(xgi_info_t *info, u8 index, u8 data); +extern void OUT3X5W(xgi_info_t *info, u8 index, u8 data); +extern void OUT3CFW(xgi_info_t *info, u8 index, u8 data); +extern u8 IN3C5W(xgi_info_t *info, u8 index); +extern u8 IN3X5W(xgi_info_t *info, u8 index); +extern u8 IN3CFW(xgi_info_t *info, u8 index); + +extern void WriteRegDWord(xgi_info_t *info, u32 addr, u32 data); +extern void WriteRegWord(xgi_info_t *info, u32 addr, u16 data); +extern void WriteRegByte(xgi_info_t *info, u32 addr, u8 data); +extern u32 ReadRegDWord(xgi_info_t *info, u32 addr); +extern u16 ReadRegWord(xgi_info_t *info, u32 addr); +extern u8 ReadRegByte(xgi_info_t *info, u32 addr); + +extern void EnableProtect(); +extern void DisableProtect(); +#endif + +#define Out(port, data) OUTB(port, data) +#define bOut(port, data) OUTB(port, data) +#define wOut(port, data) OUTW(port, data) +#define dwOut(port, data) OUTDW(port, data) + +#define Out3x5(index, data) OUT3X5B(info, index, data) +#define bOut3x5(index, data) OUT3X5B(info, index, data) +#define wOut3x5(index, data) OUT3X5W(info, index, data) + +#define Out3c5(index, data) OUT3C5B(info, index, data) +#define bOut3c5(index, data) OUT3C5B(info, index, data) +#define wOut3c5(index, data) OUT3C5W(info, index, data) + +#define Out3cf(index, data) OUT3CFB(info, index, data) +#define bOut3cf(index, data) OUT3CFB(info, index, data) +#define wOut3cf(index, data) OUT3CFW(info, index, data) + +#define In(port) INB(port) +#define bIn(port) INB(port) +#define wIn(port) INW(port) +#define dwIn(port) INDW(port) + +#define In3x5(index) IN3X5B(info, index) +#define bIn3x5(index) IN3X5B(info, index) +#define wIn3x5(index) IN3X5W(info, index) + +#define In3c5(index) IN3C5B(info, index) +#define bIn3c5(index) IN3C5B(info, index) +#define wIn3c5(index) IN3C5W(info, index) + +#define In3cf(index) IN3CFB(info, index) +#define bIn3cf(index) IN3CFB(info, index) +#define wIn3cf(index) IN3CFW(info, index) + +#define dwWriteReg(addr, data) WriteRegDWord(info, addr, data) +#define wWriteReg(addr, data) WriteRegWord(info, addr, data) +#define bWriteReg(addr, data) WriteRegByte(info, addr, data) +#define dwReadReg(addr) ReadRegDWord(info, addr) +#define wReadReg(addr) ReadRegWord(info, addr) +#define bReadReg(addr) ReadRegByte(info, addr) + +static inline void xgi_protect_all(xgi_info_t *info) +{ + OUTB(0x3C4, 0x11); + OUTB(0x3C5, 
0x92); +} + +static inline void xgi_unprotect_all(xgi_info_t *info) +{ + OUTB(0x3C4, 0x11); + OUTB(0x3C5, 0x92); +} + +static inline void xgi_enable_mmio(xgi_info_t *info) +{ + u8 protect = 0; + + /* Unprotect registers */ + outb(0x11, 0x3C4); + protect = inb(0x3C5); + outb(0x92, 0x3C5); + + outb(0x3A, 0x3D4); + outb(inb(0x3D5) | 0x20, 0x3D5); + + /* Enable MMIO */ + outb(0x39, 0x3D4); + outb(inb(0x3D5) | 0x01, 0x3D5); + + OUTB(0x3C4, 0x11); + OUTB(0x3C5, protect); +} + +static inline void xgi_disable_mmio(xgi_info_t *info) +{ + u8 protect = 0; + + /* unprotect registers */ + OUTB(0x3C4, 0x11); + protect = INB(0x3C5); + OUTB(0x3C5, 0x92); + + /* Disable MMIO access */ + OUTB(0x3D4, 0x39); + OUTB(0x3D5, INB(0x3D5) & 0xFE); + + /* Protect registers */ + outb(0x11, 0x3C4); + outb(protect, 0x3C5); +} + +static inline void xgi_enable_ge(xgi_info_t *info) +{ + unsigned char bOld3cf2a = 0; + int wait = 0; + + // Enable GE + OUTW(0x3C4, 0x9211); + + // Save and close dynamic gating + bOld3cf2a = bIn3cf(0x2a); + bOut3cf(0x2a, bOld3cf2a & 0xfe); + + // Reset both 3D and 2D engine + bOut3x5(0x36, 0x84); + wait = 10; + while (wait--) + { + bIn(0x36); + } + bOut3x5(0x36, 0x94); + wait = 10; + while (wait--) + { + bIn(0x36); + } + bOut3x5(0x36, 0x84); + wait = 10; + while (wait--) + { + bIn(0x36); + } + // Enable 2D engine only + bOut3x5(0x36, 0x80); + + // Enable 2D+3D engine + bOut3x5(0x36, 0x84); + + // Restore dynamic gating + bOut3cf(0x2a, bOld3cf2a); +} + +static inline void xgi_disable_ge(xgi_info_t *info) +{ + int wait = 0; + + // Reset both 3D and 2D engine + bOut3x5(0x36, 0x84); + + wait = 10; + while (wait--) + { + bIn(0x36); + } + bOut3x5(0x36, 0x94); + + wait = 10; + while (wait--) + { + bIn(0x36); + } + bOut3x5(0x36, 0x84); + + wait = 10; + while (wait--) + { + bIn(0x36); + } + + // Disable 2D engine only + bOut3x5(0x36, 0); +} + +static inline void xgi_enable_dvi_interrupt(xgi_info_t *info) +{ + Out3cf(0x39, In3cf(0x39) & ~0x01); //Set 3cf.39 bit 0 to 0 + Out3cf(0x39, In3cf(0x39) | 0x01); //Set 3cf.39 bit 0 to 1 + Out3cf(0x39, In3cf(0x39) | 0x02); +} +static inline void xgi_disable_dvi_interrupt(xgi_info_t *info) +{ + Out3cf(0x39,In3cf(0x39) & ~0x02); +} + +static inline void xgi_enable_crt1_interrupt(xgi_info_t *info) +{ + Out3cf(0x3d,In3cf(0x3d) | 0x04); + Out3cf(0x3d,In3cf(0x3d) & ~0x04); + Out3cf(0x3d,In3cf(0x3d) | 0x08); +} + +static inline void xgi_disable_crt1_interrupt(xgi_info_t *info) +{ + Out3cf(0x3d,In3cf(0x3d) & ~0x08); +} + +#endif + diff --git a/linux-core/xgi_types.h b/linux-core/xgi_types.h new file mode 100644 index 00000000..24cb8f3c --- /dev/null +++ b/linux-core/xgi_types.h @@ -0,0 +1,68 @@ + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * * + * All Rights Reserved. * + * * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. 
+ * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + ***************************************************************************/ + +#ifndef _XGI_TYPES_H_ +#define _XGI_TYPES_H_ + +/**************************************************************************** + * Typedefs * + ***************************************************************************/ + +typedef unsigned char V8; /* "void": enumerated or multiple fields */ +typedef unsigned short V16; /* "void": enumerated or multiple fields */ +typedef unsigned char U8; /* 0 to 255 */ +typedef unsigned short U16; /* 0 to 65535 */ +typedef signed char S8; /* -128 to 127 */ +typedef signed short S16; /* -32768 to 32767 */ +typedef float F32; /* IEEE Single Precision (S1E8M23) */ +typedef double F64; /* IEEE Double Precision (S1E11M52) */ +typedef unsigned long BOOL; +/* + * mainly for 64-bit linux, where long is 64 bits + * and win9x, where int is 16 bit. + */ +#if defined(vxworks) +typedef unsigned int V32; /* "void": enumerated or multiple fields */ +typedef unsigned int U32; /* 0 to 4294967295 */ +typedef signed int S32; /* -2147483648 to 2147483647 */ +#else +typedef unsigned long V32; /* "void": enumerated or multiple fields */ +typedef unsigned long U32; /* 0 to 4294967295 */ +typedef signed long S32; /* -2147483648 to 2147483647 */ +#endif + +#ifndef TRUE +#define TRUE 1UL +#endif + +#ifndef FALSE +#define FALSE 0UL +#endif + +#endif + From 434657a2582362367ba2a94f827511252001368f Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Tue, 26 Jun 2007 13:10:30 -0700 Subject: [PATCH 036/437] dos2unix and Lindent --- linux-core/xgi_cmdlist.c | 705 ++++----- linux-core/xgi_cmdlist.h | 155 +- linux-core/xgi_drv.c | 3174 +++++++++++++++++++------------------- linux-core/xgi_drv.h | 728 ++++----- linux-core/xgi_fb.c | 1019 ++++++------ linux-core/xgi_fb.h | 141 +- linux-core/xgi_linux.h | 1187 +++++++------- linux-core/xgi_misc.c | 1287 ++++++++-------- linux-core/xgi_misc.h | 96 +- linux-core/xgi_pcie.c | 2091 +++++++++++++------------ linux-core/xgi_pcie.h | 146 +- linux-core/xgi_regs.h | 814 +++++----- linux-core/xgi_types.h | 135 +- 13 files changed, 5765 insertions(+), 5913 deletions(-) diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index 024b021c..e00ea228 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -1,348 +1,357 @@ - -/**************************************************************************** - * Copyright (C) 2003-2006 by XGI Technology, Taiwan. - * * - * All Rights Reserved. 
* - * * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation on the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - ***************************************************************************/ - - -#include "xgi_types.h" -#include "xgi_linux.h" -#include "xgi_drv.h" -#include "xgi_regs.h" -#include "xgi_misc.h" -#include "xgi_cmdlist.h" - - - -U32 s_emptyBegin[AGPCMDLIST_BEGIN_SIZE] = -{ - 0x10000000, // 3D Type Begin, Invalid - 0x80000004, // Length = 4; - 0x00000000, - 0x00000000 -}; - -U32 s_flush2D[AGPCMDLIST_FLUSH_CMD_LEN] = -{ - FLUSH_2D, - FLUSH_2D, - FLUSH_2D, - FLUSH_2D -}; - -xgi_cmdring_info_t s_cmdring; - -static void addFlush2D(xgi_info_t *info); -static U32 getCurBatchBeginPort(xgi_cmd_info_t *pCmdInfo); -static void triggerHWCommandList(xgi_info_t *info, U32 triggerCounter); -static void xgi_cmdlist_reset(void); - -int xgi_cmdlist_initialize(xgi_info_t *info, U32 size) -{ - //xgi_mem_req_t mem_req; - xgi_mem_alloc_t mem_alloc; - - //mem_req.size = size; - - xgi_pcie_alloc(info, size, PCIE_2D, &mem_alloc); - - if ((mem_alloc.size == 0) && (mem_alloc.hw_addr == 0)) - { - return -1; - } - - s_cmdring._cmdRingSize = mem_alloc.size; - s_cmdring._cmdRingBuffer = mem_alloc.hw_addr; - s_cmdring._cmdRingBusAddr = mem_alloc.bus_addr; - s_cmdring._lastBatchStartAddr = 0; - s_cmdring._cmdRingOffset = 0; - - return 1; -} - -void xgi_submit_cmdlist(xgi_info_t *info, xgi_cmd_info_t *pCmdInfo) -{ - U32 beginPort; - /** XGI_INFO("Jong-xgi_submit_cmdlist-Begin \n"); **/ - - /* Jong 05/25/2006 */ - /* return; */ - - beginPort = getCurBatchBeginPort(pCmdInfo); - XGI_INFO("Jong-xgi_submit_cmdlist-After getCurBatchBeginPort() \n"); - - /* Jong 05/25/2006 */ - /* return; */ - - if (s_cmdring._lastBatchStartAddr == 0) - { - U32 portOffset; - - /* Jong 06/13/2006; remove marked for system hang test */ - /* xgi_waitfor_pci_idle(info); */ - - /* Jong 06132006; BASE_3D_ENG=0x2800 */ - /* beginPort: 2D: 0x30 */ - portOffset = BASE_3D_ENG + beginPort; - - // Enable PCI Trigger Mode - XGI_INFO("Jong-xgi_submit_cmdlist-Enable PCI Trigger Mode \n"); - - /* Jong 05/25/2006 */ - /* return; */ - - /* Jong 06/13/2006; M2REG_AUTO_LINK_SETTING_ADDRESS=0x10 */ - XGI_INFO("Jong-M2REG_AUTO_LINK_SETTING_ADDRESS=0x%lx \n", M2REG_AUTO_LINK_SETTING_ADDRESS); - XGI_INFO("Jong-M2REG_CLEAR_COUNTERS_MASK=0x%lx \n", M2REG_CLEAR_COUNTERS_MASK); - XGI_INFO("Jong-(M2REG_AUTO_LINK_SETTING_ADDRESS << 22)=0x%lx \n", (M2REG_AUTO_LINK_SETTING_ADDRESS << 22)); - XGI_INFO("Jong-M2REG_PCI_TRIGGER_MODE_MASK=0x%lx \n\n", 
M2REG_PCI_TRIGGER_MODE_MASK); - - /* Jong 06/14/2006; 0x400001a */ - XGI_INFO("Jong-(M2REG_AUTO_LINK_SETTING_ADDRESS << 22)|M2REG_CLEAR_COUNTERS_MASK|0x08|M2REG_PCI_TRIGGER_MODE_MASK=0x%lx \n", - (M2REG_AUTO_LINK_SETTING_ADDRESS << 22)|M2REG_CLEAR_COUNTERS_MASK|0x08|M2REG_PCI_TRIGGER_MODE_MASK); - dwWriteReg(BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS, - (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | - M2REG_CLEAR_COUNTERS_MASK | - 0x08 | - M2REG_PCI_TRIGGER_MODE_MASK); - - /* Jong 05/25/2006 */ - XGI_INFO("Jong-xgi_submit_cmdlist-After dwWriteReg() \n"); - /* return; */ /* OK */ - - /* Jong 06/14/2006; 0x400000a */ - XGI_INFO("Jong-(M2REG_AUTO_LINK_SETTING_ADDRESS << 22)|0x08|M2REG_PCI_TRIGGER_MODE_MASK=0x%lx \n", - (M2REG_AUTO_LINK_SETTING_ADDRESS << 22)|0x08|M2REG_PCI_TRIGGER_MODE_MASK); - dwWriteReg(BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS, - (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | - 0x08 | - M2REG_PCI_TRIGGER_MODE_MASK); - - // Send PCI begin command - XGI_INFO("Jong-xgi_submit_cmdlist-Send PCI begin command \n"); - /* return; */ - - XGI_INFO("Jong-xgi_submit_cmdlist-portOffset=%d \n", portOffset); - XGI_INFO("Jong-xgi_submit_cmdlist-beginPort=%d \n", beginPort); - - /* beginPort = 48; */ - /* 0xc100000 */ - dwWriteReg(portOffset, (beginPort<<22) + (BEGIN_VALID_MASK) + pCmdInfo->_curDebugID); - XGI_INFO("Jong-(beginPort<<22)=0x%lx \n", (beginPort<<22)); - XGI_INFO("Jong-(BEGIN_VALID_MASK)=0x%lx \n", BEGIN_VALID_MASK); - XGI_INFO("Jong- pCmdInfo->_curDebugID=0x%lx \n", pCmdInfo->_curDebugID); - XGI_INFO("Jong- (beginPort<<22) + (BEGIN_VALID_MASK) + pCmdInfo->_curDebugID=0x%lx \n", (beginPort<<22) + (BEGIN_VALID_MASK) + pCmdInfo->_curDebugID); - XGI_INFO("Jong-xgi_submit_cmdlist-Send PCI begin command- After \n"); - /* return; */ /* OK */ - - /* 0x80000024 */ - dwWriteReg(portOffset+4, BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize); - XGI_INFO("Jong- BEGIN_LINK_ENABLE_MASK=0x%lx \n", BEGIN_LINK_ENABLE_MASK); - XGI_INFO("Jong- pCmdInfo->_firstSize=0x%lx \n", pCmdInfo->_firstSize); - XGI_INFO("Jong- BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize=0x%lx \n", BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize); - XGI_INFO("Jong-xgi_submit_cmdlist-dwWriteReg-1 \n"); - - /* 0x1010000 */ - dwWriteReg(portOffset+8, (pCmdInfo->_firstBeginAddr >> 4)); - XGI_INFO("Jong- pCmdInfo->_firstBeginAddr=0x%lx \n", pCmdInfo->_firstBeginAddr); - XGI_INFO("Jong- (pCmdInfo->_firstBeginAddr >> 4)=0x%lx \n", (pCmdInfo->_firstBeginAddr >> 4)); - XGI_INFO("Jong-xgi_submit_cmdlist-dwWriteReg-2 \n"); - - /* Jong 06/13/2006 */ - xgi_dump_register(info); - - /* Jong 06/12/2006; system hang; marked for test */ - dwWriteReg(portOffset+12, 0); - XGI_INFO("Jong-xgi_submit_cmdlist-dwWriteReg-3 \n"); - - /* Jong 06/13/2006; remove marked for system hang test */ - /* xgi_waitfor_pci_idle(info); */ - } - else - { - XGI_INFO("Jong-xgi_submit_cmdlist-s_cmdring._lastBatchStartAddr != 0 \n"); - U32 *lastBatchVirtAddr; - - /* Jong 05/25/2006 */ - /* return; */ - - if (pCmdInfo->_firstBeginType == BTYPE_3D) - { - addFlush2D(info); - } - - lastBatchVirtAddr = (U32*) xgi_find_pcie_virt(info, s_cmdring._lastBatchStartAddr); - - lastBatchVirtAddr[1] = BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize; - lastBatchVirtAddr[2] = pCmdInfo->_firstBeginAddr >> 4; - lastBatchVirtAddr[3] = 0; - //barrier(); - lastBatchVirtAddr[0] = (beginPort<<22) + (BEGIN_VALID_MASK) + (0xffff & pCmdInfo->_curDebugID); - - /* Jong 06/12/2006; system hang; marked for test */ - triggerHWCommandList(info, pCmdInfo->_beginCount); - - 
XGI_INFO("Jong-xgi_submit_cmdlist-s_cmdring._lastBatchStartAddr != 0 - End\n"); - } - - s_cmdring._lastBatchStartAddr = pCmdInfo->_lastBeginAddr; - XGI_INFO("Jong-xgi_submit_cmdlist-End \n"); -} - - -/* - state: 0 - console - 1 - graphic - 2 - fb - 3 - logout -*/ -void xgi_state_change(xgi_info_t *info, xgi_state_info_t *pStateInfo) -{ -#define STATE_CONSOLE 0 -#define STATE_GRAPHIC 1 -#define STATE_FBTERM 2 -#define STATE_LOGOUT 3 -#define STATE_REBOOT 4 -#define STATE_SHUTDOWN 5 - - if ((pStateInfo->_fromState == STATE_GRAPHIC) - && (pStateInfo->_toState == STATE_CONSOLE)) - { - XGI_INFO("[kd] I see, now is to leaveVT\n"); - // stop to received batch - } - else if ((pStateInfo->_fromState == STATE_CONSOLE) - && (pStateInfo->_toState == STATE_GRAPHIC)) - { - XGI_INFO("[kd] I see, now is to enterVT\n"); - xgi_cmdlist_reset(); - } - else if ((pStateInfo->_fromState == STATE_GRAPHIC) - && ( (pStateInfo->_toState == STATE_LOGOUT) - ||(pStateInfo->_toState == STATE_REBOOT) - ||(pStateInfo->_toState == STATE_SHUTDOWN))) - { - XGI_INFO("[kd] I see, not is to exit from X\n"); - // stop to received batch - } - else - { - XGI_ERROR("[kd] Should not happen\n"); - } - -} - -void xgi_cmdlist_reset(void) -{ - s_cmdring._lastBatchStartAddr = 0; - s_cmdring._cmdRingOffset = 0; -} - -void xgi_cmdlist_cleanup(xgi_info_t *info) -{ - if (s_cmdring._cmdRingBuffer != 0) - { - xgi_pcie_free(info, s_cmdring._cmdRingBusAddr); - s_cmdring._cmdRingBuffer = 0; - s_cmdring._cmdRingOffset = 0; - s_cmdring._cmdRingSize = 0; - } -} - -static void triggerHWCommandList(xgi_info_t *info, U32 triggerCounter) -{ - static U32 s_triggerID = 1; - - //Fix me, currently we just trigger one time - while (triggerCounter--) - { - dwWriteReg(BASE_3D_ENG + M2REG_PCI_TRIGGER_REGISTER_ADDRESS, - 0x05000000 + (0xffff & s_triggerID++)); - // xgi_waitfor_pci_idle(info); - } -} - -static U32 getCurBatchBeginPort(xgi_cmd_info_t *pCmdInfo) -{ - // Convert the batch type to begin port ID - switch(pCmdInfo->_firstBeginType) - { - case BTYPE_2D: - return 0x30; - case BTYPE_3D: - return 0x40; - case BTYPE_FLIP: - return 0x50; - case BTYPE_CTRL: - return 0x20; - default: - //ASSERT(0); - return 0xff; - } -} - -static void addFlush2D(xgi_info_t *info) -{ - U32 *flushBatchVirtAddr; - U32 flushBatchHWAddr; - - U32 *lastBatchVirtAddr; - - /* check buf is large enough to contain a new flush batch */ - if ((s_cmdring._cmdRingOffset + 0x20) >= s_cmdring._cmdRingSize) - { - s_cmdring._cmdRingOffset = 0; - } - - flushBatchHWAddr = s_cmdring._cmdRingBuffer + s_cmdring._cmdRingOffset; - flushBatchVirtAddr = (U32*) xgi_find_pcie_virt(info, flushBatchHWAddr); - - /* not using memcpy for I assume the address is discrete */ - *(flushBatchVirtAddr + 0) = 0x10000000; - *(flushBatchVirtAddr + 1) = 0x80000004; /* size = 0x04 dwords */ - *(flushBatchVirtAddr + 2) = 0x00000000; - *(flushBatchVirtAddr + 3) = 0x00000000; - *(flushBatchVirtAddr + 4) = FLUSH_2D; - *(flushBatchVirtAddr + 5) = FLUSH_2D; - *(flushBatchVirtAddr + 6) = FLUSH_2D; - *(flushBatchVirtAddr + 7) = FLUSH_2D; - - // ASSERT(s_cmdring._lastBatchStartAddr != NULL); - lastBatchVirtAddr = (U32*) xgi_find_pcie_virt(info, s_cmdring._lastBatchStartAddr); - - lastBatchVirtAddr[1] = BEGIN_LINK_ENABLE_MASK + 0x08; - lastBatchVirtAddr[2] = flushBatchHWAddr >> 4; - lastBatchVirtAddr[3] = 0; - - //barrier(); - - // BTYPE_CTRL & NO debugID - lastBatchVirtAddr[0] = (0x20<<22) + (BEGIN_VALID_MASK); - - triggerHWCommandList(info, 1); - - s_cmdring._cmdRingOffset += 0x20; - s_cmdring._lastBatchStartAddr = 
flushBatchHWAddr; -} + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * * + * All Rights Reserved. * + * * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + ***************************************************************************/ + +#include "xgi_types.h" +#include "xgi_linux.h" +#include "xgi_drv.h" +#include "xgi_regs.h" +#include "xgi_misc.h" +#include "xgi_cmdlist.h" + +U32 s_emptyBegin[AGPCMDLIST_BEGIN_SIZE] = { + 0x10000000, // 3D Type Begin, Invalid + 0x80000004, // Length = 4; + 0x00000000, + 0x00000000 +}; + +U32 s_flush2D[AGPCMDLIST_FLUSH_CMD_LEN] = { + FLUSH_2D, + FLUSH_2D, + FLUSH_2D, + FLUSH_2D +}; + +xgi_cmdring_info_t s_cmdring; + +static void addFlush2D(xgi_info_t * info); +static U32 getCurBatchBeginPort(xgi_cmd_info_t * pCmdInfo); +static void triggerHWCommandList(xgi_info_t * info, U32 triggerCounter); +static void xgi_cmdlist_reset(void); + +int xgi_cmdlist_initialize(xgi_info_t * info, U32 size) +{ + //xgi_mem_req_t mem_req; + xgi_mem_alloc_t mem_alloc; + + //mem_req.size = size; + + xgi_pcie_alloc(info, size, PCIE_2D, &mem_alloc); + + if ((mem_alloc.size == 0) && (mem_alloc.hw_addr == 0)) { + return -1; + } + + s_cmdring._cmdRingSize = mem_alloc.size; + s_cmdring._cmdRingBuffer = mem_alloc.hw_addr; + s_cmdring._cmdRingBusAddr = mem_alloc.bus_addr; + s_cmdring._lastBatchStartAddr = 0; + s_cmdring._cmdRingOffset = 0; + + return 1; +} + +void xgi_submit_cmdlist(xgi_info_t * info, xgi_cmd_info_t * pCmdInfo) +{ + U32 beginPort; + /** XGI_INFO("Jong-xgi_submit_cmdlist-Begin \n"); **/ + + /* Jong 05/25/2006 */ + /* return; */ + + beginPort = getCurBatchBeginPort(pCmdInfo); + XGI_INFO("Jong-xgi_submit_cmdlist-After getCurBatchBeginPort() \n"); + + /* Jong 05/25/2006 */ + /* return; */ + + if (s_cmdring._lastBatchStartAddr == 0) { + U32 portOffset; + + /* Jong 06/13/2006; remove marked for system hang test */ + /* xgi_waitfor_pci_idle(info); */ + + /* Jong 06132006; BASE_3D_ENG=0x2800 */ + /* beginPort: 2D: 0x30 */ + portOffset = BASE_3D_ENG + beginPort; + + // Enable PCI Trigger Mode + XGI_INFO("Jong-xgi_submit_cmdlist-Enable PCI Trigger Mode \n"); + + /* Jong 05/25/2006 */ + /* return; */ + + /* Jong 06/13/2006; M2REG_AUTO_LINK_SETTING_ADDRESS=0x10 */ + XGI_INFO("Jong-M2REG_AUTO_LINK_SETTING_ADDRESS=0x%lx \n", + M2REG_AUTO_LINK_SETTING_ADDRESS); + XGI_INFO("Jong-M2REG_CLEAR_COUNTERS_MASK=0x%lx \n", + 
M2REG_CLEAR_COUNTERS_MASK); + XGI_INFO + ("Jong-(M2REG_AUTO_LINK_SETTING_ADDRESS << 22)=0x%lx \n", + (M2REG_AUTO_LINK_SETTING_ADDRESS << 22)); + XGI_INFO("Jong-M2REG_PCI_TRIGGER_MODE_MASK=0x%lx \n\n", + M2REG_PCI_TRIGGER_MODE_MASK); + + /* Jong 06/14/2006; 0x400001a */ + XGI_INFO + ("Jong-(M2REG_AUTO_LINK_SETTING_ADDRESS << 22)|M2REG_CLEAR_COUNTERS_MASK|0x08|M2REG_PCI_TRIGGER_MODE_MASK=0x%lx \n", + (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | + M2REG_CLEAR_COUNTERS_MASK | 0x08 | + M2REG_PCI_TRIGGER_MODE_MASK); + dwWriteReg(BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS, + (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | + M2REG_CLEAR_COUNTERS_MASK | 0x08 | + M2REG_PCI_TRIGGER_MODE_MASK); + + /* Jong 05/25/2006 */ + XGI_INFO("Jong-xgi_submit_cmdlist-After dwWriteReg() \n"); + /* return; *//* OK */ + + /* Jong 06/14/2006; 0x400000a */ + XGI_INFO + ("Jong-(M2REG_AUTO_LINK_SETTING_ADDRESS << 22)|0x08|M2REG_PCI_TRIGGER_MODE_MASK=0x%lx \n", + (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | 0x08 | + M2REG_PCI_TRIGGER_MODE_MASK); + dwWriteReg(BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS, + (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | 0x08 | + M2REG_PCI_TRIGGER_MODE_MASK); + + // Send PCI begin command + XGI_INFO("Jong-xgi_submit_cmdlist-Send PCI begin command \n"); + /* return; */ + + XGI_INFO("Jong-xgi_submit_cmdlist-portOffset=%d \n", + portOffset); + XGI_INFO("Jong-xgi_submit_cmdlist-beginPort=%d \n", beginPort); + + /* beginPort = 48; */ + /* 0xc100000 */ + dwWriteReg(portOffset, + (beginPort << 22) + (BEGIN_VALID_MASK) + + pCmdInfo->_curDebugID); + XGI_INFO("Jong-(beginPort<<22)=0x%lx \n", (beginPort << 22)); + XGI_INFO("Jong-(BEGIN_VALID_MASK)=0x%lx \n", BEGIN_VALID_MASK); + XGI_INFO("Jong- pCmdInfo->_curDebugID=0x%lx \n", + pCmdInfo->_curDebugID); + XGI_INFO + ("Jong- (beginPort<<22) + (BEGIN_VALID_MASK) + pCmdInfo->_curDebugID=0x%lx \n", + (beginPort << 22) + (BEGIN_VALID_MASK) + + pCmdInfo->_curDebugID); + XGI_INFO + ("Jong-xgi_submit_cmdlist-Send PCI begin command- After \n"); + /* return; *//* OK */ + + /* 0x80000024 */ + dwWriteReg(portOffset + 4, + BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize); + XGI_INFO("Jong- BEGIN_LINK_ENABLE_MASK=0x%lx \n", + BEGIN_LINK_ENABLE_MASK); + XGI_INFO("Jong- pCmdInfo->_firstSize=0x%lx \n", + pCmdInfo->_firstSize); + XGI_INFO + ("Jong- BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize=0x%lx \n", + BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize); + XGI_INFO("Jong-xgi_submit_cmdlist-dwWriteReg-1 \n"); + + /* 0x1010000 */ + dwWriteReg(portOffset + 8, (pCmdInfo->_firstBeginAddr >> 4)); + XGI_INFO("Jong- pCmdInfo->_firstBeginAddr=0x%lx \n", + pCmdInfo->_firstBeginAddr); + XGI_INFO("Jong- (pCmdInfo->_firstBeginAddr >> 4)=0x%lx \n", + (pCmdInfo->_firstBeginAddr >> 4)); + XGI_INFO("Jong-xgi_submit_cmdlist-dwWriteReg-2 \n"); + + /* Jong 06/13/2006 */ + xgi_dump_register(info); + + /* Jong 06/12/2006; system hang; marked for test */ + dwWriteReg(portOffset + 12, 0); + XGI_INFO("Jong-xgi_submit_cmdlist-dwWriteReg-3 \n"); + + /* Jong 06/13/2006; remove marked for system hang test */ + /* xgi_waitfor_pci_idle(info); */ + } else { + XGI_INFO + ("Jong-xgi_submit_cmdlist-s_cmdring._lastBatchStartAddr != 0 \n"); + U32 *lastBatchVirtAddr; + + /* Jong 05/25/2006 */ + /* return; */ + + if (pCmdInfo->_firstBeginType == BTYPE_3D) { + addFlush2D(info); + } + + lastBatchVirtAddr = + (U32 *) xgi_find_pcie_virt(info, + s_cmdring._lastBatchStartAddr); + + lastBatchVirtAddr[1] = + BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize; + lastBatchVirtAddr[2] = pCmdInfo->_firstBeginAddr >> 4; + 
lastBatchVirtAddr[3] = 0; + //barrier(); + lastBatchVirtAddr[0] = + (beginPort << 22) + (BEGIN_VALID_MASK) + + (0xffff & pCmdInfo->_curDebugID); + + /* Jong 06/12/2006; system hang; marked for test */ + triggerHWCommandList(info, pCmdInfo->_beginCount); + + XGI_INFO + ("Jong-xgi_submit_cmdlist-s_cmdring._lastBatchStartAddr != 0 - End\n"); + } + + s_cmdring._lastBatchStartAddr = pCmdInfo->_lastBeginAddr; + XGI_INFO("Jong-xgi_submit_cmdlist-End \n"); +} + +/* + state: 0 - console + 1 - graphic + 2 - fb + 3 - logout +*/ +void xgi_state_change(xgi_info_t * info, xgi_state_info_t * pStateInfo) +{ +#define STATE_CONSOLE 0 +#define STATE_GRAPHIC 1 +#define STATE_FBTERM 2 +#define STATE_LOGOUT 3 +#define STATE_REBOOT 4 +#define STATE_SHUTDOWN 5 + + if ((pStateInfo->_fromState == STATE_GRAPHIC) + && (pStateInfo->_toState == STATE_CONSOLE)) { + XGI_INFO("[kd] I see, now is to leaveVT\n"); + // stop to received batch + } else if ((pStateInfo->_fromState == STATE_CONSOLE) + && (pStateInfo->_toState == STATE_GRAPHIC)) { + XGI_INFO("[kd] I see, now is to enterVT\n"); + xgi_cmdlist_reset(); + } else if ((pStateInfo->_fromState == STATE_GRAPHIC) + && ((pStateInfo->_toState == STATE_LOGOUT) + || (pStateInfo->_toState == STATE_REBOOT) + || (pStateInfo->_toState == STATE_SHUTDOWN))) { + XGI_INFO("[kd] I see, not is to exit from X\n"); + // stop to received batch + } else { + XGI_ERROR("[kd] Should not happen\n"); + } + +} + +void xgi_cmdlist_reset(void) +{ + s_cmdring._lastBatchStartAddr = 0; + s_cmdring._cmdRingOffset = 0; +} + +void xgi_cmdlist_cleanup(xgi_info_t * info) +{ + if (s_cmdring._cmdRingBuffer != 0) { + xgi_pcie_free(info, s_cmdring._cmdRingBusAddr); + s_cmdring._cmdRingBuffer = 0; + s_cmdring._cmdRingOffset = 0; + s_cmdring._cmdRingSize = 0; + } +} + +static void triggerHWCommandList(xgi_info_t * info, U32 triggerCounter) +{ + static U32 s_triggerID = 1; + + //Fix me, currently we just trigger one time + while (triggerCounter--) { + dwWriteReg(BASE_3D_ENG + M2REG_PCI_TRIGGER_REGISTER_ADDRESS, + 0x05000000 + (0xffff & s_triggerID++)); + // xgi_waitfor_pci_idle(info); + } +} + +static U32 getCurBatchBeginPort(xgi_cmd_info_t * pCmdInfo) +{ + // Convert the batch type to begin port ID + switch (pCmdInfo->_firstBeginType) { + case BTYPE_2D: + return 0x30; + case BTYPE_3D: + return 0x40; + case BTYPE_FLIP: + return 0x50; + case BTYPE_CTRL: + return 0x20; + default: + //ASSERT(0); + return 0xff; + } +} + +static void addFlush2D(xgi_info_t * info) +{ + U32 *flushBatchVirtAddr; + U32 flushBatchHWAddr; + + U32 *lastBatchVirtAddr; + + /* check buf is large enough to contain a new flush batch */ + if ((s_cmdring._cmdRingOffset + 0x20) >= s_cmdring._cmdRingSize) { + s_cmdring._cmdRingOffset = 0; + } + + flushBatchHWAddr = s_cmdring._cmdRingBuffer + s_cmdring._cmdRingOffset; + flushBatchVirtAddr = (U32 *) xgi_find_pcie_virt(info, flushBatchHWAddr); + + /* not using memcpy for I assume the address is discrete */ + *(flushBatchVirtAddr + 0) = 0x10000000; + *(flushBatchVirtAddr + 1) = 0x80000004; /* size = 0x04 dwords */ + *(flushBatchVirtAddr + 2) = 0x00000000; + *(flushBatchVirtAddr + 3) = 0x00000000; + *(flushBatchVirtAddr + 4) = FLUSH_2D; + *(flushBatchVirtAddr + 5) = FLUSH_2D; + *(flushBatchVirtAddr + 6) = FLUSH_2D; + *(flushBatchVirtAddr + 7) = FLUSH_2D; + + // ASSERT(s_cmdring._lastBatchStartAddr != NULL); + lastBatchVirtAddr = + (U32 *) xgi_find_pcie_virt(info, s_cmdring._lastBatchStartAddr); + + lastBatchVirtAddr[1] = BEGIN_LINK_ENABLE_MASK + 0x08; + lastBatchVirtAddr[2] = flushBatchHWAddr >> 4; + 
lastBatchVirtAddr[3] = 0; + + //barrier(); + + // BTYPE_CTRL & NO debugID + lastBatchVirtAddr[0] = (0x20 << 22) + (BEGIN_VALID_MASK); + + triggerHWCommandList(info, 1); + + s_cmdring._cmdRingOffset += 0x20; + s_cmdring._lastBatchStartAddr = flushBatchHWAddr; +} diff --git a/linux-core/xgi_cmdlist.h b/linux-core/xgi_cmdlist.h index 1b0c4965..5fe1de71 100644 --- a/linux-core/xgi_cmdlist.h +++ b/linux-core/xgi_cmdlist.h @@ -1,79 +1,76 @@ - -/**************************************************************************** - * Copyright (C) 2003-2006 by XGI Technology, Taiwan. - * * - * All Rights Reserved. * - * * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation on the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - ***************************************************************************/ - -#ifndef _XGI_CMDLIST_H_ -#define _XGI_CMDLIST_H_ - -#define ONE_BIT_MASK 0x1 -#define TWENTY_BIT_MASK 0xfffff -#define M2REG_FLUSH_2D_ENGINE_MASK (ONE_BIT_MASK<<20) -#define M2REG_FLUSH_3D_ENGINE_MASK TWENTY_BIT_MASK -#define M2REG_FLUSH_FLIP_ENGINE_MASK (ONE_BIT_MASK<<21) -#define BASE_3D_ENG 0x2800 -#define M2REG_AUTO_LINK_SETTING_ADDRESS 0x10 -#define M2REG_CLEAR_COUNTERS_MASK (ONE_BIT_MASK<<4) -#define M2REG_PCI_TRIGGER_MODE_MASK (ONE_BIT_MASK<<1) -#define BEGIN_VALID_MASK (ONE_BIT_MASK<<20) -#define BEGIN_LINK_ENABLE_MASK (ONE_BIT_MASK<<31) -#define M2REG_PCI_TRIGGER_REGISTER_ADDRESS 0x14 - -typedef enum -{ - FLUSH_2D = M2REG_FLUSH_2D_ENGINE_MASK, - FLUSH_3D = M2REG_FLUSH_3D_ENGINE_MASK, - FLUSH_FLIP = M2REG_FLUSH_FLIP_ENGINE_MASK -}FLUSH_CODE; - -typedef enum -{ - AGPCMDLIST_SCRATCH_SIZE = 0x100, - AGPCMDLIST_BEGIN_SIZE = 0x004, - AGPCMDLIST_3D_SCRATCH_CMD_SIZE = 0x004, - AGPCMDLIST_2D_SCRATCH_CMD_SIZE = 0x00c, - AGPCMDLIST_FLUSH_CMD_LEN = 0x004, - AGPCMDLIST_DUMY_END_BATCH_LEN = AGPCMDLIST_BEGIN_SIZE -}CMD_SIZE; - -typedef struct xgi_cmdring_info_s -{ - U32 _cmdRingSize; - U32 _cmdRingBuffer; - U32 _cmdRingBusAddr; - U32 _lastBatchStartAddr; - U32 _cmdRingOffset; -}xgi_cmdring_info_t; - -extern int xgi_cmdlist_initialize(xgi_info_t *info, U32 size); - -extern void xgi_submit_cmdlist(xgi_info_t *info, xgi_cmd_info_t * pCmdInfo); - -extern void xgi_state_change(xgi_info_t *info, xgi_state_info_t * pStateInfo); - -extern void xgi_cmdlist_cleanup(xgi_info_t *info); - -#endif /* _XGI_CMDLIST_H_ */ + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * * + * All Rights Reserved. 
* + * * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + ***************************************************************************/ + +#ifndef _XGI_CMDLIST_H_ +#define _XGI_CMDLIST_H_ + +#define ONE_BIT_MASK 0x1 +#define TWENTY_BIT_MASK 0xfffff +#define M2REG_FLUSH_2D_ENGINE_MASK (ONE_BIT_MASK<<20) +#define M2REG_FLUSH_3D_ENGINE_MASK TWENTY_BIT_MASK +#define M2REG_FLUSH_FLIP_ENGINE_MASK (ONE_BIT_MASK<<21) +#define BASE_3D_ENG 0x2800 +#define M2REG_AUTO_LINK_SETTING_ADDRESS 0x10 +#define M2REG_CLEAR_COUNTERS_MASK (ONE_BIT_MASK<<4) +#define M2REG_PCI_TRIGGER_MODE_MASK (ONE_BIT_MASK<<1) +#define BEGIN_VALID_MASK (ONE_BIT_MASK<<20) +#define BEGIN_LINK_ENABLE_MASK (ONE_BIT_MASK<<31) +#define M2REG_PCI_TRIGGER_REGISTER_ADDRESS 0x14 + +typedef enum { + FLUSH_2D = M2REG_FLUSH_2D_ENGINE_MASK, + FLUSH_3D = M2REG_FLUSH_3D_ENGINE_MASK, + FLUSH_FLIP = M2REG_FLUSH_FLIP_ENGINE_MASK +} FLUSH_CODE; + +typedef enum { + AGPCMDLIST_SCRATCH_SIZE = 0x100, + AGPCMDLIST_BEGIN_SIZE = 0x004, + AGPCMDLIST_3D_SCRATCH_CMD_SIZE = 0x004, + AGPCMDLIST_2D_SCRATCH_CMD_SIZE = 0x00c, + AGPCMDLIST_FLUSH_CMD_LEN = 0x004, + AGPCMDLIST_DUMY_END_BATCH_LEN = AGPCMDLIST_BEGIN_SIZE +} CMD_SIZE; + +typedef struct xgi_cmdring_info_s { + U32 _cmdRingSize; + U32 _cmdRingBuffer; + U32 _cmdRingBusAddr; + U32 _lastBatchStartAddr; + U32 _cmdRingOffset; +} xgi_cmdring_info_t; + +extern int xgi_cmdlist_initialize(xgi_info_t * info, U32 size); + +extern void xgi_submit_cmdlist(xgi_info_t * info, xgi_cmd_info_t * pCmdInfo); + +extern void xgi_state_change(xgi_info_t * info, xgi_state_info_t * pStateInfo); + +extern void xgi_cmdlist_cleanup(xgi_info_t * info); + +#endif /* _XGI_CMDLIST_H_ */ diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index 5e80d417..0c37d00e 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -1,1610 +1,1564 @@ - -/**************************************************************************** - * Copyright (C) 2003-2006 by XGI Technology, Taiwan. - * * - * All Rights Reserved. 
* - * * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation on the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - ***************************************************************************/ -#include "xgi_types.h" -#include "xgi_linux.h" -#include "xgi_drv.h" -#include "xgi_regs.h" -#include "xgi_pcie.h" -#include "xgi_misc.h" -#include "xgi_cmdlist.h" - -/* for debug */ -static int xgi_temp = 1; -/* - * global parameters - */ -static struct xgi_dev { - u16 vendor; - u16 device; - const char *name; -} xgidev_list[] = { - {PCI_VENDOR_ID_XGI, PCI_DEVICE_ID_XP5, "XP5"}, - {PCI_VENDOR_ID_XGI, PCI_DEVICE_ID_XG47, "XG47"}, - {0, 0, NULL} -}; - -int xgi_major = XGI_DEV_MAJOR; /* xgi reserved major device number. */ - -static int xgi_num_devices = 0; - -xgi_info_t xgi_devices[XGI_MAX_DEVICES]; - -#if defined(XGI_PM_SUPPORT_APM) -static struct pm_dev *apm_xgi_dev[XGI_MAX_DEVICES] = { 0 }; -#endif - -/* add one for the control device */ -xgi_info_t xgi_ctl_device; -wait_queue_head_t xgi_ctl_waitqueue; - -#ifdef CONFIG_PROC_FS -struct proc_dir_entry *proc_xgi; -#endif - -#ifdef CONFIG_DEVFS_FS -devfs_handle_t xgi_devfs_handles[XGI_MAX_DEVICES]; -#endif - -struct list_head xgi_mempid_list; - -/* xgi_ functions.. 
do not take a state device parameter */ -static int xgi_post_vbios(xgi_ioctl_post_vbios_t *info); -static void xgi_proc_create(void); -static void xgi_proc_remove_all(struct proc_dir_entry *); -static void xgi_proc_remove(void); - -/* xgi_kern_ functions, interfaces used by linux kernel */ -int xgi_kern_probe(struct pci_dev *, const struct pci_device_id *); - -unsigned int xgi_kern_poll(struct file *, poll_table *); -int xgi_kern_ioctl(struct inode *, struct file *, unsigned int, unsigned long); -int xgi_kern_mmap(struct file *, struct vm_area_struct *); -int xgi_kern_open(struct inode *, struct file *); -int xgi_kern_release(struct inode *inode, struct file *filp); - -void xgi_kern_vma_open(struct vm_area_struct *vma); -void xgi_kern_vma_release(struct vm_area_struct *vma); -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 1)) -struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma, - unsigned long address, int *type); -#else -struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma, - unsigned long address, int write_access); -#endif - -int xgi_kern_read_card_info(char *, char **, off_t off, int, int *, void *); -int xgi_kern_read_status(char *, char **, off_t off, int, int *, void *); -int xgi_kern_read_pcie_info(char *, char **, off_t off, int, int *, void *); -int xgi_kern_read_version(char *, char **, off_t off, int, int *, void *); - -int xgi_kern_ctl_open(struct inode *, struct file *); -int xgi_kern_ctl_close(struct inode *, struct file *); -unsigned int xgi_kern_ctl_poll(struct file *, poll_table *); - -void xgi_kern_isr_bh(unsigned long); -irqreturn_t xgi_kern_isr(int, void *, struct pt_regs *); - -static void xgi_lock_init(xgi_info_t *info); - -#if defined(XGI_PM_SUPPORT_ACPI) -int xgi_kern_acpi_standby(struct pci_dev *, u32); -int xgi_kern_acpi_resume(struct pci_dev *); -#endif - -/* - * verify access to pci config space wasn't disabled behind our back - * unfortunately, XFree86 enables/disables memory access in pci config space at - * various times (such as restoring initial pci config space settings during vt - * switches or when doing mulicard). As a result, all of our register accesses - * are garbage at this point. add a check to see if access was disabled and - * reenable any such access. - */ -#define XGI_CHECK_PCI_CONFIG(xgi) \ - xgi_check_pci_config(xgi, __LINE__) - -static inline void xgi_check_pci_config(xgi_info_t *info, int line) -{ - unsigned short cmd, flag = 0; - - // don't do this on the control device, only the actual devices - if (info->flags & XGI_FLAG_CONTROL) - return; - - pci_read_config_word(info->dev, PCI_COMMAND, &cmd); - if (!(cmd & PCI_COMMAND_MASTER)) - { - XGI_INFO("restoring bus mastering! (%d)\n", line); - cmd |= PCI_COMMAND_MASTER; - flag = 1; - } - - if (!(cmd & PCI_COMMAND_MEMORY)) - { - XGI_INFO("restoring MEM access! 
(%d)\n", line); - cmd |= PCI_COMMAND_MEMORY; - flag = 1; - } - - if (flag) - pci_write_config_word(info->dev, PCI_COMMAND, cmd); -} - -static int xgi_post_vbios(xgi_ioctl_post_vbios_t *info) -{ - return 1; -} - -/* - * struct pci_device_id { - * unsigned int vendor, device; // Vendor and device ID or PCI_ANY_ID - * unsigned int subvendor, subdevice; // Subsystem ID's or PCI_ANY_ID - * unsigned int class, class_mask; // (class,subclass,prog-if) triplet - * unsigned long driver_data; // Data private to the driver - * }; - */ - -static struct pci_device_id xgi_dev_table[] = { - { - .vendor = PCI_VENDOR_ID_XGI, - .device = PCI_ANY_ID, - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - .class = (PCI_CLASS_DISPLAY_VGA << 8), - .class_mask = ~0, - }, - { } -}; - -/* - * #define MODULE_DEVICE_TABLE(type,name) \ - * MODULE_GENERIC_TABLE(type##_device,name) - */ - MODULE_DEVICE_TABLE(pci, xgi_dev_table); - -/* - * struct pci_driver { - * struct list_head node; - * char *name; - * const struct pci_device_id *id_table; // NULL if wants all devices - * int (*probe)(struct pci_dev *dev, const struct pci_device_id *id); // New device inserted - * void (*remove)(struct pci_dev *dev); // Device removed (NULL if not a hot-plug capable driver) - * int (*save_state)(struct pci_dev *dev, u32 state); // Save Device Context - * int (*suspend)(struct pci_dev *dev, u32 state); // Device suspended - * int (*resume)(struct pci_dev *dev); // Device woken up - * int (*enable_wake)(struct pci_dev *dev, u32 state, int enable); // Enable wake event - * }; - */ -static struct pci_driver xgi_pci_driver = { - .name = "xgi", - .id_table = xgi_dev_table, - .probe = xgi_kern_probe, -#if defined(XGI_SUPPORT_ACPI) - .suspend = xgi_kern_acpi_standby, - .resume = xgi_kern_acpi_resume, -#endif -}; - -/* - * find xgi devices and set initial state - */ -int xgi_kern_probe(struct pci_dev *dev, const struct pci_device_id *id_table) -{ - xgi_info_t *info; - - if ((dev->vendor != PCI_VENDOR_ID_XGI) - || (dev->class != (PCI_CLASS_DISPLAY_VGA << 8))) - { - return -1; - } - - if (xgi_num_devices == XGI_MAX_DEVICES) - { - XGI_INFO("maximum device number (%d) reached!\n", xgi_num_devices); - return -1; - } - - /* enable io, mem, and bus-mastering in pci config space */ - if (pci_enable_device(dev) != 0) - { - XGI_INFO("pci_enable_device failed, aborting\n"); - return -1; - } - - XGI_INFO("maximum device number (%d) reached \n", xgi_num_devices); - - pci_set_master(dev); - - info = &xgi_devices[xgi_num_devices]; - info->dev = dev; - info->vendor_id = dev->vendor; - info->device_id = dev->device; - info->bus = dev->bus->number; - info->slot = PCI_SLOT((dev)->devfn); - - xgi_lock_init(info); - - info->mmio.base = XGI_PCI_RESOURCE_START(dev, 1); - info->mmio.size = XGI_PCI_RESOURCE_SIZE(dev, 1); - - /* check IO region */ - if (!request_mem_region(info->mmio.base, info->mmio.size, "xgi")) - { - XGI_ERROR("cannot reserve MMIO memory\n"); - goto error_disable_dev; - } - - XGI_INFO("info->mmio.base: 0x%lx \n", info->mmio.base); - XGI_INFO("info->mmio.size: 0x%lx \n", info->mmio.size); - - info->mmio.vbase = (unsigned char *)ioremap_nocache(info->mmio.base, - info->mmio.size); - if (!info->mmio.vbase) - { - release_mem_region(info->mmio.base, info->mmio.size); - XGI_ERROR("info->mmio.vbase failed\n"); - goto error_disable_dev; - } - xgi_enable_mmio(info); - - //xgi_enable_ge(info); - - XGI_INFO("info->mmio.vbase: 0x%p \n", info->mmio.vbase); - - info->fb.base = XGI_PCI_RESOURCE_START(dev, 0); - info->fb.size = XGI_PCI_RESOURCE_SIZE(dev, 0); - - 
XGI_INFO("info->fb.base: 0x%lx \n", info->fb.base); - XGI_INFO("info->fb.size: 0x%lx \n", info->fb.size); - - info->fb.size = bIn3cf(0x54) * 8 * 1024 * 1024; - XGI_INFO("info->fb.size: 0x%lx \n", info->fb.size); - - /* check frame buffer region - if (!request_mem_region(info->fb.base, info->fb.size, "xgi")) - { - release_mem_region(info->mmio.base, info->mmio.size); - XGI_ERROR("cannot reserve frame buffer memory\n"); - goto error_disable_dev; - } - - - info->fb.vbase = (unsigned char *)ioremap_nocache(info->fb.base, - info->fb.size); - - if (!info->fb.vbase) - { - release_mem_region(info->mmio.base, info->mmio.size); - release_mem_region(info->fb.base, info->fb.size); - XGI_ERROR("info->fb.vbase failed\n"); - goto error_disable_dev; - } - */ - info->fb.vbase = NULL; - XGI_INFO("info->fb.vbase: 0x%p \n", info->fb.vbase); - - info->irq = dev->irq; - - /* check common error condition */ - if (info->irq == 0) - { - XGI_ERROR("Can't find an IRQ for your XGI card! \n"); - goto error_zero_dev; - } - XGI_INFO("info->irq: %lx \n", info->irq); - - //xgi_enable_dvi_interrupt(info); - - /* sanity check the IO apertures */ - if ((info->mmio.base == 0) || (info->mmio.size == 0) - || (info->fb.base == 0) || (info->fb.size == 0)) - { - XGI_ERROR("The IO regions for your XGI card are invalid.\n"); - - if ((info->mmio.base == 0) || (info->mmio.size == 0)) - { - XGI_ERROR("mmio appears to be wrong: 0x%lx 0x%lx\n", - info->mmio.base, - info->mmio.size); - } - - if ((info->fb.base == 0) || (info->fb.size == 0)) - { - XGI_ERROR("frame buffer appears to be wrong: 0x%lx 0x%lx\n", - info->fb.base, - info->fb.size); - } - - goto error_zero_dev; - } - - //xgi_num_devices++; - - return 0; - -error_zero_dev: - release_mem_region(info->fb.base, info->fb.size); - release_mem_region(info->mmio.base, info->mmio.size); - -error_disable_dev: - pci_disable_device(dev); - return -1; - -} - -/* - * vma operations... - * this is only called when the vmas are duplicated. this - * appears to only happen when the process is cloned to create - * a new process, and not when the process is threaded. - * - * increment the usage count for the physical pages, so when - * this clone unmaps the mappings, the pages are not - * deallocated under the original process. - */ -struct vm_operations_struct xgi_vm_ops = { - .open = xgi_kern_vma_open, - .close = xgi_kern_vma_release, - .nopage = xgi_kern_vma_nopage, -}; - -void xgi_kern_vma_open(struct vm_area_struct *vma) -{ - XGI_INFO("VM: vma_open for 0x%lx - 0x%lx, offset 0x%lx\n", - vma->vm_start, - vma->vm_end, - XGI_VMA_OFFSET(vma)); - - if (XGI_VMA_PRIVATE(vma)) - { - xgi_pcie_block_t *block = (xgi_pcie_block_t *)XGI_VMA_PRIVATE(vma); - XGI_ATOMIC_INC(block->use_count); - } -} - -void xgi_kern_vma_release(struct vm_area_struct *vma) -{ - XGI_INFO("VM: vma_release for 0x%lx - 0x%lx, offset 0x%lx\n", - vma->vm_start, - vma->vm_end, - XGI_VMA_OFFSET(vma)); - - if (XGI_VMA_PRIVATE(vma)) - { - xgi_pcie_block_t *block = (xgi_pcie_block_t *)XGI_VMA_PRIVATE(vma); - XGI_ATOMIC_DEC(block->use_count); - - /* - * if use_count is down to 0, the kernel virtual mapping was freed - * but the underlying physical pages were not, we need to clear the - * bit and free the physical pages. 
- */ - if (XGI_ATOMIC_READ(block->use_count) == 0) - { - // Need TO Finish - XGI_VMA_PRIVATE(vma) = NULL; - } - } -} - -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 1)) -struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma, - unsigned long address, int *type) -{ - xgi_pcie_block_t *block = (xgi_pcie_block_t *)XGI_VMA_PRIVATE(vma); - struct page *page = NOPAGE_SIGBUS; - unsigned long offset = 0; - unsigned long page_addr = 0; -/* - XGI_INFO("VM: mmap([0x%lx-0x%lx] off=0x%lx) address: 0x%lx \n", - vma->vm_start, - vma->vm_end, - XGI_VMA_OFFSET(vma), - address); -*/ - offset = (address - vma->vm_start) + XGI_VMA_OFFSET(vma); - - offset = offset - block->bus_addr; - - offset >>= PAGE_SHIFT; - - page_addr = block->page_table[offset].virt_addr; - - if (xgi_temp) - { - XGI_INFO("block->bus_addr: 0x%lx block->hw_addr: 0x%lx" - "block->page_count: 0x%lx block->page_order: 0x%lx" - "block->page_table[0x%lx].virt_addr: 0x%lx\n", - block->bus_addr, block->hw_addr, - block->page_count, block->page_order, - offset, - block->page_table[offset].virt_addr); - xgi_temp = 0; - } - - if (!page_addr) goto out; /* hole or end-of-file */ - page = virt_to_page(page_addr); - - /* got it, now increment the count */ - get_page(page); -out: - return page; - -} -#else -struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma, - unsigned long address, int write_access) -{ - xgi_pcie_block_t *block = (xgi_pcie_block_t *)XGI_VMA_PRIVATE(vma); - struct page *page = NOPAGE_SIGBUS; - unsigned long offset = 0; - unsigned long page_addr = 0; -/* - XGI_INFO("VM: mmap([0x%lx-0x%lx] off=0x%lx) address: 0x%lx \n", - vma->vm_start, - vma->vm_end, - XGI_VMA_OFFSET(vma), - address); -*/ - offset = (address - vma->vm_start) + XGI_VMA_OFFSET(vma); - - offset = offset - block->bus_addr; - - offset >>= PAGE_SHIFT; - - page_addr = block->page_table[offset].virt_addr; - - if (xgi_temp) - { - XGI_INFO("block->bus_addr: 0x%lx block->hw_addr: 0x%lx" - "block->page_count: 0x%lx block->page_order: 0x%lx" - "block->page_table[0x%lx].virt_addr: 0x%lx\n", - block->bus_addr, block->hw_addr, - block->page_count, block->page_order, - offset, - block->page_table[offset].virt_addr); - xgi_temp = 0; - } - - if (!page_addr) goto out; /* hole or end-of-file */ - page = virt_to_page(page_addr); - - /* got it, now increment the count */ - get_page(page); -out: - return page; -} -#endif - -#if 0 -static struct file_operations xgi_fops = { - /* owner: THIS_MODULE, */ - poll: xgi_kern_poll, - ioctl: xgi_kern_ioctl, - mmap: xgi_kern_mmap, - open: xgi_kern_open, - release: xgi_kern_release, -}; -#endif - -static struct file_operations xgi_fops = { - .owner = THIS_MODULE, - .poll = xgi_kern_poll, - .ioctl = xgi_kern_ioctl, - .mmap = xgi_kern_mmap, - .open = xgi_kern_open, - .release = xgi_kern_release, -}; - -static xgi_file_private_t * xgi_alloc_file_private(void) -{ - xgi_file_private_t *fp; - - XGI_KMALLOC(fp, sizeof(xgi_file_private_t)); - if (!fp) - return NULL; - - memset(fp, 0, sizeof(xgi_file_private_t)); - - /* initialize this file's event queue */ - init_waitqueue_head(&fp->wait_queue); - - xgi_init_lock(fp->fp_lock); - - return fp; -} - -static void xgi_free_file_private(xgi_file_private_t *fp) -{ - if (fp == NULL) - return; - - XGI_KFREE(fp, sizeof(xgi_file_private_t)); -} - -int xgi_kern_open(struct inode *inode, struct file *filp) -{ - xgi_info_t *info = NULL; - int dev_num; - int result = 0, status; - - /* - * the type and num values are only valid if we are not using devfs. 
- * However, since we use them to retrieve the device pointer, we - * don't need them with devfs as filp->private_data is already - * initialized - */ - filp->private_data = xgi_alloc_file_private(); - if (filp->private_data == NULL) - return -ENOMEM; - - XGI_INFO("filp->private_data %p\n", filp->private_data); - /* - * for control device, just jump to its open routine - * after setting up the private data - */ - if (XGI_IS_CONTROL_DEVICE(inode)) - return xgi_kern_ctl_open(inode, filp); - - /* what device are we talking about? */ - dev_num = XGI_DEVICE_NUMBER(inode); - if (dev_num >= XGI_MAX_DEVICES) - { - xgi_free_file_private(filp->private_data); - filp->private_data = NULL; - return -ENODEV; - } - - info = &xgi_devices[dev_num]; - - XGI_INFO("Jong-xgi_kern_open on device %d\n", dev_num); - - xgi_down(info->info_sem); - XGI_CHECK_PCI_CONFIG(info); - - XGI_INFO_FROM_FP(filp) = info; - - /* - * map the memory and allocate isr on first open - */ - - if (!(info->flags & XGI_FLAG_OPEN)) - { - XGI_INFO("info->flags & XGI_FLAG_OPEN \n"); - - if (info->device_id == 0) - { - XGI_INFO("open of nonexistent device %d\n", dev_num); - result = -ENXIO; - goto failed; - } - - /* initialize struct irqaction */ - status = request_irq(info->irq, xgi_kern_isr, - SA_INTERRUPT | SA_SHIRQ, "xgi", - (void *) info); - if (status != 0) - { - if (info->irq && (status == -EBUSY)) - { - XGI_ERROR("Tried to get irq %d, but another driver", - (unsigned int) info->irq); - XGI_ERROR("has it and is not sharing it.\n"); - } - XGI_ERROR("isr request failed 0x%x\n", status); - result = -EIO; - goto failed; - } - - /* - * #define DECLARE_TASKLET(name, func, data) \ - * struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data } - */ - info->tasklet.func = xgi_kern_isr_bh; - info->tasklet.data = (unsigned long) info; - tasklet_enable(&info->tasklet); - - /* Alloc 1M bytes for cmdbuffer which is flush2D batch array */ - xgi_cmdlist_initialize(info, 0x100000); - - info->flags |= XGI_FLAG_OPEN; - } - - XGI_ATOMIC_INC(info->use_count); - -failed: - xgi_up(info->info_sem); - - if ((result) && filp->private_data) - { - xgi_free_file_private(filp->private_data); - filp->private_data = NULL; - } - - return result; -} - -int xgi_kern_release(struct inode *inode, struct file *filp) -{ - xgi_info_t *info = XGI_INFO_FROM_FP(filp); - - XGI_CHECK_PCI_CONFIG(info); - - /* - * for control device, just jump to its open routine - * after setting up the private data - */ - if (XGI_IS_CONTROL_DEVICE(inode)) - return xgi_kern_ctl_close(inode, filp); - - XGI_INFO("Jong-xgi_kern_release on device %d\n", XGI_DEVICE_NUMBER(inode)); - - xgi_down(info->info_sem); - if (XGI_ATOMIC_DEC_AND_TEST(info->use_count)) - { - - /* - * The usage count for this device has dropped to zero, it can be shut - * down safely; disable its interrupts. - */ - - /* - * Disable this device's tasklet to make sure that no bottom half will - * run with undefined device state. - */ - tasklet_disable(&info->tasklet); - - /* - * Free the IRQ, which may block until all pending interrupt processing - * has completed. 
- */ - free_irq(info->irq, (void *)info); - - xgi_cmdlist_cleanup(info); - - /* leave INIT flag alone so we don't reinit every time */ - info->flags &= ~XGI_FLAG_OPEN; - } - - xgi_up(info->info_sem); - - if (FILE_PRIVATE(filp)) - { - xgi_free_file_private(FILE_PRIVATE(filp)); - FILE_PRIVATE(filp) = NULL; - } - - return 0; -} - -int xgi_kern_mmap(struct file *filp, struct vm_area_struct *vma) -{ - //struct inode *inode = INODE_FROM_FP(filp); - xgi_info_t *info = XGI_INFO_FROM_FP(filp); - xgi_pcie_block_t *block; - int pages = 0; - unsigned long prot; - - XGI_INFO("Jong-VM: mmap([0x%lx-0x%lx] off=0x%lx)\n", - vma->vm_start, - vma->vm_end, - XGI_VMA_OFFSET(vma)); - - XGI_CHECK_PCI_CONFIG(info); - - if (XGI_MASK_OFFSET(vma->vm_start) - || XGI_MASK_OFFSET(vma->vm_end)) - { - XGI_ERROR("VM: bad mmap range: %lx - %lx\n", - vma->vm_start, vma->vm_end); - return -ENXIO; - } - - pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; - - vma->vm_ops = &xgi_vm_ops; - - /* XGI IO(reg) space */ - if (IS_IO_OFFSET(info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start)) - { - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - if (XGI_REMAP_PAGE_RANGE(vma->vm_start, - XGI_VMA_OFFSET(vma), - vma->vm_end - vma->vm_start, - vma->vm_page_prot)) - return -EAGAIN; - - /* mark it as IO so that we don't dump it on core dump */ - vma->vm_flags |= VM_IO; - XGI_INFO("VM: mmap io space \n"); - } - /* XGI fb space */ - /* Jong 06/14/2006; moved behind PCIE or modify IS_FB_OFFSET */ - else if (IS_FB_OFFSET(info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start)) - { - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - if (XGI_REMAP_PAGE_RANGE(vma->vm_start, - XGI_VMA_OFFSET(vma), - vma->vm_end - vma->vm_start, - vma->vm_page_prot)) - return -EAGAIN; - - // mark it as IO so that we don't dump it on core dump - vma->vm_flags |= VM_IO; - XGI_INFO("VM: mmap fb space \n"); - } - /* PCIE allocator */ - /* XGI_VMA_OFFSET(vma) is offset based on pcie.base (HW address space) */ - else if (IS_PCIE_OFFSET(info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start)) - { - xgi_down(info->pcie_sem); - - block = (xgi_pcie_block_t *)xgi_find_pcie_block(info, XGI_VMA_OFFSET(vma)); - - if (block == NULL) - { - XGI_ERROR("couldn't find pre-allocated PCIE memory!\n"); - xgi_up(info->pcie_sem); - return -EAGAIN; - } - - if (block->page_count != pages) - { - XGI_ERROR("pre-allocated PCIE memory has wrong number of pages!\n"); - xgi_up(info->pcie_sem); - return -EAGAIN; - } - - vma->vm_private_data = block; - XGI_ATOMIC_INC(block->use_count); - xgi_up(info->pcie_sem); - - /* - * prevent the swapper from swapping it out - * mark the memory i/o so the buffers aren't - * dumped on core dumps */ - vma->vm_flags |= (VM_LOCKED | VM_IO); - - /* un-cached */ - prot = pgprot_val(vma->vm_page_prot); - /* - if (boot_cpu_data.x86 > 3) - prot |= _PAGE_PCD | _PAGE_PWT; - */ - vma->vm_page_prot = __pgprot(prot); - - XGI_INFO("VM: mmap pcie space \n"); - } -#if 0 - else if (IS_FB_OFFSET(info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start)) - { - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - if (XGI_REMAP_PAGE_RANGE(vma->vm_start, - XGI_VMA_OFFSET(vma), - vma->vm_end - vma->vm_start, - vma->vm_page_prot)) - return -EAGAIN; - - // mark it as IO so that we don't dump it on core dump - vma->vm_flags |= VM_IO; - XGI_INFO("VM: mmap fb space \n"); - } -#endif - else - { - vma->vm_flags |= (VM_IO | VM_LOCKED); - XGI_ERROR("VM: mmap wrong range \n"); - } - - vma->vm_file = filp; - - return 0; -} - -unsigned int xgi_kern_poll(struct file 
*filp, struct poll_table_struct *wait) -{ - xgi_file_private_t *fp; - xgi_info_t *info; - unsigned int mask = 0; - unsigned long eflags; - - info = XGI_INFO_FROM_FP(filp); - - if (info->device_number == XGI_CONTROL_DEVICE_NUMBER) - return xgi_kern_ctl_poll(filp, wait); - - fp = XGI_GET_FP(filp); - - if (!(filp->f_flags & O_NONBLOCK)) - { - /* add us to the list */ - poll_wait(filp, &fp->wait_queue, wait); - } - - xgi_lock_irqsave(fp->fp_lock, eflags); - - /* wake the user on any event */ - if (fp->num_events) - { - XGI_INFO("Hey, an event occured!\n"); - /* - * trigger the client, when they grab the event, - * we'll decrement the event count - */ - mask |= (POLLPRI|POLLIN); - } - xgi_unlock_irqsave(fp->fp_lock, eflags); - - return mask; -} - -int xgi_kern_ioctl(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) -{ - xgi_info_t *info; - xgi_mem_alloc_t *alloc = NULL; - - int status = 0; - void *arg_copy; - int arg_size; - int err = 0; - - info = XGI_INFO_FROM_FP(filp); - - XGI_INFO("Jong-ioctl(0x%x, 0x%x, 0x%lx, 0x%x)\n", _IOC_TYPE(cmd), _IOC_NR(cmd), arg, _IOC_SIZE(cmd)); - /* - * extract the type and number bitfields, and don't decode - * wrong cmds: return ENOTTY (inappropriate ioctl) before access_ok() - */ - if (_IOC_TYPE(cmd) != XGI_IOCTL_MAGIC) return -ENOTTY; - if (_IOC_NR(cmd) > XGI_IOCTL_MAXNR) return -ENOTTY; - - /* - * the direction is a bitmask, and VERIFY_WRITE catches R/W - * transfers. `Type' is user-oriented, while - * access_ok is kernel-oriented, so the concept of "read" and - * "write" is reversed - */ - if (_IOC_DIR(cmd) & _IOC_READ) - { - err = !access_ok(VERIFY_WRITE, (void *)arg, _IOC_SIZE(cmd)); - } - else if (_IOC_DIR(cmd) & _IOC_WRITE) - { - err = !access_ok(VERIFY_READ, (void *)arg, _IOC_SIZE(cmd)); - } - if (err) return -EFAULT; - - XGI_CHECK_PCI_CONFIG(info); - - arg_size = _IOC_SIZE(cmd); - XGI_KMALLOC(arg_copy, arg_size); - if (arg_copy == NULL) - { - XGI_ERROR("failed to allocate ioctl memory\n"); - return -ENOMEM; - } - - /* Jong 05/25/2006 */ - /* copy_from_user(arg_copy, (void *)arg, arg_size); */ - if(copy_from_user(arg_copy, (void *)arg, arg_size)) - { - XGI_ERROR("failed to copyin ioctl data\n"); - XGI_INFO("Jong-copy_from_user-fail! \n"); - } - else - XGI_INFO("Jong-copy_from_user-OK! 
\n"); - - alloc = (xgi_mem_alloc_t *)arg_copy; - XGI_INFO("Jong-succeeded in copy_from_user 0x%lx, 0x%x bytes.\n", arg, arg_size); - - switch (_IOC_NR(cmd)) - { - case XGI_ESC_DEVICE_INFO: - XGI_INFO("Jong-xgi_ioctl_get_device_info \n"); - xgi_get_device_info(info, (struct xgi_chip_info_s *) arg_copy); - break; - case XGI_ESC_POST_VBIOS: - XGI_INFO("Jong-xgi_ioctl_post_vbios \n"); - break; - case XGI_ESC_FB_ALLOC: - XGI_INFO("Jong-xgi_ioctl_fb_alloc \n"); - xgi_fb_alloc(info, (struct xgi_mem_req_s *)arg_copy, alloc); - break; - case XGI_ESC_FB_FREE: - XGI_INFO("Jong-xgi_ioctl_fb_free \n"); - xgi_fb_free(info, *(unsigned long *) arg_copy); - break; - case XGI_ESC_MEM_COLLECT: - XGI_INFO("Jong-xgi_ioctl_mem_collect \n"); - xgi_mem_collect(info, (unsigned int *) arg_copy); - break; - case XGI_ESC_PCIE_ALLOC: - XGI_INFO("Jong-xgi_ioctl_pcie_alloc \n"); - xgi_pcie_alloc(info, ((xgi_mem_req_t *)arg_copy)->size, - ((xgi_mem_req_t *)arg_copy)->owner, alloc); - break; - case XGI_ESC_PCIE_FREE: - XGI_INFO("Jong-xgi_ioctl_pcie_free: bus_addr = 0x%lx \n", *((unsigned long *) arg_copy)); - xgi_pcie_free(info, *((unsigned long *) arg_copy)); - break; - case XGI_ESC_PCIE_CHECK: - XGI_INFO("Jong-xgi_pcie_heap_check \n"); - xgi_pcie_heap_check(); - break; - case XGI_ESC_GET_SCREEN_INFO: - XGI_INFO("Jong-xgi_get_screen_info \n"); - xgi_get_screen_info(info, (struct xgi_screen_info_s *) arg_copy); - break; - case XGI_ESC_PUT_SCREEN_INFO: - XGI_INFO("Jong-xgi_put_screen_info \n"); - xgi_put_screen_info(info, (struct xgi_screen_info_s *) arg_copy); - break; - case XGI_ESC_MMIO_INFO: - XGI_INFO("Jong-xgi_ioctl_get_mmio_info \n"); - xgi_get_mmio_info(info, (struct xgi_mmio_info_s *) arg_copy); - break; - case XGI_ESC_GE_RESET: - XGI_INFO("Jong-xgi_ioctl_ge_reset \n"); - xgi_ge_reset(info); - break; - case XGI_ESC_SAREA_INFO: - XGI_INFO("Jong-xgi_ioctl_sarea_info \n"); - xgi_sarea_info(info, (struct xgi_sarea_info_s *) arg_copy); - break; - case XGI_ESC_DUMP_REGISTER: - XGI_INFO("Jong-xgi_ioctl_dump_register \n"); - xgi_dump_register(info); - break; - case XGI_ESC_DEBUG_INFO: - XGI_INFO("Jong-xgi_ioctl_restore_registers \n"); - xgi_restore_registers(info); - //xgi_write_pcie_mem(info, (struct xgi_mem_req_s *) arg_copy); - //xgi_read_pcie_mem(info, (struct xgi_mem_req_s *) arg_copy); - break; - case XGI_ESC_SUBMIT_CMDLIST: - XGI_INFO("Jong-xgi_ioctl_submit_cmdlist \n"); - xgi_submit_cmdlist(info, (xgi_cmd_info_t *) arg_copy); - break; - case XGI_ESC_TEST_RWINKERNEL: - XGI_INFO("Jong-xgi_test_rwinkernel \n"); - xgi_test_rwinkernel(info, *(unsigned long*) arg_copy); - break; - case XGI_ESC_STATE_CHANGE: - XGI_INFO("Jong-xgi_state_change \n"); - xgi_state_change(info, (xgi_state_info_t *) arg_copy); - break; - case XGI_ESC_CPUID: - XGI_INFO("Jong-XGI_ESC_CPUID \n"); - xgi_get_cpu_id((struct cpu_info_s*) arg_copy); - break; - default: - XGI_INFO("Jong-xgi_ioctl_default \n"); - status = -EINVAL; - break; - } - - if (copy_to_user((void *)arg, arg_copy, arg_size)) - { - XGI_ERROR("failed to copyout ioctl data\n"); - XGI_INFO("Jong-copy_to_user-fail! \n"); - } - else - XGI_INFO("Jong-copy_to_user-OK! 
\n"); - - XGI_KFREE(arg_copy, arg_size); - return status; -} - - -/* - * xgi control driver operations defined here - */ -int xgi_kern_ctl_open(struct inode *inode, struct file *filp) -{ - xgi_info_t *info = &xgi_ctl_device; - - int rc = 0; - - XGI_INFO("Jong-xgi_kern_ctl_open\n"); - - xgi_down(info->info_sem); - info->device_number = XGI_CONTROL_DEVICE_NUMBER; - - /* save the xgi info in file->private_data */ - filp->private_data = info; - - if (XGI_ATOMIC_READ(info->use_count) == 0) - { - init_waitqueue_head(&xgi_ctl_waitqueue); - } - - info->flags |= XGI_FLAG_OPEN + XGI_FLAG_CONTROL; - - XGI_ATOMIC_INC(info->use_count); - xgi_up(info->info_sem); - - return rc; -} - -int xgi_kern_ctl_close(struct inode *inode, struct file *filp) -{ - xgi_info_t *info = XGI_INFO_FROM_FP(filp); - - XGI_INFO("Jong-xgi_kern_ctl_close\n"); - - xgi_down(info->info_sem); - if (XGI_ATOMIC_DEC_AND_TEST(info->use_count)) - { - info->flags = 0; - } - xgi_up(info->info_sem); - - if (FILE_PRIVATE(filp)) - { - xgi_free_file_private(FILE_PRIVATE(filp)); - FILE_PRIVATE(filp) = NULL; - } - - return 0; -} - -unsigned int xgi_kern_ctl_poll(struct file *filp, poll_table *wait) -{ - //xgi_info_t *info = XGI_INFO_FROM_FP(filp);; - unsigned int ret = 0; - - if (!(filp->f_flags & O_NONBLOCK)) - { - poll_wait(filp, &xgi_ctl_waitqueue, wait); - } - - return ret; -} - -/* - * xgi proc system - */ -static u8 xgi_find_pcie_capability(struct pci_dev *dev) -{ - u16 status; - u8 cap_ptr, cap_id; - - pci_read_config_word(dev, PCI_STATUS, &status); - status &= PCI_STATUS_CAP_LIST; - if (!status) - return 0; - - switch (dev->hdr_type) - { - case PCI_HEADER_TYPE_NORMAL: - case PCI_HEADER_TYPE_BRIDGE: - pci_read_config_byte(dev, PCI_CAPABILITY_LIST, &cap_ptr); - break; - default: - return 0; - } - - do - { - cap_ptr &= 0xFC; - pci_read_config_byte(dev, cap_ptr + PCI_CAP_LIST_ID, &cap_id); - pci_read_config_byte(dev, cap_ptr + PCI_CAP_LIST_NEXT, &cap_ptr); - } while (cap_ptr && cap_id != 0xFF); - - return 0; -} - -static struct pci_dev* xgi_get_pci_device(xgi_info_t *info) -{ - struct pci_dev *dev; - - dev = XGI_PCI_GET_DEVICE(info->vendor_id, info->device_id, NULL); - while (dev) - { - if (XGI_PCI_SLOT_NUMBER(dev) == info->slot - && XGI_PCI_BUS_NUMBER(dev) == info->bus) - return dev; - dev = XGI_PCI_GET_DEVICE(info->vendor_id, info->device_id, dev); - } - - return NULL; -} - -int xgi_kern_read_card_info(char *page, char **start, off_t off, - int count, int *eof, void *data) -{ - struct pci_dev *dev; - char *type; - int len = 0; - - xgi_info_t *info; - info = (xgi_info_t *) data; - - dev = xgi_get_pci_device(info); - if (!dev) - return 0; - - type = xgi_find_pcie_capability(dev) ? 
"PCIE" : "PCI"; - len += sprintf(page+len, "Card Type: \t %s\n", type); - - XGI_PCI_DEV_PUT(dev); - return len; -} - -int xgi_kern_read_version(char *page, char **start, off_t off, - int count, int *eof, void *data) -{ - int len = 0; - - len += sprintf(page+len, "XGI version: %s\n", "1.0"); - len += sprintf(page+len, "GCC version: %s\n", "3.0"); - - return len; -} - -int xgi_kern_read_pcie_info(char *page, char **start, off_t off, - int count, int *eof, void *data) -{ - return 0; -} - -int xgi_kern_read_status(char *page, char **start, off_t off, - int count, int *eof, void *data) -{ - return 0; -} - - -static void xgi_proc_create(void) -{ -#ifdef CONFIG_PROC_FS - - struct pci_dev *dev; - int i = 0; - char name[6]; - - struct proc_dir_entry *entry; - struct proc_dir_entry *proc_xgi_pcie, *proc_xgi_cards; - - xgi_info_t *info; - xgi_info_t *xgi_max_devices; - - /* world readable directory */ - int flags = S_IFDIR | S_IRUGO | S_IXUGO; - - proc_xgi = create_proc_entry("xgi", flags, proc_root_driver); - if (!proc_xgi) - goto failed; - - proc_xgi_cards = create_proc_entry("cards", flags, proc_xgi); - if (!proc_xgi_cards) - goto failed; - - proc_xgi_pcie = create_proc_entry("pcie", flags, proc_xgi); - if (!proc_xgi_pcie) - goto failed; - - /* - * Set the module owner to ensure that the reference - * count reflects accesses to the proc files. - */ - proc_xgi->owner = THIS_MODULE; - proc_xgi_cards->owner = THIS_MODULE; - proc_xgi_pcie->owner = THIS_MODULE; - - xgi_max_devices = xgi_devices + XGI_MAX_DEVICES; - for (info = xgi_devices; info < xgi_max_devices; info++) - { - if (info->device_id == 0) - break; - - /* world readable file */ - flags = S_IFREG | S_IRUGO; - - dev = xgi_get_pci_device(info); - if (!dev) - break; - - sprintf(name, "%d", i++); - entry = create_proc_entry(name, flags, proc_xgi_cards); - if (!entry) - { - XGI_PCI_DEV_PUT(dev); - goto failed; - } - - entry->data = info; - entry->read_proc = xgi_kern_read_card_info; - entry->owner = THIS_MODULE; - - if (xgi_find_pcie_capability(dev)) - { - entry = create_proc_entry("status", flags, proc_xgi_pcie); - if (!entry) - { - XGI_PCI_DEV_PUT(dev); - goto failed; - } - - entry->data = info; - entry->read_proc = xgi_kern_read_status; - entry->owner = THIS_MODULE; - - entry = create_proc_entry("card", flags, proc_xgi_pcie); - if (!entry) - { - XGI_PCI_DEV_PUT(dev); - goto failed; - } - - entry->data = info; - entry->read_proc = xgi_kern_read_pcie_info; - entry->owner = THIS_MODULE; - } - - XGI_PCI_DEV_PUT(dev); - } - - entry = create_proc_entry("version", flags, proc_xgi); - if (!entry) - goto failed; - - entry->read_proc = xgi_kern_read_version; - entry->owner = THIS_MODULE; - - entry = create_proc_entry("host-bridge", flags, proc_xgi_pcie); - if (!entry) - goto failed; - - entry->data = NULL; - entry->read_proc = xgi_kern_read_pcie_info; - entry->owner = THIS_MODULE; - - return; - -failed: - XGI_ERROR("failed to create /proc entries!\n"); - xgi_proc_remove_all(proc_xgi); -#endif -} - -#ifdef CONFIG_PROC_FS -static void xgi_proc_remove_all(struct proc_dir_entry *entry) -{ - while (entry) - { - struct proc_dir_entry *next = entry->next; - if (entry->subdir) - xgi_proc_remove_all(entry->subdir); - remove_proc_entry(entry->name, entry->parent); - if (entry == proc_xgi) - break; - entry = next; - } -} -#endif - -static void xgi_proc_remove(void) -{ -#ifdef CONFIG_PROC_FS - xgi_proc_remove_all(proc_xgi); -#endif -} - -/* - * driver receives an interrupt if someone waiting, then hand it off. 
- */ -irqreturn_t xgi_kern_isr(int irq, void *dev_id, struct pt_regs *regs) -{ - xgi_info_t *info = (xgi_info_t *) dev_id; - u32 need_to_run_bottom_half = 0; - - //XGI_INFO("xgi_kern_isr \n"); - - //XGI_CHECK_PCI_CONFIG(info); - - //xgi_dvi_irq_handler(info); - - if (need_to_run_bottom_half) - { - tasklet_schedule(&info->tasklet); - } - - return IRQ_HANDLED; -} - -void xgi_kern_isr_bh(unsigned long data) -{ - xgi_info_t *info = (xgi_info_t *) data; - - XGI_INFO("xgi_kern_isr_bh \n"); - - //xgi_dvi_irq_handler(info); - - XGI_CHECK_PCI_CONFIG(info); -} - -static void xgi_lock_init(xgi_info_t *info) -{ - if (info == NULL) return; - - spin_lock_init(&info->info_lock); - - sema_init(&info->info_sem, 1); - sema_init(&info->fb_sem, 1); - sema_init(&info->pcie_sem, 1); - - XGI_ATOMIC_SET(info->use_count, 0); -} - -static void xgi_dev_init(xgi_info_t *info) -{ - struct pci_dev *pdev = NULL; - struct xgi_dev *dev; - int found = 0; - u16 pci_cmd; - - XGI_INFO("Enter xgi_dev_init \n"); - - //XGI_PCI_FOR_EACH_DEV(pdev) - { - for (dev = xgidev_list; dev->vendor; dev++) - { - if ((dev->vendor == pdev->vendor) && (dev->device == pdev->device)) - { - XGI_INFO("dev->vendor = pdev->vendor= %x \n", dev->vendor); - XGI_INFO("dev->device = pdev->device= %x \n", dev->device); - - xgi_devices[found].device_id = pdev->device; - - pci_read_config_byte(pdev, PCI_REVISION_ID, &xgi_devices[found].revision_id); - - XGI_INFO("PCI_REVISION_ID= %x \n", xgi_devices[found].revision_id); - - pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); - - XGI_INFO("PCI_COMMAND = %x \n", pci_cmd); - - break; - } - } - } -} -/* - * Export to Linux Kernel - */ - -static int __init xgi_init_module(void) -{ - xgi_info_t *info = &xgi_devices[xgi_num_devices]; - int i, result; - - XGI_INFO("Jong-xgi kernel driver %s initializing\n", XGI_DRV_VERSION); - //SET_MODULE_OWNER(&xgi_fops); - - memset(xgi_devices, 0, sizeof(xgi_devices)); - - if (pci_register_driver(&xgi_pci_driver) < 0) - { - pci_unregister_driver(&xgi_pci_driver); - XGI_ERROR("no XGI graphics adapter found\n"); - return -ENODEV; - } - - XGI_INFO("Jong-xgi_devices[%d].fb.base.: 0x%lx \n", xgi_num_devices, xgi_devices[xgi_num_devices].fb.base); - XGI_INFO("Jong-xgi_devices[%d].fb.size.: 0x%lx \n", xgi_num_devices, xgi_devices[xgi_num_devices].fb.size); - -/* Jong 07/27/2006; test for ubuntu */ -/* -#ifdef CONFIG_DEVFS_FS - - XGI_INFO("Jong-Use devfs \n"); - do - { - xgi_devfs_handles[0] = XGI_DEVFS_REGISTER("xgi", 0); - if (xgi_devfs_handles[0] == NULL) - { - result = -ENOMEM; - XGI_ERROR("devfs register failed\n"); - goto failed; - } - } while(0); -#else */ /* no devfs, do it the "classic" way */ - - - XGI_INFO("Jong-Use non-devfs \n"); - /* - * Register your major, and accept a dynamic number. This is the - * first thing to do, in order to avoid releasing other module's - * fops in scull_cleanup_module() - */ - result = XGI_REGISTER_CHRDEV(xgi_major, "xgi", &xgi_fops); - if (result < 0) - { - XGI_ERROR("register chrdev failed\n"); - pci_unregister_driver(&xgi_pci_driver); - return result; - } - if (xgi_major == 0) xgi_major = result; /* dynamic */ - -/* #endif */ /* CONFIG_DEVFS_FS */ - - XGI_INFO("Jong-major number %d\n", xgi_major); - - /* instantiate tasklets */ - for (i = 0; i < XGI_MAX_DEVICES; i++) - { - /* - * We keep one tasklet per card to avoid latency issues with more - * than one device; no two instances of a single tasklet are ever - * executed concurrently. 
- */ - XGI_ATOMIC_SET(xgi_devices[i].tasklet.count, 1); - } - - /* init the xgi control device */ - { - xgi_info_t *info_ctl = &xgi_ctl_device; - xgi_lock_init(info_ctl); - } - - /* Init the resource manager */ - INIT_LIST_HEAD(&xgi_mempid_list); - if (!xgi_fb_heap_init(info)) - { - XGI_ERROR("xgi_fb_heap_init() failed\n"); - result = -EIO; - goto failed; - } - - /* Init the resource manager */ - if (!xgi_pcie_heap_init(info)) - { - XGI_ERROR("xgi_pcie_heap_init() failed\n"); - result = -EIO; - goto failed; - } - - /* create /proc/driver/xgi */ - xgi_proc_create(); - -#if defined(DEBUG) - inter_module_register("xgi_devices", THIS_MODULE, xgi_devices); -#endif - - return 0; - -failed: -#ifdef CONFIG_DEVFS_FS - XGI_DEVFS_REMOVE_CONTROL(); - XGI_DEVFS_REMOVE_DEVICE(xgi_num_devices); -#endif - - if (XGI_UNREGISTER_CHRDEV(xgi_major, "xgi") < 0) - XGI_ERROR("unregister xgi chrdev failed\n"); - - for (i = 0; i < xgi_num_devices; i++) - { - if (xgi_devices[i].dev) - { - release_mem_region(xgi_devices[i].fb.base, xgi_devices[i].fb.size); - release_mem_region(xgi_devices[i].mmio.base, xgi_devices[i].mmio.size); - } - } - - pci_unregister_driver(&xgi_pci_driver); - return result; - - return 1; -} - -void __exit xgi_exit_module(void) -{ - int i; - xgi_info_t *info, *max_devices; - -#ifdef CONFIG_DEVFS_FS - /* - XGI_DEVFS_REMOVE_CONTROL(); - for (i = 0; i < XGI_MAX_DEVICES; i++) - XGI_DEVFS_REMOVE_DEVICE(i); - */ - XGI_DEVFS_REMOVE_DEVICE(xgi_num_devices); -#endif - - if (XGI_UNREGISTER_CHRDEV(xgi_major, "xgi") < 0) - XGI_ERROR("unregister xgi chrdev failed\n"); - - XGI_INFO("Jong-unregister xgi chrdev scceeded\n"); - for (i = 0; i < XGI_MAX_DEVICES; i++) - { - if (xgi_devices[i].dev) - { - /* clean up the flush2D batch array */ - xgi_cmdlist_cleanup(&xgi_devices[i]); - - if(xgi_devices[i].fb.vbase != NULL) - { - iounmap((void *)xgi_devices[i].fb.vbase); - xgi_devices[i].fb.vbase = NULL; - } - if(xgi_devices[i].mmio.vbase != NULL) - { - iounmap((void *)xgi_devices[i].mmio.vbase); - xgi_devices[i].mmio.vbase = NULL; - } - - //release_mem_region(xgi_devices[i].fb.base, xgi_devices[i].fb.size); - //XGI_INFO("release frame buffer mem region scceeded\n"); - - release_mem_region(xgi_devices[i].mmio.base, xgi_devices[i].mmio.size); - XGI_INFO("release MMIO mem region scceeded\n"); - - xgi_fb_heap_cleanup(&xgi_devices[i]); - XGI_INFO("xgi_fb_heap_cleanup scceeded\n"); - - xgi_pcie_heap_cleanup(&xgi_devices[i]); - XGI_INFO("xgi_pcie_heap_cleanup scceeded\n"); - - XGI_PCI_DISABLE_DEVICE(xgi_devices[i].dev); - } - } - - pci_unregister_driver(&xgi_pci_driver); - - /* remove /proc/driver/xgi */ - xgi_proc_remove(); - -#if defined(DEBUG) - inter_module_unregister("xgi_devices"); -#endif -} - -module_init(xgi_init_module); -module_exit(xgi_exit_module); - -#if defined(XGI_PM_SUPPORT_ACPI) -int xgi_acpi_event(struct pci_dev *dev, u32 state) -{ - return 1; -} - -int xgi_kern_acpi_standby(struct pci_dev *dev, u32 state) -{ - return 1; -} - -int xgi_kern_acpi_resume(struct pci_dev *dev) -{ - return 1; -} -#endif - -MODULE_AUTHOR("Andrea Zhang "); -MODULE_DESCRIPTION("xgi kernel driver for xgi cards"); -MODULE_LICENSE("GPL"); + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * * + * All Rights Reserved. 
* + * * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + ***************************************************************************/ +#include "xgi_types.h" +#include "xgi_linux.h" +#include "xgi_drv.h" +#include "xgi_regs.h" +#include "xgi_pcie.h" +#include "xgi_misc.h" +#include "xgi_cmdlist.h" + +/* for debug */ +static int xgi_temp = 1; +/* + * global parameters + */ +static struct xgi_dev { + u16 vendor; + u16 device; + const char *name; +} xgidev_list[] = { + { + PCI_VENDOR_ID_XGI, PCI_DEVICE_ID_XP5, "XP5"}, { + PCI_VENDOR_ID_XGI, PCI_DEVICE_ID_XG47, "XG47"}, { + 0, 0, NULL} +}; + +int xgi_major = XGI_DEV_MAJOR; /* xgi reserved major device number. */ + +static int xgi_num_devices = 0; + +xgi_info_t xgi_devices[XGI_MAX_DEVICES]; + +#if defined(XGI_PM_SUPPORT_APM) +static struct pm_dev *apm_xgi_dev[XGI_MAX_DEVICES] = { 0 }; +#endif + +/* add one for the control device */ +xgi_info_t xgi_ctl_device; +wait_queue_head_t xgi_ctl_waitqueue; + +#ifdef CONFIG_PROC_FS +struct proc_dir_entry *proc_xgi; +#endif + +#ifdef CONFIG_DEVFS_FS +devfs_handle_t xgi_devfs_handles[XGI_MAX_DEVICES]; +#endif + +struct list_head xgi_mempid_list; + +/* xgi_ functions.. 
do not take a state device parameter */ +static int xgi_post_vbios(xgi_ioctl_post_vbios_t * info); +static void xgi_proc_create(void); +static void xgi_proc_remove_all(struct proc_dir_entry *); +static void xgi_proc_remove(void); + +/* xgi_kern_ functions, interfaces used by linux kernel */ +int xgi_kern_probe(struct pci_dev *, const struct pci_device_id *); + +unsigned int xgi_kern_poll(struct file *, poll_table *); +int xgi_kern_ioctl(struct inode *, struct file *, unsigned int, unsigned long); +int xgi_kern_mmap(struct file *, struct vm_area_struct *); +int xgi_kern_open(struct inode *, struct file *); +int xgi_kern_release(struct inode *inode, struct file *filp); + +void xgi_kern_vma_open(struct vm_area_struct *vma); +void xgi_kern_vma_release(struct vm_area_struct *vma); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 1)) +struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma, + unsigned long address, int *type); +#else +struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma, + unsigned long address, int write_access); +#endif + +int xgi_kern_read_card_info(char *, char **, off_t off, int, int *, void *); +int xgi_kern_read_status(char *, char **, off_t off, int, int *, void *); +int xgi_kern_read_pcie_info(char *, char **, off_t off, int, int *, void *); +int xgi_kern_read_version(char *, char **, off_t off, int, int *, void *); + +int xgi_kern_ctl_open(struct inode *, struct file *); +int xgi_kern_ctl_close(struct inode *, struct file *); +unsigned int xgi_kern_ctl_poll(struct file *, poll_table *); + +void xgi_kern_isr_bh(unsigned long); +irqreturn_t xgi_kern_isr(int, void *, struct pt_regs *); + +static void xgi_lock_init(xgi_info_t * info); + +#if defined(XGI_PM_SUPPORT_ACPI) +int xgi_kern_acpi_standby(struct pci_dev *, u32); +int xgi_kern_acpi_resume(struct pci_dev *); +#endif + +/* + * verify access to pci config space wasn't disabled behind our back + * unfortunately, XFree86 enables/disables memory access in pci config space at + * various times (such as restoring initial pci config space settings during vt + * switches or when doing mulicard). As a result, all of our register accesses + * are garbage at this point. add a check to see if access was disabled and + * reenable any such access. + */ +#define XGI_CHECK_PCI_CONFIG(xgi) \ + xgi_check_pci_config(xgi, __LINE__) + +static inline void xgi_check_pci_config(xgi_info_t * info, int line) +{ + unsigned short cmd, flag = 0; + + // don't do this on the control device, only the actual devices + if (info->flags & XGI_FLAG_CONTROL) + return; + + pci_read_config_word(info->dev, PCI_COMMAND, &cmd); + if (!(cmd & PCI_COMMAND_MASTER)) { + XGI_INFO("restoring bus mastering! (%d)\n", line); + cmd |= PCI_COMMAND_MASTER; + flag = 1; + } + + if (!(cmd & PCI_COMMAND_MEMORY)) { + XGI_INFO("restoring MEM access! 
(%d)\n", line); + cmd |= PCI_COMMAND_MEMORY; + flag = 1; + } + + if (flag) + pci_write_config_word(info->dev, PCI_COMMAND, cmd); +} + +static int xgi_post_vbios(xgi_ioctl_post_vbios_t * info) +{ + return 1; +} + +/* + * struct pci_device_id { + * unsigned int vendor, device; // Vendor and device ID or PCI_ANY_ID + * unsigned int subvendor, subdevice; // Subsystem ID's or PCI_ANY_ID + * unsigned int class, class_mask; // (class,subclass,prog-if) triplet + * unsigned long driver_data; // Data private to the driver + * }; + */ + +static struct pci_device_id xgi_dev_table[] = { + { + .vendor = PCI_VENDOR_ID_XGI, + .device = PCI_ANY_ID, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .class = (PCI_CLASS_DISPLAY_VGA << 8), + .class_mask = ~0, + }, + {} +}; + +/* + * #define MODULE_DEVICE_TABLE(type,name) \ + * MODULE_GENERIC_TABLE(type##_device,name) + */ +MODULE_DEVICE_TABLE(pci, xgi_dev_table); + +/* + * struct pci_driver { + * struct list_head node; + * char *name; + * const struct pci_device_id *id_table; // NULL if wants all devices + * int (*probe)(struct pci_dev *dev, const struct pci_device_id *id); // New device inserted + * void (*remove)(struct pci_dev *dev); // Device removed (NULL if not a hot-plug capable driver) + * int (*save_state)(struct pci_dev *dev, u32 state); // Save Device Context + * int (*suspend)(struct pci_dev *dev, u32 state); // Device suspended + * int (*resume)(struct pci_dev *dev); // Device woken up + * int (*enable_wake)(struct pci_dev *dev, u32 state, int enable); // Enable wake event + * }; + */ +static struct pci_driver xgi_pci_driver = { + .name = "xgi", + .id_table = xgi_dev_table, + .probe = xgi_kern_probe, +#if defined(XGI_SUPPORT_ACPI) + .suspend = xgi_kern_acpi_standby, + .resume = xgi_kern_acpi_resume, +#endif +}; + +/* + * find xgi devices and set initial state + */ +int xgi_kern_probe(struct pci_dev *dev, const struct pci_device_id *id_table) +{ + xgi_info_t *info; + + if ((dev->vendor != PCI_VENDOR_ID_XGI) + || (dev->class != (PCI_CLASS_DISPLAY_VGA << 8))) { + return -1; + } + + if (xgi_num_devices == XGI_MAX_DEVICES) { + XGI_INFO("maximum device number (%d) reached!\n", + xgi_num_devices); + return -1; + } + + /* enable io, mem, and bus-mastering in pci config space */ + if (pci_enable_device(dev) != 0) { + XGI_INFO("pci_enable_device failed, aborting\n"); + return -1; + } + + XGI_INFO("maximum device number (%d) reached \n", xgi_num_devices); + + pci_set_master(dev); + + info = &xgi_devices[xgi_num_devices]; + info->dev = dev; + info->vendor_id = dev->vendor; + info->device_id = dev->device; + info->bus = dev->bus->number; + info->slot = PCI_SLOT((dev)->devfn); + + xgi_lock_init(info); + + info->mmio.base = XGI_PCI_RESOURCE_START(dev, 1); + info->mmio.size = XGI_PCI_RESOURCE_SIZE(dev, 1); + + /* check IO region */ + if (!request_mem_region(info->mmio.base, info->mmio.size, "xgi")) { + XGI_ERROR("cannot reserve MMIO memory\n"); + goto error_disable_dev; + } + + XGI_INFO("info->mmio.base: 0x%lx \n", info->mmio.base); + XGI_INFO("info->mmio.size: 0x%lx \n", info->mmio.size); + + info->mmio.vbase = (unsigned char *)ioremap_nocache(info->mmio.base, + info->mmio.size); + if (!info->mmio.vbase) { + release_mem_region(info->mmio.base, info->mmio.size); + XGI_ERROR("info->mmio.vbase failed\n"); + goto error_disable_dev; + } + xgi_enable_mmio(info); + + //xgi_enable_ge(info); + + XGI_INFO("info->mmio.vbase: 0x%p \n", info->mmio.vbase); + + info->fb.base = XGI_PCI_RESOURCE_START(dev, 0); + info->fb.size = XGI_PCI_RESOURCE_SIZE(dev, 0); + + 
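+	/* BAR 1 provides the MMIO window (mapped above) and BAR 0 the frame
+	 * buffer aperture.  The BAR-reported fb size is only logged; it is
+	 * overridden just below by bIn3cf(0x54), which apparently reports the
+	 * installed video memory in 8 MiB units. */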
XGI_INFO("info->fb.base: 0x%lx \n", info->fb.base); + XGI_INFO("info->fb.size: 0x%lx \n", info->fb.size); + + info->fb.size = bIn3cf(0x54) * 8 * 1024 * 1024; + XGI_INFO("info->fb.size: 0x%lx \n", info->fb.size); + + /* check frame buffer region + if (!request_mem_region(info->fb.base, info->fb.size, "xgi")) + { + release_mem_region(info->mmio.base, info->mmio.size); + XGI_ERROR("cannot reserve frame buffer memory\n"); + goto error_disable_dev; + } + + info->fb.vbase = (unsigned char *)ioremap_nocache(info->fb.base, + info->fb.size); + + if (!info->fb.vbase) + { + release_mem_region(info->mmio.base, info->mmio.size); + release_mem_region(info->fb.base, info->fb.size); + XGI_ERROR("info->fb.vbase failed\n"); + goto error_disable_dev; + } + */ + info->fb.vbase = NULL; + XGI_INFO("info->fb.vbase: 0x%p \n", info->fb.vbase); + + info->irq = dev->irq; + + /* check common error condition */ + if (info->irq == 0) { + XGI_ERROR("Can't find an IRQ for your XGI card! \n"); + goto error_zero_dev; + } + XGI_INFO("info->irq: %lx \n", info->irq); + + //xgi_enable_dvi_interrupt(info); + + /* sanity check the IO apertures */ + if ((info->mmio.base == 0) || (info->mmio.size == 0) + || (info->fb.base == 0) || (info->fb.size == 0)) { + XGI_ERROR("The IO regions for your XGI card are invalid.\n"); + + if ((info->mmio.base == 0) || (info->mmio.size == 0)) { + XGI_ERROR("mmio appears to be wrong: 0x%lx 0x%lx\n", + info->mmio.base, info->mmio.size); + } + + if ((info->fb.base == 0) || (info->fb.size == 0)) { + XGI_ERROR + ("frame buffer appears to be wrong: 0x%lx 0x%lx\n", + info->fb.base, info->fb.size); + } + + goto error_zero_dev; + } + //xgi_num_devices++; + + return 0; + + error_zero_dev: + release_mem_region(info->fb.base, info->fb.size); + release_mem_region(info->mmio.base, info->mmio.size); + + error_disable_dev: + pci_disable_device(dev); + return -1; + +} + +/* + * vma operations... + * this is only called when the vmas are duplicated. this + * appears to only happen when the process is cloned to create + * a new process, and not when the process is threaded. + * + * increment the usage count for the physical pages, so when + * this clone unmaps the mappings, the pages are not + * deallocated under the original process. + */ +struct vm_operations_struct xgi_vm_ops = { + .open = xgi_kern_vma_open, + .close = xgi_kern_vma_release, + .nopage = xgi_kern_vma_nopage, +}; + +void xgi_kern_vma_open(struct vm_area_struct *vma) +{ + XGI_INFO("VM: vma_open for 0x%lx - 0x%lx, offset 0x%lx\n", + vma->vm_start, vma->vm_end, XGI_VMA_OFFSET(vma)); + + if (XGI_VMA_PRIVATE(vma)) { + xgi_pcie_block_t *block = + (xgi_pcie_block_t *) XGI_VMA_PRIVATE(vma); + XGI_ATOMIC_INC(block->use_count); + } +} + +void xgi_kern_vma_release(struct vm_area_struct *vma) +{ + XGI_INFO("VM: vma_release for 0x%lx - 0x%lx, offset 0x%lx\n", + vma->vm_start, vma->vm_end, XGI_VMA_OFFSET(vma)); + + if (XGI_VMA_PRIVATE(vma)) { + xgi_pcie_block_t *block = + (xgi_pcie_block_t *) XGI_VMA_PRIVATE(vma); + XGI_ATOMIC_DEC(block->use_count); + + /* + * if use_count is down to 0, the kernel virtual mapping was freed + * but the underlying physical pages were not, we need to clear the + * bit and free the physical pages. 
+ */ + if (XGI_ATOMIC_READ(block->use_count) == 0) { + // Need TO Finish + XGI_VMA_PRIVATE(vma) = NULL; + } + } +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 1)) +struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma, + unsigned long address, int *type) +{ + xgi_pcie_block_t *block = (xgi_pcie_block_t *) XGI_VMA_PRIVATE(vma); + struct page *page = NOPAGE_SIGBUS; + unsigned long offset = 0; + unsigned long page_addr = 0; +/* + XGI_INFO("VM: mmap([0x%lx-0x%lx] off=0x%lx) address: 0x%lx \n", + vma->vm_start, + vma->vm_end, + XGI_VMA_OFFSET(vma), + address); +*/ + offset = (address - vma->vm_start) + XGI_VMA_OFFSET(vma); + + offset = offset - block->bus_addr; + + offset >>= PAGE_SHIFT; + + page_addr = block->page_table[offset].virt_addr; + + if (xgi_temp) { + XGI_INFO("block->bus_addr: 0x%lx block->hw_addr: 0x%lx" + "block->page_count: 0x%lx block->page_order: 0x%lx" + "block->page_table[0x%lx].virt_addr: 0x%lx\n", + block->bus_addr, block->hw_addr, + block->page_count, block->page_order, + offset, block->page_table[offset].virt_addr); + xgi_temp = 0; + } + + if (!page_addr) + goto out; /* hole or end-of-file */ + page = virt_to_page(page_addr); + + /* got it, now increment the count */ + get_page(page); + out: + return page; + +} +#else +struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma, + unsigned long address, int write_access) +{ + xgi_pcie_block_t *block = (xgi_pcie_block_t *) XGI_VMA_PRIVATE(vma); + struct page *page = NOPAGE_SIGBUS; + unsigned long offset = 0; + unsigned long page_addr = 0; +/* + XGI_INFO("VM: mmap([0x%lx-0x%lx] off=0x%lx) address: 0x%lx \n", + vma->vm_start, + vma->vm_end, + XGI_VMA_OFFSET(vma), + address); +*/ + offset = (address - vma->vm_start) + XGI_VMA_OFFSET(vma); + + offset = offset - block->bus_addr; + + offset >>= PAGE_SHIFT; + + page_addr = block->page_table[offset].virt_addr; + + if (xgi_temp) { + XGI_INFO("block->bus_addr: 0x%lx block->hw_addr: 0x%lx" + "block->page_count: 0x%lx block->page_order: 0x%lx" + "block->page_table[0x%lx].virt_addr: 0x%lx\n", + block->bus_addr, block->hw_addr, + block->page_count, block->page_order, + offset, block->page_table[offset].virt_addr); + xgi_temp = 0; + } + + if (!page_addr) + goto out; /* hole or end-of-file */ + page = virt_to_page(page_addr); + + /* got it, now increment the count */ + get_page(page); + out: + return page; +} +#endif + +#if 0 +static struct file_operations xgi_fops = { + /* owner: THIS_MODULE, */ + poll:xgi_kern_poll, + ioctl:xgi_kern_ioctl, + mmap:xgi_kern_mmap, + open:xgi_kern_open, + release:xgi_kern_release, +}; +#endif + +static struct file_operations xgi_fops = { + .owner = THIS_MODULE, + .poll = xgi_kern_poll, + .ioctl = xgi_kern_ioctl, + .mmap = xgi_kern_mmap, + .open = xgi_kern_open, + .release = xgi_kern_release, +}; + +static xgi_file_private_t *xgi_alloc_file_private(void) +{ + xgi_file_private_t *fp; + + XGI_KMALLOC(fp, sizeof(xgi_file_private_t)); + if (!fp) + return NULL; + + memset(fp, 0, sizeof(xgi_file_private_t)); + + /* initialize this file's event queue */ + init_waitqueue_head(&fp->wait_queue); + + xgi_init_lock(fp->fp_lock); + + return fp; +} + +static void xgi_free_file_private(xgi_file_private_t * fp) +{ + if (fp == NULL) + return; + + XGI_KFREE(fp, sizeof(xgi_file_private_t)); +} + +int xgi_kern_open(struct inode *inode, struct file *filp) +{ + xgi_info_t *info = NULL; + int dev_num; + int result = 0, status; + + /* + * the type and num values are only valid if we are not using devfs. 
+ * However, since we use them to retrieve the device pointer, we + * don't need them with devfs as filp->private_data is already + * initialized + */ + filp->private_data = xgi_alloc_file_private(); + if (filp->private_data == NULL) + return -ENOMEM; + + XGI_INFO("filp->private_data %p\n", filp->private_data); + /* + * for control device, just jump to its open routine + * after setting up the private data + */ + if (XGI_IS_CONTROL_DEVICE(inode)) + return xgi_kern_ctl_open(inode, filp); + + /* what device are we talking about? */ + dev_num = XGI_DEVICE_NUMBER(inode); + if (dev_num >= XGI_MAX_DEVICES) { + xgi_free_file_private(filp->private_data); + filp->private_data = NULL; + return -ENODEV; + } + + info = &xgi_devices[dev_num]; + + XGI_INFO("Jong-xgi_kern_open on device %d\n", dev_num); + + xgi_down(info->info_sem); + XGI_CHECK_PCI_CONFIG(info); + + XGI_INFO_FROM_FP(filp) = info; + + /* + * map the memory and allocate isr on first open + */ + + if (!(info->flags & XGI_FLAG_OPEN)) { + XGI_INFO("info->flags & XGI_FLAG_OPEN \n"); + + if (info->device_id == 0) { + XGI_INFO("open of nonexistent device %d\n", dev_num); + result = -ENXIO; + goto failed; + } + + /* initialize struct irqaction */ + status = request_irq(info->irq, xgi_kern_isr, + SA_INTERRUPT | SA_SHIRQ, "xgi", + (void *)info); + if (status != 0) { + if (info->irq && (status == -EBUSY)) { + XGI_ERROR + ("Tried to get irq %d, but another driver", + (unsigned int)info->irq); + XGI_ERROR("has it and is not sharing it.\n"); + } + XGI_ERROR("isr request failed 0x%x\n", status); + result = -EIO; + goto failed; + } + + /* + * #define DECLARE_TASKLET(name, func, data) \ + * struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data } + */ + info->tasklet.func = xgi_kern_isr_bh; + info->tasklet.data = (unsigned long)info; + tasklet_enable(&info->tasklet); + + /* Alloc 1M bytes for cmdbuffer which is flush2D batch array */ + xgi_cmdlist_initialize(info, 0x100000); + + info->flags |= XGI_FLAG_OPEN; + } + + XGI_ATOMIC_INC(info->use_count); + + failed: + xgi_up(info->info_sem); + + if ((result) && filp->private_data) { + xgi_free_file_private(filp->private_data); + filp->private_data = NULL; + } + + return result; +} + +int xgi_kern_release(struct inode *inode, struct file *filp) +{ + xgi_info_t *info = XGI_INFO_FROM_FP(filp); + + XGI_CHECK_PCI_CONFIG(info); + + /* + * for control device, just jump to its open routine + * after setting up the private data + */ + if (XGI_IS_CONTROL_DEVICE(inode)) + return xgi_kern_ctl_close(inode, filp); + + XGI_INFO("Jong-xgi_kern_release on device %d\n", + XGI_DEVICE_NUMBER(inode)); + + xgi_down(info->info_sem); + if (XGI_ATOMIC_DEC_AND_TEST(info->use_count)) { + + /* + * The usage count for this device has dropped to zero, it can be shut + * down safely; disable its interrupts. + */ + + /* + * Disable this device's tasklet to make sure that no bottom half will + * run with undefined device state. + */ + tasklet_disable(&info->tasklet); + + /* + * Free the IRQ, which may block until all pending interrupt processing + * has completed. 
+ */ + free_irq(info->irq, (void *)info); + + xgi_cmdlist_cleanup(info); + + /* leave INIT flag alone so we don't reinit every time */ + info->flags &= ~XGI_FLAG_OPEN; + } + + xgi_up(info->info_sem); + + if (FILE_PRIVATE(filp)) { + xgi_free_file_private(FILE_PRIVATE(filp)); + FILE_PRIVATE(filp) = NULL; + } + + return 0; +} + +int xgi_kern_mmap(struct file *filp, struct vm_area_struct *vma) +{ + //struct inode *inode = INODE_FROM_FP(filp); + xgi_info_t *info = XGI_INFO_FROM_FP(filp); + xgi_pcie_block_t *block; + int pages = 0; + unsigned long prot; + + XGI_INFO("Jong-VM: mmap([0x%lx-0x%lx] off=0x%lx)\n", + vma->vm_start, vma->vm_end, XGI_VMA_OFFSET(vma)); + + XGI_CHECK_PCI_CONFIG(info); + + if (XGI_MASK_OFFSET(vma->vm_start) + || XGI_MASK_OFFSET(vma->vm_end)) { + XGI_ERROR("VM: bad mmap range: %lx - %lx\n", + vma->vm_start, vma->vm_end); + return -ENXIO; + } + + pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; + + vma->vm_ops = &xgi_vm_ops; + + /* XGI IO(reg) space */ + if (IS_IO_OFFSET + (info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start)) { + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + if (XGI_REMAP_PAGE_RANGE(vma->vm_start, + XGI_VMA_OFFSET(vma), + vma->vm_end - vma->vm_start, + vma->vm_page_prot)) + return -EAGAIN; + + /* mark it as IO so that we don't dump it on core dump */ + vma->vm_flags |= VM_IO; + XGI_INFO("VM: mmap io space \n"); + } + /* XGI fb space */ + /* Jong 06/14/2006; moved behind PCIE or modify IS_FB_OFFSET */ + else if (IS_FB_OFFSET + (info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start)) { + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + if (XGI_REMAP_PAGE_RANGE(vma->vm_start, + XGI_VMA_OFFSET(vma), + vma->vm_end - vma->vm_start, + vma->vm_page_prot)) + return -EAGAIN; + + // mark it as IO so that we don't dump it on core dump + vma->vm_flags |= VM_IO; + XGI_INFO("VM: mmap fb space \n"); + } + /* PCIE allocator */ + /* XGI_VMA_OFFSET(vma) is offset based on pcie.base (HW address space) */ + else if (IS_PCIE_OFFSET + (info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start)) { + xgi_down(info->pcie_sem); + + block = + (xgi_pcie_block_t *) xgi_find_pcie_block(info, + XGI_VMA_OFFSET + (vma)); + + if (block == NULL) { + XGI_ERROR("couldn't find pre-allocated PCIE memory!\n"); + xgi_up(info->pcie_sem); + return -EAGAIN; + } + + if (block->page_count != pages) { + XGI_ERROR + ("pre-allocated PCIE memory has wrong number of pages!\n"); + xgi_up(info->pcie_sem); + return -EAGAIN; + } + + vma->vm_private_data = block; + XGI_ATOMIC_INC(block->use_count); + xgi_up(info->pcie_sem); + + /* + * prevent the swapper from swapping it out + * mark the memory i/o so the buffers aren't + * dumped on core dumps */ + vma->vm_flags |= (VM_LOCKED | VM_IO); + + /* un-cached */ + prot = pgprot_val(vma->vm_page_prot); + /* + if (boot_cpu_data.x86 > 3) + prot |= _PAGE_PCD | _PAGE_PWT; + */ + vma->vm_page_prot = __pgprot(prot); + + XGI_INFO("VM: mmap pcie space \n"); + } +#if 0 + else if (IS_FB_OFFSET + (info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start)) { + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + if (XGI_REMAP_PAGE_RANGE(vma->vm_start, + XGI_VMA_OFFSET(vma), + vma->vm_end - vma->vm_start, + vma->vm_page_prot)) + return -EAGAIN; + + // mark it as IO so that we don't dump it on core dump + vma->vm_flags |= VM_IO; + XGI_INFO("VM: mmap fb space \n"); + } +#endif + else { + vma->vm_flags |= (VM_IO | VM_LOCKED); + XGI_ERROR("VM: mmap wrong range \n"); + } + + vma->vm_file = filp; + + return 0; +} + +unsigned int xgi_kern_poll(struct file 
*filp, struct poll_table_struct *wait)
+{
+	xgi_file_private_t *fp;
+	xgi_info_t *info;
+	unsigned int mask = 0;
+	unsigned long eflags;
+
+	info = XGI_INFO_FROM_FP(filp);
+
+	if (info->device_number == XGI_CONTROL_DEVICE_NUMBER)
+		return xgi_kern_ctl_poll(filp, wait);
+
+	fp = XGI_GET_FP(filp);
+
+	if (!(filp->f_flags & O_NONBLOCK)) {
+		/* add us to the list */
+		poll_wait(filp, &fp->wait_queue, wait);
+	}
+
+	xgi_lock_irqsave(fp->fp_lock, eflags);
+
+	/* wake the user on any event */
+	if (fp->num_events) {
+		XGI_INFO("Hey, an event occurred!\n");
+		/*
+		 * trigger the client, when they grab the event,
+		 * we'll decrement the event count
+		 */
+		mask |= (POLLPRI | POLLIN);
+	}
+	xgi_unlock_irqsave(fp->fp_lock, eflags);
+
+	return mask;
+}
+
+int xgi_kern_ioctl(struct inode *inode, struct file *filp,
+		   unsigned int cmd, unsigned long arg)
+{
+	xgi_info_t *info;
+	xgi_mem_alloc_t *alloc = NULL;
+
+	int status = 0;
+	void *arg_copy;
+	int arg_size;
+	int err = 0;
+
+	info = XGI_INFO_FROM_FP(filp);
+
+	XGI_INFO("Jong-ioctl(0x%x, 0x%x, 0x%lx, 0x%x)\n", _IOC_TYPE(cmd),
+		 _IOC_NR(cmd), arg, _IOC_SIZE(cmd));
+	/*
+	 * extract the type and number bitfields, and don't decode
+	 * wrong cmds: return ENOTTY (inappropriate ioctl) before access_ok()
+	 */
+	if (_IOC_TYPE(cmd) != XGI_IOCTL_MAGIC)
+		return -ENOTTY;
+	if (_IOC_NR(cmd) > XGI_IOCTL_MAXNR)
+		return -ENOTTY;
+
+	/*
+	 * the direction is a bitmask, and VERIFY_WRITE catches R/W
+	 * transfers. `Type' is user-oriented, while
+	 * access_ok is kernel-oriented, so the concept of "read" and
+	 * "write" is reversed
+	 */
+	if (_IOC_DIR(cmd) & _IOC_READ) {
+		err = !access_ok(VERIFY_WRITE, (void *)arg, _IOC_SIZE(cmd));
+	} else if (_IOC_DIR(cmd) & _IOC_WRITE) {
+		err = !access_ok(VERIFY_READ, (void *)arg, _IOC_SIZE(cmd));
+	}
+	if (err)
+		return -EFAULT;
+
+	XGI_CHECK_PCI_CONFIG(info);
+
+	arg_size = _IOC_SIZE(cmd);
+	XGI_KMALLOC(arg_copy, arg_size);
+	if (arg_copy == NULL) {
+		XGI_ERROR("failed to allocate ioctl memory\n");
+		return -ENOMEM;
+	}
+
+	/* Jong 05/25/2006 */
+	/* copy_from_user(arg_copy, (void *)arg, arg_size); */
+	if (copy_from_user(arg_copy, (void *)arg, arg_size)) {
+		XGI_ERROR("failed to copyin ioctl data\n");
+		XGI_INFO("Jong-copy_from_user-fail! \n");
+		XGI_KFREE(arg_copy, arg_size);
+		return -EFAULT;
+	} else
+		XGI_INFO("Jong-copy_from_user-OK! 
\n"); + + alloc = (xgi_mem_alloc_t *) arg_copy; + XGI_INFO("Jong-succeeded in copy_from_user 0x%lx, 0x%x bytes.\n", arg, + arg_size); + + switch (_IOC_NR(cmd)) { + case XGI_ESC_DEVICE_INFO: + XGI_INFO("Jong-xgi_ioctl_get_device_info \n"); + xgi_get_device_info(info, (struct xgi_chip_info_s *)arg_copy); + break; + case XGI_ESC_POST_VBIOS: + XGI_INFO("Jong-xgi_ioctl_post_vbios \n"); + break; + case XGI_ESC_FB_ALLOC: + XGI_INFO("Jong-xgi_ioctl_fb_alloc \n"); + xgi_fb_alloc(info, (struct xgi_mem_req_s *)arg_copy, alloc); + break; + case XGI_ESC_FB_FREE: + XGI_INFO("Jong-xgi_ioctl_fb_free \n"); + xgi_fb_free(info, *(unsigned long *)arg_copy); + break; + case XGI_ESC_MEM_COLLECT: + XGI_INFO("Jong-xgi_ioctl_mem_collect \n"); + xgi_mem_collect(info, (unsigned int *)arg_copy); + break; + case XGI_ESC_PCIE_ALLOC: + XGI_INFO("Jong-xgi_ioctl_pcie_alloc \n"); + xgi_pcie_alloc(info, ((xgi_mem_req_t *) arg_copy)->size, + ((xgi_mem_req_t *) arg_copy)->owner, alloc); + break; + case XGI_ESC_PCIE_FREE: + XGI_INFO("Jong-xgi_ioctl_pcie_free: bus_addr = 0x%lx \n", + *((unsigned long *)arg_copy)); + xgi_pcie_free(info, *((unsigned long *)arg_copy)); + break; + case XGI_ESC_PCIE_CHECK: + XGI_INFO("Jong-xgi_pcie_heap_check \n"); + xgi_pcie_heap_check(); + break; + case XGI_ESC_GET_SCREEN_INFO: + XGI_INFO("Jong-xgi_get_screen_info \n"); + xgi_get_screen_info(info, (struct xgi_screen_info_s *)arg_copy); + break; + case XGI_ESC_PUT_SCREEN_INFO: + XGI_INFO("Jong-xgi_put_screen_info \n"); + xgi_put_screen_info(info, (struct xgi_screen_info_s *)arg_copy); + break; + case XGI_ESC_MMIO_INFO: + XGI_INFO("Jong-xgi_ioctl_get_mmio_info \n"); + xgi_get_mmio_info(info, (struct xgi_mmio_info_s *)arg_copy); + break; + case XGI_ESC_GE_RESET: + XGI_INFO("Jong-xgi_ioctl_ge_reset \n"); + xgi_ge_reset(info); + break; + case XGI_ESC_SAREA_INFO: + XGI_INFO("Jong-xgi_ioctl_sarea_info \n"); + xgi_sarea_info(info, (struct xgi_sarea_info_s *)arg_copy); + break; + case XGI_ESC_DUMP_REGISTER: + XGI_INFO("Jong-xgi_ioctl_dump_register \n"); + xgi_dump_register(info); + break; + case XGI_ESC_DEBUG_INFO: + XGI_INFO("Jong-xgi_ioctl_restore_registers \n"); + xgi_restore_registers(info); + //xgi_write_pcie_mem(info, (struct xgi_mem_req_s *) arg_copy); + //xgi_read_pcie_mem(info, (struct xgi_mem_req_s *) arg_copy); + break; + case XGI_ESC_SUBMIT_CMDLIST: + XGI_INFO("Jong-xgi_ioctl_submit_cmdlist \n"); + xgi_submit_cmdlist(info, (xgi_cmd_info_t *) arg_copy); + break; + case XGI_ESC_TEST_RWINKERNEL: + XGI_INFO("Jong-xgi_test_rwinkernel \n"); + xgi_test_rwinkernel(info, *(unsigned long *)arg_copy); + break; + case XGI_ESC_STATE_CHANGE: + XGI_INFO("Jong-xgi_state_change \n"); + xgi_state_change(info, (xgi_state_info_t *) arg_copy); + break; + case XGI_ESC_CPUID: + XGI_INFO("Jong-XGI_ESC_CPUID \n"); + xgi_get_cpu_id((struct cpu_info_s *)arg_copy); + break; + default: + XGI_INFO("Jong-xgi_ioctl_default \n"); + status = -EINVAL; + break; + } + + if (copy_to_user((void *)arg, arg_copy, arg_size)) { + XGI_ERROR("failed to copyout ioctl data\n"); + XGI_INFO("Jong-copy_to_user-fail! \n"); + } else + XGI_INFO("Jong-copy_to_user-OK! 
\n"); + + XGI_KFREE(arg_copy, arg_size); + return status; +} + +/* + * xgi control driver operations defined here + */ +int xgi_kern_ctl_open(struct inode *inode, struct file *filp) +{ + xgi_info_t *info = &xgi_ctl_device; + + int rc = 0; + + XGI_INFO("Jong-xgi_kern_ctl_open\n"); + + xgi_down(info->info_sem); + info->device_number = XGI_CONTROL_DEVICE_NUMBER; + + /* save the xgi info in file->private_data */ + filp->private_data = info; + + if (XGI_ATOMIC_READ(info->use_count) == 0) { + init_waitqueue_head(&xgi_ctl_waitqueue); + } + + info->flags |= XGI_FLAG_OPEN + XGI_FLAG_CONTROL; + + XGI_ATOMIC_INC(info->use_count); + xgi_up(info->info_sem); + + return rc; +} + +int xgi_kern_ctl_close(struct inode *inode, struct file *filp) +{ + xgi_info_t *info = XGI_INFO_FROM_FP(filp); + + XGI_INFO("Jong-xgi_kern_ctl_close\n"); + + xgi_down(info->info_sem); + if (XGI_ATOMIC_DEC_AND_TEST(info->use_count)) { + info->flags = 0; + } + xgi_up(info->info_sem); + + if (FILE_PRIVATE(filp)) { + xgi_free_file_private(FILE_PRIVATE(filp)); + FILE_PRIVATE(filp) = NULL; + } + + return 0; +} + +unsigned int xgi_kern_ctl_poll(struct file *filp, poll_table * wait) +{ + //xgi_info_t *info = XGI_INFO_FROM_FP(filp);; + unsigned int ret = 0; + + if (!(filp->f_flags & O_NONBLOCK)) { + poll_wait(filp, &xgi_ctl_waitqueue, wait); + } + + return ret; +} + +/* + * xgi proc system + */ +static u8 xgi_find_pcie_capability(struct pci_dev *dev) +{ + u16 status; + u8 cap_ptr, cap_id; + + pci_read_config_word(dev, PCI_STATUS, &status); + status &= PCI_STATUS_CAP_LIST; + if (!status) + return 0; + + switch (dev->hdr_type) { + case PCI_HEADER_TYPE_NORMAL: + case PCI_HEADER_TYPE_BRIDGE: + pci_read_config_byte(dev, PCI_CAPABILITY_LIST, &cap_ptr); + break; + default: + return 0; + } + + do { + cap_ptr &= 0xFC; + pci_read_config_byte(dev, cap_ptr + PCI_CAP_LIST_ID, &cap_id); + pci_read_config_byte(dev, cap_ptr + PCI_CAP_LIST_NEXT, + &cap_ptr); + } while (cap_ptr && cap_id != 0xFF); + + return 0; +} + +static struct pci_dev *xgi_get_pci_device(xgi_info_t * info) +{ + struct pci_dev *dev; + + dev = XGI_PCI_GET_DEVICE(info->vendor_id, info->device_id, NULL); + while (dev) { + if (XGI_PCI_SLOT_NUMBER(dev) == info->slot + && XGI_PCI_BUS_NUMBER(dev) == info->bus) + return dev; + dev = XGI_PCI_GET_DEVICE(info->vendor_id, info->device_id, dev); + } + + return NULL; +} + +int xgi_kern_read_card_info(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + struct pci_dev *dev; + char *type; + int len = 0; + + xgi_info_t *info; + info = (xgi_info_t *) data; + + dev = xgi_get_pci_device(info); + if (!dev) + return 0; + + type = xgi_find_pcie_capability(dev) ? 
"PCIE" : "PCI"; + len += sprintf(page + len, "Card Type: \t %s\n", type); + + XGI_PCI_DEV_PUT(dev); + return len; +} + +int xgi_kern_read_version(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + int len = 0; + + len += sprintf(page + len, "XGI version: %s\n", "1.0"); + len += sprintf(page + len, "GCC version: %s\n", "3.0"); + + return len; +} + +int xgi_kern_read_pcie_info(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + return 0; +} + +int xgi_kern_read_status(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + return 0; +} + +static void xgi_proc_create(void) +{ +#ifdef CONFIG_PROC_FS + + struct pci_dev *dev; + int i = 0; + char name[6]; + + struct proc_dir_entry *entry; + struct proc_dir_entry *proc_xgi_pcie, *proc_xgi_cards; + + xgi_info_t *info; + xgi_info_t *xgi_max_devices; + + /* world readable directory */ + int flags = S_IFDIR | S_IRUGO | S_IXUGO; + + proc_xgi = create_proc_entry("xgi", flags, proc_root_driver); + if (!proc_xgi) + goto failed; + + proc_xgi_cards = create_proc_entry("cards", flags, proc_xgi); + if (!proc_xgi_cards) + goto failed; + + proc_xgi_pcie = create_proc_entry("pcie", flags, proc_xgi); + if (!proc_xgi_pcie) + goto failed; + + /* + * Set the module owner to ensure that the reference + * count reflects accesses to the proc files. + */ + proc_xgi->owner = THIS_MODULE; + proc_xgi_cards->owner = THIS_MODULE; + proc_xgi_pcie->owner = THIS_MODULE; + + xgi_max_devices = xgi_devices + XGI_MAX_DEVICES; + for (info = xgi_devices; info < xgi_max_devices; info++) { + if (info->device_id == 0) + break; + + /* world readable file */ + flags = S_IFREG | S_IRUGO; + + dev = xgi_get_pci_device(info); + if (!dev) + break; + + sprintf(name, "%d", i++); + entry = create_proc_entry(name, flags, proc_xgi_cards); + if (!entry) { + XGI_PCI_DEV_PUT(dev); + goto failed; + } + + entry->data = info; + entry->read_proc = xgi_kern_read_card_info; + entry->owner = THIS_MODULE; + + if (xgi_find_pcie_capability(dev)) { + entry = + create_proc_entry("status", flags, proc_xgi_pcie); + if (!entry) { + XGI_PCI_DEV_PUT(dev); + goto failed; + } + + entry->data = info; + entry->read_proc = xgi_kern_read_status; + entry->owner = THIS_MODULE; + + entry = create_proc_entry("card", flags, proc_xgi_pcie); + if (!entry) { + XGI_PCI_DEV_PUT(dev); + goto failed; + } + + entry->data = info; + entry->read_proc = xgi_kern_read_pcie_info; + entry->owner = THIS_MODULE; + } + + XGI_PCI_DEV_PUT(dev); + } + + entry = create_proc_entry("version", flags, proc_xgi); + if (!entry) + goto failed; + + entry->read_proc = xgi_kern_read_version; + entry->owner = THIS_MODULE; + + entry = create_proc_entry("host-bridge", flags, proc_xgi_pcie); + if (!entry) + goto failed; + + entry->data = NULL; + entry->read_proc = xgi_kern_read_pcie_info; + entry->owner = THIS_MODULE; + + return; + + failed: + XGI_ERROR("failed to create /proc entries!\n"); + xgi_proc_remove_all(proc_xgi); +#endif +} + +#ifdef CONFIG_PROC_FS +static void xgi_proc_remove_all(struct proc_dir_entry *entry) +{ + while (entry) { + struct proc_dir_entry *next = entry->next; + if (entry->subdir) + xgi_proc_remove_all(entry->subdir); + remove_proc_entry(entry->name, entry->parent); + if (entry == proc_xgi) + break; + entry = next; + } +} +#endif + +static void xgi_proc_remove(void) +{ +#ifdef CONFIG_PROC_FS + xgi_proc_remove_all(proc_xgi); +#endif +} + +/* + * driver receives an interrupt if someone waiting, then hand it off. 
+ */ +irqreturn_t xgi_kern_isr(int irq, void *dev_id, struct pt_regs *regs) +{ + xgi_info_t *info = (xgi_info_t *) dev_id; + u32 need_to_run_bottom_half = 0; + + //XGI_INFO("xgi_kern_isr \n"); + + //XGI_CHECK_PCI_CONFIG(info); + + //xgi_dvi_irq_handler(info); + + if (need_to_run_bottom_half) { + tasklet_schedule(&info->tasklet); + } + + return IRQ_HANDLED; +} + +void xgi_kern_isr_bh(unsigned long data) +{ + xgi_info_t *info = (xgi_info_t *) data; + + XGI_INFO("xgi_kern_isr_bh \n"); + + //xgi_dvi_irq_handler(info); + + XGI_CHECK_PCI_CONFIG(info); +} + +static void xgi_lock_init(xgi_info_t * info) +{ + if (info == NULL) + return; + + spin_lock_init(&info->info_lock); + + sema_init(&info->info_sem, 1); + sema_init(&info->fb_sem, 1); + sema_init(&info->pcie_sem, 1); + + XGI_ATOMIC_SET(info->use_count, 0); +} + +static void xgi_dev_init(xgi_info_t * info) +{ + struct pci_dev *pdev = NULL; + struct xgi_dev *dev; + int found = 0; + u16 pci_cmd; + + XGI_INFO("Enter xgi_dev_init \n"); + + //XGI_PCI_FOR_EACH_DEV(pdev) + { + for (dev = xgidev_list; dev->vendor; dev++) { + if ((dev->vendor == pdev->vendor) + && (dev->device == pdev->device)) { + XGI_INFO("dev->vendor = pdev->vendor= %x \n", + dev->vendor); + XGI_INFO("dev->device = pdev->device= %x \n", + dev->device); + + xgi_devices[found].device_id = pdev->device; + + pci_read_config_byte(pdev, PCI_REVISION_ID, + &xgi_devices[found]. + revision_id); + + XGI_INFO("PCI_REVISION_ID= %x \n", + xgi_devices[found].revision_id); + + pci_read_config_word(pdev, PCI_COMMAND, + &pci_cmd); + + XGI_INFO("PCI_COMMAND = %x \n", pci_cmd); + + break; + } + } + } +} + +/* + * Export to Linux Kernel + */ + +static int __init xgi_init_module(void) +{ + xgi_info_t *info = &xgi_devices[xgi_num_devices]; + int i, result; + + XGI_INFO("Jong-xgi kernel driver %s initializing\n", XGI_DRV_VERSION); + //SET_MODULE_OWNER(&xgi_fops); + + memset(xgi_devices, 0, sizeof(xgi_devices)); + + if (pci_register_driver(&xgi_pci_driver) < 0) { + pci_unregister_driver(&xgi_pci_driver); + XGI_ERROR("no XGI graphics adapter found\n"); + return -ENODEV; + } + + XGI_INFO("Jong-xgi_devices[%d].fb.base.: 0x%lx \n", xgi_num_devices, + xgi_devices[xgi_num_devices].fb.base); + XGI_INFO("Jong-xgi_devices[%d].fb.size.: 0x%lx \n", xgi_num_devices, + xgi_devices[xgi_num_devices].fb.size); + +/* Jong 07/27/2006; test for ubuntu */ +/* +#ifdef CONFIG_DEVFS_FS + + XGI_INFO("Jong-Use devfs \n"); + do + { + xgi_devfs_handles[0] = XGI_DEVFS_REGISTER("xgi", 0); + if (xgi_devfs_handles[0] == NULL) + { + result = -ENOMEM; + XGI_ERROR("devfs register failed\n"); + goto failed; + } + } while(0); + #else *//* no devfs, do it the "classic" way */ + + XGI_INFO("Jong-Use non-devfs \n"); + /* + * Register your major, and accept a dynamic number. This is the + * first thing to do, in order to avoid releasing other module's + * fops in scull_cleanup_module() + */ + result = XGI_REGISTER_CHRDEV(xgi_major, "xgi", &xgi_fops); + if (result < 0) { + XGI_ERROR("register chrdev failed\n"); + pci_unregister_driver(&xgi_pci_driver); + return result; + } + if (xgi_major == 0) + xgi_major = result; /* dynamic */ + + /* #endif *//* CONFIG_DEVFS_FS */ + + XGI_INFO("Jong-major number %d\n", xgi_major); + + /* instantiate tasklets */ + for (i = 0; i < XGI_MAX_DEVICES; i++) { + /* + * We keep one tasklet per card to avoid latency issues with more + * than one device; no two instances of a single tasklet are ever + * executed concurrently. 
+ */ + XGI_ATOMIC_SET(xgi_devices[i].tasklet.count, 1); + } + + /* init the xgi control device */ + { + xgi_info_t *info_ctl = &xgi_ctl_device; + xgi_lock_init(info_ctl); + } + + /* Init the resource manager */ + INIT_LIST_HEAD(&xgi_mempid_list); + if (!xgi_fb_heap_init(info)) { + XGI_ERROR("xgi_fb_heap_init() failed\n"); + result = -EIO; + goto failed; + } + + /* Init the resource manager */ + if (!xgi_pcie_heap_init(info)) { + XGI_ERROR("xgi_pcie_heap_init() failed\n"); + result = -EIO; + goto failed; + } + + /* create /proc/driver/xgi */ + xgi_proc_create(); + +#if defined(DEBUG) + inter_module_register("xgi_devices", THIS_MODULE, xgi_devices); +#endif + + return 0; + + failed: +#ifdef CONFIG_DEVFS_FS + XGI_DEVFS_REMOVE_CONTROL(); + XGI_DEVFS_REMOVE_DEVICE(xgi_num_devices); +#endif + + if (XGI_UNREGISTER_CHRDEV(xgi_major, "xgi") < 0) + XGI_ERROR("unregister xgi chrdev failed\n"); + + for (i = 0; i < xgi_num_devices; i++) { + if (xgi_devices[i].dev) { + release_mem_region(xgi_devices[i].fb.base, + xgi_devices[i].fb.size); + release_mem_region(xgi_devices[i].mmio.base, + xgi_devices[i].mmio.size); + } + } + + pci_unregister_driver(&xgi_pci_driver); + return result; + + return 1; +} + +void __exit xgi_exit_module(void) +{ + int i; + xgi_info_t *info, *max_devices; + +#ifdef CONFIG_DEVFS_FS + /* + XGI_DEVFS_REMOVE_CONTROL(); + for (i = 0; i < XGI_MAX_DEVICES; i++) + XGI_DEVFS_REMOVE_DEVICE(i); + */ + XGI_DEVFS_REMOVE_DEVICE(xgi_num_devices); +#endif + + if (XGI_UNREGISTER_CHRDEV(xgi_major, "xgi") < 0) + XGI_ERROR("unregister xgi chrdev failed\n"); + + XGI_INFO("Jong-unregister xgi chrdev scceeded\n"); + for (i = 0; i < XGI_MAX_DEVICES; i++) { + if (xgi_devices[i].dev) { + /* clean up the flush2D batch array */ + xgi_cmdlist_cleanup(&xgi_devices[i]); + + if (xgi_devices[i].fb.vbase != NULL) { + iounmap((void *)xgi_devices[i].fb.vbase); + xgi_devices[i].fb.vbase = NULL; + } + if (xgi_devices[i].mmio.vbase != NULL) { + iounmap((void *)xgi_devices[i].mmio.vbase); + xgi_devices[i].mmio.vbase = NULL; + } + //release_mem_region(xgi_devices[i].fb.base, xgi_devices[i].fb.size); + //XGI_INFO("release frame buffer mem region scceeded\n"); + + release_mem_region(xgi_devices[i].mmio.base, + xgi_devices[i].mmio.size); + XGI_INFO("release MMIO mem region scceeded\n"); + + xgi_fb_heap_cleanup(&xgi_devices[i]); + XGI_INFO("xgi_fb_heap_cleanup scceeded\n"); + + xgi_pcie_heap_cleanup(&xgi_devices[i]); + XGI_INFO("xgi_pcie_heap_cleanup scceeded\n"); + + XGI_PCI_DISABLE_DEVICE(xgi_devices[i].dev); + } + } + + pci_unregister_driver(&xgi_pci_driver); + + /* remove /proc/driver/xgi */ + xgi_proc_remove(); + +#if defined(DEBUG) + inter_module_unregister("xgi_devices"); +#endif +} + +module_init(xgi_init_module); +module_exit(xgi_exit_module); + +#if defined(XGI_PM_SUPPORT_ACPI) +int xgi_acpi_event(struct pci_dev *dev, u32 state) +{ + return 1; +} + +int xgi_kern_acpi_standby(struct pci_dev *dev, u32 state) +{ + return 1; +} + +int xgi_kern_acpi_resume(struct pci_dev *dev) +{ + return 1; +} +#endif + +MODULE_AUTHOR("Andrea Zhang "); +MODULE_DESCRIPTION("xgi kernel driver for xgi cards"); +MODULE_LICENSE("GPL"); diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 568a7af1..429719a7 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -1,364 +1,364 @@ - -/**************************************************************************** - * Copyright (C) 2003-2006 by XGI Technology, Taiwan. - * * - * All Rights Reserved. 
* - * * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation on the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - ***************************************************************************/ - -#ifndef _XGI_DRV_H_ -#define _XGI_DRV_H_ - -#define XGI_MAJOR_VERSION 0 -#define XGI_MINOR_VERSION 7 -#define XGI_PATCHLEVEL 5 - -#define XGI_DRV_VERSION "0.7.5" - -#ifndef XGI_DRV_NAME -#define XGI_DRV_NAME "xgi" -#endif - -/* - * xgi reserved major device number, Set this to 0 to - * request dynamic major number allocation. - */ -#ifndef XGI_DEV_MAJOR -#define XGI_DEV_MAJOR 0 -#endif - -#ifndef XGI_MAX_DEVICES -#define XGI_MAX_DEVICES 1 -#endif - -/* Jong 06/06/2006 */ -/* #define XGI_DEBUG */ - -#ifndef PCI_VENDOR_ID_XGI -/* -#define PCI_VENDOR_ID_XGI 0x1023 -*/ -#define PCI_VENDOR_ID_XGI 0x18CA - -#endif - -#ifndef PCI_DEVICE_ID_XP5 -#define PCI_DEVICE_ID_XP5 0x2200 -#endif - -#ifndef PCI_DEVICE_ID_XG47 -#define PCI_DEVICE_ID_XG47 0x0047 -#endif - -/* Macros to make printk easier */ -#define XGI_ERROR(fmt, arg...) \ - printk(KERN_ERR "[" XGI_DRV_NAME ":%s] *ERROR* " fmt, __FUNCTION__, ##arg) - -#define XGI_MEM_ERROR(area, fmt, arg...) \ - printk(KERN_ERR "[" XGI_DRV_NAME ":%s] *ERROR* " fmt, __FUNCTION__, ##arg) - -/* #define XGI_DEBUG */ - -#ifdef XGI_DEBUG -#define XGI_INFO(fmt, arg...) \ - printk(KERN_ALERT "[" XGI_DRV_NAME ":%s] " fmt, __FUNCTION__, ##arg) -/* printk(KERN_INFO "[" XGI_DRV_NAME ":%s] " fmt, __FUNCTION__, ##arg) */ -#else -#define XGI_INFO(fmt, arg...) 
do { } while (0) -#endif - -/* device name length; must be atleast 8 */ -#define XGI_DEVICE_NAME_LENGTH 40 - -/* need a fake device number for control device; just to flag it for msgs */ -#define XGI_CONTROL_DEVICE_NUMBER 100 - -typedef struct { - U32 base; // pcie base is different from fb base - U32 size; - U8 *vbase; -} xgi_aperture_t; - -typedef struct xgi_screen_info_s { - U32 scrn_start; - U32 scrn_xres; - U32 scrn_yres; - U32 scrn_bpp; - U32 scrn_pitch; -} xgi_screen_info_t; - -typedef struct xgi_sarea_info_s { - U32 bus_addr; - U32 size; -} xgi_sarea_info_t; - -typedef struct xgi_info_s { - struct pci_dev *dev; - int flags; - int device_number; - int bus; /* PCI config info */ - int slot; - int vendor_id; - U32 device_id; - U8 revision_id; - - /* physical characteristics */ - xgi_aperture_t mmio; - xgi_aperture_t fb; - xgi_aperture_t pcie; - xgi_screen_info_t scrn_info; - xgi_sarea_info_t sarea_info; - - /* look up table parameters */ - U32 *lut_base; - U32 lutPageSize; - U32 lutPageOrder; - U32 isLUTInLFB; - U32 sdfbPageSize; - - U32 pcie_config; - U32 pcie_status; - U32 irq; - - atomic_t use_count; - - /* keep track of any pending bottom halfes */ - struct tasklet_struct tasklet; - - spinlock_t info_lock; - - struct semaphore info_sem; - struct semaphore fb_sem; - struct semaphore pcie_sem; -} xgi_info_t; - -typedef struct xgi_ioctl_post_vbios { - U32 bus; - U32 slot; -} xgi_ioctl_post_vbios_t; - -typedef enum xgi_mem_location_s -{ - NON_LOCAL = 0, - LOCAL = 1, - INVALID = 0x7fffffff -} xgi_mem_location_t; - -enum PcieOwner -{ - PCIE_2D = 0, - /* - PCIE_3D should not begin with 1, - 2D alloc pcie memory will use owner 1. - */ - PCIE_3D = 11,/*vetex buf*/ - PCIE_3D_CMDLIST = 12, - PCIE_3D_SCRATCHPAD = 13, - PCIE_3D_TEXTURE = 14, - PCIE_INVALID = 0x7fffffff -}; - -typedef struct xgi_mem_req_s { - xgi_mem_location_t location; - unsigned long size; - unsigned long is_front; - enum PcieOwner owner; - unsigned long pid; -} xgi_mem_req_t; - -typedef struct xgi_mem_alloc_s { - xgi_mem_location_t location; - unsigned long size; - unsigned long bus_addr; - unsigned long hw_addr; - unsigned long pid; -} xgi_mem_alloc_t; - -typedef struct xgi_chip_info_s { - U32 device_id; - char device_name[32]; - U32 vendor_id; - U32 curr_display_mode; //Singe, DualView(Contained), MHS - U32 fb_size; - U32 sarea_bus_addr; - U32 sarea_size; -} xgi_chip_info_t; - -typedef struct xgi_opengl_cmd_s { - U32 cmd; -} xgi_opengl_cmd_t; - -typedef struct xgi_mmio_info_s { - xgi_opengl_cmd_t cmd_head; - void *mmioBase; - int size; -} xgi_mmio_info_t; - -typedef enum { - BTYPE_2D = 0, - BTYPE_3D = 1, - BTYPE_FLIP = 2, - BTYPE_CTRL = 3, - BTYPE_NONE = 0x7fffffff -}BATCH_TYPE; - -typedef struct xgi_cmd_info_s { - BATCH_TYPE _firstBeginType; - U32 _firstBeginAddr; - U32 _firstSize; - U32 _curDebugID; - U32 _lastBeginAddr; - U32 _beginCount; -} xgi_cmd_info_t; - -typedef struct xgi_state_info_s { - U32 _fromState; - U32 _toState; -} xgi_state_info_t; - -typedef struct cpu_info_s { - U32 _eax; - U32 _ebx; - U32 _ecx; - U32 _edx; -} cpu_info_t; - -typedef struct xgi_mem_pid_s { - struct list_head list; - xgi_mem_location_t location; - unsigned long bus_addr; - unsigned long pid; -} xgi_mem_pid_t; - -/* - * Ioctl definitions - */ - -#define XGI_IOCTL_MAGIC 'x' /* use 'x' as magic number */ - -#define XGI_IOCTL_BASE 0 -#define XGI_ESC_DEVICE_INFO (XGI_IOCTL_BASE + 0) -#define XGI_ESC_POST_VBIOS (XGI_IOCTL_BASE + 1) - -#define XGI_ESC_FB_INIT (XGI_IOCTL_BASE + 2) -#define XGI_ESC_FB_ALLOC (XGI_IOCTL_BASE + 3) -#define 
XGI_ESC_FB_FREE (XGI_IOCTL_BASE + 4) -#define XGI_ESC_PCIE_INIT (XGI_IOCTL_BASE + 5) -#define XGI_ESC_PCIE_ALLOC (XGI_IOCTL_BASE + 6) -#define XGI_ESC_PCIE_FREE (XGI_IOCTL_BASE + 7) -#define XGI_ESC_SUBMIT_CMDLIST (XGI_IOCTL_BASE + 8) -#define XGI_ESC_PUT_SCREEN_INFO (XGI_IOCTL_BASE + 9) -#define XGI_ESC_GET_SCREEN_INFO (XGI_IOCTL_BASE + 10) -#define XGI_ESC_GE_RESET (XGI_IOCTL_BASE + 11) -#define XGI_ESC_SAREA_INFO (XGI_IOCTL_BASE + 12) -#define XGI_ESC_DUMP_REGISTER (XGI_IOCTL_BASE + 13) -#define XGI_ESC_DEBUG_INFO (XGI_IOCTL_BASE + 14) -#define XGI_ESC_TEST_RWINKERNEL (XGI_IOCTL_BASE + 16) -#define XGI_ESC_STATE_CHANGE (XGI_IOCTL_BASE + 17) -#define XGI_ESC_MMIO_INFO (XGI_IOCTL_BASE + 18) -#define XGI_ESC_PCIE_CHECK (XGI_IOCTL_BASE + 19) -#define XGI_ESC_CPUID (XGI_IOCTL_BASE + 20) -#define XGI_ESC_MEM_COLLECT (XGI_IOCTL_BASE + 21) - -#define XGI_IOCTL_DEVICE_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_DEVICE_INFO, xgi_chip_info_t) -#define XGI_IOCTL_POST_VBIOS _IO(XGI_IOCTL_MAGIC, XGI_ESC_POST_VBIOS) - -#define XGI_IOCTL_FB_INIT _IO(XGI_IOCTL_MAGIC, XGI_ESC_FB_INIT) -#define XGI_IOCTL_FB_ALLOC _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_FB_ALLOC, xgi_mem_req_t) -#define XGI_IOCTL_FB_FREE _IOW(XGI_IOCTL_MAGIC, XGI_ESC_FB_FREE, unsigned long) - -#define XGI_IOCTL_PCIE_INIT _IO(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_INIT) -#define XGI_IOCTL_PCIE_ALLOC _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_ALLOC, xgi_mem_req_t) -#define XGI_IOCTL_PCIE_FREE _IOW(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_FREE, unsigned long) - -#define XGI_IOCTL_PUT_SCREEN_INFO _IOW(XGI_IOCTL_MAGIC, XGI_ESC_PUT_SCREEN_INFO, xgi_screen_info_t) -#define XGI_IOCTL_GET_SCREEN_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_GET_SCREEN_INFO, xgi_screen_info_t) - -#define XGI_IOCTL_GE_RESET _IO(XGI_IOCTL_MAGIC, XGI_ESC_GE_RESET) -#define XGI_IOCTL_SAREA_INFO _IOW(XGI_IOCTL_MAGIC, XGI_ESC_SAREA_INFO, xgi_sarea_info_t) -#define XGI_IOCTL_DUMP_REGISTER _IO(XGI_IOCTL_MAGIC, XGI_ESC_DUMP_REGISTER) -#define XGI_IOCTL_DEBUG_INFO _IO(XGI_IOCTL_MAGIC, XGI_ESC_DEBUG_INFO) -#define XGI_IOCTL_MMIO_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_MMIO_INFO, xgi_mmio_info_t) - -#define XGI_IOCTL_SUBMIT_CMDLIST _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_SUBMIT_CMDLIST, xgi_cmd_info_t) -#define XGI_IOCTL_TEST_RWINKERNEL _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_TEST_RWINKERNEL, unsigned long) -#define XGI_IOCTL_STATE_CHANGE _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_STATE_CHANGE, xgi_state_info_t) - -#define XGI_IOCTL_PCIE_CHECK _IO(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_CHECK) -#define XGI_IOCTL_CPUID _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_CPUID, cpu_info_t) -#define XGI_IOCTL_MAXNR 30 - -/* - * flags - */ -#define XGI_FLAG_OPEN 0x0001 -#define XGI_FLAG_NEEDS_POSTING 0x0002 -#define XGI_FLAG_WAS_POSTED 0x0004 -#define XGI_FLAG_CONTROL 0x0010 -#define XGI_FLAG_MAP_REGS_EARLY 0x0200 - -/* mmap(2) offsets */ - -#define IS_IO_OFFSET(info, offset, length) \ - (((offset) >= (info)->mmio.base) \ - && (((offset) + (length)) <= (info)->mmio.base + (info)->mmio.size)) - -/* Jong 06/14/2006 */ -/* (info)->fb.base is a base address for physical (bus) address space */ -/* what's the definition of offest? 
on physical (bus) address space or HW address space */ -/* Jong 06/15/2006; use HW address space */ -#define IS_FB_OFFSET(info, offset, length) \ - (((offset) >= 0) \ - && (((offset) + (length)) <= (info)->fb.size)) -#if 0 -#define IS_FB_OFFSET(info, offset, length) \ - (((offset) >= (info)->fb.base) \ - && (((offset) + (length)) <= (info)->fb.base + (info)->fb.size)) -#endif - -#define IS_PCIE_OFFSET(info, offset, length) \ - (((offset) >= (info)->pcie.base) \ - && (((offset) + (length)) <= (info)->pcie.base + (info)->pcie.size)) - -extern int xgi_fb_heap_init(xgi_info_t *info); -extern void xgi_fb_heap_cleanup(xgi_info_t *info); - -extern void xgi_fb_alloc(xgi_info_t *info, xgi_mem_req_t *req, xgi_mem_alloc_t *alloc); -extern void xgi_fb_free(xgi_info_t *info, unsigned long offset); -extern void xgi_mem_collect(xgi_info_t *info, unsigned int *pcnt); - -extern int xgi_pcie_heap_init(xgi_info_t *info); -extern void xgi_pcie_heap_cleanup(xgi_info_t *info); - -extern void xgi_pcie_alloc(xgi_info_t *info, unsigned long size, enum PcieOwner owner, xgi_mem_alloc_t *alloc); -extern void xgi_pcie_free(xgi_info_t *info, unsigned long offset); -extern void xgi_pcie_heap_check(void); -extern void *xgi_find_pcie_block(xgi_info_t *info, unsigned long address); -extern void *xgi_find_pcie_virt(xgi_info_t *info, unsigned long address); - -extern void xgi_read_pcie_mem(xgi_info_t *info, xgi_mem_req_t *req); -extern void xgi_write_pcie_mem(xgi_info_t *info, xgi_mem_req_t *req); - -extern void xgi_test_rwinkernel(xgi_info_t *info, unsigned long address); - -#endif + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * * + * All Rights Reserved. * + * * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + ***************************************************************************/ + +#ifndef _XGI_DRV_H_ +#define _XGI_DRV_H_ + +#define XGI_MAJOR_VERSION 0 +#define XGI_MINOR_VERSION 7 +#define XGI_PATCHLEVEL 5 + +#define XGI_DRV_VERSION "0.7.5" + +#ifndef XGI_DRV_NAME +#define XGI_DRV_NAME "xgi" +#endif + +/* + * xgi reserved major device number, Set this to 0 to + * request dynamic major number allocation. 
+ */ +#ifndef XGI_DEV_MAJOR +#define XGI_DEV_MAJOR 0 +#endif + +#ifndef XGI_MAX_DEVICES +#define XGI_MAX_DEVICES 1 +#endif + +/* Jong 06/06/2006 */ +/* #define XGI_DEBUG */ + +#ifndef PCI_VENDOR_ID_XGI +/* +#define PCI_VENDOR_ID_XGI 0x1023 +*/ +#define PCI_VENDOR_ID_XGI 0x18CA + +#endif + +#ifndef PCI_DEVICE_ID_XP5 +#define PCI_DEVICE_ID_XP5 0x2200 +#endif + +#ifndef PCI_DEVICE_ID_XG47 +#define PCI_DEVICE_ID_XG47 0x0047 +#endif + +/* Macros to make printk easier */ +#define XGI_ERROR(fmt, arg...) \ + printk(KERN_ERR "[" XGI_DRV_NAME ":%s] *ERROR* " fmt, __FUNCTION__, ##arg) + +#define XGI_MEM_ERROR(area, fmt, arg...) \ + printk(KERN_ERR "[" XGI_DRV_NAME ":%s] *ERROR* " fmt, __FUNCTION__, ##arg) + +/* #define XGI_DEBUG */ + +#ifdef XGI_DEBUG +#define XGI_INFO(fmt, arg...) \ + printk(KERN_ALERT "[" XGI_DRV_NAME ":%s] " fmt, __FUNCTION__, ##arg) +/* printk(KERN_INFO "[" XGI_DRV_NAME ":%s] " fmt, __FUNCTION__, ##arg) */ +#else +#define XGI_INFO(fmt, arg...) do { } while (0) +#endif + +/* device name length; must be atleast 8 */ +#define XGI_DEVICE_NAME_LENGTH 40 + +/* need a fake device number for control device; just to flag it for msgs */ +#define XGI_CONTROL_DEVICE_NUMBER 100 + +typedef struct { + U32 base; // pcie base is different from fb base + U32 size; + U8 *vbase; +} xgi_aperture_t; + +typedef struct xgi_screen_info_s { + U32 scrn_start; + U32 scrn_xres; + U32 scrn_yres; + U32 scrn_bpp; + U32 scrn_pitch; +} xgi_screen_info_t; + +typedef struct xgi_sarea_info_s { + U32 bus_addr; + U32 size; +} xgi_sarea_info_t; + +typedef struct xgi_info_s { + struct pci_dev *dev; + int flags; + int device_number; + int bus; /* PCI config info */ + int slot; + int vendor_id; + U32 device_id; + U8 revision_id; + + /* physical characteristics */ + xgi_aperture_t mmio; + xgi_aperture_t fb; + xgi_aperture_t pcie; + xgi_screen_info_t scrn_info; + xgi_sarea_info_t sarea_info; + + /* look up table parameters */ + U32 *lut_base; + U32 lutPageSize; + U32 lutPageOrder; + U32 isLUTInLFB; + U32 sdfbPageSize; + + U32 pcie_config; + U32 pcie_status; + U32 irq; + + atomic_t use_count; + + /* keep track of any pending bottom halfes */ + struct tasklet_struct tasklet; + + spinlock_t info_lock; + + struct semaphore info_sem; + struct semaphore fb_sem; + struct semaphore pcie_sem; +} xgi_info_t; + +typedef struct xgi_ioctl_post_vbios { + U32 bus; + U32 slot; +} xgi_ioctl_post_vbios_t; + +typedef enum xgi_mem_location_s { + NON_LOCAL = 0, + LOCAL = 1, + INVALID = 0x7fffffff +} xgi_mem_location_t; + +enum PcieOwner { + PCIE_2D = 0, + /* + PCIE_3D should not begin with 1, + 2D alloc pcie memory will use owner 1. 
+ */ + PCIE_3D = 11, /*vetex buf */ + PCIE_3D_CMDLIST = 12, + PCIE_3D_SCRATCHPAD = 13, + PCIE_3D_TEXTURE = 14, + PCIE_INVALID = 0x7fffffff +}; + +typedef struct xgi_mem_req_s { + xgi_mem_location_t location; + unsigned long size; + unsigned long is_front; + enum PcieOwner owner; + unsigned long pid; +} xgi_mem_req_t; + +typedef struct xgi_mem_alloc_s { + xgi_mem_location_t location; + unsigned long size; + unsigned long bus_addr; + unsigned long hw_addr; + unsigned long pid; +} xgi_mem_alloc_t; + +typedef struct xgi_chip_info_s { + U32 device_id; + char device_name[32]; + U32 vendor_id; + U32 curr_display_mode; //Singe, DualView(Contained), MHS + U32 fb_size; + U32 sarea_bus_addr; + U32 sarea_size; +} xgi_chip_info_t; + +typedef struct xgi_opengl_cmd_s { + U32 cmd; +} xgi_opengl_cmd_t; + +typedef struct xgi_mmio_info_s { + xgi_opengl_cmd_t cmd_head; + void *mmioBase; + int size; +} xgi_mmio_info_t; + +typedef enum { + BTYPE_2D = 0, + BTYPE_3D = 1, + BTYPE_FLIP = 2, + BTYPE_CTRL = 3, + BTYPE_NONE = 0x7fffffff +} BATCH_TYPE; + +typedef struct xgi_cmd_info_s { + BATCH_TYPE _firstBeginType; + U32 _firstBeginAddr; + U32 _firstSize; + U32 _curDebugID; + U32 _lastBeginAddr; + U32 _beginCount; +} xgi_cmd_info_t; + +typedef struct xgi_state_info_s { + U32 _fromState; + U32 _toState; +} xgi_state_info_t; + +typedef struct cpu_info_s { + U32 _eax; + U32 _ebx; + U32 _ecx; + U32 _edx; +} cpu_info_t; + +typedef struct xgi_mem_pid_s { + struct list_head list; + xgi_mem_location_t location; + unsigned long bus_addr; + unsigned long pid; +} xgi_mem_pid_t; + +/* + * Ioctl definitions + */ + +#define XGI_IOCTL_MAGIC 'x' /* use 'x' as magic number */ + +#define XGI_IOCTL_BASE 0 +#define XGI_ESC_DEVICE_INFO (XGI_IOCTL_BASE + 0) +#define XGI_ESC_POST_VBIOS (XGI_IOCTL_BASE + 1) + +#define XGI_ESC_FB_INIT (XGI_IOCTL_BASE + 2) +#define XGI_ESC_FB_ALLOC (XGI_IOCTL_BASE + 3) +#define XGI_ESC_FB_FREE (XGI_IOCTL_BASE + 4) +#define XGI_ESC_PCIE_INIT (XGI_IOCTL_BASE + 5) +#define XGI_ESC_PCIE_ALLOC (XGI_IOCTL_BASE + 6) +#define XGI_ESC_PCIE_FREE (XGI_IOCTL_BASE + 7) +#define XGI_ESC_SUBMIT_CMDLIST (XGI_IOCTL_BASE + 8) +#define XGI_ESC_PUT_SCREEN_INFO (XGI_IOCTL_BASE + 9) +#define XGI_ESC_GET_SCREEN_INFO (XGI_IOCTL_BASE + 10) +#define XGI_ESC_GE_RESET (XGI_IOCTL_BASE + 11) +#define XGI_ESC_SAREA_INFO (XGI_IOCTL_BASE + 12) +#define XGI_ESC_DUMP_REGISTER (XGI_IOCTL_BASE + 13) +#define XGI_ESC_DEBUG_INFO (XGI_IOCTL_BASE + 14) +#define XGI_ESC_TEST_RWINKERNEL (XGI_IOCTL_BASE + 16) +#define XGI_ESC_STATE_CHANGE (XGI_IOCTL_BASE + 17) +#define XGI_ESC_MMIO_INFO (XGI_IOCTL_BASE + 18) +#define XGI_ESC_PCIE_CHECK (XGI_IOCTL_BASE + 19) +#define XGI_ESC_CPUID (XGI_IOCTL_BASE + 20) +#define XGI_ESC_MEM_COLLECT (XGI_IOCTL_BASE + 21) + +#define XGI_IOCTL_DEVICE_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_DEVICE_INFO, xgi_chip_info_t) +#define XGI_IOCTL_POST_VBIOS _IO(XGI_IOCTL_MAGIC, XGI_ESC_POST_VBIOS) + +#define XGI_IOCTL_FB_INIT _IO(XGI_IOCTL_MAGIC, XGI_ESC_FB_INIT) +#define XGI_IOCTL_FB_ALLOC _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_FB_ALLOC, xgi_mem_req_t) +#define XGI_IOCTL_FB_FREE _IOW(XGI_IOCTL_MAGIC, XGI_ESC_FB_FREE, unsigned long) + +#define XGI_IOCTL_PCIE_INIT _IO(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_INIT) +#define XGI_IOCTL_PCIE_ALLOC _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_ALLOC, xgi_mem_req_t) +#define XGI_IOCTL_PCIE_FREE _IOW(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_FREE, unsigned long) + +#define XGI_IOCTL_PUT_SCREEN_INFO _IOW(XGI_IOCTL_MAGIC, XGI_ESC_PUT_SCREEN_INFO, xgi_screen_info_t) +#define XGI_IOCTL_GET_SCREEN_INFO _IOR(XGI_IOCTL_MAGIC, 
XGI_ESC_GET_SCREEN_INFO, xgi_screen_info_t) + +#define XGI_IOCTL_GE_RESET _IO(XGI_IOCTL_MAGIC, XGI_ESC_GE_RESET) +#define XGI_IOCTL_SAREA_INFO _IOW(XGI_IOCTL_MAGIC, XGI_ESC_SAREA_INFO, xgi_sarea_info_t) +#define XGI_IOCTL_DUMP_REGISTER _IO(XGI_IOCTL_MAGIC, XGI_ESC_DUMP_REGISTER) +#define XGI_IOCTL_DEBUG_INFO _IO(XGI_IOCTL_MAGIC, XGI_ESC_DEBUG_INFO) +#define XGI_IOCTL_MMIO_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_MMIO_INFO, xgi_mmio_info_t) + +#define XGI_IOCTL_SUBMIT_CMDLIST _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_SUBMIT_CMDLIST, xgi_cmd_info_t) +#define XGI_IOCTL_TEST_RWINKERNEL _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_TEST_RWINKERNEL, unsigned long) +#define XGI_IOCTL_STATE_CHANGE _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_STATE_CHANGE, xgi_state_info_t) + +#define XGI_IOCTL_PCIE_CHECK _IO(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_CHECK) +#define XGI_IOCTL_CPUID _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_CPUID, cpu_info_t) +#define XGI_IOCTL_MAXNR 30 + +/* + * flags + */ +#define XGI_FLAG_OPEN 0x0001 +#define XGI_FLAG_NEEDS_POSTING 0x0002 +#define XGI_FLAG_WAS_POSTED 0x0004 +#define XGI_FLAG_CONTROL 0x0010 +#define XGI_FLAG_MAP_REGS_EARLY 0x0200 + +/* mmap(2) offsets */ + +#define IS_IO_OFFSET(info, offset, length) \ + (((offset) >= (info)->mmio.base) \ + && (((offset) + (length)) <= (info)->mmio.base + (info)->mmio.size)) + +/* Jong 06/14/2006 */ +/* (info)->fb.base is a base address for physical (bus) address space */ +/* what's the definition of offest? on physical (bus) address space or HW address space */ +/* Jong 06/15/2006; use HW address space */ +#define IS_FB_OFFSET(info, offset, length) \ + (((offset) >= 0) \ + && (((offset) + (length)) <= (info)->fb.size)) +#if 0 +#define IS_FB_OFFSET(info, offset, length) \ + (((offset) >= (info)->fb.base) \ + && (((offset) + (length)) <= (info)->fb.base + (info)->fb.size)) +#endif + +#define IS_PCIE_OFFSET(info, offset, length) \ + (((offset) >= (info)->pcie.base) \ + && (((offset) + (length)) <= (info)->pcie.base + (info)->pcie.size)) + +extern int xgi_fb_heap_init(xgi_info_t * info); +extern void xgi_fb_heap_cleanup(xgi_info_t * info); + +extern void xgi_fb_alloc(xgi_info_t * info, xgi_mem_req_t * req, + xgi_mem_alloc_t * alloc); +extern void xgi_fb_free(xgi_info_t * info, unsigned long offset); +extern void xgi_mem_collect(xgi_info_t * info, unsigned int *pcnt); + +extern int xgi_pcie_heap_init(xgi_info_t * info); +extern void xgi_pcie_heap_cleanup(xgi_info_t * info); + +extern void xgi_pcie_alloc(xgi_info_t * info, unsigned long size, + enum PcieOwner owner, xgi_mem_alloc_t * alloc); +extern void xgi_pcie_free(xgi_info_t * info, unsigned long offset); +extern void xgi_pcie_heap_check(void); +extern void *xgi_find_pcie_block(xgi_info_t * info, unsigned long address); +extern void *xgi_find_pcie_virt(xgi_info_t * info, unsigned long address); + +extern void xgi_read_pcie_mem(xgi_info_t * info, xgi_mem_req_t * req); +extern void xgi_write_pcie_mem(xgi_info_t * info, xgi_mem_req_t * req); + +extern void xgi_test_rwinkernel(xgi_info_t * info, unsigned long address); + +#endif diff --git a/linux-core/xgi_fb.c b/linux-core/xgi_fb.c index 67fdfe17..fab99ae2 100644 --- a/linux-core/xgi_fb.c +++ b/linux-core/xgi_fb.c @@ -1,528 +1,491 @@ - -/**************************************************************************** - * Copyright (C) 2003-2006 by XGI Technology, Taiwan. - * * - * All Rights Reserved. 
* - * * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation on the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - ***************************************************************************/ - -#include "xgi_types.h" -#include "xgi_linux.h" -#include "xgi_drv.h" -#include "xgi_fb.h" - -#define XGI_FB_HEAP_START 0x1000000 - -static xgi_mem_heap_t *xgi_fb_heap; -static kmem_cache_t *xgi_fb_cache_block = NULL; -extern struct list_head xgi_mempid_list; - -static xgi_mem_block_t *xgi_mem_new_node(void); -static xgi_mem_block_t *xgi_mem_alloc(xgi_info_t *info, unsigned long size); -static xgi_mem_block_t *xgi_mem_free(xgi_info_t *info, unsigned long offset); - -void xgi_fb_alloc(xgi_info_t *info, - xgi_mem_req_t *req, - xgi_mem_alloc_t *alloc) -{ - xgi_mem_block_t *block; - xgi_mem_pid_t *mempid_block; - - if (req->is_front) - { - alloc->location = LOCAL; - alloc->bus_addr = info->fb.base; - alloc->hw_addr = 0; - XGI_INFO("Video RAM allocation on front buffer successfully! \n"); - } - else - { - xgi_down(info->fb_sem); - block = xgi_mem_alloc(info, req->size); - xgi_up(info->fb_sem); - - if (block == NULL) - { - alloc->location = LOCAL; - alloc->size = 0; - alloc->bus_addr = 0; - alloc->hw_addr = 0; - XGI_ERROR("Video RAM allocation failed\n"); - } - else - { - XGI_INFO("Video RAM allocation succeeded: 0x%p\n", - (char *) block->offset); - alloc->location = LOCAL; - alloc->size = block->size; - alloc->bus_addr = info->fb.base + block->offset; - alloc->hw_addr = block->offset; - - /* manage mempid */ - mempid_block = kmalloc(sizeof(xgi_mem_pid_t), GFP_KERNEL); - mempid_block->location = LOCAL; - mempid_block->bus_addr = alloc->bus_addr; - mempid_block->pid = alloc->pid; - - if (!mempid_block) - XGI_ERROR("mempid_block alloc failed\n"); - - XGI_INFO("Memory ProcessID add one fb block pid:%ld successfully! 
\n", mempid_block->pid); - list_add(&mempid_block->list, &xgi_mempid_list); - } - } -} - -void xgi_fb_free(xgi_info_t *info, unsigned long bus_addr) -{ - xgi_mem_block_t *block; - unsigned long offset = bus_addr - info->fb.base; - xgi_mem_pid_t *mempid_block; - xgi_mem_pid_t *mempid_freeblock = NULL; - struct list_head *mempid_list; - - if (offset < 0) - { - XGI_INFO("free onscreen frame buffer successfully !\n"); - } - else - { - xgi_down(info->fb_sem); - block = xgi_mem_free(info, offset); - xgi_up(info->fb_sem); - - if (block == NULL) - { - XGI_ERROR("xgi_mem_free() failed at base 0x%lx\n", offset); - } - - /* manage mempid */ - mempid_list = xgi_mempid_list.next; - while (mempid_list != &xgi_mempid_list) - { - mempid_block = list_entry(mempid_list, struct xgi_mem_pid_s, list); - if (mempid_block->location == LOCAL && mempid_block->bus_addr == bus_addr) - { - mempid_freeblock = mempid_block; - break; - } - mempid_list = mempid_list->next; - } - if (mempid_freeblock) - { - list_del(&mempid_freeblock->list); - XGI_INFO("Memory ProcessID delete one fb block pid:%ld successfully! \n", mempid_freeblock->pid); - kfree(mempid_freeblock); - } - } -} - -int xgi_fb_heap_init(xgi_info_t *info) -{ - xgi_mem_block_t *block; - - xgi_fb_heap = kmalloc(sizeof(xgi_mem_heap_t), GFP_KERNEL); - if (!xgi_fb_heap) - { - XGI_ERROR("xgi_fb_heap alloc failed\n"); - return 0; - } - - INIT_LIST_HEAD(&xgi_fb_heap->free_list); - INIT_LIST_HEAD(&xgi_fb_heap->used_list); - INIT_LIST_HEAD(&xgi_fb_heap->sort_list); - - xgi_fb_cache_block = kmem_cache_create("xgi_fb_block", sizeof(xgi_mem_block_t), - 0, SLAB_HWCACHE_ALIGN, NULL, NULL); - - if (NULL == xgi_fb_cache_block) - { - XGI_ERROR("Fail to creat xgi_fb_block\n"); - goto fail1; - } - - block = (xgi_mem_block_t *)kmem_cache_alloc(xgi_fb_cache_block, GFP_KERNEL); - if (!block) - { - XGI_ERROR("kmem_cache_alloc failed\n"); - goto fail2; - } - - block->offset = XGI_FB_HEAP_START; - block->size = info->fb.size - XGI_FB_HEAP_START; - - list_add(&block->list, &xgi_fb_heap->free_list); - - xgi_fb_heap->max_freesize = info->fb.size - XGI_FB_HEAP_START; - - XGI_INFO("fb start offset: 0x%lx, memory size : 0x%lx\n", block->offset, block->size); - XGI_INFO("xgi_fb_heap->max_freesize: 0x%lx \n", xgi_fb_heap->max_freesize); - - return 1; - -fail2: - if (xgi_fb_cache_block) - { - kmem_cache_destroy(xgi_fb_cache_block); - xgi_fb_cache_block = NULL; - } -fail1: - if(xgi_fb_heap) - { - kfree(xgi_fb_heap); - xgi_fb_heap = NULL; - } - return 0; -} - -void xgi_fb_heap_cleanup(xgi_info_t *info) -{ - struct list_head *free_list, *temp; - xgi_mem_block_t *block; - int i; - - if (xgi_fb_heap) - { - free_list = &xgi_fb_heap->free_list; - for (i = 0; i < 3; i++, free_list++) - { - temp = free_list->next; - while (temp != free_list) - { - block = list_entry(temp, struct xgi_mem_block_s, list); - temp = temp->next; - - XGI_INFO("No. %d block->offset: 0x%lx block->size: 0x%lx \n", - i, block->offset, block->size); - //XGI_INFO("No. 
%d free block: 0x%p \n", i, block); - kmem_cache_free(xgi_fb_cache_block, block); - block = NULL; - } - } - XGI_INFO("xgi_fb_heap: 0x%p \n", xgi_fb_heap); - kfree(xgi_fb_heap); - xgi_fb_heap = NULL; - } - - if (xgi_fb_cache_block) - { - kmem_cache_destroy(xgi_fb_cache_block); - xgi_fb_cache_block = NULL; - } -} - -static xgi_mem_block_t * xgi_mem_new_node(void) -{ - xgi_mem_block_t *block; - - block = (xgi_mem_block_t *)kmem_cache_alloc(xgi_fb_cache_block, GFP_KERNEL); - if (!block) - { - XGI_ERROR("kmem_cache_alloc failed\n"); - return NULL; - } - - return block; -} - -#if 0 -static void xgi_mem_insert_node_after(xgi_mem_list_t *list, - xgi_mem_block_t *current, - xgi_mem_block_t *block); -static void xgi_mem_insert_node_before(xgi_mem_list_t *list, - xgi_mem_block_t *current, - xgi_mem_block_t *block); -static void xgi_mem_insert_node_head(xgi_mem_list_t *list, - xgi_mem_block_t *block); -static void xgi_mem_insert_node_tail(xgi_mem_list_t *list, - xgi_mem_block_t *block); -static void xgi_mem_delete_node(xgi_mem_list_t *list, - xgi_mem_block_t *block); -/* - * insert node:block after node:current - */ -static void xgi_mem_insert_node_after(xgi_mem_list_t *list, - xgi_mem_block_t *current, - xgi_mem_block_t *block) -{ - block->prev = current; - block->next = current->next; - current->next = block; - - if (current == list->tail) - { - list->tail = block; - } - else - { - block->next->prev = block; - } -} - -/* - * insert node:block before node:current - */ -static void xgi_mem_insert_node_before(xgi_mem_list_t *list, - xgi_mem_block_t *current, - xgi_mem_block_t *block) -{ - block->prev = current->prev; - block->next = current; - current->prev = block; - if (current == list->head) - { - list->head = block; - } - else - { - block->prev->next = block; - } -} -void xgi_mem_insert_node_head(xgi_mem_list_t *list, - xgi_mem_block_t *block) -{ - block->next = list->head; - block->prev = NULL; - - if (NULL == list->head) - { - list->tail = block; - } - else - { - list->head->prev = block; - } - list->head = block; -} - -static void xgi_mem_insert_node_tail(xgi_mem_list_t *list, - xgi_mem_block_t *block) - -{ - block->next = NULL; - block->prev = list->tail; - if (NULL == list->tail) - { - list->head = block; - } - else - { - list->tail->next = block; - } - list->tail = block; -} - -static void xgi_mem_delete_node(xgi_mem_list_t *list, - xgi_mem_block_t *block) -{ - if (block == list->head) - { - list->head = block->next; - } - if (block == list->tail) - { - list->tail = block->prev; - } - - if (block->prev) - { - block->prev->next = block->next; - } - if (block->next) - { - block->next->prev = block->prev; - } - - block->next = block->prev = NULL; -} -#endif -static xgi_mem_block_t *xgi_mem_alloc(xgi_info_t *info, unsigned long originalSize) -{ - struct list_head *free_list; - xgi_mem_block_t *block, *free_block, *used_block; - - unsigned long size = (originalSize + PAGE_SIZE - 1) & PAGE_MASK; - - XGI_INFO("Original 0x%lx bytes requested, really 0x%lx allocated\n", originalSize, size); - - if (size == 0) - { - XGI_ERROR("size == 0\n"); - return (NULL); - } - XGI_INFO("max_freesize: 0x%lx \n", xgi_fb_heap->max_freesize); - if (size > xgi_fb_heap->max_freesize) - { - XGI_ERROR("size: 0x%lx is bigger than frame buffer total free size: 0x%lx !\n", - size, xgi_fb_heap->max_freesize); - return (NULL); - } - - free_list = xgi_fb_heap->free_list.next; - - while (free_list != &xgi_fb_heap->free_list) - { - XGI_INFO("free_list: 0x%px \n", free_list); - block = list_entry(free_list, struct xgi_mem_block_s, 
list); - if (size <= block->size) - { - break; - } - free_list = free_list->next; - } - - if (free_list == &xgi_fb_heap->free_list) - { - XGI_ERROR("Can't allocate %ldk size from frame buffer memory !\n", size/1024); - return (NULL); - } - - free_block = block; - XGI_INFO("alloc size: 0x%lx from offset: 0x%lx size: 0x%lx \n", - size, free_block->offset, free_block->size); - - if (size == free_block->size) - { - used_block = free_block; - XGI_INFO("size == free_block->size: free_block = 0x%p\n", free_block); - list_del(&free_block->list); - } - else - { - used_block = xgi_mem_new_node(); - - if (used_block == NULL) return (NULL); - - if (used_block == free_block) - { - XGI_ERROR("used_block == free_block = 0x%p\n", used_block); - } - - used_block->offset = free_block->offset; - used_block->size = size; - - free_block->offset += size; - free_block->size -= size; - } - - xgi_fb_heap->max_freesize -= size; - - list_add(&used_block->list, &xgi_fb_heap->used_list); - - return (used_block); -} - -static xgi_mem_block_t *xgi_mem_free(xgi_info_t *info, unsigned long offset) -{ - struct list_head *free_list, *used_list; - xgi_mem_block_t *used_block = NULL, *block = NULL; - xgi_mem_block_t *prev, *next; - - unsigned long upper; - unsigned long lower; - - used_list = xgi_fb_heap->used_list.next; - while (used_list != &xgi_fb_heap->used_list) - { - block = list_entry(used_list, struct xgi_mem_block_s, list); - if (block->offset == offset) - { - break; - } - used_list = used_list->next; - } - - if (used_list == &xgi_fb_heap->used_list) - { - XGI_ERROR("can't find block: 0x%lx to free!\n", offset); - return (NULL); - } - - used_block = block; - XGI_INFO("used_block: 0x%p, offset = 0x%lx, size = 0x%lx\n", - used_block, used_block->offset, used_block->size); - - xgi_fb_heap->max_freesize += used_block->size; - - prev = next = NULL; - upper = used_block->offset + used_block->size; - lower = used_block->offset; - - free_list = xgi_fb_heap->free_list.next; - while (free_list != &xgi_fb_heap->free_list) - { - block = list_entry(free_list, struct xgi_mem_block_s, list); - - if (block->offset == upper) - { - next = block; - } - else if ((block->offset + block->size) == lower) - { - prev = block; - } - free_list = free_list->next; - } - - XGI_INFO("next = 0x%p, prev = 0x%p\n", next, prev); - list_del(&used_block->list); - - if (prev && next) - { - prev->size += (used_block->size + next->size); - list_del(&next->list); - XGI_INFO("free node 0x%p\n", next); - kmem_cache_free(xgi_fb_cache_block, next); - kmem_cache_free(xgi_fb_cache_block, used_block); - - next = NULL; - used_block = NULL; - return (prev); - } - - if (prev) - { - prev->size += used_block->size; - XGI_INFO("free node 0x%p\n", used_block); - kmem_cache_free(xgi_fb_cache_block, used_block); - used_block = NULL; - return (prev); - } - - if (next) - { - next->size += used_block->size; - next->offset = used_block->offset; - XGI_INFO("free node 0x%p\n", used_block); - kmem_cache_free(xgi_fb_cache_block, used_block); - used_block = NULL; - return (next); - } - - list_add(&used_block->list, &xgi_fb_heap->free_list); - XGI_INFO("Recycled free node %p, offset = 0x%lx, size = 0x%lx\n", - used_block, used_block->offset, used_block->size); - - return (used_block); -} - + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * * + * All Rights Reserved. 
* + * * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + ***************************************************************************/ + +#include "xgi_types.h" +#include "xgi_linux.h" +#include "xgi_drv.h" +#include "xgi_fb.h" + +#define XGI_FB_HEAP_START 0x1000000 + +static xgi_mem_heap_t *xgi_fb_heap; +static kmem_cache_t *xgi_fb_cache_block = NULL; +extern struct list_head xgi_mempid_list; + +static xgi_mem_block_t *xgi_mem_new_node(void); +static xgi_mem_block_t *xgi_mem_alloc(xgi_info_t * info, unsigned long size); +static xgi_mem_block_t *xgi_mem_free(xgi_info_t * info, unsigned long offset); + +void xgi_fb_alloc(xgi_info_t * info, + xgi_mem_req_t * req, xgi_mem_alloc_t * alloc) +{ + xgi_mem_block_t *block; + xgi_mem_pid_t *mempid_block; + + if (req->is_front) { + alloc->location = LOCAL; + alloc->bus_addr = info->fb.base; + alloc->hw_addr = 0; + XGI_INFO + ("Video RAM allocation on front buffer successfully! \n"); + } else { + xgi_down(info->fb_sem); + block = xgi_mem_alloc(info, req->size); + xgi_up(info->fb_sem); + + if (block == NULL) { + alloc->location = LOCAL; + alloc->size = 0; + alloc->bus_addr = 0; + alloc->hw_addr = 0; + XGI_ERROR("Video RAM allocation failed\n"); + } else { + XGI_INFO("Video RAM allocation succeeded: 0x%p\n", + (char *)block->offset); + alloc->location = LOCAL; + alloc->size = block->size; + alloc->bus_addr = info->fb.base + block->offset; + alloc->hw_addr = block->offset; + + /* manage mempid */ + mempid_block = + kmalloc(sizeof(xgi_mem_pid_t), GFP_KERNEL); + mempid_block->location = LOCAL; + mempid_block->bus_addr = alloc->bus_addr; + mempid_block->pid = alloc->pid; + + if (!mempid_block) + XGI_ERROR("mempid_block alloc failed\n"); + + XGI_INFO + ("Memory ProcessID add one fb block pid:%ld successfully! 
\n", + mempid_block->pid); + list_add(&mempid_block->list, &xgi_mempid_list); + } + } +} + +void xgi_fb_free(xgi_info_t * info, unsigned long bus_addr) +{ + xgi_mem_block_t *block; + unsigned long offset = bus_addr - info->fb.base; + xgi_mem_pid_t *mempid_block; + xgi_mem_pid_t *mempid_freeblock = NULL; + struct list_head *mempid_list; + + if (offset < 0) { + XGI_INFO("free onscreen frame buffer successfully !\n"); + } else { + xgi_down(info->fb_sem); + block = xgi_mem_free(info, offset); + xgi_up(info->fb_sem); + + if (block == NULL) { + XGI_ERROR("xgi_mem_free() failed at base 0x%lx\n", + offset); + } + + /* manage mempid */ + mempid_list = xgi_mempid_list.next; + while (mempid_list != &xgi_mempid_list) { + mempid_block = + list_entry(mempid_list, struct xgi_mem_pid_s, list); + if (mempid_block->location == LOCAL + && mempid_block->bus_addr == bus_addr) { + mempid_freeblock = mempid_block; + break; + } + mempid_list = mempid_list->next; + } + if (mempid_freeblock) { + list_del(&mempid_freeblock->list); + XGI_INFO + ("Memory ProcessID delete one fb block pid:%ld successfully! \n", + mempid_freeblock->pid); + kfree(mempid_freeblock); + } + } +} + +int xgi_fb_heap_init(xgi_info_t * info) +{ + xgi_mem_block_t *block; + + xgi_fb_heap = kmalloc(sizeof(xgi_mem_heap_t), GFP_KERNEL); + if (!xgi_fb_heap) { + XGI_ERROR("xgi_fb_heap alloc failed\n"); + return 0; + } + + INIT_LIST_HEAD(&xgi_fb_heap->free_list); + INIT_LIST_HEAD(&xgi_fb_heap->used_list); + INIT_LIST_HEAD(&xgi_fb_heap->sort_list); + + xgi_fb_cache_block = + kmem_cache_create("xgi_fb_block", sizeof(xgi_mem_block_t), 0, + SLAB_HWCACHE_ALIGN, NULL, NULL); + + if (NULL == xgi_fb_cache_block) { + XGI_ERROR("Fail to creat xgi_fb_block\n"); + goto fail1; + } + + block = + (xgi_mem_block_t *) kmem_cache_alloc(xgi_fb_cache_block, + GFP_KERNEL); + if (!block) { + XGI_ERROR("kmem_cache_alloc failed\n"); + goto fail2; + } + + block->offset = XGI_FB_HEAP_START; + block->size = info->fb.size - XGI_FB_HEAP_START; + + list_add(&block->list, &xgi_fb_heap->free_list); + + xgi_fb_heap->max_freesize = info->fb.size - XGI_FB_HEAP_START; + + XGI_INFO("fb start offset: 0x%lx, memory size : 0x%lx\n", block->offset, + block->size); + XGI_INFO("xgi_fb_heap->max_freesize: 0x%lx \n", + xgi_fb_heap->max_freesize); + + return 1; + + fail2: + if (xgi_fb_cache_block) { + kmem_cache_destroy(xgi_fb_cache_block); + xgi_fb_cache_block = NULL; + } + fail1: + if (xgi_fb_heap) { + kfree(xgi_fb_heap); + xgi_fb_heap = NULL; + } + return 0; +} + +void xgi_fb_heap_cleanup(xgi_info_t * info) +{ + struct list_head *free_list, *temp; + xgi_mem_block_t *block; + int i; + + if (xgi_fb_heap) { + free_list = &xgi_fb_heap->free_list; + for (i = 0; i < 3; i++, free_list++) { + temp = free_list->next; + while (temp != free_list) { + block = + list_entry(temp, struct xgi_mem_block_s, + list); + temp = temp->next; + + XGI_INFO + ("No. %d block->offset: 0x%lx block->size: 0x%lx \n", + i, block->offset, block->size); + //XGI_INFO("No. 
%d free block: 0x%p \n", i, block); + kmem_cache_free(xgi_fb_cache_block, block); + block = NULL; + } + } + XGI_INFO("xgi_fb_heap: 0x%p \n", xgi_fb_heap); + kfree(xgi_fb_heap); + xgi_fb_heap = NULL; + } + + if (xgi_fb_cache_block) { + kmem_cache_destroy(xgi_fb_cache_block); + xgi_fb_cache_block = NULL; + } +} + +static xgi_mem_block_t *xgi_mem_new_node(void) +{ + xgi_mem_block_t *block; + + block = + (xgi_mem_block_t *) kmem_cache_alloc(xgi_fb_cache_block, + GFP_KERNEL); + if (!block) { + XGI_ERROR("kmem_cache_alloc failed\n"); + return NULL; + } + + return block; +} + +#if 0 +static void xgi_mem_insert_node_after(xgi_mem_list_t * list, + xgi_mem_block_t * current, + xgi_mem_block_t * block); +static void xgi_mem_insert_node_before(xgi_mem_list_t * list, + xgi_mem_block_t * current, + xgi_mem_block_t * block); +static void xgi_mem_insert_node_head(xgi_mem_list_t * list, + xgi_mem_block_t * block); +static void xgi_mem_insert_node_tail(xgi_mem_list_t * list, + xgi_mem_block_t * block); +static void xgi_mem_delete_node(xgi_mem_list_t * list, xgi_mem_block_t * block); +/* + * insert node:block after node:current + */ +static void xgi_mem_insert_node_after(xgi_mem_list_t * list, + xgi_mem_block_t * current, + xgi_mem_block_t * block) +{ + block->prev = current; + block->next = current->next; + current->next = block; + + if (current == list->tail) { + list->tail = block; + } else { + block->next->prev = block; + } +} + +/* + * insert node:block before node:current + */ +static void xgi_mem_insert_node_before(xgi_mem_list_t * list, + xgi_mem_block_t * current, + xgi_mem_block_t * block) +{ + block->prev = current->prev; + block->next = current; + current->prev = block; + if (current == list->head) { + list->head = block; + } else { + block->prev->next = block; + } +} +void xgi_mem_insert_node_head(xgi_mem_list_t * list, xgi_mem_block_t * block) +{ + block->next = list->head; + block->prev = NULL; + + if (NULL == list->head) { + list->tail = block; + } else { + list->head->prev = block; + } + list->head = block; +} + +static void xgi_mem_insert_node_tail(xgi_mem_list_t * list, + xgi_mem_block_t * block) +{ + block->next = NULL; + block->prev = list->tail; + if (NULL == list->tail) { + list->head = block; + } else { + list->tail->next = block; + } + list->tail = block; +} + +static void xgi_mem_delete_node(xgi_mem_list_t * list, xgi_mem_block_t * block) +{ + if (block == list->head) { + list->head = block->next; + } + if (block == list->tail) { + list->tail = block->prev; + } + + if (block->prev) { + block->prev->next = block->next; + } + if (block->next) { + block->next->prev = block->prev; + } + + block->next = block->prev = NULL; +} +#endif +static xgi_mem_block_t *xgi_mem_alloc(xgi_info_t * info, + unsigned long originalSize) +{ + struct list_head *free_list; + xgi_mem_block_t *block, *free_block, *used_block; + + unsigned long size = (originalSize + PAGE_SIZE - 1) & PAGE_MASK; + + XGI_INFO("Original 0x%lx bytes requested, really 0x%lx allocated\n", + originalSize, size); + + if (size == 0) { + XGI_ERROR("size == 0\n"); + return (NULL); + } + XGI_INFO("max_freesize: 0x%lx \n", xgi_fb_heap->max_freesize); + if (size > xgi_fb_heap->max_freesize) { + XGI_ERROR + ("size: 0x%lx is bigger than frame buffer total free size: 0x%lx !\n", + size, xgi_fb_heap->max_freesize); + return (NULL); + } + + free_list = xgi_fb_heap->free_list.next; + + while (free_list != &xgi_fb_heap->free_list) { + XGI_INFO("free_list: 0x%px \n", free_list); + block = list_entry(free_list, struct xgi_mem_block_s, list); + if 
(size <= block->size) { + break; + } + free_list = free_list->next; + } + + if (free_list == &xgi_fb_heap->free_list) { + XGI_ERROR + ("Can't allocate %ldk size from frame buffer memory !\n", + size / 1024); + return (NULL); + } + + free_block = block; + XGI_INFO("alloc size: 0x%lx from offset: 0x%lx size: 0x%lx \n", + size, free_block->offset, free_block->size); + + if (size == free_block->size) { + used_block = free_block; + XGI_INFO("size == free_block->size: free_block = 0x%p\n", + free_block); + list_del(&free_block->list); + } else { + used_block = xgi_mem_new_node(); + + if (used_block == NULL) + return (NULL); + + if (used_block == free_block) { + XGI_ERROR("used_block == free_block = 0x%p\n", + used_block); + } + + used_block->offset = free_block->offset; + used_block->size = size; + + free_block->offset += size; + free_block->size -= size; + } + + xgi_fb_heap->max_freesize -= size; + + list_add(&used_block->list, &xgi_fb_heap->used_list); + + return (used_block); +} + +static xgi_mem_block_t *xgi_mem_free(xgi_info_t * info, unsigned long offset) +{ + struct list_head *free_list, *used_list; + xgi_mem_block_t *used_block = NULL, *block = NULL; + xgi_mem_block_t *prev, *next; + + unsigned long upper; + unsigned long lower; + + used_list = xgi_fb_heap->used_list.next; + while (used_list != &xgi_fb_heap->used_list) { + block = list_entry(used_list, struct xgi_mem_block_s, list); + if (block->offset == offset) { + break; + } + used_list = used_list->next; + } + + if (used_list == &xgi_fb_heap->used_list) { + XGI_ERROR("can't find block: 0x%lx to free!\n", offset); + return (NULL); + } + + used_block = block; + XGI_INFO("used_block: 0x%p, offset = 0x%lx, size = 0x%lx\n", + used_block, used_block->offset, used_block->size); + + xgi_fb_heap->max_freesize += used_block->size; + + prev = next = NULL; + upper = used_block->offset + used_block->size; + lower = used_block->offset; + + free_list = xgi_fb_heap->free_list.next; + while (free_list != &xgi_fb_heap->free_list) { + block = list_entry(free_list, struct xgi_mem_block_s, list); + + if (block->offset == upper) { + next = block; + } else if ((block->offset + block->size) == lower) { + prev = block; + } + free_list = free_list->next; + } + + XGI_INFO("next = 0x%p, prev = 0x%p\n", next, prev); + list_del(&used_block->list); + + if (prev && next) { + prev->size += (used_block->size + next->size); + list_del(&next->list); + XGI_INFO("free node 0x%p\n", next); + kmem_cache_free(xgi_fb_cache_block, next); + kmem_cache_free(xgi_fb_cache_block, used_block); + + next = NULL; + used_block = NULL; + return (prev); + } + + if (prev) { + prev->size += used_block->size; + XGI_INFO("free node 0x%p\n", used_block); + kmem_cache_free(xgi_fb_cache_block, used_block); + used_block = NULL; + return (prev); + } + + if (next) { + next->size += used_block->size; + next->offset = used_block->offset; + XGI_INFO("free node 0x%p\n", used_block); + kmem_cache_free(xgi_fb_cache_block, used_block); + used_block = NULL; + return (next); + } + + list_add(&used_block->list, &xgi_fb_heap->free_list); + XGI_INFO("Recycled free node %p, offset = 0x%lx, size = 0x%lx\n", + used_block, used_block->offset, used_block->size); + + return (used_block); +} diff --git a/linux-core/xgi_fb.h b/linux-core/xgi_fb.h index 4b7ec2f2..ae078ae0 100644 --- a/linux-core/xgi_fb.h +++ b/linux-core/xgi_fb.h @@ -1,71 +1,70 @@ - -/**************************************************************************** - * Copyright (C) 2003-2006 by XGI Technology, Taiwan. - * * - * All Rights Reserved. 
* - * * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation on the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - ***************************************************************************/ - -#ifndef _XGI_FB_H_ -#define _XGI_FB_H_ - -typedef struct xgi_mem_block_s { - struct list_head list; - unsigned long offset; - unsigned long size; - atomic_t use_count; -} xgi_mem_block_t; - -typedef struct xgi_mem_heap_s { - struct list_head free_list; - struct list_head used_list; - struct list_head sort_list; - unsigned long max_freesize; - spinlock_t lock; -} xgi_mem_heap_t; - -#if 0 -typedef struct xgi_mem_block_s { - struct xgi_mem_block_s *next; - struct xgi_mem_block_s *prev; - unsigned long offset; - unsigned long size; - atomic_t use_count; -} xgi_mem_block_t; - -typedef struct xgi_mem_list_s { - xgi_mem_block_t *head; - xgi_mem_block_t *tail; -} xgi_mem_list_t; - -typedef struct xgi_mem_heap_s { - xgi_mem_list_t *free_list; - xgi_mem_list_t *used_list; - xgi_mem_list_t *sort_list; - unsigned long max_freesize; - spinlock_t lock; -} xgi_mem_heap_t; -#endif - -#endif - + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * * + * All Rights Reserved. * + * * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ ***************************************************************************/
+
+#ifndef _XGI_FB_H_
+#define _XGI_FB_H_
+
+typedef struct xgi_mem_block_s {
+	struct list_head list;
+	unsigned long offset;
+	unsigned long size;
+	atomic_t use_count;
+} xgi_mem_block_t;
+
+typedef struct xgi_mem_heap_s {
+	struct list_head free_list;
+	struct list_head used_list;
+	struct list_head sort_list;
+	unsigned long max_freesize;
+	spinlock_t lock;
+} xgi_mem_heap_t;
+
+#if 0
+typedef struct xgi_mem_block_s {
+	struct xgi_mem_block_s *next;
+	struct xgi_mem_block_s *prev;
+	unsigned long offset;
+	unsigned long size;
+	atomic_t use_count;
+} xgi_mem_block_t;
+
+typedef struct xgi_mem_list_s {
+	xgi_mem_block_t *head;
+	xgi_mem_block_t *tail;
+} xgi_mem_list_t;
+
+typedef struct xgi_mem_heap_s {
+	xgi_mem_list_t *free_list;
+	xgi_mem_list_t *used_list;
+	xgi_mem_list_t *sort_list;
+	unsigned long max_freesize;
+	spinlock_t lock;
+} xgi_mem_heap_t;
+#endif
+
+#endif
diff --git a/linux-core/xgi_linux.h b/linux-core/xgi_linux.h
index f207a4f6..67c1af82 100644
--- a/linux-core/xgi_linux.h
+++ b/linux-core/xgi_linux.h
@@ -1,596 +1,591 @@
-
-/****************************************************************************
- * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
- * *
- * All Rights Reserved. *
- * *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation on the rights to use, copy, modify, merge,
- * publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so,
- * subject to the following conditions:
- * *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- * *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR
- * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- ***************************************************************************/
-
-
-#ifndef _XGI_LINUX_H_
-#define _XGI_LINUX_H_
-
-#include
-
-#ifndef LINUX_VERSION_CODE
-#include <linux/version.h>
-#endif
-
-#ifndef KERNEL_VERSION /* pre-2.1.90 didn't have it */
-#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0)
-# error "This driver does not support pre-2.4 kernels!"
-#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
-#define KERNEL_2_4
-#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
-# error "This driver does not support 2.5 kernels!"
-#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 7, 0)
-#define KERNEL_2_6
-#else
-# error "This driver does not support development kernels!"
-#endif - -#if defined (CONFIG_SMP) && !defined (__SMP__) -#define __SMP__ -#endif - -#if defined (CONFIG_MODVERSIONS) && !defined (MODVERSIONS) -#define MODVERSIONS -#endif - -#if defined (MODVERSIONS) && !defined (KERNEL_2_6) -#include -#endif - -#include /* printk */ -#include - -#include /* module_init, module_exit */ -#include /* pic_t, size_t, __u32, etc */ -#include /* error codes */ -#include /* circular linked list */ -#include /* NULL, offsetof */ -#include /* wait queues */ - -#include /* kmalloc, kfree, etc */ -#include /* vmalloc, vfree, etc */ - -#include /* poll_wait */ -#include /* mdelay, udelay */ -#include /* rdtsc rdtscl */ - -#include /* suser(), capable() replacement - for_each_task, for_each_process */ -#ifdef for_each_process -#define XGI_SCAN_PROCESS(p) for_each_process(p) -#else -#define XGI_SCAN_PROCESS(p) for_each_task(p) -#endif - -#ifdef KERNEL_2_6 -#include /* module_param() */ -#include /* kernel_locked */ -#include /* flush_tlb(), flush_tlb_all() */ -#include /* page table entry lookup */ -#endif - -#include /* pci_find_class, etc */ -#include /* tasklets, interrupt helpers */ -#include - -#include /* cli, sli, save_flags */ -#include /* ioremap, virt_to_phys */ -#include /* access_ok */ -#include /* PAGE_OFFSET */ -#include /* pte bit definitions */ - -#include -#include -#include - -#ifdef CONFIG_PROC_FS -#include -#endif - -#ifdef CONFIG_DEVFS_FS -#include -#endif - -#ifdef CONFIG_KMOD -#include -#endif - -#ifdef CONFIG_PM -#include -#endif - -#ifdef CONFIG_MTRR -#include -#endif - -#ifdef CONFIG_KDB -#include -#include -#endif - -#if defined (CONFIG_AGP) || defined (CONFIG_AGP_MODULE) -#define AGPGART -#include -#include -#endif - -#ifndef MAX_ORDER -#ifdef KERNEL_2_4 -#define MAX_ORDER 10 -#endif -#ifdef KERNEL_2_6 -#define MAX_ORDER 11 -#endif -#endif - -#ifndef module_init -#define module_init(x) int init_module(void) { return x(); } -#define module_exit(x) void cleanup_module(void) { x(); } -#endif - -#ifndef minor -#define minor(x) MINOR(x) -#endif - -#ifndef IRQ_HANDLED -typedef void irqreturn_t; -#define IRQ_NONE -#define IRQ_HANDLED -#define IRQ_RETVAL(x) -#endif - -#if !defined (list_for_each) -#define list_for_each(pos, head) \ - for (pos = (head)->next, prefetch(pos->next); pos != (head); \ - pos = pos->next, prefetch(pos->next)) -#endif - -#ifdef KERNEL_2_4 -#define XGI_PCI_FOR_EACH_DEV(dev) pci_for_each_dev(dev) -#endif -#ifdef KERNEL_2_6 -extern struct list_head pci_devices; /* list of all devices */ -#define XGI_PCI_FOR_EACH_DEV(dev) \ - for(dev = pci_dev_g(pci_devices.next); dev != pci_dev_g(&pci_devices); dev = pci_dev_g(dev->global_list.next)) -#endif - -/* - * the following macro causes problems when used in the same module - * as module_param(); undef it so we don't accidentally mix the two - */ -#if defined (KERNEL_2_6) -#undef MODULE_PARM -#endif - -#ifdef EXPORT_NO_SYMBOLS -EXPORT_NO_SYMBOLS; -#endif - -#if defined (KERNEL_2_4) -#define XGI_IS_SUSER() suser() -#define XGI_PCI_DEVICE_NAME(dev) ((dev)->name) -#define XGI_NUM_CPUS() smp_num_cpus -#define XGI_CLI() __cli() -#define XGI_SAVE_FLAGS(eflags) __save_flags(eflags) -#define XGI_RESTORE_FLAGS(eflags) __restore_flags(eflags) -#define XGI_MAY_SLEEP() (!in_interrupt()) -#define XGI_MODULE_PARAMETER(x) MODULE_PARM(x, "i") -#endif - -#if defined (KERNEL_2_6) -#define XGI_IS_SUSER() capable(CAP_SYS_ADMIN) -#define XGI_PCI_DEVICE_NAME(dev) ((dev)->pretty_name) -#define XGI_NUM_CPUS() num_online_cpus() -#define XGI_CLI() local_irq_disable() -#define XGI_SAVE_FLAGS(eflags) 
local_save_flags(eflags) -#define XGI_RESTORE_FLAGS(eflags) local_irq_restore(eflags) -#define XGI_MAY_SLEEP() (!in_interrupt() && !in_atomic()) -#define XGI_MODULE_PARAMETER(x) module_param(x, int, 0) -#endif - -/* Earlier 2.4.x kernels don't have pci_disable_device() */ -#ifdef XGI_PCI_DISABLE_DEVICE_PRESENT -#define XGI_PCI_DISABLE_DEVICE(dev) pci_disable_device(dev) -#else -#define XGI_PCI_DISABLE_DEVICE(dev) -#endif - -/* common defines */ -#define GET_MODULE_SYMBOL(mod,sym) (const void *) inter_module_get(sym) -#define PUT_MODULE_SYMBOL(sym) inter_module_put((char *) sym) - -#define XGI_GET_PAGE_STRUCT(phys_page) virt_to_page(__va(phys_page)) -#define XGI_VMA_OFFSET(vma) (((vma)->vm_pgoff) << PAGE_SHIFT) -#define XGI_VMA_PRIVATE(vma) ((vma)->vm_private_data) - -#define XGI_DEVICE_NUMBER(x) minor((x)->i_rdev) -#define XGI_IS_CONTROL_DEVICE(x) (minor((x)->i_rdev) == 255) - -#define XGI_PCI_RESOURCE_START(dev, bar) ((dev)->resource[bar].start) -#define XGI_PCI_RESOURCE_SIZE(dev, bar) ((dev)->resource[bar].end - (dev)->resource[bar].start + 1) - -#define XGI_PCI_BUS_NUMBER(dev) (dev)->bus->number -#define XGI_PCI_SLOT_NUMBER(dev) PCI_SLOT((dev)->devfn) - -#ifdef XGI_PCI_GET_CLASS_PRESENT -#define XGI_PCI_DEV_PUT(dev) pci_dev_put(dev) -#define XGI_PCI_GET_DEVICE(vendor,device,from) pci_get_device(vendor,device,from) -#define XGI_PCI_GET_SLOT(bus,devfn) pci_get_slot(pci_find_bus(0,bus),devfn) -#define XGI_PCI_GET_CLASS(class,from) pci_get_class(class,from) -#else -#define XGI_PCI_DEV_PUT(dev) -#define XGI_PCI_GET_DEVICE(vendor,device,from) pci_find_device(vendor,device,from) -#define XGI_PCI_GET_SLOT(bus,devfn) pci_find_slot(bus,devfn) -#define XGI_PCI_GET_CLASS(class,from) pci_find_class(class,from) -#endif - -/* - * acpi support has been back-ported to the 2.4 kernel, but the 2.4 driver - * model is not sufficient for full acpi support. it may work in some cases, - * but not enough for us to officially support this configuration. - */ -#if defined(CONFIG_ACPI) && defined(KERNEL_2_6) -#define XGI_PM_SUPPORT_ACPI -#endif - -#if defined(CONFIG_APM) || defined(CONFIG_APM_MODULE) -#define XGI_PM_SUPPORT_APM -#endif - - -#if defined(CONFIG_DEVFS_FS) -#if defined(KERNEL_2_6) -typedef void* devfs_handle_t; -#define XGI_DEVFS_REGISTER(_name, _minor) \ - ({ \ - devfs_handle_t __handle = NULL; \ - if (devfs_mk_cdev(MKDEV(XGI_DEV_MAJOR, _minor), \ - S_IFCHR | S_IRUGO | S_IWUGO, _name) == 0) \ - { \ - __handle = (void *) 1; /* XXX Fix me! (boolean) */ \ - } \ - __handle; \ - }) -/* -#define XGI_DEVFS_REMOVE_DEVICE(i) devfs_remove("xgi%d", i) -*/ -#define XGI_DEVFS_REMOVE_CONTROL() devfs_remove("xgi_ctl") -#define XGI_DEVFS_REMOVE_DEVICE(i) devfs_remove("xgi") -#else // defined(KERNEL_2_4) -#define XGI_DEVFS_REGISTER(_name, _minor) \ - ({ \ - devfs_handle_t __handle = devfs_register(NULL, _name, DEVFS_FL_AUTO_DEVNUM, \ - XGI_DEV_MAJOR, _minor, \ - S_IFCHR | S_IRUGO | S_IWUGO, &xgi_fops, NULL); \ - __handle; \ - }) - -#define XGI_DEVFS_REMOVE_DEVICE(i) \ - ({ \ - if (xgi_devfs_handles[i] != NULL) \ - { \ - devfs_unregister(xgi_devfs_handles[i]); \ - } \ - }) -#define XGI_DEVFS_REMOVE_CONTROL() \ - ({ \ - if (xgi_devfs_handles[0] != NULL) \ - { \ - devfs_unregister(xgi_devfs_handles[0]); \ - } \ - }) -#endif /* defined(KERNEL_2_4) */ -#endif /* defined(CONFIG_DEVFS_FS) */ - -#if defined(CONFIG_DEVFS_FS) && !defined(KERNEL_2_6) -#define XGI_REGISTER_CHRDEV(x...) devfs_register_chrdev(x) -#define XGI_UNREGISTER_CHRDEV(x...) devfs_unregister_chrdev(x) -#else -#define XGI_REGISTER_CHRDEV(x...) 
register_chrdev(x) -#define XGI_UNREGISTER_CHRDEV(x...) unregister_chrdev(x) -#endif - -#if defined(XGI_REMAP_PFN_RANGE_PRESENT) -#define XGI_REMAP_PAGE_RANGE(from, offset, x...) \ - remap_pfn_range(vma, from, ((offset) >> PAGE_SHIFT), x) -#elif defined(XGI_REMAP_PAGE_RANGE_5) -#define XGI_REMAP_PAGE_RANGE(x...) remap_page_range(vma, x) -#elif defined(XGI_REMAP_PAGE_RANGE_4) -#define XGI_REMAP_PAGE_RANGE(x...) remap_page_range(x) -#else -#warning "xgi_configure.sh failed, assuming remap_page_range(5)!" -#define XGI_REMAP_PAGE_RANGE(x...) remap_page_range(vma, x) -#endif - -#if defined(pmd_offset_map) -#define XGI_PMD_OFFSET(addres, pg_dir, pg_mid_dir) \ - { \ - pg_mid_dir = pmd_offset_map(pg_dir, address); \ - } -#define XGI_PMD_UNMAP(pg_mid_dir) \ - { \ - pmd_unmap(pg_mid_dir); \ - } -#else -#define XGI_PMD_OFFSET(addres, pg_dir, pg_mid_dir) \ - { \ - pg_mid_dir = pmd_offset(pg_dir, address); \ - } -#define XGI_PMD_UNMAP(pg_mid_dir) -#endif - -#define XGI_PMD_PRESENT(pg_mid_dir) \ - ({ \ - if ((pg_mid_dir) && (pmd_none(*pg_mid_dir))) \ - { \ - XGI_PMD_UNMAP(pg_mid_dir); \ - pg_mid_dir = NULL; \ - } \ - pg_mid_dir != NULL; \ - }) - -#if defined(pte_offset_atomic) -#define XGI_PTE_OFFSET(addres, pg_mid_dir, pte) \ - { \ - pte = pte_offset_atomic(pg_mid_dir, address); \ - XGI_PMD_UNMAP(pg_mid_dir); \ - } -#define XGI_PTE_UNMAP(pte) \ - { \ - pte_kunmap(pte); \ - } -#elif defined(pte_offset) -#define XGI_PTE_OFFSET(addres, pg_mid_dir, pte) \ - { \ - pte = pte_offset(pg_mid_dir, address); \ - XGI_PMD_UNMAP(pg_mid_dir); \ - } -#define XGI_PTE_UNMAP(pte) -#else -#define XGI_PTE_OFFSET(addres, pg_mid_dir, pte) \ - { \ - pte = pte_offset_map(pg_mid_dir, address); \ - XGI_PMD_UNMAP(pg_mid_dir); \ - } -#define XGI_PTE_UNMAP(pte) \ - { \ - pte_unmap(pte); \ - } -#endif - -#define XGI_PTE_PRESENT(pte) \ - ({ \ - if (pte) \ - { \ - if (!pte_present(*pte)) \ - { \ - XGI_PTE_UNMAP(pte); pte = NULL; \ - } \ - } \ - pte != NULL; \ - }) - -#define XGI_PTE_VALUE(pte) \ - ({ \ - unsigned long __pte_value = pte_val(*pte); \ - XGI_PTE_UNMAP(pte); \ - __pte_value; \ - }) - -#define XGI_PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) / PAGE_SIZE) -#define XGI_MASK_OFFSET(addr) ((addr) & (PAGE_SIZE - 1)) - -#if !defined (pgprot_noncached) -static inline pgprot_t pgprot_noncached(pgprot_t old_prot) - { - pgprot_t new_prot = old_prot; - if (boot_cpu_data.x86 > 3) - new_prot = __pgprot(pgprot_val(old_prot) | _PAGE_PCD); - return new_prot; - } -#endif - -#if defined(XGI_BUILD_XGI_PAT_SUPPORT) && !defined (pgprot_writecombined) -/* Added define for write combining page, only valid if pat enabled. 
*/ -#define _PAGE_WRTCOMB _PAGE_PWT -#define __PAGE_KERNEL_WRTCOMB \ - (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_WRTCOMB | _PAGE_ACCESSED) -#define PAGE_KERNEL_WRTCOMB MAKE_GLOBAL(__PAGE_KERNEL_WRTCOMB) - -static inline pgprot_t pgprot_writecombined(pgprot_t old_prot) - { - pgprot_t new_prot = old_prot; - if (boot_cpu_data.x86 > 3) - { - pgprot_val(old_prot) &= ~(_PAGE_PCD | _PAGE_PWT); - new_prot = __pgprot(pgprot_val(old_prot) | _PAGE_WRTCOMB); - } - return new_prot; - } -#endif - -#if !defined(page_to_pfn) -#define page_to_pfn(page) ((page) - mem_map) -#endif - -#define XGI_VMALLOC(ptr, size) \ - { \ - (ptr) = vmalloc_32(size); \ - } - -#define XGI_VFREE(ptr, size) \ - { \ - vfree((void *) (ptr)); \ - } - -#define XGI_IOREMAP(ptr, physaddr, size) \ - { \ - (ptr) = ioremap(physaddr, size); \ - } - -#define XGI_IOREMAP_NOCACHE(ptr, physaddr, size) \ - { \ - (ptr) = ioremap_nocache(physaddr, size); \ - } - -#define XGI_IOUNMAP(ptr, size) \ - { \ - iounmap(ptr); \ - } - -/* - * only use this because GFP_KERNEL may sleep.. - * GFP_ATOMIC is ok, it won't sleep - */ -#define XGI_KMALLOC(ptr, size) \ - { \ - (ptr) = kmalloc(size, GFP_KERNEL); \ - } - -#define XGI_KMALLOC_ATOMIC(ptr, size) \ - { \ - (ptr) = kmalloc(size, GFP_ATOMIC); \ - } - -#define XGI_KFREE(ptr, size) \ - { \ - kfree((void *) (ptr)); \ - } - -#define XGI_GET_FREE_PAGES(ptr, order) \ - { \ - (ptr) = __get_free_pages(GFP_KERNEL, order); \ - } - -#define XGI_FREE_PAGES(ptr, order) \ - { \ - free_pages(ptr, order); \ - } - -typedef struct xgi_pte_s { - unsigned long phys_addr; - unsigned long virt_addr; -} xgi_pte_t; - -/* - * AMD Athlon processors expose a subtle bug in the Linux - * kernel, that may lead to AGP memory corruption. Recent - * kernel versions had a workaround for this problem, but - * 2.4.20 is the first kernel to address it properly. The - * page_attr API provides the means to solve the problem. 
- */ -#if defined(XGI_CHANGE_PAGE_ATTR_PRESENT) -static inline void XGI_SET_PAGE_ATTRIB_UNCACHED(xgi_pte_t *page_ptr) - { - struct page *page = virt_to_page(__va(page_ptr->phys_addr)); - change_page_attr(page, 1, PAGE_KERNEL_NOCACHE); - } -static inline void XGI_SET_PAGE_ATTRIB_CACHED(xgi_pte_t *page_ptr) - { - struct page *page = virt_to_page(__va(page_ptr->phys_addr)); - change_page_attr(page, 1, PAGE_KERNEL); - } -#else -#define XGI_SET_PAGE_ATTRIB_UNCACHED(page_list) -#define XGI_SET_PAGE_ATTRIB_CACHED(page_list) -#endif - -#ifdef KERNEL_2_4 -#define XGI_INC_PAGE_COUNT(page) atomic_inc(&(page)->count) -#define XGI_DEC_PAGE_COUNT(page) atomic_dec(&(page)->count) -#define XGI_PAGE_COUNT(page) atomic_read(&(page)->count) -#define XGI_SET_PAGE_COUNT(page,v) atomic_set(&(page)->count, v) - -#define XGILockPage(page) set_bit(PG_locked, &(page)->flags) -#define XGIUnlockPage(page) clear_bit(PG_locked, &(page)->flags) -#endif - -#ifdef KERNEL_2_6 -/* add for SUSE 9, Jill*/ -#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 4) -#define XGI_INC_PAGE_COUNT(page) atomic_inc(&(page)->count) -#define XGI_DEC_PAGE_COUNT(page) atomic_dec(&(page)->count) -#define XGI_PAGE_COUNT(page) atomic_read(&(page)->count) -#define XGI_SET_PAGE_COUNT(page,v) atomic_set(&(page)->count, v) -#else -#define XGI_INC_PAGE_COUNT(page) atomic_inc(&(page)->_count) -#define XGI_DEC_PAGE_COUNT(page) atomic_dec(&(page)->_count) -#define XGI_PAGE_COUNT(page) atomic_read(&(page)->_count) -#define XGI_SET_PAGE_COUNT(page,v) atomic_set(&(page)->_count, v) -#endif -#define XGILockPage(page) SetPageLocked(page) -#define XGIUnlockPage(page) ClearPageLocked(page) -#endif - - -/* - * hide a pointer to struct xgi_info_t in a file-private info - */ - -typedef struct -{ - void *info; - U32 num_events; - spinlock_t fp_lock; - wait_queue_head_t wait_queue; -} xgi_file_private_t; - -#define FILE_PRIVATE(filp) ((filp)->private_data) - -#define XGI_GET_FP(filp) ((xgi_file_private_t *) FILE_PRIVATE(filp)) - -/* for the card devices */ -#define XGI_INFO_FROM_FP(filp) (XGI_GET_FP(filp)->info) - -#ifdef KERNEL_2_0 -#define INODE_FROM_FP(filp) ((filp)->f_inode) -#else -#define INODE_FROM_FP(filp) ((filp)->f_dentry->d_inode) -#endif - -#define XGI_ATOMIC_SET(data,val) atomic_set(&(data), (val)) -#define XGI_ATOMIC_INC(data) atomic_inc(&(data)) -#define XGI_ATOMIC_DEC(data) atomic_dec(&(data)) -#define XGI_ATOMIC_DEC_AND_TEST(data) atomic_dec_and_test(&(data)) -#define XGI_ATOMIC_READ(data) atomic_read(&(data)) - -/* - * lock-related functions that should only be called from this file - */ -#define xgi_init_lock(lock) spin_lock_init(&lock) -#define xgi_lock(lock) spin_lock(&lock) -#define xgi_unlock(lock) spin_unlock(&lock) -#define xgi_down(lock) down(&lock) -#define xgi_up(lock) up(&lock) - -#define xgi_lock_irqsave(lock,flags) spin_lock_irqsave(&lock,flags) -#define xgi_unlock_irqsave(lock,flags) spin_unlock_irqrestore(&lock,flags) - -#endif + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * * + * All Rights Reserved. 
*
+ * *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation on the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ * *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ * *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR
+ * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ ***************************************************************************/
+
+#ifndef _XGI_LINUX_H_
+#define _XGI_LINUX_H_
+
+#include
+
+#ifndef LINUX_VERSION_CODE
+#include <linux/version.h>
+#endif
+
+#ifndef KERNEL_VERSION /* pre-2.1.90 didn't have it */
+#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0)
+# error "This driver does not support pre-2.4 kernels!"
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
+#define KERNEL_2_4
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
+# error "This driver does not support 2.5 kernels!"
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 7, 0)
+#define KERNEL_2_6
+#else
+# error "This driver does not support development kernels!"
+#endif + +#if defined (CONFIG_SMP) && !defined (__SMP__) +#define __SMP__ +#endif + +#if defined (CONFIG_MODVERSIONS) && !defined (MODVERSIONS) +#define MODVERSIONS +#endif + +#if defined (MODVERSIONS) && !defined (KERNEL_2_6) +#include +#endif + +#include /* printk */ +#include + +#include /* module_init, module_exit */ +#include /* pic_t, size_t, __u32, etc */ +#include /* error codes */ +#include /* circular linked list */ +#include /* NULL, offsetof */ +#include /* wait queues */ + +#include /* kmalloc, kfree, etc */ +#include /* vmalloc, vfree, etc */ + +#include /* poll_wait */ +#include /* mdelay, udelay */ +#include /* rdtsc rdtscl */ + +#include /* suser(), capable() replacement + for_each_task, for_each_process */ +#ifdef for_each_process +#define XGI_SCAN_PROCESS(p) for_each_process(p) +#else +#define XGI_SCAN_PROCESS(p) for_each_task(p) +#endif + +#ifdef KERNEL_2_6 +#include /* module_param() */ +#include /* kernel_locked */ +#include /* flush_tlb(), flush_tlb_all() */ +#include /* page table entry lookup */ +#endif + +#include /* pci_find_class, etc */ +#include /* tasklets, interrupt helpers */ +#include + +#include /* cli, sli, save_flags */ +#include /* ioremap, virt_to_phys */ +#include /* access_ok */ +#include /* PAGE_OFFSET */ +#include /* pte bit definitions */ + +#include +#include +#include + +#ifdef CONFIG_PROC_FS +#include +#endif + +#ifdef CONFIG_DEVFS_FS +#include +#endif + +#ifdef CONFIG_KMOD +#include +#endif + +#ifdef CONFIG_PM +#include +#endif + +#ifdef CONFIG_MTRR +#include +#endif + +#ifdef CONFIG_KDB +#include +#include +#endif + +#if defined (CONFIG_AGP) || defined (CONFIG_AGP_MODULE) +#define AGPGART +#include +#include +#endif + +#ifndef MAX_ORDER +#ifdef KERNEL_2_4 +#define MAX_ORDER 10 +#endif +#ifdef KERNEL_2_6 +#define MAX_ORDER 11 +#endif +#endif + +#ifndef module_init +#define module_init(x) int init_module(void) { return x(); } +#define module_exit(x) void cleanup_module(void) { x(); } +#endif + +#ifndef minor +#define minor(x) MINOR(x) +#endif + +#ifndef IRQ_HANDLED +typedef void irqreturn_t; +#define IRQ_NONE +#define IRQ_HANDLED +#define IRQ_RETVAL(x) +#endif + +#if !defined (list_for_each) +#define list_for_each(pos, head) \ + for (pos = (head)->next, prefetch(pos->next); pos != (head); \ + pos = pos->next, prefetch(pos->next)) +#endif + +#ifdef KERNEL_2_4 +#define XGI_PCI_FOR_EACH_DEV(dev) pci_for_each_dev(dev) +#endif +#ifdef KERNEL_2_6 +extern struct list_head pci_devices; /* list of all devices */ +#define XGI_PCI_FOR_EACH_DEV(dev) \ + for(dev = pci_dev_g(pci_devices.next); dev != pci_dev_g(&pci_devices); dev = pci_dev_g(dev->global_list.next)) +#endif + +/* + * the following macro causes problems when used in the same module + * as module_param(); undef it so we don't accidentally mix the two + */ +#if defined (KERNEL_2_6) +#undef MODULE_PARM +#endif + +#ifdef EXPORT_NO_SYMBOLS +EXPORT_NO_SYMBOLS; +#endif + +#if defined (KERNEL_2_4) +#define XGI_IS_SUSER() suser() +#define XGI_PCI_DEVICE_NAME(dev) ((dev)->name) +#define XGI_NUM_CPUS() smp_num_cpus +#define XGI_CLI() __cli() +#define XGI_SAVE_FLAGS(eflags) __save_flags(eflags) +#define XGI_RESTORE_FLAGS(eflags) __restore_flags(eflags) +#define XGI_MAY_SLEEP() (!in_interrupt()) +#define XGI_MODULE_PARAMETER(x) MODULE_PARM(x, "i") +#endif + +#if defined (KERNEL_2_6) +#define XGI_IS_SUSER() capable(CAP_SYS_ADMIN) +#define XGI_PCI_DEVICE_NAME(dev) ((dev)->pretty_name) +#define XGI_NUM_CPUS() num_online_cpus() +#define XGI_CLI() local_irq_disable() +#define XGI_SAVE_FLAGS(eflags) 
local_save_flags(eflags) +#define XGI_RESTORE_FLAGS(eflags) local_irq_restore(eflags) +#define XGI_MAY_SLEEP() (!in_interrupt() && !in_atomic()) +#define XGI_MODULE_PARAMETER(x) module_param(x, int, 0) +#endif + +/* Earlier 2.4.x kernels don't have pci_disable_device() */ +#ifdef XGI_PCI_DISABLE_DEVICE_PRESENT +#define XGI_PCI_DISABLE_DEVICE(dev) pci_disable_device(dev) +#else +#define XGI_PCI_DISABLE_DEVICE(dev) +#endif + +/* common defines */ +#define GET_MODULE_SYMBOL(mod,sym) (const void *) inter_module_get(sym) +#define PUT_MODULE_SYMBOL(sym) inter_module_put((char *) sym) + +#define XGI_GET_PAGE_STRUCT(phys_page) virt_to_page(__va(phys_page)) +#define XGI_VMA_OFFSET(vma) (((vma)->vm_pgoff) << PAGE_SHIFT) +#define XGI_VMA_PRIVATE(vma) ((vma)->vm_private_data) + +#define XGI_DEVICE_NUMBER(x) minor((x)->i_rdev) +#define XGI_IS_CONTROL_DEVICE(x) (minor((x)->i_rdev) == 255) + +#define XGI_PCI_RESOURCE_START(dev, bar) ((dev)->resource[bar].start) +#define XGI_PCI_RESOURCE_SIZE(dev, bar) ((dev)->resource[bar].end - (dev)->resource[bar].start + 1) + +#define XGI_PCI_BUS_NUMBER(dev) (dev)->bus->number +#define XGI_PCI_SLOT_NUMBER(dev) PCI_SLOT((dev)->devfn) + +#ifdef XGI_PCI_GET_CLASS_PRESENT +#define XGI_PCI_DEV_PUT(dev) pci_dev_put(dev) +#define XGI_PCI_GET_DEVICE(vendor,device,from) pci_get_device(vendor,device,from) +#define XGI_PCI_GET_SLOT(bus,devfn) pci_get_slot(pci_find_bus(0,bus),devfn) +#define XGI_PCI_GET_CLASS(class,from) pci_get_class(class,from) +#else +#define XGI_PCI_DEV_PUT(dev) +#define XGI_PCI_GET_DEVICE(vendor,device,from) pci_find_device(vendor,device,from) +#define XGI_PCI_GET_SLOT(bus,devfn) pci_find_slot(bus,devfn) +#define XGI_PCI_GET_CLASS(class,from) pci_find_class(class,from) +#endif + +/* + * acpi support has been back-ported to the 2.4 kernel, but the 2.4 driver + * model is not sufficient for full acpi support. it may work in some cases, + * but not enough for us to officially support this configuration. + */ +#if defined(CONFIG_ACPI) && defined(KERNEL_2_6) +#define XGI_PM_SUPPORT_ACPI +#endif + +#if defined(CONFIG_APM) || defined(CONFIG_APM_MODULE) +#define XGI_PM_SUPPORT_APM +#endif + +#if defined(CONFIG_DEVFS_FS) +#if defined(KERNEL_2_6) +typedef void *devfs_handle_t; +#define XGI_DEVFS_REGISTER(_name, _minor) \ + ({ \ + devfs_handle_t __handle = NULL; \ + if (devfs_mk_cdev(MKDEV(XGI_DEV_MAJOR, _minor), \ + S_IFCHR | S_IRUGO | S_IWUGO, _name) == 0) \ + { \ + __handle = (void *) 1; /* XXX Fix me! (boolean) */ \ + } \ + __handle; \ + }) +/* +#define XGI_DEVFS_REMOVE_DEVICE(i) devfs_remove("xgi%d", i) +*/ +#define XGI_DEVFS_REMOVE_CONTROL() devfs_remove("xgi_ctl") +#define XGI_DEVFS_REMOVE_DEVICE(i) devfs_remove("xgi") +#else // defined(KERNEL_2_4) +#define XGI_DEVFS_REGISTER(_name, _minor) \ + ({ \ + devfs_handle_t __handle = devfs_register(NULL, _name, DEVFS_FL_AUTO_DEVNUM, \ + XGI_DEV_MAJOR, _minor, \ + S_IFCHR | S_IRUGO | S_IWUGO, &xgi_fops, NULL); \ + __handle; \ + }) + +#define XGI_DEVFS_REMOVE_DEVICE(i) \ + ({ \ + if (xgi_devfs_handles[i] != NULL) \ + { \ + devfs_unregister(xgi_devfs_handles[i]); \ + } \ + }) +#define XGI_DEVFS_REMOVE_CONTROL() \ + ({ \ + if (xgi_devfs_handles[0] != NULL) \ + { \ + devfs_unregister(xgi_devfs_handles[0]); \ + } \ + }) +#endif /* defined(KERNEL_2_4) */ +#endif /* defined(CONFIG_DEVFS_FS) */ + +#if defined(CONFIG_DEVFS_FS) && !defined(KERNEL_2_6) +#define XGI_REGISTER_CHRDEV(x...) devfs_register_chrdev(x) +#define XGI_UNREGISTER_CHRDEV(x...) devfs_unregister_chrdev(x) +#else +#define XGI_REGISTER_CHRDEV(x...) 
register_chrdev(x) +#define XGI_UNREGISTER_CHRDEV(x...) unregister_chrdev(x) +#endif + +#if defined(XGI_REMAP_PFN_RANGE_PRESENT) +#define XGI_REMAP_PAGE_RANGE(from, offset, x...) \ + remap_pfn_range(vma, from, ((offset) >> PAGE_SHIFT), x) +#elif defined(XGI_REMAP_PAGE_RANGE_5) +#define XGI_REMAP_PAGE_RANGE(x...) remap_page_range(vma, x) +#elif defined(XGI_REMAP_PAGE_RANGE_4) +#define XGI_REMAP_PAGE_RANGE(x...) remap_page_range(x) +#else +#warning "xgi_configure.sh failed, assuming remap_page_range(5)!" +#define XGI_REMAP_PAGE_RANGE(x...) remap_page_range(vma, x) +#endif + +#if defined(pmd_offset_map) +#define XGI_PMD_OFFSET(addres, pg_dir, pg_mid_dir) \ + { \ + pg_mid_dir = pmd_offset_map(pg_dir, address); \ + } +#define XGI_PMD_UNMAP(pg_mid_dir) \ + { \ + pmd_unmap(pg_mid_dir); \ + } +#else +#define XGI_PMD_OFFSET(addres, pg_dir, pg_mid_dir) \ + { \ + pg_mid_dir = pmd_offset(pg_dir, address); \ + } +#define XGI_PMD_UNMAP(pg_mid_dir) +#endif + +#define XGI_PMD_PRESENT(pg_mid_dir) \ + ({ \ + if ((pg_mid_dir) && (pmd_none(*pg_mid_dir))) \ + { \ + XGI_PMD_UNMAP(pg_mid_dir); \ + pg_mid_dir = NULL; \ + } \ + pg_mid_dir != NULL; \ + }) + +#if defined(pte_offset_atomic) +#define XGI_PTE_OFFSET(addres, pg_mid_dir, pte) \ + { \ + pte = pte_offset_atomic(pg_mid_dir, address); \ + XGI_PMD_UNMAP(pg_mid_dir); \ + } +#define XGI_PTE_UNMAP(pte) \ + { \ + pte_kunmap(pte); \ + } +#elif defined(pte_offset) +#define XGI_PTE_OFFSET(addres, pg_mid_dir, pte) \ + { \ + pte = pte_offset(pg_mid_dir, address); \ + XGI_PMD_UNMAP(pg_mid_dir); \ + } +#define XGI_PTE_UNMAP(pte) +#else +#define XGI_PTE_OFFSET(addres, pg_mid_dir, pte) \ + { \ + pte = pte_offset_map(pg_mid_dir, address); \ + XGI_PMD_UNMAP(pg_mid_dir); \ + } +#define XGI_PTE_UNMAP(pte) \ + { \ + pte_unmap(pte); \ + } +#endif + +#define XGI_PTE_PRESENT(pte) \ + ({ \ + if (pte) \ + { \ + if (!pte_present(*pte)) \ + { \ + XGI_PTE_UNMAP(pte); pte = NULL; \ + } \ + } \ + pte != NULL; \ + }) + +#define XGI_PTE_VALUE(pte) \ + ({ \ + unsigned long __pte_value = pte_val(*pte); \ + XGI_PTE_UNMAP(pte); \ + __pte_value; \ + }) + +#define XGI_PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) / PAGE_SIZE) +#define XGI_MASK_OFFSET(addr) ((addr) & (PAGE_SIZE - 1)) + +#if !defined (pgprot_noncached) +static inline pgprot_t pgprot_noncached(pgprot_t old_prot) +{ + pgprot_t new_prot = old_prot; + if (boot_cpu_data.x86 > 3) + new_prot = __pgprot(pgprot_val(old_prot) | _PAGE_PCD); + return new_prot; +} +#endif + +#if defined(XGI_BUILD_XGI_PAT_SUPPORT) && !defined (pgprot_writecombined) +/* Added define for write combining page, only valid if pat enabled. 
*/ +#define _PAGE_WRTCOMB _PAGE_PWT +#define __PAGE_KERNEL_WRTCOMB \ + (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_WRTCOMB | _PAGE_ACCESSED) +#define PAGE_KERNEL_WRTCOMB MAKE_GLOBAL(__PAGE_KERNEL_WRTCOMB) + +static inline pgprot_t pgprot_writecombined(pgprot_t old_prot) +{ + pgprot_t new_prot = old_prot; + if (boot_cpu_data.x86 > 3) { + pgprot_val(old_prot) &= ~(_PAGE_PCD | _PAGE_PWT); + new_prot = __pgprot(pgprot_val(old_prot) | _PAGE_WRTCOMB); + } + return new_prot; +} +#endif + +#if !defined(page_to_pfn) +#define page_to_pfn(page) ((page) - mem_map) +#endif + +#define XGI_VMALLOC(ptr, size) \ + { \ + (ptr) = vmalloc_32(size); \ + } + +#define XGI_VFREE(ptr, size) \ + { \ + vfree((void *) (ptr)); \ + } + +#define XGI_IOREMAP(ptr, physaddr, size) \ + { \ + (ptr) = ioremap(physaddr, size); \ + } + +#define XGI_IOREMAP_NOCACHE(ptr, physaddr, size) \ + { \ + (ptr) = ioremap_nocache(physaddr, size); \ + } + +#define XGI_IOUNMAP(ptr, size) \ + { \ + iounmap(ptr); \ + } + +/* + * only use this because GFP_KERNEL may sleep.. + * GFP_ATOMIC is ok, it won't sleep + */ +#define XGI_KMALLOC(ptr, size) \ + { \ + (ptr) = kmalloc(size, GFP_KERNEL); \ + } + +#define XGI_KMALLOC_ATOMIC(ptr, size) \ + { \ + (ptr) = kmalloc(size, GFP_ATOMIC); \ + } + +#define XGI_KFREE(ptr, size) \ + { \ + kfree((void *) (ptr)); \ + } + +#define XGI_GET_FREE_PAGES(ptr, order) \ + { \ + (ptr) = __get_free_pages(GFP_KERNEL, order); \ + } + +#define XGI_FREE_PAGES(ptr, order) \ + { \ + free_pages(ptr, order); \ + } + +typedef struct xgi_pte_s { + unsigned long phys_addr; + unsigned long virt_addr; +} xgi_pte_t; + +/* + * AMD Athlon processors expose a subtle bug in the Linux + * kernel, that may lead to AGP memory corruption. Recent + * kernel versions had a workaround for this problem, but + * 2.4.20 is the first kernel to address it properly. The + * page_attr API provides the means to solve the problem. 
+ */ +#if defined(XGI_CHANGE_PAGE_ATTR_PRESENT) +static inline void XGI_SET_PAGE_ATTRIB_UNCACHED(xgi_pte_t * page_ptr) +{ + struct page *page = virt_to_page(__va(page_ptr->phys_addr)); + change_page_attr(page, 1, PAGE_KERNEL_NOCACHE); +} +static inline void XGI_SET_PAGE_ATTRIB_CACHED(xgi_pte_t * page_ptr) +{ + struct page *page = virt_to_page(__va(page_ptr->phys_addr)); + change_page_attr(page, 1, PAGE_KERNEL); +} +#else +#define XGI_SET_PAGE_ATTRIB_UNCACHED(page_list) +#define XGI_SET_PAGE_ATTRIB_CACHED(page_list) +#endif + +#ifdef KERNEL_2_4 +#define XGI_INC_PAGE_COUNT(page) atomic_inc(&(page)->count) +#define XGI_DEC_PAGE_COUNT(page) atomic_dec(&(page)->count) +#define XGI_PAGE_COUNT(page) atomic_read(&(page)->count) +#define XGI_SET_PAGE_COUNT(page,v) atomic_set(&(page)->count, v) + +#define XGILockPage(page) set_bit(PG_locked, &(page)->flags) +#define XGIUnlockPage(page) clear_bit(PG_locked, &(page)->flags) +#endif + +#ifdef KERNEL_2_6 +/* add for SUSE 9, Jill*/ +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 4) +#define XGI_INC_PAGE_COUNT(page) atomic_inc(&(page)->count) +#define XGI_DEC_PAGE_COUNT(page) atomic_dec(&(page)->count) +#define XGI_PAGE_COUNT(page) atomic_read(&(page)->count) +#define XGI_SET_PAGE_COUNT(page,v) atomic_set(&(page)->count, v) +#else +#define XGI_INC_PAGE_COUNT(page) atomic_inc(&(page)->_count) +#define XGI_DEC_PAGE_COUNT(page) atomic_dec(&(page)->_count) +#define XGI_PAGE_COUNT(page) atomic_read(&(page)->_count) +#define XGI_SET_PAGE_COUNT(page,v) atomic_set(&(page)->_count, v) +#endif +#define XGILockPage(page) SetPageLocked(page) +#define XGIUnlockPage(page) ClearPageLocked(page) +#endif + +/* + * hide a pointer to struct xgi_info_t in a file-private info + */ + +typedef struct { + void *info; + U32 num_events; + spinlock_t fp_lock; + wait_queue_head_t wait_queue; +} xgi_file_private_t; + +#define FILE_PRIVATE(filp) ((filp)->private_data) + +#define XGI_GET_FP(filp) ((xgi_file_private_t *) FILE_PRIVATE(filp)) + +/* for the card devices */ +#define XGI_INFO_FROM_FP(filp) (XGI_GET_FP(filp)->info) + +#ifdef KERNEL_2_0 +#define INODE_FROM_FP(filp) ((filp)->f_inode) +#else +#define INODE_FROM_FP(filp) ((filp)->f_dentry->d_inode) +#endif + +#define XGI_ATOMIC_SET(data,val) atomic_set(&(data), (val)) +#define XGI_ATOMIC_INC(data) atomic_inc(&(data)) +#define XGI_ATOMIC_DEC(data) atomic_dec(&(data)) +#define XGI_ATOMIC_DEC_AND_TEST(data) atomic_dec_and_test(&(data)) +#define XGI_ATOMIC_READ(data) atomic_read(&(data)) + +/* + * lock-related functions that should only be called from this file + */ +#define xgi_init_lock(lock) spin_lock_init(&lock) +#define xgi_lock(lock) spin_lock(&lock) +#define xgi_unlock(lock) spin_unlock(&lock) +#define xgi_down(lock) down(&lock) +#define xgi_up(lock) up(&lock) + +#define xgi_lock_irqsave(lock,flags) spin_lock_irqsave(&lock,flags) +#define xgi_unlock_irqsave(lock,flags) spin_unlock_irqrestore(&lock,flags) + +#endif diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index b15c7ecf..61e40594 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -1,657 +1,630 @@ - -/**************************************************************************** - * Copyright (C) 2003-2006 by XGI Technology, Taiwan. - * * - * All Rights Reserved. 
* - * * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation on the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - ***************************************************************************/ - -#include "xgi_types.h" -#include "xgi_linux.h" -#include "xgi_drv.h" -#include "xgi_regs.h" -#include "xgi_pcie.h" - -void xgi_get_device_info(xgi_info_t *info, xgi_chip_info_t *req) -{ - req->device_id = info->device_id; - req->device_name[0] = 'x'; - req->device_name[1] = 'g'; - req->device_name[2] = '4'; - req->device_name[3] = '7'; - req->vendor_id = info->vendor_id; - req->curr_display_mode = 0; - req->fb_size = info->fb.size; - req->sarea_bus_addr = info->sarea_info.bus_addr; - req->sarea_size = info->sarea_info.size; -} - -void xgi_get_mmio_info(xgi_info_t *info, xgi_mmio_info_t *req) -{ - req->mmioBase = (void *)info->mmio.base; - req->size = info->mmio.size; -} - -void xgi_put_screen_info(xgi_info_t *info, xgi_screen_info_t *req) -{ - info->scrn_info.scrn_start = req->scrn_start; - info->scrn_info.scrn_xres = req->scrn_xres; - info->scrn_info.scrn_yres = req->scrn_yres; - info->scrn_info.scrn_bpp = req->scrn_bpp; - info->scrn_info.scrn_pitch = req->scrn_pitch; - - XGI_INFO("info->scrn_info.scrn_start: 0x%lx" - "info->scrn_info.scrn_xres: 0x%lx" - "info->scrn_info.scrn_yres: 0x%lx" - "info->scrn_info.scrn_bpp: 0x%lx" - "info->scrn_info.scrn_pitch: 0x%lx\n", - info->scrn_info.scrn_start, - info->scrn_info.scrn_xres, - info->scrn_info.scrn_yres, - info->scrn_info.scrn_bpp, - info->scrn_info.scrn_pitch); -} - -void xgi_get_screen_info(xgi_info_t *info, xgi_screen_info_t *req) -{ - req->scrn_start = info->scrn_info.scrn_start; - req->scrn_xres = info->scrn_info.scrn_xres; - req->scrn_yres = info->scrn_info.scrn_yres; - req->scrn_bpp = info->scrn_info.scrn_bpp; - req->scrn_pitch = info->scrn_info.scrn_pitch; - - XGI_INFO("req->scrn_start: 0x%lx" - "req->scrn_xres: 0x%lx" - "req->scrn_yres: 0x%lx" - "req->scrn_bpp: 0x%lx" - "req->scrn_pitch: 0x%lx\n", - req->scrn_start, - req->scrn_xres, - req->scrn_yres, - req->scrn_bpp, - req->scrn_pitch); -} - -void xgi_ge_reset(xgi_info_t *info) -{ - xgi_disable_ge(info); - xgi_enable_ge(info); -} - -void xgi_sarea_info(xgi_info_t *info, xgi_sarea_info_t *req) -{ - info->sarea_info.bus_addr = req->bus_addr; - info->sarea_info.size = req->size; - XGI_INFO("info->sarea_info.bus_addr: 0x%lx" - "info->sarea_info.size: 0x%lx\n", - info->sarea_info.bus_addr, - info->sarea_info.size); -} - -/* - * irq functions - */ -#define STALL_INTERRUPT_RESET_THRESHOLD 
0xffff - -static U32 s_invalid_begin = 0; - -BOOL xgi_ge_irq_handler(xgi_info_t *info) -{ - volatile U8 *mmio_vbase = info->mmio.vbase; - volatile U32 *ge_3d_status = (volatile U32 *)(mmio_vbase + 0x2800); - U32 int_status = ge_3d_status[4]; // interrupt status - U32 auto_reset_count = 0; - BOOL is_support_auto_reset = FALSE; - - // Check GE on/off - if (0 == (0xffffc0f0 & int_status)) - { - U32 old_ge_status = ge_3d_status[0x00]; - U32 old_pcie_cmd_fetch_Addr = ge_3d_status[0x0a]; - if (0 != (0x1000 & int_status)) - { - // We got GE stall interrupt. - ge_3d_status[0x04] = int_status | 0x04000000; - - if (TRUE == is_support_auto_reset) - { - BOOL is_wrong_signal = FALSE; - static U32 last_int_tick_low, last_int_tick_high; - static U32 new_int_tick_low, new_int_tick_high; - static U32 continoue_int_count = 0; - // OE II is busy. - while (old_ge_status & 0x001c0000) - { - U16 check; - // Check Read back status - *(mmio_vbase + 0x235c) = 0x80; - check = *((volatile U16*)(mmio_vbase + 0x2360)); - if ((check & 0x3f) != ((check & 0x3f00) >> 8)) - { - is_wrong_signal = TRUE; - break; - } - // Check RO channel - *(mmio_vbase + 0x235c) = 0x83; - check = *((volatile U16*)(mmio_vbase + 0x2360)); - if ((check & 0x0f) != ((check & 0xf0) >> 4)) - { - is_wrong_signal = TRUE; - break; - } - // Check RW channel - *(mmio_vbase + 0x235c) = 0x88; - check = *((volatile U16*)(mmio_vbase + 0x2360)); - if ((check & 0x0f) != ((check & 0xf0) >> 4)) - { - is_wrong_signal = TRUE; - break; - } - // Check RO channel outstanding - *(mmio_vbase + 0x235c) = 0x8f; - check = *((volatile U16*)(mmio_vbase + 0x2360)); - if (0 != (check & 0x3ff)) - { - is_wrong_signal = TRUE; - break; - } - // Check RW channel outstanding - *(mmio_vbase + 0x235c) = 0x90; - check = *((volatile U16*)(mmio_vbase + 0x2360)); - if (0 != (check & 0x3ff)) - { - is_wrong_signal = TRUE; - break; - } - // No pending PCIE request. GE stall. - break; - } - - if (is_wrong_signal) - { - // Nothing but skip. - } - else if (0 == continoue_int_count++) - { - rdtsc(last_int_tick_low, last_int_tick_high); - } - else - { - rdtscl(new_int_tick_low); - if ((new_int_tick_low - last_int_tick_low) > STALL_INTERRUPT_RESET_THRESHOLD) - { - continoue_int_count = 0; - } - else if (continoue_int_count >= 3) - { - continoue_int_count = 0; - - // GE Hung up, need reset. - XGI_INFO("Reset GE!\n"); - - *(mmio_vbase + 0xb057) = 8; - int time_out = 0xffff; - while (0 != (ge_3d_status[0x00] & 0xf0000000)) - { - while (0 != ((--time_out) & 0xfff)); - if (0 == time_out) - { - XGI_INFO("Can not reset back 0x%lx!\n", ge_3d_status[0x00]); - *(mmio_vbase + 0xb057) = 0; - // Have to use 3x5.36 to reset. 
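/*
 * Editor's note -- annotation only, not part of the original patch.
 * Recovery path as this editor reads the removed handler: consecutive GE
 * stall interrupts are counted using rdtsc()/rdtscl() deltas, and if three
 * arrive within STALL_INTERRUPT_RESET_THRESHOLD ticks the engine is assumed
 * hung and reset through MMIO 0xB057. If the busy bits in ge_3d_status[0]
 * still do not clear, the fallback below temporarily disables dynamic gating
 * (GR 0x2A via the 0x3CE/0x3CF index/data pair), pulses bit 4 of CRTC
 * register 0x36 via 0x3D4/0x3D5 to reset the engine, and then restores the
 * saved register state.
 */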
- // Save and close dynamic gating - U8 old_3ce = *(mmio_vbase + 0x3ce); - *(mmio_vbase + 0x3ce) = 0x2a; - U8 old_3cf = *(mmio_vbase + 0x3cf); - *(mmio_vbase + 0x3cf) = old_3cf & 0xfe; - // Reset GE - U8 old_index = *(mmio_vbase + 0x3d4); - *(mmio_vbase + 0x3d4) = 0x36; - U8 old_36 = *(mmio_vbase + 0x3d5); - *(mmio_vbase + 0x3d5) = old_36 | 0x10; - while (0 != ((--time_out) & 0xfff)); - *(mmio_vbase + 0x3d5) = old_36; - *(mmio_vbase + 0x3d4) = old_index; - // Restore dynamic gating - *(mmio_vbase + 0x3cf) = old_3cf; - *(mmio_vbase + 0x3ce) = old_3ce; - break; - } - } - *(mmio_vbase + 0xb057) = 0; - - // Increase Reset counter - auto_reset_count++; - } - } - } - return TRUE; - } - else if (0 != (0x1 & int_status)) - { - s_invalid_begin++; - ge_3d_status[0x04] = (int_status & ~0x01) | 0x04000000; - return TRUE; - } - } - return FALSE; -} - -BOOL xgi_crt_irq_handler(xgi_info_t *info) -{ - BOOL ret = FALSE; - U8 *mmio_vbase = info->mmio.vbase; - U32 device_status = 0; - U32 hw_status = 0; - U8 save_3ce = bReadReg(0x3ce); - - - if (bIn3cf(0x37) & 0x01) // CRT1 interrupt just happened - { - U8 op3cf_3d; - U8 op3cf_37; - - // What happened? - op3cf_37 = bIn3cf(0x37); - -#if 0 - if (op3cf_37 & 0x04) - device_status |= GDEVST_CONNECT; - else - device_status &= ~GDEVST_CONNECT; - - device_status |= GDEVST_DEVICE_CHANGED; - hw_status |= HWST_DEVICE_CHANGED; -#endif - // Clear CRT interrupt - op3cf_3d = bIn3cf(0x3d); - bOut3cf(0x3d, (op3cf_3d | 0x04)); - bOut3cf(0x3d, (op3cf_3d & ~0x04)); - ret = TRUE; - } - bWriteReg(0x3ce, save_3ce); - - return (ret); -} - -BOOL xgi_dvi_irq_handler(xgi_info_t *info) -{ - BOOL ret = FALSE; - U8 *mmio_vbase = info->mmio.vbase; - U32 device_status = 0; - U32 hw_status = 0; - U8 save_3ce = bReadReg(0x3ce); - - if (bIn3cf(0x38) & 0x20) // DVI interrupt just happened - { - U8 op3cf_39; - U8 op3cf_37; - U8 op3x5_5a; - U8 save_3x4 = bReadReg(0x3d4);; - - // What happened? 
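/*
 * Editor's note -- annotation only, not part of the original patch.
 * DVI hot-plug handling in this handler: GR 0x37 is sampled to see what
 * changed, CRTC 0x5A bit 3 is cleared to notify the BIOS that a DVI
 * plug/unplug occurred, and the interrupt is acknowledged by toggling bit 0
 * of register 0x39 low then high. The acknowledge is issued with bOut3c5()
 * while the in-line comments refer to 3CF.39; whether that mismatch is
 * intentional cannot be determined from this patch alone.
 */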
- op3cf_37 = bIn3cf(0x37); -#if 0 - //Also update our internal flag - if (op3cf_37 & 0x10) // Second Monitor plugged In - { - device_status |= GDEVST_CONNECT; - //Because currenly we cannot determine if DVI digital - //or DVI analog is connected according to DVI interrupt - //We should still call BIOS to check it when utility ask us - device_status &= ~GDEVST_CHECKED; - } - else - { - device_status &= ~GDEVST_CONNECT; - } -#endif - //Notify BIOS that DVI plug/unplug happened - op3x5_5a = bIn3x5(0x5a); - bOut3x5(0x5a, op3x5_5a & 0xf7); - - bWriteReg(0x3d4, save_3x4); - - //device_status |= GDEVST_DEVICE_CHANGED; - //hw_status |= HWST_DEVICE_CHANGED; - - // Clear DVI interrupt - op3cf_39 = bIn3cf(0x39); - bOut3c5(0x39, (op3cf_39 & ~0x01)); //Set 3cf.39 bit 0 to 0 - bOut3c5(0x39, (op3cf_39 | 0x01 )); //Set 3cf.39 bit 0 to 1 - - ret = TRUE; - } - bWriteReg(0x3ce, save_3ce); - - return (ret); -} - -void xgi_dump_register(xgi_info_t *info) -{ - int i, j; - unsigned char temp; - - // 0x3C5 - printk("\r\n=====xgi_dump_register========0x%x===============\r\n", 0x3C5); - - for(i=0; i<0x10; i++) - { - if(i == 0) - { - printk("%5x", i); - } - else - { - printk("%3x", i); - } - } - printk("\r\n"); - - for(i=0; i<0x10; i++) - { - printk("%1x ", i); - - for(j=0; j<0x10; j++) - { - temp = bIn3c5(i*0x10 + j); - printk("%3x", temp); - } - printk("\r\n"); - } - - // 0x3D5 - printk("\r\n====xgi_dump_register=========0x%x===============\r\n", 0x3D5); - for(i=0; i<0x10; i++) - { - if(i == 0) - { - printk("%5x", i); - } - else - { - printk("%3x", i); - } - } - printk("\r\n"); - - for(i=0; i<0x10; i++) - { - printk("%1x ", i); - - for(j=0; j<0x10; j++) - { - temp = bIn3x5(i*0x10 + j); - printk("%3x", temp); - } - printk("\r\n"); - } - - // 0x3CF - printk("\r\n=========xgi_dump_register====0x%x===============\r\n", 0x3CF); - for(i=0; i<0x10; i++) - { - if(i == 0) - { - printk("%5x", i); - } - else - { - printk("%3x", i); - } - } - printk("\r\n"); - - for(i=0; i<0x10; i++) - { - printk("%1x ", i); - - for(j=0; j<0x10; j++) - { - temp = bIn3cf(i*0x10 + j); - printk("%3x", temp); - } - printk("\r\n"); - } - - printk("\r\n=====xgi_dump_register======0x%x===============\r\n", 0xB000); - for(i=0; i<0x10; i++) - { - if(i == 0) - { - printk("%5x", i); - } - else - { - printk("%3x", i); - } - } - printk("\r\n"); - - for(i=0; i<0x5; i++) - { - printk("%1x ", i); - - for(j=0; j<0x10; j++) - { - temp = bReadReg(0xB000 + i*0x10 + j); - printk("%3x", temp); - } - printk("\r\n"); - } - - printk("\r\n==================0x%x===============\r\n", 0x2200); - for(i=0; i<0x10; i++) - { - if(i == 0) - { - printk("%5x", i); - } - else - { - printk("%3x", i); - } - } - printk("\r\n"); - - for(i=0; i<0xB; i++) - { - printk("%1x ", i); - - for(j=0; j<0x10; j++) - { - temp = bReadReg(0x2200 + i*0x10 + j); - printk("%3x", temp); - } - printk("\r\n"); - } - - printk("\r\n==================0x%x===============\r\n", 0x2300); - for(i=0; i<0x10; i++) - { - if(i == 0) - { - printk("%5x", i); - } - else - { - printk("%3x", i); - } - } - printk("\r\n"); - - for(i=0; i<0x7; i++) - { - printk("%1x ", i); - - for(j=0; j<0x10; j++) - { - temp = bReadReg(0x2300 + i*0x10 + j); - printk("%3x", temp); - } - printk("\r\n"); - } - - printk("\r\n==================0x%x===============\r\n", 0x2400); - for(i=0; i<0x10; i++) - { - if(i == 0) - { - printk("%5x", i); - } - else - { - printk("%3x", i); - } - } - printk("\r\n"); - - for(i=0; i<0x10; i++) - { - printk("%1x ", i); - - for(j=0; j<0x10; j++) - { - temp = bReadReg(0x2400 + i*0x10 + j); - printk("%3x", 
temp); - } - printk("\r\n"); - } - - printk("\r\n==================0x%x===============\r\n", 0x2800); - for(i=0; i<0x10; i++) - { - if(i == 0) - { - printk("%5x", i); - } - else - { - printk("%3x", i); - } - } - printk("\r\n"); - - for(i=0; i<0x10; i++) - { - printk("%1x ", i); - - for(j=0; j<0x10; j++) - { - temp = bReadReg(0x2800 + i*0x10 + j); - printk("%3x", temp); - } - printk("\r\n"); - } -} - -void xgi_restore_registers(xgi_info_t *info) -{ - bOut3x5(0x13, 0); - bOut3x5(0x8b, 2); -} - -void xgi_waitfor_pci_idle(xgi_info_t *info) -{ -#define WHOLD_GE_STATUS 0x2800 -#define IDLE_MASK ~0x90200000 - - int idleCount = 0; - while(idleCount < 5) - { - if (dwReadReg(WHOLD_GE_STATUS) & IDLE_MASK) - { - idleCount = 0; - } - else - { - idleCount ++; - } - } -} - -int xgi_get_cpu_id(struct cpu_info_s *arg) -{ - int op = arg->_eax; - __asm__("cpuid" - : "=a" (arg->_eax), - "=b" (arg->_ebx), - "=c" (arg->_ecx), - "=d" (arg->_edx) - : "0" (op)); - - XGI_INFO("opCode = 0x%x, eax = 0x%x, ebx = 0x%x, ecx = 0x%x, edx = 0x%x \n", - op, arg->_eax, arg->_ebx, arg->_ecx, arg->_edx); -} - -/*memory collect function*/ -extern struct list_head xgi_mempid_list; -void xgi_mem_collect(xgi_info_t *info, unsigned int *pcnt) -{ - xgi_mem_pid_t *mempid_block; - struct list_head *mempid_list; - struct task_struct *p,*find; - unsigned int cnt = 0; - - mempid_list = xgi_mempid_list.next; - - while (mempid_list != &xgi_mempid_list) - { - mempid_block = list_entry(mempid_list, struct xgi_mem_pid_s, list); - mempid_list = mempid_list->next; - - find = NULL; - XGI_SCAN_PROCESS(p) - { - if (p->pid == mempid_block->pid) - { - XGI_INFO("[!]Find active pid:%ld state:%ld location:%d addr:0x%lx! \n", mempid_block->pid, p->state, mempid_block->location, mempid_block->bus_addr); - find = p; - if (mempid_block->bus_addr == 0xFFFFFFFF) - ++cnt; - break; - } - } - if (!find) - { - if (mempid_block->location == LOCAL) - { - XGI_INFO("Memory ProcessID free fb and delete one block pid:%ld addr:0x%lx successfully! \n", mempid_block->pid, mempid_block->bus_addr); - xgi_fb_free(info, mempid_block->bus_addr); - } - else if (mempid_block->bus_addr != 0xFFFFFFFF) - { - XGI_INFO("Memory ProcessID free pcie and delete one block pid:%ld addr:0x%lx successfully! \n", mempid_block->pid, mempid_block->bus_addr); - xgi_pcie_free(info, mempid_block->bus_addr); - } - else - { - /*only delete the memory block*/ - list_del(&mempid_block->list); - XGI_INFO("Memory ProcessID delete one pcie block pid:%ld successfully! \n", mempid_block->pid); - kfree(mempid_block); - } - } - } - *pcnt = cnt; -} + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * * + * All Rights Reserved. * + * * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. 
+ * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + ***************************************************************************/ + +#include "xgi_types.h" +#include "xgi_linux.h" +#include "xgi_drv.h" +#include "xgi_regs.h" +#include "xgi_pcie.h" + +void xgi_get_device_info(xgi_info_t * info, xgi_chip_info_t * req) +{ + req->device_id = info->device_id; + req->device_name[0] = 'x'; + req->device_name[1] = 'g'; + req->device_name[2] = '4'; + req->device_name[3] = '7'; + req->vendor_id = info->vendor_id; + req->curr_display_mode = 0; + req->fb_size = info->fb.size; + req->sarea_bus_addr = info->sarea_info.bus_addr; + req->sarea_size = info->sarea_info.size; +} + +void xgi_get_mmio_info(xgi_info_t * info, xgi_mmio_info_t * req) +{ + req->mmioBase = (void *)info->mmio.base; + req->size = info->mmio.size; +} + +void xgi_put_screen_info(xgi_info_t * info, xgi_screen_info_t * req) +{ + info->scrn_info.scrn_start = req->scrn_start; + info->scrn_info.scrn_xres = req->scrn_xres; + info->scrn_info.scrn_yres = req->scrn_yres; + info->scrn_info.scrn_bpp = req->scrn_bpp; + info->scrn_info.scrn_pitch = req->scrn_pitch; + + XGI_INFO("info->scrn_info.scrn_start: 0x%lx" + "info->scrn_info.scrn_xres: 0x%lx" + "info->scrn_info.scrn_yres: 0x%lx" + "info->scrn_info.scrn_bpp: 0x%lx" + "info->scrn_info.scrn_pitch: 0x%lx\n", + info->scrn_info.scrn_start, + info->scrn_info.scrn_xres, + info->scrn_info.scrn_yres, + info->scrn_info.scrn_bpp, info->scrn_info.scrn_pitch); +} + +void xgi_get_screen_info(xgi_info_t * info, xgi_screen_info_t * req) +{ + req->scrn_start = info->scrn_info.scrn_start; + req->scrn_xres = info->scrn_info.scrn_xres; + req->scrn_yres = info->scrn_info.scrn_yres; + req->scrn_bpp = info->scrn_info.scrn_bpp; + req->scrn_pitch = info->scrn_info.scrn_pitch; + + XGI_INFO("req->scrn_start: 0x%lx" + "req->scrn_xres: 0x%lx" + "req->scrn_yres: 0x%lx" + "req->scrn_bpp: 0x%lx" + "req->scrn_pitch: 0x%lx\n", + req->scrn_start, + req->scrn_xres, + req->scrn_yres, req->scrn_bpp, req->scrn_pitch); +} + +void xgi_ge_reset(xgi_info_t * info) +{ + xgi_disable_ge(info); + xgi_enable_ge(info); +} + +void xgi_sarea_info(xgi_info_t * info, xgi_sarea_info_t * req) +{ + info->sarea_info.bus_addr = req->bus_addr; + info->sarea_info.size = req->size; + XGI_INFO("info->sarea_info.bus_addr: 0x%lx" + "info->sarea_info.size: 0x%lx\n", + info->sarea_info.bus_addr, info->sarea_info.size); +} + +/* + * irq functions + */ +#define STALL_INTERRUPT_RESET_THRESHOLD 0xffff + +static U32 s_invalid_begin = 0; + +BOOL xgi_ge_irq_handler(xgi_info_t * info) +{ + volatile U8 *mmio_vbase = info->mmio.vbase; + volatile U32 *ge_3d_status = (volatile U32 *)(mmio_vbase + 0x2800); + U32 int_status = ge_3d_status[4]; // interrupt status + U32 auto_reset_count = 0; + BOOL is_support_auto_reset = FALSE; + + // Check GE on/off + if (0 == (0xffffc0f0 & int_status)) { + U32 old_ge_status = ge_3d_status[0x00]; + U32 old_pcie_cmd_fetch_Addr = ge_3d_status[0x0a]; + if (0 != (0x1000 & int_status)) { + // We got GE stall interrupt. 
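/*
 * Editor's note -- annotation only, not part of the original patch.
 * Writing the interrupt status back with bit 26 (0x04000000) set
 * acknowledges the GE stall interrupt. The loop that follows probes the
 * read-back ports at MMIO 0x235C/0x2360 (read-back status, the RO and RW
 * channels, and their outstanding-request counters) to filter out spurious
 * signals before a stall is counted toward an automatic engine reset.
 */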
+ ge_3d_status[0x04] = int_status | 0x04000000; + + if (TRUE == is_support_auto_reset) { + BOOL is_wrong_signal = FALSE; + static U32 last_int_tick_low, + last_int_tick_high; + static U32 new_int_tick_low, new_int_tick_high; + static U32 continoue_int_count = 0; + // OE II is busy. + while (old_ge_status & 0x001c0000) { + U16 check; + // Check Read back status + *(mmio_vbase + 0x235c) = 0x80; + check = + *((volatile U16 *)(mmio_vbase + + 0x2360)); + if ((check & 0x3f) != + ((check & 0x3f00) >> 8)) { + is_wrong_signal = TRUE; + break; + } + // Check RO channel + *(mmio_vbase + 0x235c) = 0x83; + check = + *((volatile U16 *)(mmio_vbase + + 0x2360)); + if ((check & 0x0f) != + ((check & 0xf0) >> 4)) { + is_wrong_signal = TRUE; + break; + } + // Check RW channel + *(mmio_vbase + 0x235c) = 0x88; + check = + *((volatile U16 *)(mmio_vbase + + 0x2360)); + if ((check & 0x0f) != + ((check & 0xf0) >> 4)) { + is_wrong_signal = TRUE; + break; + } + // Check RO channel outstanding + *(mmio_vbase + 0x235c) = 0x8f; + check = + *((volatile U16 *)(mmio_vbase + + 0x2360)); + if (0 != (check & 0x3ff)) { + is_wrong_signal = TRUE; + break; + } + // Check RW channel outstanding + *(mmio_vbase + 0x235c) = 0x90; + check = + *((volatile U16 *)(mmio_vbase + + 0x2360)); + if (0 != (check & 0x3ff)) { + is_wrong_signal = TRUE; + break; + } + // No pending PCIE request. GE stall. + break; + } + + if (is_wrong_signal) { + // Nothing but skip. + } else if (0 == continoue_int_count++) { + rdtsc(last_int_tick_low, + last_int_tick_high); + } else { + rdtscl(new_int_tick_low); + if ((new_int_tick_low - + last_int_tick_low) > + STALL_INTERRUPT_RESET_THRESHOLD) { + continoue_int_count = 0; + } else if (continoue_int_count >= 3) { + continoue_int_count = 0; + + // GE Hung up, need reset. + XGI_INFO("Reset GE!\n"); + + *(mmio_vbase + 0xb057) = 8; + int time_out = 0xffff; + while (0 != + (ge_3d_status[0x00] & + 0xf0000000)) { + while (0 != + ((--time_out) & + 0xfff)) ; + if (0 == time_out) { + XGI_INFO + ("Can not reset back 0x%lx!\n", + ge_3d_status + [0x00]); + *(mmio_vbase + + 0xb057) = 0; + // Have to use 3x5.36 to reset. + // Save and close dynamic gating + U8 old_3ce = + *(mmio_vbase + + 0x3ce); + *(mmio_vbase + + 0x3ce) = 0x2a; + U8 old_3cf = + *(mmio_vbase + + 0x3cf); + *(mmio_vbase + + 0x3cf) = + old_3cf & 0xfe; + // Reset GE + U8 old_index = + *(mmio_vbase + + 0x3d4); + *(mmio_vbase + + 0x3d4) = 0x36; + U8 old_36 = + *(mmio_vbase + + 0x3d5); + *(mmio_vbase + + 0x3d5) = + old_36 | 0x10; + while (0 != + ((--time_out) & 0xfff)) ; + *(mmio_vbase + + 0x3d5) = + old_36; + *(mmio_vbase + + 0x3d4) = + old_index; + // Restore dynamic gating + *(mmio_vbase + + 0x3cf) = + old_3cf; + *(mmio_vbase + + 0x3ce) = + old_3ce; + break; + } + } + *(mmio_vbase + 0xb057) = 0; + + // Increase Reset counter + auto_reset_count++; + } + } + } + return TRUE; + } else if (0 != (0x1 & int_status)) { + s_invalid_begin++; + ge_3d_status[0x04] = (int_status & ~0x01) | 0x04000000; + return TRUE; + } + } + return FALSE; +} + +BOOL xgi_crt_irq_handler(xgi_info_t * info) +{ + BOOL ret = FALSE; + U8 *mmio_vbase = info->mmio.vbase; + U32 device_status = 0; + U32 hw_status = 0; + U8 save_3ce = bReadReg(0x3ce); + + if (bIn3cf(0x37) & 0x01) // CRT1 interrupt just happened + { + U8 op3cf_3d; + U8 op3cf_37; + + // What happened? 
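/*
 * Editor's note -- annotation only, not part of the original patch.
 * The CRT handler saves the 0x3CE index on entry, samples GR 0x37 to record
 * what changed, then acknowledges the CRT interrupt by pulsing bit 2 of
 * GR 0x3D high and back low before restoring the saved index register.
 */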
+ op3cf_37 = bIn3cf(0x37); + +#if 0 + if (op3cf_37 & 0x04) + device_status |= GDEVST_CONNECT; + else + device_status &= ~GDEVST_CONNECT; + + device_status |= GDEVST_DEVICE_CHANGED; + hw_status |= HWST_DEVICE_CHANGED; +#endif + // Clear CRT interrupt + op3cf_3d = bIn3cf(0x3d); + bOut3cf(0x3d, (op3cf_3d | 0x04)); + bOut3cf(0x3d, (op3cf_3d & ~0x04)); + ret = TRUE; + } + bWriteReg(0x3ce, save_3ce); + + return (ret); +} + +BOOL xgi_dvi_irq_handler(xgi_info_t * info) +{ + BOOL ret = FALSE; + U8 *mmio_vbase = info->mmio.vbase; + U32 device_status = 0; + U32 hw_status = 0; + U8 save_3ce = bReadReg(0x3ce); + + if (bIn3cf(0x38) & 0x20) // DVI interrupt just happened + { + U8 op3cf_39; + U8 op3cf_37; + U8 op3x5_5a; + U8 save_3x4 = bReadReg(0x3d4);; + + // What happened? + op3cf_37 = bIn3cf(0x37); +#if 0 + //Also update our internal flag + if (op3cf_37 & 0x10) // Second Monitor plugged In + { + device_status |= GDEVST_CONNECT; + //Because currenly we cannot determine if DVI digital + //or DVI analog is connected according to DVI interrupt + //We should still call BIOS to check it when utility ask us + device_status &= ~GDEVST_CHECKED; + } else { + device_status &= ~GDEVST_CONNECT; + } +#endif + //Notify BIOS that DVI plug/unplug happened + op3x5_5a = bIn3x5(0x5a); + bOut3x5(0x5a, op3x5_5a & 0xf7); + + bWriteReg(0x3d4, save_3x4); + + //device_status |= GDEVST_DEVICE_CHANGED; + //hw_status |= HWST_DEVICE_CHANGED; + + // Clear DVI interrupt + op3cf_39 = bIn3cf(0x39); + bOut3c5(0x39, (op3cf_39 & ~0x01)); //Set 3cf.39 bit 0 to 0 + bOut3c5(0x39, (op3cf_39 | 0x01)); //Set 3cf.39 bit 0 to 1 + + ret = TRUE; + } + bWriteReg(0x3ce, save_3ce); + + return (ret); +} + +void xgi_dump_register(xgi_info_t * info) +{ + int i, j; + unsigned char temp; + + // 0x3C5 + printk("\r\n=====xgi_dump_register========0x%x===============\r\n", + 0x3C5); + + for (i = 0; i < 0x10; i++) { + if (i == 0) { + printk("%5x", i); + } else { + printk("%3x", i); + } + } + printk("\r\n"); + + for (i = 0; i < 0x10; i++) { + printk("%1x ", i); + + for (j = 0; j < 0x10; j++) { + temp = bIn3c5(i * 0x10 + j); + printk("%3x", temp); + } + printk("\r\n"); + } + + // 0x3D5 + printk("\r\n====xgi_dump_register=========0x%x===============\r\n", + 0x3D5); + for (i = 0; i < 0x10; i++) { + if (i == 0) { + printk("%5x", i); + } else { + printk("%3x", i); + } + } + printk("\r\n"); + + for (i = 0; i < 0x10; i++) { + printk("%1x ", i); + + for (j = 0; j < 0x10; j++) { + temp = bIn3x5(i * 0x10 + j); + printk("%3x", temp); + } + printk("\r\n"); + } + + // 0x3CF + printk("\r\n=========xgi_dump_register====0x%x===============\r\n", + 0x3CF); + for (i = 0; i < 0x10; i++) { + if (i == 0) { + printk("%5x", i); + } else { + printk("%3x", i); + } + } + printk("\r\n"); + + for (i = 0; i < 0x10; i++) { + printk("%1x ", i); + + for (j = 0; j < 0x10; j++) { + temp = bIn3cf(i * 0x10 + j); + printk("%3x", temp); + } + printk("\r\n"); + } + + printk("\r\n=====xgi_dump_register======0x%x===============\r\n", + 0xB000); + for (i = 0; i < 0x10; i++) { + if (i == 0) { + printk("%5x", i); + } else { + printk("%3x", i); + } + } + printk("\r\n"); + + for (i = 0; i < 0x5; i++) { + printk("%1x ", i); + + for (j = 0; j < 0x10; j++) { + temp = bReadReg(0xB000 + i * 0x10 + j); + printk("%3x", temp); + } + printk("\r\n"); + } + + printk("\r\n==================0x%x===============\r\n", 0x2200); + for (i = 0; i < 0x10; i++) { + if (i == 0) { + printk("%5x", i); + } else { + printk("%3x", i); + } + } + printk("\r\n"); + + for (i = 0; i < 0xB; i++) { + printk("%1x ", i); + + for (j = 0; j < 
0x10; j++) { + temp = bReadReg(0x2200 + i * 0x10 + j); + printk("%3x", temp); + } + printk("\r\n"); + } + + printk("\r\n==================0x%x===============\r\n", 0x2300); + for (i = 0; i < 0x10; i++) { + if (i == 0) { + printk("%5x", i); + } else { + printk("%3x", i); + } + } + printk("\r\n"); + + for (i = 0; i < 0x7; i++) { + printk("%1x ", i); + + for (j = 0; j < 0x10; j++) { + temp = bReadReg(0x2300 + i * 0x10 + j); + printk("%3x", temp); + } + printk("\r\n"); + } + + printk("\r\n==================0x%x===============\r\n", 0x2400); + for (i = 0; i < 0x10; i++) { + if (i == 0) { + printk("%5x", i); + } else { + printk("%3x", i); + } + } + printk("\r\n"); + + for (i = 0; i < 0x10; i++) { + printk("%1x ", i); + + for (j = 0; j < 0x10; j++) { + temp = bReadReg(0x2400 + i * 0x10 + j); + printk("%3x", temp); + } + printk("\r\n"); + } + + printk("\r\n==================0x%x===============\r\n", 0x2800); + for (i = 0; i < 0x10; i++) { + if (i == 0) { + printk("%5x", i); + } else { + printk("%3x", i); + } + } + printk("\r\n"); + + for (i = 0; i < 0x10; i++) { + printk("%1x ", i); + + for (j = 0; j < 0x10; j++) { + temp = bReadReg(0x2800 + i * 0x10 + j); + printk("%3x", temp); + } + printk("\r\n"); + } +} + +void xgi_restore_registers(xgi_info_t * info) +{ + bOut3x5(0x13, 0); + bOut3x5(0x8b, 2); +} + +void xgi_waitfor_pci_idle(xgi_info_t * info) +{ +#define WHOLD_GE_STATUS 0x2800 +#define IDLE_MASK ~0x90200000 + + int idleCount = 0; + while (idleCount < 5) { + if (dwReadReg(WHOLD_GE_STATUS) & IDLE_MASK) { + idleCount = 0; + } else { + idleCount++; + } + } +} + +int xgi_get_cpu_id(struct cpu_info_s *arg) +{ + int op = arg->_eax; + __asm__("cpuid":"=a"(arg->_eax), + "=b"(arg->_ebx), + "=c"(arg->_ecx), "=d"(arg->_edx) + : "0"(op)); + + XGI_INFO + ("opCode = 0x%x, eax = 0x%x, ebx = 0x%x, ecx = 0x%x, edx = 0x%x \n", + op, arg->_eax, arg->_ebx, arg->_ecx, arg->_edx); +} + +/*memory collect function*/ +extern struct list_head xgi_mempid_list; +void xgi_mem_collect(xgi_info_t * info, unsigned int *pcnt) +{ + xgi_mem_pid_t *mempid_block; + struct list_head *mempid_list; + struct task_struct *p, *find; + unsigned int cnt = 0; + + mempid_list = xgi_mempid_list.next; + + while (mempid_list != &xgi_mempid_list) { + mempid_block = + list_entry(mempid_list, struct xgi_mem_pid_s, list); + mempid_list = mempid_list->next; + + find = NULL; + XGI_SCAN_PROCESS(p) { + if (p->pid == mempid_block->pid) { + XGI_INFO + ("[!]Find active pid:%ld state:%ld location:%d addr:0x%lx! \n", + mempid_block->pid, p->state, + mempid_block->location, + mempid_block->bus_addr); + find = p; + if (mempid_block->bus_addr == 0xFFFFFFFF) + ++cnt; + break; + } + } + if (!find) { + if (mempid_block->location == LOCAL) { + XGI_INFO + ("Memory ProcessID free fb and delete one block pid:%ld addr:0x%lx successfully! \n", + mempid_block->pid, mempid_block->bus_addr); + xgi_fb_free(info, mempid_block->bus_addr); + } else if (mempid_block->bus_addr != 0xFFFFFFFF) { + XGI_INFO + ("Memory ProcessID free pcie and delete one block pid:%ld addr:0x%lx successfully! \n", + mempid_block->pid, mempid_block->bus_addr); + xgi_pcie_free(info, mempid_block->bus_addr); + } else { + /*only delete the memory block */ + list_del(&mempid_block->list); + XGI_INFO + ("Memory ProcessID delete one pcie block pid:%ld successfully! 
\n", + mempid_block->pid); + kfree(mempid_block); + } + } + } + *pcnt = cnt; +} diff --git a/linux-core/xgi_misc.h b/linux-core/xgi_misc.h index ac4daaa1..37120aaa 100644 --- a/linux-core/xgi_misc.h +++ b/linux-core/xgi_misc.h @@ -1,49 +1,47 @@ - -/**************************************************************************** - * Copyright (C) 2003-2006 by XGI Technology, Taiwan. - * * - * All Rights Reserved. * - * * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation on the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - ***************************************************************************/ - - -#ifndef _XGI_MISC_H_ -#define _XGI_MISC_H_ - -extern void xgi_dump_register(xgi_info_t *info); -extern void xgi_get_device_info(xgi_info_t *info, xgi_chip_info_t * req); -extern void xgi_get_mmio_info(xgi_info_t *info, xgi_mmio_info_t *req); -extern void xgi_get_screen_info(xgi_info_t *info, xgi_screen_info_t *req); -extern void xgi_put_screen_info(xgi_info_t *info, xgi_screen_info_t *req); -extern void xgi_ge_reset(xgi_info_t *info); -extern void xgi_sarea_info(xgi_info_t *info, xgi_sarea_info_t *req); -extern int xgi_get_cpu_id(struct cpu_info_s *arg); - -extern void xgi_restore_registers(xgi_info_t *info); -extern BOOL xgi_ge_irq_handler(xgi_info_t *info); -extern BOOL xgi_crt_irq_handler(xgi_info_t *info); -extern BOOL xgi_dvi_irq_handler(xgi_info_t *info); -extern void xgi_waitfor_pci_idle(xgi_info_t *info); - - -#endif + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * * + * All Rights Reserved. * + * * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. 
IN NO EVENT SHALL XGI AND/OR + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + ***************************************************************************/ + +#ifndef _XGI_MISC_H_ +#define _XGI_MISC_H_ + +extern void xgi_dump_register(xgi_info_t * info); +extern void xgi_get_device_info(xgi_info_t * info, xgi_chip_info_t * req); +extern void xgi_get_mmio_info(xgi_info_t * info, xgi_mmio_info_t * req); +extern void xgi_get_screen_info(xgi_info_t * info, xgi_screen_info_t * req); +extern void xgi_put_screen_info(xgi_info_t * info, xgi_screen_info_t * req); +extern void xgi_ge_reset(xgi_info_t * info); +extern void xgi_sarea_info(xgi_info_t * info, xgi_sarea_info_t * req); +extern int xgi_get_cpu_id(struct cpu_info_s *arg); + +extern void xgi_restore_registers(xgi_info_t * info); +extern BOOL xgi_ge_irq_handler(xgi_info_t * info); +extern BOOL xgi_crt_irq_handler(xgi_info_t * info); +extern BOOL xgi_dvi_irq_handler(xgi_info_t * info); +extern void xgi_waitfor_pci_idle(xgi_info_t * info); + +#endif diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index 62e2323f..9457770a 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -1,1060 +1,1031 @@ - -/**************************************************************************** - * Copyright (C) 2003-2006 by XGI Technology, Taiwan. - * * - * All Rights Reserved. * - * * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation on the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- ***************************************************************************/ - -#include "xgi_types.h" -#include "xgi_linux.h" -#include "xgi_drv.h" -#include "xgi_regs.h" -#include "xgi_pcie.h" -#include "xgi_misc.h" - -static xgi_pcie_heap_t *xgi_pcie_heap = NULL; -static kmem_cache_t *xgi_pcie_cache_block = NULL; -static xgi_pcie_block_t *xgi_pcie_vertex_block = NULL; -static xgi_pcie_block_t *xgi_pcie_cmdlist_block = NULL; -static xgi_pcie_block_t *xgi_pcie_scratchpad_block = NULL; -extern struct list_head xgi_mempid_list; - -static unsigned long xgi_pcie_lut_alloc(unsigned long page_order) -{ - struct page *page; - unsigned long page_addr = 0; - unsigned long page_count = 0; - int i; - - page_count = (1 << page_order); - page_addr = __get_free_pages(GFP_KERNEL, page_order); - - if (page_addr == 0UL) - { - XGI_ERROR("Can't get free pages: 0x%lx from system memory !\n", - page_count); - return 0; - } - - page = virt_to_page(page_addr); - - for (i = 0; i < page_count; i++, page++) - { - XGI_INC_PAGE_COUNT(page); - XGILockPage(page); - } - - XGI_INFO("page_count: 0x%lx page_order: 0x%lx page_addr: 0x%lx \n", - page_count, page_order, page_addr); - return page_addr; -} - -static void xgi_pcie_lut_free(unsigned long page_addr, unsigned long page_order) -{ - struct page *page; - unsigned long page_count = 0; - int i; - - page_count = (1 << page_order); - page = virt_to_page(page_addr); - - for (i = 0; i < page_count; i++, page++) - { - XGI_DEC_PAGE_COUNT(page); - XGIUnlockPage(page); - } - - free_pages(page_addr, page_order); -} - -static int xgi_pcie_lut_init(xgi_info_t *info) -{ - unsigned char *page_addr = NULL; - unsigned long pciePageCount, lutEntryNum, lutPageCount, lutPageOrder; - unsigned long count = 0; - u8 temp = 0; - - /* Jong 06/06/2006 */ - unsigned long pcie_aperture_size; - - info->pcie.size = 128 * 1024 * 1024; - - /* Get current FB aperture size */ - temp = In3x5(0x27); - XGI_INFO("In3x5(0x27): 0x%x \n", temp); - - if (temp & 0x01) /* 256MB; Jong 06/05/2006; 0x10000000 */ - { - /* Jong 06/06/2006; allocate memory */ - pcie_aperture_size=256 * 1024 * 1024; - /* info->pcie.base = 256 * 1024 * 1024; */ /* pcie base is different from fb base */ - } - else /* 128MB; Jong 06/05/2006; 0x08000000 */ - { - /* Jong 06/06/2006; allocate memory */ - pcie_aperture_size=128 * 1024 * 1024; - /* info->pcie.base = 128 * 1024 * 1024; */ - } - - /* Jong 06/06/2006; allocate memory; it can be used for build-in kernel modules */ - /* info->pcie.base=(unsigned long)alloc_bootmem(pcie_mem_size); */ - /* total 496 MB; need 256 MB (0x10000000); start from 240 MB (0x0F000000) */ - /* info->pcie.base=ioremap(0x0F000000, 0x10000000); */ /* Cause system hang */ - info->pcie.base=pcie_aperture_size; /* works */ - /* info->pcie.base=info->fb.base + info->fb.size; */ /* System hang */ - /* info->pcie.base=128 * 1024 * 1024;*/ /* System hang */ - - XGI_INFO("Jong06062006-info->pcie.base: 0x%lx \n", info->pcie.base); - - - /* Get current lookup table page size */ - temp = bReadReg(0xB00C); - if (temp & 0x04) /* 8KB */ - { - info->lutPageSize = 8 * 1024; - } - else /* 4KB */ - { - info->lutPageSize = 4 * 1024; - } - - XGI_INFO("info->lutPageSize: 0x%lx \n", info->lutPageSize); - -#if 0 - /* Get current lookup table location */ - temp = bReadReg(0xB00C); - if (temp & 0x02) /* LFB */ - { - info->isLUTInLFB = TRUE; - /* Current we only support lookup table in LFB */ - temp &= 0xFD; - bWriteReg(0xB00C, temp); - info->isLUTInLFB = FALSE; - } - else /* SFB */ - { - info->isLUTInLFB = FALSE; - } - - 
XGI_INFO("info->lutPageSize: 0x%lx \n", info->lutPageSize); - - /* Get current SDFB page size */ - temp = bReadReg(0xB00C); - if (temp & 0x08) /* 8MB */ - { - info->sdfbPageSize = 8 * 1024 * 1024; - } - else /* 4MB */ - { - info->sdfbPageSize = 4 * 1024 * 1024; - } -#endif - pciePageCount = (info->pcie.size + PAGE_SIZE - 1) / PAGE_SIZE; - - /* - * Allocate memory for PCIE GART table; - */ - lutEntryNum = pciePageCount; - lutPageCount = (lutEntryNum * 4 + PAGE_SIZE - 1) / PAGE_SIZE; - - /* get page_order base on page_count */ - count = lutPageCount; - for (lutPageOrder = 0; count; count >>= 1, ++lutPageOrder); - - if ((lutPageCount << 1) == (1 << lutPageOrder)) - { - lutPageOrder -= 1; - } - - XGI_INFO("lutEntryNum: 0x%lx lutPageCount: 0x%lx lutPageOrder 0x%lx\n", - lutEntryNum, lutPageCount, lutPageOrder); - - info->lutPageOrder = lutPageOrder; - page_addr = (unsigned char *)xgi_pcie_lut_alloc(lutPageOrder); - - if (!page_addr) - { - XGI_ERROR("cannot allocate PCIE lut page!\n"); - goto fail; - } - info->lut_base = (unsigned long *)page_addr; - - XGI_INFO("page_addr: 0x%p virt_to_phys(page_virtual): 0x%lx \n", - page_addr, virt_to_phys(page_addr)); - - XGI_INFO("info->lut_base: 0x%p __pa(info->lut_base): 0x%lx info->lutPageOrder 0x%lx\n", - info->lut_base, __pa(info->lut_base), info->lutPageOrder); - - /* - * clean all PCIE GART Entry - */ - memset(page_addr, 0, PAGE_SIZE << lutPageOrder); - -#if defined(__i386__) || defined(__x86_64__) - asm volatile ( "wbinvd" ::: "memory" ); -#else - mb(); -#endif - - /* Set GART in SFB */ - bWriteReg(0xB00C, bReadReg(0xB00C) & ~0x02); - /* Set GART base address to HW */ - dwWriteReg(0xB034, __pa(info->lut_base)); - - return 1; -fail: - return 0; -} - -static void xgi_pcie_lut_cleanup(xgi_info_t *info) -{ - if (info->lut_base) - { - XGI_INFO("info->lut_base: 0x%p info->lutPageOrder: 0x%lx \n", - info->lut_base, info->lutPageOrder); - xgi_pcie_lut_free((unsigned long)info->lut_base, info->lutPageOrder); - info->lut_base = NULL; - } -} - -static xgi_pcie_block_t *xgi_pcie_new_node(void) -{ - xgi_pcie_block_t *block = (xgi_pcie_block_t *)kmem_cache_alloc(xgi_pcie_cache_block, GFP_KERNEL); - if (block == NULL) - { - return NULL; - } - - block->offset = 0; /* block's offset in pcie memory, begin from 0 */ - block->size = 0; /* The block size. 
*/ - block->bus_addr = 0; /* CPU access address/bus address */ - block->hw_addr = 0; /* GE access address */ - block->page_count = 0; - block->page_order = 0; - block->page_block = NULL; - block->page_table = NULL; - block->owner = PCIE_INVALID; - - return block; -} - -static void xgi_pcie_block_stuff_free(xgi_pcie_block_t *block) -{ - struct page *page; - xgi_page_block_t *page_block = block->page_block; - xgi_page_block_t *free_block; - unsigned long page_count = 0; - int i; - - //XGI_INFO("block->page_block: 0x%p \n", block->page_block); - while (page_block) - { - page_count = page_block->page_count; - - page = virt_to_page(page_block->virt_addr); - for (i = 0; i < page_count; i++, page++) - { - XGI_DEC_PAGE_COUNT(page); - XGIUnlockPage(page); - } - free_pages(page_block->virt_addr, page_block->page_order); - - page_block->phys_addr = 0; - page_block->virt_addr = 0; - page_block->page_count = 0; - page_block->page_order = 0; - - free_block = page_block; - page_block = page_block->next; - //XGI_INFO("free free_block: 0x%p \n", free_block); - kfree(free_block); - free_block = NULL; - } - - if (block->page_table) - { - //XGI_INFO("free block->page_table: 0x%p \n", block->page_table); - kfree(block->page_table); - block->page_table = NULL; - } -} - -int xgi_pcie_heap_init(xgi_info_t *info) -{ - xgi_pcie_block_t *block; - - if (!xgi_pcie_lut_init(info)) - { - XGI_ERROR("xgi_pcie_lut_init failed\n"); - return 0; - } - - xgi_pcie_heap = (xgi_pcie_heap_t *)kmalloc(sizeof(xgi_pcie_heap_t), GFP_KERNEL); - if(!xgi_pcie_heap) - { - XGI_ERROR("xgi_pcie_heap alloc failed\n"); - goto fail1; - } - INIT_LIST_HEAD(&xgi_pcie_heap->free_list); - INIT_LIST_HEAD(&xgi_pcie_heap->used_list); - INIT_LIST_HEAD(&xgi_pcie_heap->sort_list); - - xgi_pcie_heap->max_freesize = info->pcie.size; - - xgi_pcie_cache_block = kmem_cache_create("xgi_pcie_block", sizeof(xgi_pcie_block_t), - 0, SLAB_HWCACHE_ALIGN, NULL, NULL); - - if (NULL == xgi_pcie_cache_block) - { - XGI_ERROR("Fail to creat xgi_pcie_block\n"); - goto fail2; - } - - block = (xgi_pcie_block_t *)xgi_pcie_new_node(); - if (!block) - { - XGI_ERROR("xgi_pcie_new_node failed\n"); - goto fail3; - } - - block->offset = 0; /* block's offset in pcie memory, begin from 0 */ - block->size = info->pcie.size; - - list_add(&block->list, &xgi_pcie_heap->free_list); - - XGI_INFO("PCIE start address: 0x%lx, memory size : 0x%lx\n", block->offset, block->size); - return 1; -fail3: - if (xgi_pcie_cache_block) - { - kmem_cache_destroy(xgi_pcie_cache_block); - xgi_pcie_cache_block = NULL; - } - -fail2: - if(xgi_pcie_heap) - { - kfree(xgi_pcie_heap); - xgi_pcie_heap = NULL; - } -fail1: - xgi_pcie_lut_cleanup(info); - return 0; -} - -void xgi_pcie_heap_check(void) -{ - struct list_head *useList, *temp; - xgi_pcie_block_t *block; - unsigned int ownerIndex; - char *ownerStr[6] = {"2D", "3D", "3D_CMD", "3D_SCR", "3D_TEX", "ELSE"}; - - if (xgi_pcie_heap) - { - useList = &xgi_pcie_heap->used_list; - temp = useList->next; - XGI_INFO("pcie freemax = 0x%lx\n", xgi_pcie_heap->max_freesize); - while (temp != useList) - { - block = list_entry(temp, struct xgi_pcie_block_s, list); - if (block->owner == PCIE_2D) - ownerIndex = 0; - else if (block->owner > PCIE_3D_TEXTURE || block->owner < PCIE_2D || block->owner < PCIE_3D) - ownerIndex = 5; - else - ownerIndex = block->owner - PCIE_3D + 1; - XGI_INFO("Allocated by %s, block->offset: 0x%lx block->size: 0x%lx \n", - ownerStr[ownerIndex], block->offset, block->size); - temp = temp->next; - } - - } -} - - -void xgi_pcie_heap_cleanup(xgi_info_t 
*info) -{ - struct list_head *free_list, *temp; - xgi_pcie_block_t *block; - int j; - - xgi_pcie_lut_cleanup(info); - XGI_INFO("xgi_pcie_lut_cleanup scceeded\n"); - - if (xgi_pcie_heap) - { - free_list = &xgi_pcie_heap->free_list; - for (j = 0; j < 3; j++, free_list++) - { - temp = free_list->next; - - while (temp != free_list) - { - block = list_entry(temp, struct xgi_pcie_block_s, list); - XGI_INFO("No. %d block->offset: 0x%lx block->size: 0x%lx \n", - j, block->offset, block->size); - xgi_pcie_block_stuff_free(block); - block->bus_addr = 0; - block->hw_addr = 0; - - temp = temp->next; - //XGI_INFO("No. %d free block: 0x%p \n", j, block); - kmem_cache_free(xgi_pcie_cache_block, block); - block = NULL; - } - } - - XGI_INFO("free xgi_pcie_heap: 0x%p \n", xgi_pcie_heap); - kfree(xgi_pcie_heap); - xgi_pcie_heap = NULL; - } - - if (xgi_pcie_cache_block) - { - kmem_cache_destroy(xgi_pcie_cache_block); - xgi_pcie_cache_block = NULL; - } -} - - -static xgi_pcie_block_t *xgi_pcie_mem_alloc(xgi_info_t *info, - unsigned long originalSize, - enum PcieOwner owner) -{ - struct list_head *free_list; - xgi_pcie_block_t *block, *used_block, *free_block; - xgi_page_block_t *page_block, *prev_page_block; - struct page *page; - unsigned long page_order = 0, count = 0, index =0; - unsigned long page_addr = 0; - unsigned long *lut_addr = NULL; - unsigned long lut_id = 0; - unsigned long size = (originalSize + PAGE_SIZE - 1) & PAGE_MASK; - int i, j, page_count = 0; - int temp = 0; - - XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-Begin\n"); - XGI_INFO("Original 0x%lx bytes requested, really 0x%lx allocated\n", originalSize, size); - - if (owner == PCIE_3D) - { - if (xgi_pcie_vertex_block) - { - XGI_INFO("PCIE Vertex has been created, return directly.\n"); - return xgi_pcie_vertex_block; - } - } - - if (owner == PCIE_3D_CMDLIST) - { - if (xgi_pcie_cmdlist_block) - { - XGI_INFO("PCIE Cmdlist has been created, return directly.\n"); - return xgi_pcie_cmdlist_block; - } - } - - if (owner == PCIE_3D_SCRATCHPAD) - { - if (xgi_pcie_scratchpad_block) - { - XGI_INFO("PCIE Scratchpad has been created, return directly.\n"); - return xgi_pcie_scratchpad_block; - } - } - - if (size == 0) - { - XGI_ERROR("size == 0 \n"); - return (NULL); - } - - XGI_INFO("max_freesize: 0x%lx \n", xgi_pcie_heap->max_freesize); - if (size > xgi_pcie_heap->max_freesize) - { - XGI_ERROR("size: 0x%lx bigger than PCIE total free size: 0x%lx.\n", - size, xgi_pcie_heap->max_freesize); - return (NULL); - } - - /* Jong 05/30/2006; find next free list which has enough space*/ - free_list = xgi_pcie_heap->free_list.next; - while (free_list != &xgi_pcie_heap->free_list) - { - //XGI_INFO("free_list: 0x%px \n", free_list); - block = list_entry(free_list, struct xgi_pcie_block_s, list); - if (size <= block->size) - { - break; - } - free_list = free_list->next; - } - - if (free_list == &xgi_pcie_heap->free_list) - { - XGI_ERROR("Can't allocate %ldk size from PCIE memory !\n", size/1024); - return (NULL); - } - - free_block = block; - XGI_INFO("alloc size: 0x%lx from offset: 0x%lx size: 0x%lx \n", - size, free_block->offset, free_block->size); - - if (size == free_block->size) - { - used_block = free_block; - XGI_INFO("size==free_block->size: free_block = 0x%p\n", free_block); - list_del(&free_block->list); - } - else - { - used_block = xgi_pcie_new_node(); - if (used_block == NULL) - { - return NULL; - } - - if (used_block == free_block) - { - XGI_ERROR("used_block == free_block = 0x%p\n", used_block); - } - - used_block->offset = free_block->offset; - 
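/*
 * Editor's note -- annotation only, not part of the original patch.
 * First-fit split: the new allocation takes the front of the chosen free
 * block, so the free block's offset advances by `size` and its length
 * shrinks by the same amount. The heap's max_freesize is then reduced, and
 * the block's bus/hw addresses are derived from info->pcie.base plus the
 * block offset.
 */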
used_block->size = size; - - free_block->offset += size; - free_block->size -= size; - } - - xgi_pcie_heap->max_freesize -= size; - - used_block->bus_addr = info->pcie.base + used_block->offset; - used_block->hw_addr = info->pcie.base + used_block->offset; - used_block->page_count = page_count = size / PAGE_SIZE; - - /* get page_order base on page_count */ - for (used_block->page_order = 0; page_count; page_count >>= 1) - { - ++used_block->page_order; - } - - if ((used_block->page_count << 1) == (1 << used_block->page_order)) - { - used_block->page_order--; - } - XGI_INFO("used_block->offset: 0x%lx, used_block->size: 0x%lx, used_block->bus_addr: 0x%lx, used_block->hw_addr: 0x%lx, used_block->page_count: 0x%lx used_block->page_order: 0x%lx\n", - used_block->offset, used_block->size, used_block->bus_addr, used_block->hw_addr, used_block->page_count, used_block->page_order); - - used_block->page_block = NULL; - //used_block->page_block = (xgi_pages_block_t *)kmalloc(sizeof(xgi_pages_block_t), GFP_KERNEL); - //if (!used_block->page_block) return NULL; - //used_block->page_block->next = NULL; - - used_block->page_table = (xgi_pte_t *)kmalloc(sizeof(xgi_pte_t) * used_block->page_count, GFP_KERNEL); - if (used_block->page_table == NULL) - { - goto fail; - } - - lut_id = (used_block->offset >> PAGE_SHIFT); - lut_addr = info->lut_base; - lut_addr += lut_id; - XGI_INFO("lutAddr: 0x%p lutID: 0x%lx \n", lut_addr, lut_id); - - /* alloc free pages from system */ - page_count = used_block->page_count; - page_block = used_block->page_block; - prev_page_block = used_block->page_block; - for (i = 0; page_count > 0; i++) - { - /* if size is bigger than 2M bytes, it should be split */ - if (page_count > (1 << XGI_PCIE_ALLOC_MAX_ORDER)) - { - page_order = XGI_PCIE_ALLOC_MAX_ORDER; - } - else - { - count = page_count; - for (page_order = 0; count; count >>= 1, ++page_order); - - if ((page_count << 1) == (1 << page_order)) - { - page_order -= 1; - } - } - - count = (1 << page_order); - page_addr = __get_free_pages(GFP_KERNEL, page_order); - XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-page_addr=0x%lx \n", page_addr); - - if (!page_addr) - { - XGI_ERROR("No: %d :Can't get free pages: 0x%lx from system memory !\n", - i, count); - goto fail; - } - - /* Jong 05/30/2006; test */ - memset((unsigned char *)page_addr, 0xFF, PAGE_SIZE << page_order); - /* memset((unsigned char *)page_addr, 0, PAGE_SIZE << page_order); */ - - if (page_block == NULL) - { - page_block = (xgi_page_block_t *)kmalloc(sizeof(xgi_page_block_t), GFP_KERNEL); - if (!page_block) - { - XGI_ERROR("Can't get memory for page_block! 
\n"); - goto fail; - } - } - - if (prev_page_block == NULL) - { - used_block->page_block = page_block; - prev_page_block = page_block; - } - else - { - prev_page_block->next = page_block; - prev_page_block = page_block; - } - - page_block->next = NULL; - page_block->phys_addr = __pa(page_addr); - page_block->virt_addr = page_addr; - page_block->page_count = count; - page_block->page_order = page_order; - - XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-page_block->phys_addr=0x%lx \n", page_block->phys_addr); - XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-page_block->virt_addr=0x%lx \n", page_block->virt_addr); - - page = virt_to_page(page_addr); - - //XGI_INFO("No: %d page_order: 0x%lx page_count: 0x%x count: 0x%lx index: 0x%lx lut_addr: 0x%p" - // "page_block->phys_addr: 0x%lx page_block->virt_addr: 0x%lx \n", - // i, page_order, page_count, count, index, lut_addr, page_block->phys_addr, page_block->virt_addr); - - for (j = 0 ; j < count; j++, page++, lut_addr++) - { - used_block->page_table[index + j].phys_addr = __pa(page_address(page)); - used_block->page_table[index + j].virt_addr = (unsigned long)page_address(page); - - XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-used_block->page_table[index + j].phys_addr=0x%lx \n", used_block->page_table[index + j].phys_addr); - XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-used_block->page_table[index + j].virt_addr=0x%lx \n", used_block->page_table[index + j].virt_addr); - - *lut_addr = __pa(page_address(page)); - XGI_INC_PAGE_COUNT(page); - XGILockPage(page); - - if (temp) - { - XGI_INFO("__pa(page_address(page)): 0x%lx lutAddr: 0x%p lutAddr No: 0x%x = 0x%lx \n", - __pa(page_address(page)), lut_addr, j, *lut_addr); - temp--; - } - } - - page_block = page_block->next; - page_count -= count; - index += count; - temp = 0; - } - - used_block->owner = owner; - list_add(&used_block->list, &xgi_pcie_heap->used_list); - -#if defined(__i386__) || defined(__x86_64__) - asm volatile ( "wbinvd" ::: "memory" ); -#else - mb(); -#endif - - /* Flush GART Table */ - bWriteReg(0xB03F, 0x40); - bWriteReg(0xB03F, 0x00); - - if (owner == PCIE_3D) - { - xgi_pcie_vertex_block = used_block; - } - - if (owner == PCIE_3D_CMDLIST) - { - xgi_pcie_cmdlist_block = used_block; - } - - if (owner == PCIE_3D_SCRATCHPAD) - { - xgi_pcie_scratchpad_block = used_block; - } - - XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-End \n"); - return (used_block); - -fail: - xgi_pcie_block_stuff_free(used_block); - kmem_cache_free(xgi_pcie_cache_block, used_block); - return NULL; -} - -static xgi_pcie_block_t *xgi_pcie_mem_free(xgi_info_t *info, unsigned long offset) -{ - struct list_head *free_list, *used_list; - xgi_pcie_block_t *used_block, *block = NULL; - xgi_pcie_block_t *prev, *next; - unsigned long upper, lower; - - used_list = xgi_pcie_heap->used_list.next; - while (used_list != &xgi_pcie_heap->used_list) - { - block = list_entry(used_list, struct xgi_pcie_block_s, list); - if (block->offset == offset) - { - break; - } - used_list = used_list->next; - } - - if (used_list == &xgi_pcie_heap->used_list) - { - XGI_ERROR("can't find block: 0x%lx to free!\n", offset); - return (NULL); - } - - used_block = block; - XGI_INFO("used_block: 0x%p, offset = 0x%lx, size = 0x%lx, bus_addr = 0x%lx, hw_addr = 0x%lx\n", - used_block, used_block->offset, used_block->size, used_block->bus_addr, used_block->hw_addr); - - xgi_pcie_block_stuff_free(used_block); - - /* update xgi_pcie_heap */ - xgi_pcie_heap->max_freesize += used_block->size; - - prev = next = NULL; - upper = used_block->offset + used_block->size; - lower = 
used_block->offset; - - free_list = xgi_pcie_heap->free_list.next; - - while (free_list != &xgi_pcie_heap->free_list) - { - block = list_entry(free_list, struct xgi_pcie_block_s, list); - if (block->offset == upper) - { - next = block; - } - else if ((block->offset + block->size) == lower) - { - prev = block; - } - free_list = free_list->next; - } - - XGI_INFO("next = 0x%p, prev = 0x%p\n", next, prev); - list_del(&used_block->list); - - if (prev && next) - { - prev->size += (used_block->size + next->size); - list_del(&next->list); - XGI_INFO("free node 0x%p\n", next); - kmem_cache_free(xgi_pcie_cache_block, next); - kmem_cache_free(xgi_pcie_cache_block, used_block); - next = NULL; - used_block = NULL; - return (prev); - } - - if (prev) - { - prev->size += used_block->size; - XGI_INFO("free node 0x%p\n", used_block); - kmem_cache_free(xgi_pcie_cache_block, used_block); - used_block = NULL; - return (prev); - } - - if (next) - { - next->size += used_block->size; - next->offset = used_block->offset; - XGI_INFO("free node 0x%p\n", used_block); - kmem_cache_free(xgi_pcie_cache_block, used_block); - used_block = NULL; - return (next); - } - - used_block->bus_addr = 0; - used_block->hw_addr = 0; - used_block->page_count = 0; - used_block->page_order = 0; - list_add(&used_block->list, &xgi_pcie_heap->free_list); - XGI_INFO("Recycled free node %p, offset = 0x%lx, size = 0x%lx\n", - used_block, used_block->offset, used_block->size); - return (used_block); -} - -void xgi_pcie_alloc(xgi_info_t *info, unsigned long size, - enum PcieOwner owner, xgi_mem_alloc_t *alloc) -{ - xgi_pcie_block_t *block; - xgi_mem_pid_t *mempid_block; - - xgi_down(info->pcie_sem); - block = xgi_pcie_mem_alloc(info, size, owner); - xgi_up(info->pcie_sem); - - if (block == NULL) - { - alloc->location = INVALID; - alloc->size = 0; - alloc->bus_addr = 0; - alloc->hw_addr = 0; - XGI_ERROR("PCIE RAM allocation failed\n"); - } - else - { - XGI_INFO("PCIE RAM allocation succeeded: offset = 0x%lx, bus_addr = 0x%lx\n", - block->offset, block->bus_addr); - alloc->location = NON_LOCAL; - alloc->size = block->size; - alloc->bus_addr = block->bus_addr; - alloc->hw_addr = block->hw_addr; - - /* - manage mempid, handle PCIE_3D, PCIE_3D_TEXTURE. - PCIE_3D request means a opengl process created. - PCIE_3D_TEXTURE request means texture cannot alloc from fb. - */ - if (owner == PCIE_3D || owner == PCIE_3D_TEXTURE) - { - mempid_block = kmalloc(sizeof(xgi_mem_pid_t), GFP_KERNEL); - if (!mempid_block) - XGI_ERROR("mempid_block alloc failed\n"); - mempid_block->location = NON_LOCAL; - if (owner == PCIE_3D) - mempid_block->bus_addr = 0xFFFFFFFF;/*xgi_pcie_vertex_block has the address*/ - else - mempid_block->bus_addr = alloc->bus_addr; - mempid_block->pid = alloc->pid; - - XGI_INFO("Memory ProcessID add one pcie block pid:%ld successfully! 
\n", mempid_block->pid); - list_add(&mempid_block->list, &xgi_mempid_list); - } - } -} - -void xgi_pcie_free(xgi_info_t *info, unsigned long bus_addr) -{ - xgi_pcie_block_t *block; - unsigned long offset = bus_addr - info->pcie.base; - xgi_mem_pid_t *mempid_block; - xgi_mem_pid_t *mempid_freeblock = NULL; - struct list_head *mempid_list; - char isvertex = 0; - int processcnt; - - if (xgi_pcie_vertex_block && xgi_pcie_vertex_block->bus_addr == bus_addr) - isvertex = 1; - - if (isvertex) - { - /*check is there any other process using vertex*/ - processcnt = 0; - mempid_list = xgi_mempid_list.next; - while (mempid_list != &xgi_mempid_list) - { - mempid_block = list_entry(mempid_list, struct xgi_mem_pid_s, list); - if (mempid_block->location == NON_LOCAL && mempid_block->bus_addr == 0xFFFFFFFF) - { - ++processcnt; - } - mempid_list = mempid_list->next; - } - if (processcnt > 1) - { - return; - } - } - - xgi_down(info->pcie_sem); - block = xgi_pcie_mem_free(info, offset); - xgi_up(info->pcie_sem); - - if (block == NULL) - { - XGI_ERROR("xgi_pcie_free() failed at base 0x%lx\n", offset); - } - - if (isvertex) - xgi_pcie_vertex_block = NULL; - - /* manage mempid */ - mempid_list = xgi_mempid_list.next; - while (mempid_list != &xgi_mempid_list) - { - mempid_block = list_entry(mempid_list, struct xgi_mem_pid_s, list); - if (mempid_block->location == NON_LOCAL && ((isvertex && mempid_block->bus_addr == 0xFFFFFFFF) || (!isvertex && mempid_block->bus_addr == bus_addr))) - { - mempid_freeblock = mempid_block; - break; - } - mempid_list = mempid_list->next; - } - if (mempid_freeblock) - { - list_del(&mempid_freeblock->list); - XGI_INFO("Memory ProcessID delete one pcie block pid:%ld successfully! \n", mempid_freeblock->pid); - kfree(mempid_freeblock); - } -} - -/* - * given a bus address, fid the pcie mem block - * uses the bus address as the key. 
- */ -void *xgi_find_pcie_block(xgi_info_t *info, unsigned long address) -{ - struct list_head *used_list; - xgi_pcie_block_t *block; - int i; - - used_list = xgi_pcie_heap->used_list.next; - - while (used_list != &xgi_pcie_heap->used_list) - { - block = list_entry(used_list, struct xgi_pcie_block_s, list); - - if (block->bus_addr == address) - { - return block; - } - - if (block->page_table) - { - for (i = 0; i < block->page_count; i++) - { - unsigned long offset = block->bus_addr; - if ( (address >= offset) && (address < (offset + PAGE_SIZE))) - { - return block; - } - } - } - used_list = used_list->next; - } - - XGI_ERROR("could not find map for vm 0x%lx\n", address); - - return NULL; -} - -/* - address -- GE HW address - return -- CPU virtual address - - assume the CPU VAddr is continuous in not the same block -*/ -void *xgi_find_pcie_virt(xgi_info_t *info, unsigned long address) -{ - struct list_head *used_list; - xgi_pcie_block_t *block; - unsigned long offset_in_page; - unsigned long loc_in_pagetable; - void * ret; - - XGI_INFO("Jong_05292006-xgi_find_pcie_virt-Begin\n"); - - used_list = xgi_pcie_heap->used_list.next; - XGI_INFO("Jong_05292006-used_list=%ul\n", used_list); - - offset_in_page = address & (PAGE_SIZE-1); - XGI_INFO("Jong_05292006-address=0x%px, PAGE_SIZE-1=%ul, offset_in_page=%ul\n", address, PAGE_SIZE-1, offset_in_page); - - while (used_list != &xgi_pcie_heap->used_list) - { - block = list_entry(used_list, struct xgi_pcie_block_s, list); - XGI_INFO("Jong_05292006-block=0x%px\n", block); - XGI_INFO("Jong_05292006-block->hw_addr=0x%px\n", block->hw_addr); - XGI_INFO("Jong_05292006- block->size=%ul\n", block->size); - - if ((address >= block->hw_addr) && (address < (block->hw_addr + block->size))) - { - loc_in_pagetable = (address - block->hw_addr) >> PAGE_SHIFT; - ret = (void*)(block->page_table[loc_in_pagetable].virt_addr + offset_in_page); - - XGI_INFO("Jong_05292006-PAGE_SHIFT=%d\n", PAGE_SHIFT); - XGI_INFO("Jong_05292006-loc_in_pagetable=0x%px\n", loc_in_pagetable); - XGI_INFO("Jong_05292006-block->page_table[loc_in_pagetable].virt_addr=0x%px\n", block->page_table[loc_in_pagetable].virt_addr); - XGI_INFO("Jong_05292006-offset_in_page=%d\n", offset_in_page); - XGI_INFO("Jong_05292006-return(virt_addr)=0x%px\n", ret); - - return ret ; - } - else - { - XGI_INFO("Jong_05292006-used_list = used_list->next;\n"); - used_list = used_list->next; - } - } - - XGI_ERROR("could not find map for vm 0x%lx\n", address); - return NULL; -} - - -void xgi_read_pcie_mem(xgi_info_t *info, xgi_mem_req_t *req) -{ - -} - -void xgi_write_pcie_mem(xgi_info_t *info, xgi_mem_req_t *req) -{ -} - -/* - address -- GE hw address -*/ -void xgi_test_rwinkernel(xgi_info_t *info, unsigned long address) -{ - unsigned long * virtaddr = 0; - if (address == 0) - { - XGI_INFO("[Jong-kd] input GE HW addr is 0x00000000\n"); - return; - } - - virtaddr = (unsigned long *) xgi_find_pcie_virt(info, address); - - XGI_INFO("[Jong-kd] input GE HW addr is 0x%lx\n", address); - XGI_INFO("[Jong-kd] convert to CPU virt addr 0x%px\n", virtaddr); - XGI_INFO("[Jong-kd] origin [virtaddr] = 0x%lx\n", *virtaddr); - if (virtaddr != NULL) - { - *virtaddr = 0x00f00fff; - } - - XGI_INFO("[Jong-kd] modified [virtaddr] = 0x%lx\n", *virtaddr); -} - + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * * + * All Rights Reserved. 
* + * * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + ***************************************************************************/ + +#include "xgi_types.h" +#include "xgi_linux.h" +#include "xgi_drv.h" +#include "xgi_regs.h" +#include "xgi_pcie.h" +#include "xgi_misc.h" + +static xgi_pcie_heap_t *xgi_pcie_heap = NULL; +static kmem_cache_t *xgi_pcie_cache_block = NULL; +static xgi_pcie_block_t *xgi_pcie_vertex_block = NULL; +static xgi_pcie_block_t *xgi_pcie_cmdlist_block = NULL; +static xgi_pcie_block_t *xgi_pcie_scratchpad_block = NULL; +extern struct list_head xgi_mempid_list; + +static unsigned long xgi_pcie_lut_alloc(unsigned long page_order) +{ + struct page *page; + unsigned long page_addr = 0; + unsigned long page_count = 0; + int i; + + page_count = (1 << page_order); + page_addr = __get_free_pages(GFP_KERNEL, page_order); + + if (page_addr == 0UL) { + XGI_ERROR("Can't get free pages: 0x%lx from system memory !\n", + page_count); + return 0; + } + + page = virt_to_page(page_addr); + + for (i = 0; i < page_count; i++, page++) { + XGI_INC_PAGE_COUNT(page); + XGILockPage(page); + } + + XGI_INFO("page_count: 0x%lx page_order: 0x%lx page_addr: 0x%lx \n", + page_count, page_order, page_addr); + return page_addr; +} + +static void xgi_pcie_lut_free(unsigned long page_addr, unsigned long page_order) +{ + struct page *page; + unsigned long page_count = 0; + int i; + + page_count = (1 << page_order); + page = virt_to_page(page_addr); + + for (i = 0; i < page_count; i++, page++) { + XGI_DEC_PAGE_COUNT(page); + XGIUnlockPage(page); + } + + free_pages(page_addr, page_order); +} + +static int xgi_pcie_lut_init(xgi_info_t * info) +{ + unsigned char *page_addr = NULL; + unsigned long pciePageCount, lutEntryNum, lutPageCount, lutPageOrder; + unsigned long count = 0; + u8 temp = 0; + + /* Jong 06/06/2006 */ + unsigned long pcie_aperture_size; + + info->pcie.size = 128 * 1024 * 1024; + + /* Get current FB aperture size */ + temp = In3x5(0x27); + XGI_INFO("In3x5(0x27): 0x%x \n", temp); + + if (temp & 0x01) { /* 256MB; Jong 06/05/2006; 0x10000000 */ + /* Jong 06/06/2006; allocate memory */ + pcie_aperture_size = 256 * 1024 * 1024; + /* info->pcie.base = 256 * 1024 * 1024; *//* pcie base is different from fb base */ + } else { /* 128MB; Jong 06/05/2006; 0x08000000 */ + + /* Jong 06/06/2006; allocate memory */ + pcie_aperture_size = 128 * 1024 * 1024; + /* info->pcie.base = 128 * 1024 * 1024; */ + } + + /* Jong 06/06/2006; allocate 
memory; it can be used for build-in kernel modules */ + /* info->pcie.base=(unsigned long)alloc_bootmem(pcie_mem_size); */ + /* total 496 MB; need 256 MB (0x10000000); start from 240 MB (0x0F000000) */ + /* info->pcie.base=ioremap(0x0F000000, 0x10000000); *//* Cause system hang */ + info->pcie.base = pcie_aperture_size; /* works */ + /* info->pcie.base=info->fb.base + info->fb.size; *//* System hang */ + /* info->pcie.base=128 * 1024 * 1024; *//* System hang */ + + XGI_INFO("Jong06062006-info->pcie.base: 0x%lx \n", info->pcie.base); + + /* Get current lookup table page size */ + temp = bReadReg(0xB00C); + if (temp & 0x04) { /* 8KB */ + info->lutPageSize = 8 * 1024; + } else { /* 4KB */ + + info->lutPageSize = 4 * 1024; + } + + XGI_INFO("info->lutPageSize: 0x%lx \n", info->lutPageSize); + +#if 0 + /* Get current lookup table location */ + temp = bReadReg(0xB00C); + if (temp & 0x02) { /* LFB */ + info->isLUTInLFB = TRUE; + /* Current we only support lookup table in LFB */ + temp &= 0xFD; + bWriteReg(0xB00C, temp); + info->isLUTInLFB = FALSE; + } else { /* SFB */ + + info->isLUTInLFB = FALSE; + } + + XGI_INFO("info->lutPageSize: 0x%lx \n", info->lutPageSize); + + /* Get current SDFB page size */ + temp = bReadReg(0xB00C); + if (temp & 0x08) { /* 8MB */ + info->sdfbPageSize = 8 * 1024 * 1024; + } else { /* 4MB */ + + info->sdfbPageSize = 4 * 1024 * 1024; + } +#endif + pciePageCount = (info->pcie.size + PAGE_SIZE - 1) / PAGE_SIZE; + + /* + * Allocate memory for PCIE GART table; + */ + lutEntryNum = pciePageCount; + lutPageCount = (lutEntryNum * 4 + PAGE_SIZE - 1) / PAGE_SIZE; + + /* get page_order base on page_count */ + count = lutPageCount; + for (lutPageOrder = 0; count; count >>= 1, ++lutPageOrder) ; + + if ((lutPageCount << 1) == (1 << lutPageOrder)) { + lutPageOrder -= 1; + } + + XGI_INFO("lutEntryNum: 0x%lx lutPageCount: 0x%lx lutPageOrder 0x%lx\n", + lutEntryNum, lutPageCount, lutPageOrder); + + info->lutPageOrder = lutPageOrder; + page_addr = (unsigned char *)xgi_pcie_lut_alloc(lutPageOrder); + + if (!page_addr) { + XGI_ERROR("cannot allocate PCIE lut page!\n"); + goto fail; + } + info->lut_base = (unsigned long *)page_addr; + + XGI_INFO("page_addr: 0x%p virt_to_phys(page_virtual): 0x%lx \n", + page_addr, virt_to_phys(page_addr)); + + XGI_INFO + ("info->lut_base: 0x%p __pa(info->lut_base): 0x%lx info->lutPageOrder 0x%lx\n", + info->lut_base, __pa(info->lut_base), info->lutPageOrder); + + /* + * clean all PCIE GART Entry + */ + memset(page_addr, 0, PAGE_SIZE << lutPageOrder); + +#if defined(__i386__) || defined(__x86_64__) + asm volatile ("wbinvd":::"memory"); +#else + mb(); +#endif + + /* Set GART in SFB */ + bWriteReg(0xB00C, bReadReg(0xB00C) & ~0x02); + /* Set GART base address to HW */ + dwWriteReg(0xB034, __pa(info->lut_base)); + + return 1; + fail: + return 0; +} + +static void xgi_pcie_lut_cleanup(xgi_info_t * info) +{ + if (info->lut_base) { + XGI_INFO("info->lut_base: 0x%p info->lutPageOrder: 0x%lx \n", + info->lut_base, info->lutPageOrder); + xgi_pcie_lut_free((unsigned long)info->lut_base, + info->lutPageOrder); + info->lut_base = NULL; + } +} + +static xgi_pcie_block_t *xgi_pcie_new_node(void) +{ + xgi_pcie_block_t *block = + (xgi_pcie_block_t *) kmem_cache_alloc(xgi_pcie_cache_block, + GFP_KERNEL); + if (block == NULL) { + return NULL; + } + + block->offset = 0; /* block's offset in pcie memory, begin from 0 */ + block->size = 0; /* The block size. 
*/ + block->bus_addr = 0; /* CPU access address/bus address */ + block->hw_addr = 0; /* GE access address */ + block->page_count = 0; + block->page_order = 0; + block->page_block = NULL; + block->page_table = NULL; + block->owner = PCIE_INVALID; + + return block; +} + +static void xgi_pcie_block_stuff_free(xgi_pcie_block_t * block) +{ + struct page *page; + xgi_page_block_t *page_block = block->page_block; + xgi_page_block_t *free_block; + unsigned long page_count = 0; + int i; + + //XGI_INFO("block->page_block: 0x%p \n", block->page_block); + while (page_block) { + page_count = page_block->page_count; + + page = virt_to_page(page_block->virt_addr); + for (i = 0; i < page_count; i++, page++) { + XGI_DEC_PAGE_COUNT(page); + XGIUnlockPage(page); + } + free_pages(page_block->virt_addr, page_block->page_order); + + page_block->phys_addr = 0; + page_block->virt_addr = 0; + page_block->page_count = 0; + page_block->page_order = 0; + + free_block = page_block; + page_block = page_block->next; + //XGI_INFO("free free_block: 0x%p \n", free_block); + kfree(free_block); + free_block = NULL; + } + + if (block->page_table) { + //XGI_INFO("free block->page_table: 0x%p \n", block->page_table); + kfree(block->page_table); + block->page_table = NULL; + } +} + +int xgi_pcie_heap_init(xgi_info_t * info) +{ + xgi_pcie_block_t *block; + + if (!xgi_pcie_lut_init(info)) { + XGI_ERROR("xgi_pcie_lut_init failed\n"); + return 0; + } + + xgi_pcie_heap = + (xgi_pcie_heap_t *) kmalloc(sizeof(xgi_pcie_heap_t), GFP_KERNEL); + if (!xgi_pcie_heap) { + XGI_ERROR("xgi_pcie_heap alloc failed\n"); + goto fail1; + } + INIT_LIST_HEAD(&xgi_pcie_heap->free_list); + INIT_LIST_HEAD(&xgi_pcie_heap->used_list); + INIT_LIST_HEAD(&xgi_pcie_heap->sort_list); + + xgi_pcie_heap->max_freesize = info->pcie.size; + + xgi_pcie_cache_block = + kmem_cache_create("xgi_pcie_block", sizeof(xgi_pcie_block_t), 0, + SLAB_HWCACHE_ALIGN, NULL, NULL); + + if (NULL == xgi_pcie_cache_block) { + XGI_ERROR("Fail to creat xgi_pcie_block\n"); + goto fail2; + } + + block = (xgi_pcie_block_t *) xgi_pcie_new_node(); + if (!block) { + XGI_ERROR("xgi_pcie_new_node failed\n"); + goto fail3; + } + + block->offset = 0; /* block's offset in pcie memory, begin from 0 */ + block->size = info->pcie.size; + + list_add(&block->list, &xgi_pcie_heap->free_list); + + XGI_INFO("PCIE start address: 0x%lx, memory size : 0x%lx\n", + block->offset, block->size); + return 1; + fail3: + if (xgi_pcie_cache_block) { + kmem_cache_destroy(xgi_pcie_cache_block); + xgi_pcie_cache_block = NULL; + } + + fail2: + if (xgi_pcie_heap) { + kfree(xgi_pcie_heap); + xgi_pcie_heap = NULL; + } + fail1: + xgi_pcie_lut_cleanup(info); + return 0; +} + +void xgi_pcie_heap_check(void) +{ + struct list_head *useList, *temp; + xgi_pcie_block_t *block; + unsigned int ownerIndex; + char *ownerStr[6] = + { "2D", "3D", "3D_CMD", "3D_SCR", "3D_TEX", "ELSE" }; + + if (xgi_pcie_heap) { + useList = &xgi_pcie_heap->used_list; + temp = useList->next; + XGI_INFO("pcie freemax = 0x%lx\n", xgi_pcie_heap->max_freesize); + while (temp != useList) { + block = list_entry(temp, struct xgi_pcie_block_s, list); + if (block->owner == PCIE_2D) + ownerIndex = 0; + else if (block->owner > PCIE_3D_TEXTURE + || block->owner < PCIE_2D + || block->owner < PCIE_3D) + ownerIndex = 5; + else + ownerIndex = block->owner - PCIE_3D + 1; + XGI_INFO + ("Allocated by %s, block->offset: 0x%lx block->size: 0x%lx \n", + ownerStr[ownerIndex], block->offset, block->size); + temp = temp->next; + } + + } +} + +void xgi_pcie_heap_cleanup(xgi_info_t * 
info) +{ + struct list_head *free_list, *temp; + xgi_pcie_block_t *block; + int j; + + xgi_pcie_lut_cleanup(info); + XGI_INFO("xgi_pcie_lut_cleanup scceeded\n"); + + if (xgi_pcie_heap) { + free_list = &xgi_pcie_heap->free_list; + for (j = 0; j < 3; j++, free_list++) { + temp = free_list->next; + + while (temp != free_list) { + block = + list_entry(temp, struct xgi_pcie_block_s, + list); + XGI_INFO + ("No. %d block->offset: 0x%lx block->size: 0x%lx \n", + j, block->offset, block->size); + xgi_pcie_block_stuff_free(block); + block->bus_addr = 0; + block->hw_addr = 0; + + temp = temp->next; + //XGI_INFO("No. %d free block: 0x%p \n", j, block); + kmem_cache_free(xgi_pcie_cache_block, block); + block = NULL; + } + } + + XGI_INFO("free xgi_pcie_heap: 0x%p \n", xgi_pcie_heap); + kfree(xgi_pcie_heap); + xgi_pcie_heap = NULL; + } + + if (xgi_pcie_cache_block) { + kmem_cache_destroy(xgi_pcie_cache_block); + xgi_pcie_cache_block = NULL; + } +} + +static xgi_pcie_block_t *xgi_pcie_mem_alloc(xgi_info_t * info, + unsigned long originalSize, + enum PcieOwner owner) +{ + struct list_head *free_list; + xgi_pcie_block_t *block, *used_block, *free_block; + xgi_page_block_t *page_block, *prev_page_block; + struct page *page; + unsigned long page_order = 0, count = 0, index = 0; + unsigned long page_addr = 0; + unsigned long *lut_addr = NULL; + unsigned long lut_id = 0; + unsigned long size = (originalSize + PAGE_SIZE - 1) & PAGE_MASK; + int i, j, page_count = 0; + int temp = 0; + + XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-Begin\n"); + XGI_INFO("Original 0x%lx bytes requested, really 0x%lx allocated\n", + originalSize, size); + + if (owner == PCIE_3D) { + if (xgi_pcie_vertex_block) { + XGI_INFO + ("PCIE Vertex has been created, return directly.\n"); + return xgi_pcie_vertex_block; + } + } + + if (owner == PCIE_3D_CMDLIST) { + if (xgi_pcie_cmdlist_block) { + XGI_INFO + ("PCIE Cmdlist has been created, return directly.\n"); + return xgi_pcie_cmdlist_block; + } + } + + if (owner == PCIE_3D_SCRATCHPAD) { + if (xgi_pcie_scratchpad_block) { + XGI_INFO + ("PCIE Scratchpad has been created, return directly.\n"); + return xgi_pcie_scratchpad_block; + } + } + + if (size == 0) { + XGI_ERROR("size == 0 \n"); + return (NULL); + } + + XGI_INFO("max_freesize: 0x%lx \n", xgi_pcie_heap->max_freesize); + if (size > xgi_pcie_heap->max_freesize) { + XGI_ERROR + ("size: 0x%lx bigger than PCIE total free size: 0x%lx.\n", + size, xgi_pcie_heap->max_freesize); + return (NULL); + } + + /* Jong 05/30/2006; find next free list which has enough space */ + free_list = xgi_pcie_heap->free_list.next; + while (free_list != &xgi_pcie_heap->free_list) { + //XGI_INFO("free_list: 0x%px \n", free_list); + block = list_entry(free_list, struct xgi_pcie_block_s, list); + if (size <= block->size) { + break; + } + free_list = free_list->next; + } + + if (free_list == &xgi_pcie_heap->free_list) { + XGI_ERROR("Can't allocate %ldk size from PCIE memory !\n", + size / 1024); + return (NULL); + } + + free_block = block; + XGI_INFO("alloc size: 0x%lx from offset: 0x%lx size: 0x%lx \n", + size, free_block->offset, free_block->size); + + if (size == free_block->size) { + used_block = free_block; + XGI_INFO("size==free_block->size: free_block = 0x%p\n", + free_block); + list_del(&free_block->list); + } else { + used_block = xgi_pcie_new_node(); + if (used_block == NULL) { + return NULL; + } + + if (used_block == free_block) { + XGI_ERROR("used_block == free_block = 0x%p\n", + used_block); + } + + used_block->offset = free_block->offset; + used_block->size = 
size; + + free_block->offset += size; + free_block->size -= size; + } + + xgi_pcie_heap->max_freesize -= size; + + used_block->bus_addr = info->pcie.base + used_block->offset; + used_block->hw_addr = info->pcie.base + used_block->offset; + used_block->page_count = page_count = size / PAGE_SIZE; + + /* get page_order base on page_count */ + for (used_block->page_order = 0; page_count; page_count >>= 1) { + ++used_block->page_order; + } + + if ((used_block->page_count << 1) == (1 << used_block->page_order)) { + used_block->page_order--; + } + XGI_INFO + ("used_block->offset: 0x%lx, used_block->size: 0x%lx, used_block->bus_addr: 0x%lx, used_block->hw_addr: 0x%lx, used_block->page_count: 0x%lx used_block->page_order: 0x%lx\n", + used_block->offset, used_block->size, used_block->bus_addr, + used_block->hw_addr, used_block->page_count, + used_block->page_order); + + used_block->page_block = NULL; + //used_block->page_block = (xgi_pages_block_t *)kmalloc(sizeof(xgi_pages_block_t), GFP_KERNEL); + //if (!used_block->page_block) return NULL; + //used_block->page_block->next = NULL; + + used_block->page_table = + (xgi_pte_t *) kmalloc(sizeof(xgi_pte_t) * used_block->page_count, + GFP_KERNEL); + if (used_block->page_table == NULL) { + goto fail; + } + + lut_id = (used_block->offset >> PAGE_SHIFT); + lut_addr = info->lut_base; + lut_addr += lut_id; + XGI_INFO("lutAddr: 0x%p lutID: 0x%lx \n", lut_addr, lut_id); + + /* alloc free pages from system */ + page_count = used_block->page_count; + page_block = used_block->page_block; + prev_page_block = used_block->page_block; + for (i = 0; page_count > 0; i++) { + /* if size is bigger than 2M bytes, it should be split */ + if (page_count > (1 << XGI_PCIE_ALLOC_MAX_ORDER)) { + page_order = XGI_PCIE_ALLOC_MAX_ORDER; + } else { + count = page_count; + for (page_order = 0; count; count >>= 1, ++page_order) ; + + if ((page_count << 1) == (1 << page_order)) { + page_order -= 1; + } + } + + count = (1 << page_order); + page_addr = __get_free_pages(GFP_KERNEL, page_order); + XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-page_addr=0x%lx \n", + page_addr); + + if (!page_addr) { + XGI_ERROR + ("No: %d :Can't get free pages: 0x%lx from system memory !\n", + i, count); + goto fail; + } + + /* Jong 05/30/2006; test */ + memset((unsigned char *)page_addr, 0xFF, + PAGE_SIZE << page_order); + /* memset((unsigned char *)page_addr, 0, PAGE_SIZE << page_order); */ + + if (page_block == NULL) { + page_block = + (xgi_page_block_t *) + kmalloc(sizeof(xgi_page_block_t), GFP_KERNEL); + if (!page_block) { + XGI_ERROR + ("Can't get memory for page_block! 
\n"); + goto fail; + } + } + + if (prev_page_block == NULL) { + used_block->page_block = page_block; + prev_page_block = page_block; + } else { + prev_page_block->next = page_block; + prev_page_block = page_block; + } + + page_block->next = NULL; + page_block->phys_addr = __pa(page_addr); + page_block->virt_addr = page_addr; + page_block->page_count = count; + page_block->page_order = page_order; + + XGI_INFO + ("Jong05302006-xgi_pcie_mem_alloc-page_block->phys_addr=0x%lx \n", + page_block->phys_addr); + XGI_INFO + ("Jong05302006-xgi_pcie_mem_alloc-page_block->virt_addr=0x%lx \n", + page_block->virt_addr); + + page = virt_to_page(page_addr); + + //XGI_INFO("No: %d page_order: 0x%lx page_count: 0x%x count: 0x%lx index: 0x%lx lut_addr: 0x%p" + // "page_block->phys_addr: 0x%lx page_block->virt_addr: 0x%lx \n", + // i, page_order, page_count, count, index, lut_addr, page_block->phys_addr, page_block->virt_addr); + + for (j = 0; j < count; j++, page++, lut_addr++) { + used_block->page_table[index + j].phys_addr = + __pa(page_address(page)); + used_block->page_table[index + j].virt_addr = + (unsigned long)page_address(page); + + XGI_INFO + ("Jong05302006-xgi_pcie_mem_alloc-used_block->page_table[index + j].phys_addr=0x%lx \n", + used_block->page_table[index + j].phys_addr); + XGI_INFO + ("Jong05302006-xgi_pcie_mem_alloc-used_block->page_table[index + j].virt_addr=0x%lx \n", + used_block->page_table[index + j].virt_addr); + + *lut_addr = __pa(page_address(page)); + XGI_INC_PAGE_COUNT(page); + XGILockPage(page); + + if (temp) { + XGI_INFO + ("__pa(page_address(page)): 0x%lx lutAddr: 0x%p lutAddr No: 0x%x = 0x%lx \n", + __pa(page_address(page)), lut_addr, j, + *lut_addr); + temp--; + } + } + + page_block = page_block->next; + page_count -= count; + index += count; + temp = 0; + } + + used_block->owner = owner; + list_add(&used_block->list, &xgi_pcie_heap->used_list); + +#if defined(__i386__) || defined(__x86_64__) + asm volatile ("wbinvd":::"memory"); +#else + mb(); +#endif + + /* Flush GART Table */ + bWriteReg(0xB03F, 0x40); + bWriteReg(0xB03F, 0x00); + + if (owner == PCIE_3D) { + xgi_pcie_vertex_block = used_block; + } + + if (owner == PCIE_3D_CMDLIST) { + xgi_pcie_cmdlist_block = used_block; + } + + if (owner == PCIE_3D_SCRATCHPAD) { + xgi_pcie_scratchpad_block = used_block; + } + + XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-End \n"); + return (used_block); + + fail: + xgi_pcie_block_stuff_free(used_block); + kmem_cache_free(xgi_pcie_cache_block, used_block); + return NULL; +} + +static xgi_pcie_block_t *xgi_pcie_mem_free(xgi_info_t * info, + unsigned long offset) +{ + struct list_head *free_list, *used_list; + xgi_pcie_block_t *used_block, *block = NULL; + xgi_pcie_block_t *prev, *next; + unsigned long upper, lower; + + used_list = xgi_pcie_heap->used_list.next; + while (used_list != &xgi_pcie_heap->used_list) { + block = list_entry(used_list, struct xgi_pcie_block_s, list); + if (block->offset == offset) { + break; + } + used_list = used_list->next; + } + + if (used_list == &xgi_pcie_heap->used_list) { + XGI_ERROR("can't find block: 0x%lx to free!\n", offset); + return (NULL); + } + + used_block = block; + XGI_INFO + ("used_block: 0x%p, offset = 0x%lx, size = 0x%lx, bus_addr = 0x%lx, hw_addr = 0x%lx\n", + used_block, used_block->offset, used_block->size, + used_block->bus_addr, used_block->hw_addr); + + xgi_pcie_block_stuff_free(used_block); + + /* update xgi_pcie_heap */ + xgi_pcie_heap->max_freesize += used_block->size; + + prev = next = NULL; + upper = used_block->offset + used_block->size; 
+ lower = used_block->offset; + + free_list = xgi_pcie_heap->free_list.next; + + while (free_list != &xgi_pcie_heap->free_list) { + block = list_entry(free_list, struct xgi_pcie_block_s, list); + if (block->offset == upper) { + next = block; + } else if ((block->offset + block->size) == lower) { + prev = block; + } + free_list = free_list->next; + } + + XGI_INFO("next = 0x%p, prev = 0x%p\n", next, prev); + list_del(&used_block->list); + + if (prev && next) { + prev->size += (used_block->size + next->size); + list_del(&next->list); + XGI_INFO("free node 0x%p\n", next); + kmem_cache_free(xgi_pcie_cache_block, next); + kmem_cache_free(xgi_pcie_cache_block, used_block); + next = NULL; + used_block = NULL; + return (prev); + } + + if (prev) { + prev->size += used_block->size; + XGI_INFO("free node 0x%p\n", used_block); + kmem_cache_free(xgi_pcie_cache_block, used_block); + used_block = NULL; + return (prev); + } + + if (next) { + next->size += used_block->size; + next->offset = used_block->offset; + XGI_INFO("free node 0x%p\n", used_block); + kmem_cache_free(xgi_pcie_cache_block, used_block); + used_block = NULL; + return (next); + } + + used_block->bus_addr = 0; + used_block->hw_addr = 0; + used_block->page_count = 0; + used_block->page_order = 0; + list_add(&used_block->list, &xgi_pcie_heap->free_list); + XGI_INFO("Recycled free node %p, offset = 0x%lx, size = 0x%lx\n", + used_block, used_block->offset, used_block->size); + return (used_block); +} + +void xgi_pcie_alloc(xgi_info_t * info, unsigned long size, + enum PcieOwner owner, xgi_mem_alloc_t * alloc) +{ + xgi_pcie_block_t *block; + xgi_mem_pid_t *mempid_block; + + xgi_down(info->pcie_sem); + block = xgi_pcie_mem_alloc(info, size, owner); + xgi_up(info->pcie_sem); + + if (block == NULL) { + alloc->location = INVALID; + alloc->size = 0; + alloc->bus_addr = 0; + alloc->hw_addr = 0; + XGI_ERROR("PCIE RAM allocation failed\n"); + } else { + XGI_INFO + ("PCIE RAM allocation succeeded: offset = 0x%lx, bus_addr = 0x%lx\n", + block->offset, block->bus_addr); + alloc->location = NON_LOCAL; + alloc->size = block->size; + alloc->bus_addr = block->bus_addr; + alloc->hw_addr = block->hw_addr; + + /* + manage mempid, handle PCIE_3D, PCIE_3D_TEXTURE. + PCIE_3D request means a opengl process created. + PCIE_3D_TEXTURE request means texture cannot alloc from fb. + */ + if (owner == PCIE_3D || owner == PCIE_3D_TEXTURE) { + mempid_block = + kmalloc(sizeof(xgi_mem_pid_t), GFP_KERNEL); + if (!mempid_block) + XGI_ERROR("mempid_block alloc failed\n"); + mempid_block->location = NON_LOCAL; + if (owner == PCIE_3D) + mempid_block->bus_addr = 0xFFFFFFFF; /*xgi_pcie_vertex_block has the address */ + else + mempid_block->bus_addr = alloc->bus_addr; + mempid_block->pid = alloc->pid; + + XGI_INFO + ("Memory ProcessID add one pcie block pid:%ld successfully! 
\n", + mempid_block->pid); + list_add(&mempid_block->list, &xgi_mempid_list); + } + } +} + +void xgi_pcie_free(xgi_info_t * info, unsigned long bus_addr) +{ + xgi_pcie_block_t *block; + unsigned long offset = bus_addr - info->pcie.base; + xgi_mem_pid_t *mempid_block; + xgi_mem_pid_t *mempid_freeblock = NULL; + struct list_head *mempid_list; + char isvertex = 0; + int processcnt; + + if (xgi_pcie_vertex_block + && xgi_pcie_vertex_block->bus_addr == bus_addr) + isvertex = 1; + + if (isvertex) { + /*check is there any other process using vertex */ + processcnt = 0; + mempid_list = xgi_mempid_list.next; + while (mempid_list != &xgi_mempid_list) { + mempid_block = + list_entry(mempid_list, struct xgi_mem_pid_s, list); + if (mempid_block->location == NON_LOCAL + && mempid_block->bus_addr == 0xFFFFFFFF) { + ++processcnt; + } + mempid_list = mempid_list->next; + } + if (processcnt > 1) { + return; + } + } + + xgi_down(info->pcie_sem); + block = xgi_pcie_mem_free(info, offset); + xgi_up(info->pcie_sem); + + if (block == NULL) { + XGI_ERROR("xgi_pcie_free() failed at base 0x%lx\n", offset); + } + + if (isvertex) + xgi_pcie_vertex_block = NULL; + + /* manage mempid */ + mempid_list = xgi_mempid_list.next; + while (mempid_list != &xgi_mempid_list) { + mempid_block = + list_entry(mempid_list, struct xgi_mem_pid_s, list); + if (mempid_block->location == NON_LOCAL + && ((isvertex && mempid_block->bus_addr == 0xFFFFFFFF) + || (!isvertex && mempid_block->bus_addr == bus_addr))) { + mempid_freeblock = mempid_block; + break; + } + mempid_list = mempid_list->next; + } + if (mempid_freeblock) { + list_del(&mempid_freeblock->list); + XGI_INFO + ("Memory ProcessID delete one pcie block pid:%ld successfully! \n", + mempid_freeblock->pid); + kfree(mempid_freeblock); + } +} + +/* + * given a bus address, fid the pcie mem block + * uses the bus address as the key. 
+ */ +void *xgi_find_pcie_block(xgi_info_t * info, unsigned long address) +{ + struct list_head *used_list; + xgi_pcie_block_t *block; + int i; + + used_list = xgi_pcie_heap->used_list.next; + + while (used_list != &xgi_pcie_heap->used_list) { + block = list_entry(used_list, struct xgi_pcie_block_s, list); + + if (block->bus_addr == address) { + return block; + } + + if (block->page_table) { + for (i = 0; i < block->page_count; i++) { + unsigned long offset = block->bus_addr; + if ((address >= offset) + && (address < (offset + PAGE_SIZE))) { + return block; + } + } + } + used_list = used_list->next; + } + + XGI_ERROR("could not find map for vm 0x%lx\n", address); + + return NULL; +} + +/* + address -- GE HW address + return -- CPU virtual address + + assume the CPU VAddr is continuous in not the same block +*/ +void *xgi_find_pcie_virt(xgi_info_t * info, unsigned long address) +{ + struct list_head *used_list; + xgi_pcie_block_t *block; + unsigned long offset_in_page; + unsigned long loc_in_pagetable; + void *ret; + + XGI_INFO("Jong_05292006-xgi_find_pcie_virt-Begin\n"); + + used_list = xgi_pcie_heap->used_list.next; + XGI_INFO("Jong_05292006-used_list=%ul\n", used_list); + + offset_in_page = address & (PAGE_SIZE - 1); + XGI_INFO + ("Jong_05292006-address=0x%px, PAGE_SIZE-1=%ul, offset_in_page=%ul\n", + address, PAGE_SIZE - 1, offset_in_page); + + while (used_list != &xgi_pcie_heap->used_list) { + block = list_entry(used_list, struct xgi_pcie_block_s, list); + XGI_INFO("Jong_05292006-block=0x%px\n", block); + XGI_INFO("Jong_05292006-block->hw_addr=0x%px\n", + block->hw_addr); + XGI_INFO("Jong_05292006- block->size=%ul\n", block->size); + + if ((address >= block->hw_addr) + && (address < (block->hw_addr + block->size))) { + loc_in_pagetable = + (address - block->hw_addr) >> PAGE_SHIFT; + ret = + (void *)(block->page_table[loc_in_pagetable]. 
+ virt_addr + offset_in_page); + + XGI_INFO("Jong_05292006-PAGE_SHIFT=%d\n", PAGE_SHIFT); + XGI_INFO("Jong_05292006-loc_in_pagetable=0x%px\n", + loc_in_pagetable); + XGI_INFO + ("Jong_05292006-block->page_table[loc_in_pagetable].virt_addr=0x%px\n", + block->page_table[loc_in_pagetable].virt_addr); + XGI_INFO("Jong_05292006-offset_in_page=%d\n", + offset_in_page); + XGI_INFO("Jong_05292006-return(virt_addr)=0x%px\n", + ret); + + return ret; + } else { + XGI_INFO + ("Jong_05292006-used_list = used_list->next;\n"); + used_list = used_list->next; + } + } + + XGI_ERROR("could not find map for vm 0x%lx\n", address); + return NULL; +} + +void xgi_read_pcie_mem(xgi_info_t * info, xgi_mem_req_t * req) +{ + +} + +void xgi_write_pcie_mem(xgi_info_t * info, xgi_mem_req_t * req) +{ +} + +/* + address -- GE hw address +*/ +void xgi_test_rwinkernel(xgi_info_t * info, unsigned long address) +{ + unsigned long *virtaddr = 0; + if (address == 0) { + XGI_INFO("[Jong-kd] input GE HW addr is 0x00000000\n"); + return; + } + + virtaddr = (unsigned long *)xgi_find_pcie_virt(info, address); + + XGI_INFO("[Jong-kd] input GE HW addr is 0x%lx\n", address); + XGI_INFO("[Jong-kd] convert to CPU virt addr 0x%px\n", virtaddr); + XGI_INFO("[Jong-kd] origin [virtaddr] = 0x%lx\n", *virtaddr); + if (virtaddr != NULL) { + *virtaddr = 0x00f00fff; + } + + XGI_INFO("[Jong-kd] modified [virtaddr] = 0x%lx\n", *virtaddr); +} diff --git a/linux-core/xgi_pcie.h b/linux-core/xgi_pcie.h index cd5f85b8..32c2b584 100644 --- a/linux-core/xgi_pcie.h +++ b/linux-core/xgi_pcie.h @@ -1,73 +1,73 @@ - -/**************************************************************************** - * Copyright (C) 2003-2006 by XGI Technology, Taiwan. - * * - * All Rights Reserved. * - * * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation on the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - ***************************************************************************/ - -#ifndef _XGI_PCIE_H_ -#define _XGI_PCIE_H_ - -#ifndef XGI_PCIE_ALLOC_MAX_ORDER -#define XGI_PCIE_ALLOC_MAX_ORDER 1 /* 8K in Kernel 2.4.* */ -#endif - -typedef struct xgi_page_block_s { - struct xgi_page_block_s *next; - unsigned long phys_addr; - unsigned long virt_addr; - unsigned long page_count; - unsigned long page_order; -} xgi_page_block_t; - -typedef struct xgi_pcie_block_s { - struct list_head list; - unsigned long offset; /* block's offset in pcie memory, begin from 0 */ - unsigned long size; /* The block size. 
*/ - unsigned long bus_addr; /* CPU access address/bus address */ - unsigned long hw_addr; /* GE access address */ - - unsigned long page_count; - unsigned long page_order; - xgi_page_block_t *page_block; - xgi_pte_t *page_table; /* list of physical pages allocated */ - - atomic_t use_count; - enum PcieOwner owner; - unsigned long processID; -} xgi_pcie_block_t; - -typedef struct xgi_pcie_list_s { - xgi_pcie_block_t *head; - xgi_pcie_block_t *tail; -} xgi_pcie_list_t; - -typedef struct xgi_pcie_heap_s { - struct list_head free_list; - struct list_head used_list; - struct list_head sort_list; - unsigned long max_freesize; -} xgi_pcie_heap_t; - -#endif + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * * + * All Rights Reserved. * + * * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + ***************************************************************************/ + +#ifndef _XGI_PCIE_H_ +#define _XGI_PCIE_H_ + +#ifndef XGI_PCIE_ALLOC_MAX_ORDER +#define XGI_PCIE_ALLOC_MAX_ORDER 1 /* 8K in Kernel 2.4.* */ +#endif + +typedef struct xgi_page_block_s { + struct xgi_page_block_s *next; + unsigned long phys_addr; + unsigned long virt_addr; + unsigned long page_count; + unsigned long page_order; +} xgi_page_block_t; + +typedef struct xgi_pcie_block_s { + struct list_head list; + unsigned long offset; /* block's offset in pcie memory, begin from 0 */ + unsigned long size; /* The block size. */ + unsigned long bus_addr; /* CPU access address/bus address */ + unsigned long hw_addr; /* GE access address */ + + unsigned long page_count; + unsigned long page_order; + xgi_page_block_t *page_block; + xgi_pte_t *page_table; /* list of physical pages allocated */ + + atomic_t use_count; + enum PcieOwner owner; + unsigned long processID; +} xgi_pcie_block_t; + +typedef struct xgi_pcie_list_s { + xgi_pcie_block_t *head; + xgi_pcie_block_t *tail; +} xgi_pcie_list_t; + +typedef struct xgi_pcie_heap_s { + struct list_head free_list; + struct list_head used_list; + struct list_head sort_list; + unsigned long max_freesize; +} xgi_pcie_heap_t; + +#endif diff --git a/linux-core/xgi_regs.h b/linux-core/xgi_regs.h index 18448139..487a7e15 100644 --- a/linux-core/xgi_regs.h +++ b/linux-core/xgi_regs.h @@ -1,410 +1,404 @@ - -/**************************************************************************** - * Copyright (C) 2003-2006 by XGI Technology, Taiwan. 
- * * - * All Rights Reserved. * - * * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation on the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - ***************************************************************************/ - - -#ifndef _XGI_REGS_H_ -#define _XGI_REGS_H_ - -#ifndef XGI_MMIO - #define XGI_MMIO 1 -#endif - -#if XGI_MMIO -#define OUTB(port, value) writeb(value, info->mmio.vbase + port) -#define INB(port) readb(info->mmio.vbase + port) -#define OUTW(port, value) writew(value, info->mmio.vbase + port) -#define INW(port) readw(info->mmio.vbase + port) -#define OUTDW(port, value) writel(value, info->mmio.vbase + port) -#define INDW(port) readl(info->mmio.vbase + port) -#else -#define OUTB(port, value) outb(value, port) -#define INB(port) inb(port) -#define OUTW(port, value) outw(value, port) -#define INW(port) inw(port) -#define OUTDW(port, value) outl(value, port) -#define INDW(port) inl(port) -#endif - -/* Hardware access functions */ -static inline void OUT3C5B(xgi_info_t *info, u8 index, u8 data) -{ - OUTB(0x3C4, index); - OUTB(0x3C5, data); -} - -static inline void OUT3X5B(xgi_info_t *info, u8 index, u8 data) -{ - OUTB(0x3D4, index); - OUTB(0x3D5, data); -} - -static inline void OUT3CFB(xgi_info_t *info, u8 index, u8 data) -{ - OUTB(0x3CE, index); - OUTB(0x3CF, data); -} - -static inline u8 IN3C5B(xgi_info_t *info, u8 index) -{ - volatile u8 data=0; - OUTB(0x3C4, index); - data = INB(0x3C5); - return data; -} - -static inline u8 IN3X5B(xgi_info_t *info, u8 index) -{ - volatile u8 data=0; - OUTB(0x3D4, index); - data = INB(0x3D5); - return data; -} - -static inline u8 IN3CFB(xgi_info_t *info, u8 index) -{ - volatile u8 data=0; - OUTB(0x3CE, index); - data = INB(0x3CF); - return data; -} - -static inline void OUT3C5W(xgi_info_t *info, u8 index, u16 data) -{ - OUTB(0x3C4, index); - OUTB(0x3C5, data); -} - -static inline void OUT3X5W(xgi_info_t *info, u8 index, u16 data) -{ - OUTB(0x3D4, index); - OUTB(0x3D5, data); -} - -static inline void OUT3CFW(xgi_info_t *info, u8 index, u8 data) -{ - OUTB(0x3CE, index); - OUTB(0x3CF, data); -} - -static inline u8 IN3C5W(xgi_info_t *info, u8 index) -{ - volatile u8 data=0; - OUTB(0x3C4, index); - data = INB(0x3C5); - return data; -} - -static inline u8 IN3X5W(xgi_info_t *info, u8 index) -{ - volatile u8 data=0; - OUTB(0x3D4, index); - data = INB(0x3D5); - return data; -} - -static inline u8 IN3CFW(xgi_info_t *info, u8 index) -{ - volatile u8 data=0; - OUTB(0x3CE, index); - data = INB(0x3CF); - return data; -} - -static inline u8 
readAttr(xgi_info_t *info, u8 index) -{ - INB(0x3DA); /* flip-flop to index */ - OUTB(0x3C0, index); - return INB(0x3C1); -} - -static inline void writeAttr(xgi_info_t *info, u8 index, u8 value) -{ - INB(0x3DA); /* flip-flop to index */ - OUTB(0x3C0, index); - OUTB(0x3C0, value); -} - -/* - * Graphic engine register (2d/3d) acessing interface - */ -static inline void WriteRegDWord(xgi_info_t *info, u32 addr, u32 data) -{ - /* Jong 05/25/2006 */ - XGI_INFO("Jong-WriteRegDWord()-Begin \n"); - XGI_INFO("Jong-WriteRegDWord()-info->mmio.vbase=0x%lx \n", info->mmio.vbase); - XGI_INFO("Jong-WriteRegDWord()-addr=0x%lx \n", addr); - XGI_INFO("Jong-WriteRegDWord()-data=0x%lx \n", data); - /* return; */ - - *(volatile u32*)(info->mmio.vbase + addr) = (data); - XGI_INFO("Jong-WriteRegDWord()-End \n"); -} - -static inline void WriteRegWord(xgi_info_t *info, u32 addr, u16 data) -{ - *(volatile u16*)(info->mmio.vbase + addr) = (data); -} - -static inline void WriteRegByte(xgi_info_t *info, u32 addr, u8 data) -{ - *(volatile u8*)(info->mmio.vbase + addr) = (data); -} - -static inline u32 ReadRegDWord(xgi_info_t *info, u32 addr) -{ - volatile u32 data; - data = *(volatile u32*)(info->mmio.vbase + addr); - return data; -} - -static inline u16 ReadRegWord(xgi_info_t *info, u32 addr) -{ - volatile u16 data; - data = *(volatile u16*)(info->mmio.vbase + addr); - return data; -} - -static inline u8 ReadRegByte(xgi_info_t *info, u32 addr) -{ - volatile u8 data; - data = *(volatile u8*)(info->mmio.vbase + addr); - return data; -} -#if 0 -extern void OUT3C5B(xgi_info_t *info, u8 index, u8 data); -extern void OUT3X5B(xgi_info_t *info, u8 index, u8 data); -extern void OUT3CFB(xgi_info_t *info, u8 index, u8 data); -extern u8 IN3C5B(xgi_info_t *info, u8 index); -extern u8 IN3X5B(xgi_info_t *info, u8 index); -extern u8 IN3CFB(xgi_info_t *info, u8 index); -extern void OUT3C5W(xgi_info_t *info, u8 index, u8 data); -extern void OUT3X5W(xgi_info_t *info, u8 index, u8 data); -extern void OUT3CFW(xgi_info_t *info, u8 index, u8 data); -extern u8 IN3C5W(xgi_info_t *info, u8 index); -extern u8 IN3X5W(xgi_info_t *info, u8 index); -extern u8 IN3CFW(xgi_info_t *info, u8 index); - -extern void WriteRegDWord(xgi_info_t *info, u32 addr, u32 data); -extern void WriteRegWord(xgi_info_t *info, u32 addr, u16 data); -extern void WriteRegByte(xgi_info_t *info, u32 addr, u8 data); -extern u32 ReadRegDWord(xgi_info_t *info, u32 addr); -extern u16 ReadRegWord(xgi_info_t *info, u32 addr); -extern u8 ReadRegByte(xgi_info_t *info, u32 addr); - -extern void EnableProtect(); -extern void DisableProtect(); -#endif - -#define Out(port, data) OUTB(port, data) -#define bOut(port, data) OUTB(port, data) -#define wOut(port, data) OUTW(port, data) -#define dwOut(port, data) OUTDW(port, data) - -#define Out3x5(index, data) OUT3X5B(info, index, data) -#define bOut3x5(index, data) OUT3X5B(info, index, data) -#define wOut3x5(index, data) OUT3X5W(info, index, data) - -#define Out3c5(index, data) OUT3C5B(info, index, data) -#define bOut3c5(index, data) OUT3C5B(info, index, data) -#define wOut3c5(index, data) OUT3C5W(info, index, data) - -#define Out3cf(index, data) OUT3CFB(info, index, data) -#define bOut3cf(index, data) OUT3CFB(info, index, data) -#define wOut3cf(index, data) OUT3CFW(info, index, data) - -#define In(port) INB(port) -#define bIn(port) INB(port) -#define wIn(port) INW(port) -#define dwIn(port) INDW(port) - -#define In3x5(index) IN3X5B(info, index) -#define bIn3x5(index) IN3X5B(info, index) -#define wIn3x5(index) IN3X5W(info, index) - -#define 
In3c5(index) IN3C5B(info, index) -#define bIn3c5(index) IN3C5B(info, index) -#define wIn3c5(index) IN3C5W(info, index) - -#define In3cf(index) IN3CFB(info, index) -#define bIn3cf(index) IN3CFB(info, index) -#define wIn3cf(index) IN3CFW(info, index) - -#define dwWriteReg(addr, data) WriteRegDWord(info, addr, data) -#define wWriteReg(addr, data) WriteRegWord(info, addr, data) -#define bWriteReg(addr, data) WriteRegByte(info, addr, data) -#define dwReadReg(addr) ReadRegDWord(info, addr) -#define wReadReg(addr) ReadRegWord(info, addr) -#define bReadReg(addr) ReadRegByte(info, addr) - -static inline void xgi_protect_all(xgi_info_t *info) -{ - OUTB(0x3C4, 0x11); - OUTB(0x3C5, 0x92); -} - -static inline void xgi_unprotect_all(xgi_info_t *info) -{ - OUTB(0x3C4, 0x11); - OUTB(0x3C5, 0x92); -} - -static inline void xgi_enable_mmio(xgi_info_t *info) -{ - u8 protect = 0; - - /* Unprotect registers */ - outb(0x11, 0x3C4); - protect = inb(0x3C5); - outb(0x92, 0x3C5); - - outb(0x3A, 0x3D4); - outb(inb(0x3D5) | 0x20, 0x3D5); - - /* Enable MMIO */ - outb(0x39, 0x3D4); - outb(inb(0x3D5) | 0x01, 0x3D5); - - OUTB(0x3C4, 0x11); - OUTB(0x3C5, protect); -} - -static inline void xgi_disable_mmio(xgi_info_t *info) -{ - u8 protect = 0; - - /* unprotect registers */ - OUTB(0x3C4, 0x11); - protect = INB(0x3C5); - OUTB(0x3C5, 0x92); - - /* Disable MMIO access */ - OUTB(0x3D4, 0x39); - OUTB(0x3D5, INB(0x3D5) & 0xFE); - - /* Protect registers */ - outb(0x11, 0x3C4); - outb(protect, 0x3C5); -} - -static inline void xgi_enable_ge(xgi_info_t *info) -{ - unsigned char bOld3cf2a = 0; - int wait = 0; - - // Enable GE - OUTW(0x3C4, 0x9211); - - // Save and close dynamic gating - bOld3cf2a = bIn3cf(0x2a); - bOut3cf(0x2a, bOld3cf2a & 0xfe); - - // Reset both 3D and 2D engine - bOut3x5(0x36, 0x84); - wait = 10; - while (wait--) - { - bIn(0x36); - } - bOut3x5(0x36, 0x94); - wait = 10; - while (wait--) - { - bIn(0x36); - } - bOut3x5(0x36, 0x84); - wait = 10; - while (wait--) - { - bIn(0x36); - } - // Enable 2D engine only - bOut3x5(0x36, 0x80); - - // Enable 2D+3D engine - bOut3x5(0x36, 0x84); - - // Restore dynamic gating - bOut3cf(0x2a, bOld3cf2a); -} - -static inline void xgi_disable_ge(xgi_info_t *info) -{ - int wait = 0; - - // Reset both 3D and 2D engine - bOut3x5(0x36, 0x84); - - wait = 10; - while (wait--) - { - bIn(0x36); - } - bOut3x5(0x36, 0x94); - - wait = 10; - while (wait--) - { - bIn(0x36); - } - bOut3x5(0x36, 0x84); - - wait = 10; - while (wait--) - { - bIn(0x36); - } - - // Disable 2D engine only - bOut3x5(0x36, 0); -} - -static inline void xgi_enable_dvi_interrupt(xgi_info_t *info) -{ - Out3cf(0x39, In3cf(0x39) & ~0x01); //Set 3cf.39 bit 0 to 0 - Out3cf(0x39, In3cf(0x39) | 0x01); //Set 3cf.39 bit 0 to 1 - Out3cf(0x39, In3cf(0x39) | 0x02); -} -static inline void xgi_disable_dvi_interrupt(xgi_info_t *info) -{ - Out3cf(0x39,In3cf(0x39) & ~0x02); -} - -static inline void xgi_enable_crt1_interrupt(xgi_info_t *info) -{ - Out3cf(0x3d,In3cf(0x3d) | 0x04); - Out3cf(0x3d,In3cf(0x3d) & ~0x04); - Out3cf(0x3d,In3cf(0x3d) | 0x08); -} - -static inline void xgi_disable_crt1_interrupt(xgi_info_t *info) -{ - Out3cf(0x3d,In3cf(0x3d) & ~0x08); -} - -#endif - + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * * + * All Rights Reserved. 
* + * * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + ***************************************************************************/ + +#ifndef _XGI_REGS_H_ +#define _XGI_REGS_H_ + +#ifndef XGI_MMIO +#define XGI_MMIO 1 +#endif + +#if XGI_MMIO +#define OUTB(port, value) writeb(value, info->mmio.vbase + port) +#define INB(port) readb(info->mmio.vbase + port) +#define OUTW(port, value) writew(value, info->mmio.vbase + port) +#define INW(port) readw(info->mmio.vbase + port) +#define OUTDW(port, value) writel(value, info->mmio.vbase + port) +#define INDW(port) readl(info->mmio.vbase + port) +#else +#define OUTB(port, value) outb(value, port) +#define INB(port) inb(port) +#define OUTW(port, value) outw(value, port) +#define INW(port) inw(port) +#define OUTDW(port, value) outl(value, port) +#define INDW(port) inl(port) +#endif + +/* Hardware access functions */ +static inline void OUT3C5B(xgi_info_t * info, u8 index, u8 data) +{ + OUTB(0x3C4, index); + OUTB(0x3C5, data); +} + +static inline void OUT3X5B(xgi_info_t * info, u8 index, u8 data) +{ + OUTB(0x3D4, index); + OUTB(0x3D5, data); +} + +static inline void OUT3CFB(xgi_info_t * info, u8 index, u8 data) +{ + OUTB(0x3CE, index); + OUTB(0x3CF, data); +} + +static inline u8 IN3C5B(xgi_info_t * info, u8 index) +{ + volatile u8 data = 0; + OUTB(0x3C4, index); + data = INB(0x3C5); + return data; +} + +static inline u8 IN3X5B(xgi_info_t * info, u8 index) +{ + volatile u8 data = 0; + OUTB(0x3D4, index); + data = INB(0x3D5); + return data; +} + +static inline u8 IN3CFB(xgi_info_t * info, u8 index) +{ + volatile u8 data = 0; + OUTB(0x3CE, index); + data = INB(0x3CF); + return data; +} + +static inline void OUT3C5W(xgi_info_t * info, u8 index, u16 data) +{ + OUTB(0x3C4, index); + OUTB(0x3C5, data); +} + +static inline void OUT3X5W(xgi_info_t * info, u8 index, u16 data) +{ + OUTB(0x3D4, index); + OUTB(0x3D5, data); +} + +static inline void OUT3CFW(xgi_info_t * info, u8 index, u8 data) +{ + OUTB(0x3CE, index); + OUTB(0x3CF, data); +} + +static inline u8 IN3C5W(xgi_info_t * info, u8 index) +{ + volatile u8 data = 0; + OUTB(0x3C4, index); + data = INB(0x3C5); + return data; +} + +static inline u8 IN3X5W(xgi_info_t * info, u8 index) +{ + volatile u8 data = 0; + OUTB(0x3D4, index); + data = INB(0x3D5); + return data; +} + +static inline u8 IN3CFW(xgi_info_t * info, u8 index) +{ + volatile u8 data = 0; + OUTB(0x3CE, index); + data = INB(0x3CF); + return data; +} + +static inline u8 
readAttr(xgi_info_t * info, u8 index) +{ + INB(0x3DA); /* flip-flop to index */ + OUTB(0x3C0, index); + return INB(0x3C1); +} + +static inline void writeAttr(xgi_info_t * info, u8 index, u8 value) +{ + INB(0x3DA); /* flip-flop to index */ + OUTB(0x3C0, index); + OUTB(0x3C0, value); +} + +/* + * Graphic engine register (2d/3d) acessing interface + */ +static inline void WriteRegDWord(xgi_info_t * info, u32 addr, u32 data) +{ + /* Jong 05/25/2006 */ + XGI_INFO("Jong-WriteRegDWord()-Begin \n"); + XGI_INFO("Jong-WriteRegDWord()-info->mmio.vbase=0x%lx \n", + info->mmio.vbase); + XGI_INFO("Jong-WriteRegDWord()-addr=0x%lx \n", addr); + XGI_INFO("Jong-WriteRegDWord()-data=0x%lx \n", data); + /* return; */ + + *(volatile u32 *)(info->mmio.vbase + addr) = (data); + XGI_INFO("Jong-WriteRegDWord()-End \n"); +} + +static inline void WriteRegWord(xgi_info_t * info, u32 addr, u16 data) +{ + *(volatile u16 *)(info->mmio.vbase + addr) = (data); +} + +static inline void WriteRegByte(xgi_info_t * info, u32 addr, u8 data) +{ + *(volatile u8 *)(info->mmio.vbase + addr) = (data); +} + +static inline u32 ReadRegDWord(xgi_info_t * info, u32 addr) +{ + volatile u32 data; + data = *(volatile u32 *)(info->mmio.vbase + addr); + return data; +} + +static inline u16 ReadRegWord(xgi_info_t * info, u32 addr) +{ + volatile u16 data; + data = *(volatile u16 *)(info->mmio.vbase + addr); + return data; +} + +static inline u8 ReadRegByte(xgi_info_t * info, u32 addr) +{ + volatile u8 data; + data = *(volatile u8 *)(info->mmio.vbase + addr); + return data; +} + +#if 0 +extern void OUT3C5B(xgi_info_t * info, u8 index, u8 data); +extern void OUT3X5B(xgi_info_t * info, u8 index, u8 data); +extern void OUT3CFB(xgi_info_t * info, u8 index, u8 data); +extern u8 IN3C5B(xgi_info_t * info, u8 index); +extern u8 IN3X5B(xgi_info_t * info, u8 index); +extern u8 IN3CFB(xgi_info_t * info, u8 index); +extern void OUT3C5W(xgi_info_t * info, u8 index, u8 data); +extern void OUT3X5W(xgi_info_t * info, u8 index, u8 data); +extern void OUT3CFW(xgi_info_t * info, u8 index, u8 data); +extern u8 IN3C5W(xgi_info_t * info, u8 index); +extern u8 IN3X5W(xgi_info_t * info, u8 index); +extern u8 IN3CFW(xgi_info_t * info, u8 index); + +extern void WriteRegDWord(xgi_info_t * info, u32 addr, u32 data); +extern void WriteRegWord(xgi_info_t * info, u32 addr, u16 data); +extern void WriteRegByte(xgi_info_t * info, u32 addr, u8 data); +extern u32 ReadRegDWord(xgi_info_t * info, u32 addr); +extern u16 ReadRegWord(xgi_info_t * info, u32 addr); +extern u8 ReadRegByte(xgi_info_t * info, u32 addr); + +extern void EnableProtect(); +extern void DisableProtect(); +#endif + +#define Out(port, data) OUTB(port, data) +#define bOut(port, data) OUTB(port, data) +#define wOut(port, data) OUTW(port, data) +#define dwOut(port, data) OUTDW(port, data) + +#define Out3x5(index, data) OUT3X5B(info, index, data) +#define bOut3x5(index, data) OUT3X5B(info, index, data) +#define wOut3x5(index, data) OUT3X5W(info, index, data) + +#define Out3c5(index, data) OUT3C5B(info, index, data) +#define bOut3c5(index, data) OUT3C5B(info, index, data) +#define wOut3c5(index, data) OUT3C5W(info, index, data) + +#define Out3cf(index, data) OUT3CFB(info, index, data) +#define bOut3cf(index, data) OUT3CFB(info, index, data) +#define wOut3cf(index, data) OUT3CFW(info, index, data) + +#define In(port) INB(port) +#define bIn(port) INB(port) +#define wIn(port) INW(port) +#define dwIn(port) INDW(port) + +#define In3x5(index) IN3X5B(info, index) +#define bIn3x5(index) IN3X5B(info, index) +#define 
wIn3x5(index) IN3X5W(info, index) + +#define In3c5(index) IN3C5B(info, index) +#define bIn3c5(index) IN3C5B(info, index) +#define wIn3c5(index) IN3C5W(info, index) + +#define In3cf(index) IN3CFB(info, index) +#define bIn3cf(index) IN3CFB(info, index) +#define wIn3cf(index) IN3CFW(info, index) + +#define dwWriteReg(addr, data) WriteRegDWord(info, addr, data) +#define wWriteReg(addr, data) WriteRegWord(info, addr, data) +#define bWriteReg(addr, data) WriteRegByte(info, addr, data) +#define dwReadReg(addr) ReadRegDWord(info, addr) +#define wReadReg(addr) ReadRegWord(info, addr) +#define bReadReg(addr) ReadRegByte(info, addr) + +static inline void xgi_protect_all(xgi_info_t * info) +{ + OUTB(0x3C4, 0x11); + OUTB(0x3C5, 0x92); +} + +static inline void xgi_unprotect_all(xgi_info_t * info) +{ + OUTB(0x3C4, 0x11); + OUTB(0x3C5, 0x92); +} + +static inline void xgi_enable_mmio(xgi_info_t * info) +{ + u8 protect = 0; + + /* Unprotect registers */ + outb(0x11, 0x3C4); + protect = inb(0x3C5); + outb(0x92, 0x3C5); + + outb(0x3A, 0x3D4); + outb(inb(0x3D5) | 0x20, 0x3D5); + + /* Enable MMIO */ + outb(0x39, 0x3D4); + outb(inb(0x3D5) | 0x01, 0x3D5); + + OUTB(0x3C4, 0x11); + OUTB(0x3C5, protect); +} + +static inline void xgi_disable_mmio(xgi_info_t * info) +{ + u8 protect = 0; + + /* unprotect registers */ + OUTB(0x3C4, 0x11); + protect = INB(0x3C5); + OUTB(0x3C5, 0x92); + + /* Disable MMIO access */ + OUTB(0x3D4, 0x39); + OUTB(0x3D5, INB(0x3D5) & 0xFE); + + /* Protect registers */ + outb(0x11, 0x3C4); + outb(protect, 0x3C5); +} + +static inline void xgi_enable_ge(xgi_info_t * info) +{ + unsigned char bOld3cf2a = 0; + int wait = 0; + + // Enable GE + OUTW(0x3C4, 0x9211); + + // Save and close dynamic gating + bOld3cf2a = bIn3cf(0x2a); + bOut3cf(0x2a, bOld3cf2a & 0xfe); + + // Reset both 3D and 2D engine + bOut3x5(0x36, 0x84); + wait = 10; + while (wait--) { + bIn(0x36); + } + bOut3x5(0x36, 0x94); + wait = 10; + while (wait--) { + bIn(0x36); + } + bOut3x5(0x36, 0x84); + wait = 10; + while (wait--) { + bIn(0x36); + } + // Enable 2D engine only + bOut3x5(0x36, 0x80); + + // Enable 2D+3D engine + bOut3x5(0x36, 0x84); + + // Restore dynamic gating + bOut3cf(0x2a, bOld3cf2a); +} + +static inline void xgi_disable_ge(xgi_info_t * info) +{ + int wait = 0; + + // Reset both 3D and 2D engine + bOut3x5(0x36, 0x84); + + wait = 10; + while (wait--) { + bIn(0x36); + } + bOut3x5(0x36, 0x94); + + wait = 10; + while (wait--) { + bIn(0x36); + } + bOut3x5(0x36, 0x84); + + wait = 10; + while (wait--) { + bIn(0x36); + } + + // Disable 2D engine only + bOut3x5(0x36, 0); +} + +static inline void xgi_enable_dvi_interrupt(xgi_info_t * info) +{ + Out3cf(0x39, In3cf(0x39) & ~0x01); //Set 3cf.39 bit 0 to 0 + Out3cf(0x39, In3cf(0x39) | 0x01); //Set 3cf.39 bit 0 to 1 + Out3cf(0x39, In3cf(0x39) | 0x02); +} +static inline void xgi_disable_dvi_interrupt(xgi_info_t * info) +{ + Out3cf(0x39, In3cf(0x39) & ~0x02); +} + +static inline void xgi_enable_crt1_interrupt(xgi_info_t * info) +{ + Out3cf(0x3d, In3cf(0x3d) | 0x04); + Out3cf(0x3d, In3cf(0x3d) & ~0x04); + Out3cf(0x3d, In3cf(0x3d) | 0x08); +} + +static inline void xgi_disable_crt1_interrupt(xgi_info_t * info) +{ + Out3cf(0x3d, In3cf(0x3d) & ~0x08); +} + +#endif diff --git a/linux-core/xgi_types.h b/linux-core/xgi_types.h index 24cb8f3c..65ec498b 100644 --- a/linux-core/xgi_types.h +++ b/linux-core/xgi_types.h @@ -1,68 +1,67 @@ - -/**************************************************************************** - * Copyright (C) 2003-2006 by XGI Technology, Taiwan. - * * - * All Rights Reserved. 
* - * * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation on the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - ***************************************************************************/ - -#ifndef _XGI_TYPES_H_ -#define _XGI_TYPES_H_ - -/**************************************************************************** - * Typedefs * - ***************************************************************************/ - -typedef unsigned char V8; /* "void": enumerated or multiple fields */ -typedef unsigned short V16; /* "void": enumerated or multiple fields */ -typedef unsigned char U8; /* 0 to 255 */ -typedef unsigned short U16; /* 0 to 65535 */ -typedef signed char S8; /* -128 to 127 */ -typedef signed short S16; /* -32768 to 32767 */ -typedef float F32; /* IEEE Single Precision (S1E8M23) */ -typedef double F64; /* IEEE Double Precision (S1E11M52) */ -typedef unsigned long BOOL; -/* - * mainly for 64-bit linux, where long is 64 bits - * and win9x, where int is 16 bit. - */ -#if defined(vxworks) -typedef unsigned int V32; /* "void": enumerated or multiple fields */ -typedef unsigned int U32; /* 0 to 4294967295 */ -typedef signed int S32; /* -2147483648 to 2147483647 */ -#else -typedef unsigned long V32; /* "void": enumerated or multiple fields */ -typedef unsigned long U32; /* 0 to 4294967295 */ -typedef signed long S32; /* -2147483648 to 2147483647 */ -#endif - -#ifndef TRUE -#define TRUE 1UL -#endif - -#ifndef FALSE -#define FALSE 0UL -#endif - -#endif - + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * * + * All Rights Reserved. * + * * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. 
IN NO EVENT SHALL XGI AND/OR + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + ***************************************************************************/ + +#ifndef _XGI_TYPES_H_ +#define _XGI_TYPES_H_ + +/**************************************************************************** + * Typedefs * + ***************************************************************************/ + +typedef unsigned char V8; /* "void": enumerated or multiple fields */ +typedef unsigned short V16; /* "void": enumerated or multiple fields */ +typedef unsigned char U8; /* 0 to 255 */ +typedef unsigned short U16; /* 0 to 65535 */ +typedef signed char S8; /* -128 to 127 */ +typedef signed short S16; /* -32768 to 32767 */ +typedef float F32; /* IEEE Single Precision (S1E8M23) */ +typedef double F64; /* IEEE Double Precision (S1E11M52) */ +typedef unsigned long BOOL; +/* + * mainly for 64-bit linux, where long is 64 bits + * and win9x, where int is 16 bit. + */ +#if defined(vxworks) +typedef unsigned int V32; /* "void": enumerated or multiple fields */ +typedef unsigned int U32; /* 0 to 4294967295 */ +typedef signed int S32; /* -2147483648 to 2147483647 */ +#else +typedef unsigned long V32; /* "void": enumerated or multiple fields */ +typedef unsigned long U32; /* 0 to 4294967295 */ +typedef signed long S32; /* -2147483648 to 2147483647 */ +#endif + +#ifndef TRUE +#define TRUE 1UL +#endif + +#ifndef FALSE +#define FALSE 0UL +#endif + +#endif From ec9e494eb99d409a7e1e97bb6c5f71e9bb5a4486 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Tue, 26 Jun 2007 13:15:22 -0700 Subject: [PATCH 037/437] Gut support for pre-2.6 kernels. --- linux-core/xgi_linux.h | 96 ++---------------------------------------- 1 file changed, 4 insertions(+), 92 deletions(-) diff --git a/linux-core/xgi_linux.h b/linux-core/xgi_linux.h index 67c1af82..77660ee0 100644 --- a/linux-core/xgi_linux.h +++ b/linux-core/xgi_linux.h @@ -35,20 +35,8 @@ #include #endif -#ifndef KERNEL_VERSION /* pre-2.1.90 didn't have it */ -#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c)) -#endif - -#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0) -# error "This driver does not support pre-2.4 kernels!" -#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0) -#define KERNEL_2_4 -#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) -# error "This driver does not support 2.5 kernels!" -#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 7, 0) -#define KERNEL_2_6 -#else -# error "This driver does not support development kernels!" +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) +# error "This driver does not support pre-2.6 kernels!" 
#endif #if defined (CONFIG_SMP) && !defined (__SMP__) @@ -59,10 +47,6 @@ #define MODVERSIONS #endif -#if defined (MODVERSIONS) && !defined (KERNEL_2_6) -#include -#endif - #include /* printk */ #include @@ -88,12 +72,10 @@ #define XGI_SCAN_PROCESS(p) for_each_task(p) #endif -#ifdef KERNEL_2_6 #include /* module_param() */ #include /* kernel_locked */ #include /* flush_tlb(), flush_tlb_all() */ #include /* page table entry lookup */ -#endif #include /* pci_find_class, etc */ #include /* tasklets, interrupt helpers */ @@ -141,13 +123,8 @@ #endif #ifndef MAX_ORDER -#ifdef KERNEL_2_4 -#define MAX_ORDER 10 -#endif -#ifdef KERNEL_2_6 #define MAX_ORDER 11 #endif -#endif #ifndef module_init #define module_init(x) int init_module(void) { return x(); } @@ -171,39 +148,20 @@ typedef void irqreturn_t; pos = pos->next, prefetch(pos->next)) #endif -#ifdef KERNEL_2_4 -#define XGI_PCI_FOR_EACH_DEV(dev) pci_for_each_dev(dev) -#endif -#ifdef KERNEL_2_6 extern struct list_head pci_devices; /* list of all devices */ #define XGI_PCI_FOR_EACH_DEV(dev) \ for(dev = pci_dev_g(pci_devices.next); dev != pci_dev_g(&pci_devices); dev = pci_dev_g(dev->global_list.next)) -#endif /* * the following macro causes problems when used in the same module * as module_param(); undef it so we don't accidentally mix the two */ -#if defined (KERNEL_2_6) #undef MODULE_PARM -#endif #ifdef EXPORT_NO_SYMBOLS EXPORT_NO_SYMBOLS; #endif -#if defined (KERNEL_2_4) -#define XGI_IS_SUSER() suser() -#define XGI_PCI_DEVICE_NAME(dev) ((dev)->name) -#define XGI_NUM_CPUS() smp_num_cpus -#define XGI_CLI() __cli() -#define XGI_SAVE_FLAGS(eflags) __save_flags(eflags) -#define XGI_RESTORE_FLAGS(eflags) __restore_flags(eflags) -#define XGI_MAY_SLEEP() (!in_interrupt()) -#define XGI_MODULE_PARAMETER(x) MODULE_PARM(x, "i") -#endif - -#if defined (KERNEL_2_6) #define XGI_IS_SUSER() capable(CAP_SYS_ADMIN) #define XGI_PCI_DEVICE_NAME(dev) ((dev)->pretty_name) #define XGI_NUM_CPUS() num_online_cpus() @@ -212,7 +170,7 @@ EXPORT_NO_SYMBOLS; #define XGI_RESTORE_FLAGS(eflags) local_irq_restore(eflags) #define XGI_MAY_SLEEP() (!in_interrupt() && !in_atomic()) #define XGI_MODULE_PARAMETER(x) module_param(x, int, 0) -#endif + /* Earlier 2.4.x kernels don't have pci_disable_device() */ #ifdef XGI_PCI_DISABLE_DEVICE_PRESENT @@ -255,7 +213,7 @@ EXPORT_NO_SYMBOLS; * model is not sufficient for full acpi support. it may work in some cases, * but not enough for us to officially support this configuration. 
*/ -#if defined(CONFIG_ACPI) && defined(KERNEL_2_6) +#if defined(CONFIG_ACPI) #define XGI_PM_SUPPORT_ACPI #endif @@ -264,7 +222,6 @@ EXPORT_NO_SYMBOLS; #endif #if defined(CONFIG_DEVFS_FS) -#if defined(KERNEL_2_6) typedef void *devfs_handle_t; #define XGI_DEVFS_REGISTER(_name, _minor) \ ({ \ @@ -281,39 +238,10 @@ typedef void *devfs_handle_t; */ #define XGI_DEVFS_REMOVE_CONTROL() devfs_remove("xgi_ctl") #define XGI_DEVFS_REMOVE_DEVICE(i) devfs_remove("xgi") -#else // defined(KERNEL_2_4) -#define XGI_DEVFS_REGISTER(_name, _minor) \ - ({ \ - devfs_handle_t __handle = devfs_register(NULL, _name, DEVFS_FL_AUTO_DEVNUM, \ - XGI_DEV_MAJOR, _minor, \ - S_IFCHR | S_IRUGO | S_IWUGO, &xgi_fops, NULL); \ - __handle; \ - }) - -#define XGI_DEVFS_REMOVE_DEVICE(i) \ - ({ \ - if (xgi_devfs_handles[i] != NULL) \ - { \ - devfs_unregister(xgi_devfs_handles[i]); \ - } \ - }) -#define XGI_DEVFS_REMOVE_CONTROL() \ - ({ \ - if (xgi_devfs_handles[0] != NULL) \ - { \ - devfs_unregister(xgi_devfs_handles[0]); \ - } \ - }) -#endif /* defined(KERNEL_2_4) */ #endif /* defined(CONFIG_DEVFS_FS) */ -#if defined(CONFIG_DEVFS_FS) && !defined(KERNEL_2_6) -#define XGI_REGISTER_CHRDEV(x...) devfs_register_chrdev(x) -#define XGI_UNREGISTER_CHRDEV(x...) devfs_unregister_chrdev(x) -#else #define XGI_REGISTER_CHRDEV(x...) register_chrdev(x) #define XGI_UNREGISTER_CHRDEV(x...) unregister_chrdev(x) -#endif #if defined(XGI_REMAP_PFN_RANGE_PRESENT) #define XGI_REMAP_PAGE_RANGE(from, offset, x...) \ @@ -519,17 +447,6 @@ static inline void XGI_SET_PAGE_ATTRIB_CACHED(xgi_pte_t * page_ptr) #define XGI_SET_PAGE_ATTRIB_CACHED(page_list) #endif -#ifdef KERNEL_2_4 -#define XGI_INC_PAGE_COUNT(page) atomic_inc(&(page)->count) -#define XGI_DEC_PAGE_COUNT(page) atomic_dec(&(page)->count) -#define XGI_PAGE_COUNT(page) atomic_read(&(page)->count) -#define XGI_SET_PAGE_COUNT(page,v) atomic_set(&(page)->count, v) - -#define XGILockPage(page) set_bit(PG_locked, &(page)->flags) -#define XGIUnlockPage(page) clear_bit(PG_locked, &(page)->flags) -#endif - -#ifdef KERNEL_2_6 /* add for SUSE 9, Jill*/ #if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 4) #define XGI_INC_PAGE_COUNT(page) atomic_inc(&(page)->count) @@ -544,7 +461,6 @@ static inline void XGI_SET_PAGE_ATTRIB_CACHED(xgi_pte_t * page_ptr) #endif #define XGILockPage(page) SetPageLocked(page) #define XGIUnlockPage(page) ClearPageLocked(page) -#endif /* * hide a pointer to struct xgi_info_t in a file-private info @@ -564,11 +480,7 @@ typedef struct { /* for the card devices */ #define XGI_INFO_FROM_FP(filp) (XGI_GET_FP(filp)->info) -#ifdef KERNEL_2_0 -#define INODE_FROM_FP(filp) ((filp)->f_inode) -#else #define INODE_FROM_FP(filp) ((filp)->f_dentry->d_inode) -#endif #define XGI_ATOMIC_SET(data,val) atomic_set(&(data), (val)) #define XGI_ATOMIC_INC(data) atomic_inc(&(data)) From 7a053306a9f8152462fda521e1a8322ac2bdf9fd Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Tue, 26 Jun 2007 13:16:04 -0700 Subject: [PATCH 038/437] linux/config.h is deprecated or gone. --- linux-core/xgi_linux.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/linux-core/xgi_linux.h b/linux-core/xgi_linux.h index 77660ee0..28349470 100644 --- a/linux-core/xgi_linux.h +++ b/linux-core/xgi_linux.h @@ -29,8 +29,6 @@ #ifndef _XGI_LINUX_H_ #define _XGI_LINUX_H_ -#include - #ifndef LINUX_VERSION_CODE #include #endif From 47bf6239aaefb977cc17e421af273c3278eb127c Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Tue, 26 Jun 2007 13:20:15 -0700 Subject: [PATCH 039/437] Clean up compile-time kernel feature detection. 
--- linux-core/xgi_linux.h | 22 ++++++---------------- 1 file changed, 6 insertions(+), 16 deletions(-) diff --git a/linux-core/xgi_linux.h b/linux-core/xgi_linux.h index 28349470..8cf304c7 100644 --- a/linux-core/xgi_linux.h +++ b/linux-core/xgi_linux.h @@ -37,6 +37,12 @@ # error "This driver does not support pre-2.6 kernels!" #endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10) +# define XGI_REMAP_PFN_RANGE_PRESENT +#else +# define XGI_REMAP_PAGE_RANGE_5 +#endif + #if defined (CONFIG_SMP) && !defined (__SMP__) #define __SMP__ #endif @@ -170,12 +176,7 @@ EXPORT_NO_SYMBOLS; #define XGI_MODULE_PARAMETER(x) module_param(x, int, 0) -/* Earlier 2.4.x kernels don't have pci_disable_device() */ -#ifdef XGI_PCI_DISABLE_DEVICE_PRESENT #define XGI_PCI_DISABLE_DEVICE(dev) pci_disable_device(dev) -#else -#define XGI_PCI_DISABLE_DEVICE(dev) -#endif /* common defines */ #define GET_MODULE_SYMBOL(mod,sym) (const void *) inter_module_get(sym) @@ -195,15 +196,9 @@ EXPORT_NO_SYMBOLS; #define XGI_PCI_SLOT_NUMBER(dev) PCI_SLOT((dev)->devfn) #ifdef XGI_PCI_GET_CLASS_PRESENT -#define XGI_PCI_DEV_PUT(dev) pci_dev_put(dev) #define XGI_PCI_GET_DEVICE(vendor,device,from) pci_get_device(vendor,device,from) -#define XGI_PCI_GET_SLOT(bus,devfn) pci_get_slot(pci_find_bus(0,bus),devfn) -#define XGI_PCI_GET_CLASS(class,from) pci_get_class(class,from) #else -#define XGI_PCI_DEV_PUT(dev) #define XGI_PCI_GET_DEVICE(vendor,device,from) pci_find_device(vendor,device,from) -#define XGI_PCI_GET_SLOT(bus,devfn) pci_find_slot(bus,devfn) -#define XGI_PCI_GET_CLASS(class,from) pci_find_class(class,from) #endif /* @@ -429,7 +424,6 @@ typedef struct xgi_pte_s { * 2.4.20 is the first kernel to address it properly. The * page_attr API provides the means to solve the problem. */ -#if defined(XGI_CHANGE_PAGE_ATTR_PRESENT) static inline void XGI_SET_PAGE_ATTRIB_UNCACHED(xgi_pte_t * page_ptr) { struct page *page = virt_to_page(__va(page_ptr->phys_addr)); @@ -440,10 +434,6 @@ static inline void XGI_SET_PAGE_ATTRIB_CACHED(xgi_pte_t * page_ptr) struct page *page = virt_to_page(__va(page_ptr->phys_addr)); change_page_attr(page, 1, PAGE_KERNEL); } -#else -#define XGI_SET_PAGE_ATTRIB_UNCACHED(page_list) -#define XGI_SET_PAGE_ATTRIB_CACHED(page_list) -#endif /* add for SUSE 9, Jill*/ #if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 4) From 3a776fa01e61c1dc40a0a1803a80c98bf7e77164 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Tue, 26 Jun 2007 13:26:10 -0700 Subject: [PATCH 040/437] Add XGI driver to Makefiles. 
--- linux-core/Makefile | 9 ++++++++- linux-core/Makefile.kernel | 2 ++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/linux-core/Makefile b/linux-core/Makefile index 1758777c..2052459d 100644 --- a/linux-core/Makefile +++ b/linux-core/Makefile @@ -58,7 +58,7 @@ endif # Modules for all architectures MODULE_LIST := drm.o tdfx.o r128.o radeon.o mga.o sis.o savage.o via.o \ - mach64.o nv.o nouveau.o + mach64.o nv.o nouveau.o xgi.o # Modules only for ix86 architectures ifneq (,$(findstring 86,$(MACHINE))) @@ -91,6 +91,8 @@ MACH64HEADERS = mach64_drv.h mach64_drm.h $(DRMHEADERS) NVHEADERS = nv_drv.h $(DRMHEADERS) FFBHEADERS = ffb_drv.h $(DRMHEADERS) NOUVEAUHEADERS = nouveau_drv.h nouveau_drm.h nouveau_reg.h $(DRMHEADERS) +XGIHEADERS = xgi_cmdlist.h xgi_drv.h xgi_fb.h xgi_linux.h xgi_misc.h \ + xgi_pcie.h xgi_regs.h xgi_types.h PROGS = dristat drmstat @@ -284,6 +286,7 @@ CONFIG_DRM_VIA := n CONFIG_DRM_MACH64 := n CONFIG_DRM_NV := n CONFIG_DRM_NOUVEAU := n +CONFIG_DRM_XGI := n # Enable module builds for the modules requested/supported. @@ -320,6 +323,9 @@ endif ifneq (,$(findstring nouveau,$(DRM_MODULES))) CONFIG_DRM_NOUVEAU := m endif +ifneq (,$(findstring xgi,$(DRM_MODULES))) +CONFIG_DRM_XGI := m +endif # These require AGP support @@ -347,6 +353,7 @@ $(via-objs): $(VIAHEADERS) $(mach64-objs): $(MACH64HEADERS) $(nv-objs): $(NVHEADERS) $(nouveau-objs): $(NOUVEAUHEADERS) +$(xgi-objs): $(XGIHEADERS) endif diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel index 6f5b021b..d9865f5a 100644 --- a/linux-core/Makefile.kernel +++ b/linux-core/Makefile.kernel @@ -35,6 +35,7 @@ via-objs := via_irq.o via_drv.o via_map.o via_mm.o via_dma.o via_verifier.o \ via_video.o via_dmablit.o via_fence.o via_buffer.o mach64-objs := mach64_drv.o mach64_dma.o mach64_irq.o mach64_state.o nv-objs := nv_drv.o +xgi-objs := xgi_cmdlist.o xgi_drv.o xgi_fb.o xgi_misc.o xgi_pcie.o ifeq ($(CONFIG_COMPAT),y) drm-objs += drm_ioc32.o @@ -59,3 +60,4 @@ obj-$(CONFIG_DRM_VIA) += via.o obj-$(CONFIG_DRM_MACH64)+= mach64.o obj-$(CONFIG_DRM_NV) += nv.o obj-$(CONFIG_DRM_NOUVEAU) += nouveau.o +obj-$(CONFIG_DRM_XGI) += xgi.o \ No newline at end of file From 3547fbda63925217a5be24de5d5abec3b53d3fe1 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Tue, 26 Jun 2007 13:29:28 -0700 Subject: [PATCH 041/437] Revert over-zealous change from previous commit. --- linux-core/xgi_linux.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/linux-core/xgi_linux.h b/linux-core/xgi_linux.h index 8cf304c7..465feb3c 100644 --- a/linux-core/xgi_linux.h +++ b/linux-core/xgi_linux.h @@ -195,9 +195,12 @@ EXPORT_NO_SYMBOLS; #define XGI_PCI_BUS_NUMBER(dev) (dev)->bus->number #define XGI_PCI_SLOT_NUMBER(dev) PCI_SLOT((dev)->devfn) +#define XGI_PCI_GET_CLASS_PRESENT #ifdef XGI_PCI_GET_CLASS_PRESENT +#define XGI_PCI_DEV_PUT(dev) pci_dev_put(dev) #define XGI_PCI_GET_DEVICE(vendor,device,from) pci_get_device(vendor,device,from) #else +#define XGI_PCI_DEV_PUT(dev) #define XGI_PCI_GET_DEVICE(vendor,device,from) pci_find_device(vendor,device,from) #endif From b9ef1467fed9e96c5e7bd453d01511f8ce98583c Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Tue, 26 Jun 2007 13:39:01 -0700 Subject: [PATCH 042/437] Clean up mixed declarations and code. 
--- linux-core/xgi_cmdlist.c | 3 ++- linux-core/xgi_misc.c | 17 ++++++++++++----- 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index e00ea228..99be2145 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -196,9 +196,10 @@ void xgi_submit_cmdlist(xgi_info_t * info, xgi_cmd_info_t * pCmdInfo) /* Jong 06/13/2006; remove marked for system hang test */ /* xgi_waitfor_pci_idle(info); */ } else { + U32 *lastBatchVirtAddr; + XGI_INFO ("Jong-xgi_submit_cmdlist-s_cmdring._lastBatchStartAddr != 0 \n"); - U32 *lastBatchVirtAddr; /* Jong 05/25/2006 */ /* return; */ diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index 61e40594..06cf0160 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -200,13 +200,15 @@ BOOL xgi_ge_irq_handler(xgi_info_t * info) STALL_INTERRUPT_RESET_THRESHOLD) { continoue_int_count = 0; } else if (continoue_int_count >= 3) { + int time_out; + continoue_int_count = 0; // GE Hung up, need reset. XGI_INFO("Reset GE!\n"); *(mmio_vbase + 0xb057) = 8; - int time_out = 0xffff; + time_out = 0xffff; while (0 != (ge_3d_status[0x00] & 0xf0000000)) { @@ -214,6 +216,11 @@ BOOL xgi_ge_irq_handler(xgi_info_t * info) ((--time_out) & 0xfff)) ; if (0 == time_out) { + U8 old_3ce; + U8 old_3cf; + U8 old_index; + U8 old_36; + XGI_INFO ("Can not reset back 0x%lx!\n", ge_3d_status @@ -222,24 +229,24 @@ BOOL xgi_ge_irq_handler(xgi_info_t * info) 0xb057) = 0; // Have to use 3x5.36 to reset. // Save and close dynamic gating - U8 old_3ce = + old_3ce = *(mmio_vbase + 0x3ce); *(mmio_vbase + 0x3ce) = 0x2a; - U8 old_3cf = + old_3cf = *(mmio_vbase + 0x3cf); *(mmio_vbase + 0x3cf) = old_3cf & 0xfe; // Reset GE - U8 old_index = + old_index = *(mmio_vbase + 0x3d4); *(mmio_vbase + 0x3d4) = 0x36; - U8 old_36 = + old_36 = *(mmio_vbase + 0x3d5); *(mmio_vbase + From 8cee7dca95bc2114eb90640cf83ac87c29243683 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Tue, 26 Jun 2007 13:46:36 -0700 Subject: [PATCH 043/437] Clean up warnings about unused variables and functions. --- linux-core/xgi_drv.c | 11 ----------- linux-core/xgi_misc.c | 34 ++-------------------------------- linux-core/xgi_pcie.c | 2 ++ 3 files changed, 4 insertions(+), 43 deletions(-) diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index 0c37d00e..75204283 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -153,11 +153,6 @@ static inline void xgi_check_pci_config(xgi_info_t * info, int line) pci_write_config_word(info->dev, PCI_COMMAND, cmd); } -static int xgi_post_vbios(xgi_ioctl_post_vbios_t * info) -{ - return 1; -} - /* * struct pci_device_id { * unsigned int vendor, device; // Vendor and device ID or PCI_ANY_ID @@ -1484,14 +1479,8 @@ static int __init xgi_init_module(void) void __exit xgi_exit_module(void) { int i; - xgi_info_t *info, *max_devices; #ifdef CONFIG_DEVFS_FS - /* - XGI_DEVFS_REMOVE_CONTROL(); - for (i = 0; i < XGI_MAX_DEVICES; i++) - XGI_DEVFS_REMOVE_DEVICE(i); - */ XGI_DEVFS_REMOVE_DEVICE(xgi_num_devices); #endif diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index 06cf0160..8d0e81b6 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -131,7 +131,7 @@ BOOL xgi_ge_irq_handler(xgi_info_t * info) BOOL is_wrong_signal = FALSE; static U32 last_int_tick_low, last_int_tick_high; - static U32 new_int_tick_low, new_int_tick_high; + static U32 new_int_tick_low; static U32 continoue_int_count = 0; // OE II is busy. 
while (old_ge_status & 0x001c0000) { @@ -290,9 +290,6 @@ BOOL xgi_ge_irq_handler(xgi_info_t * info) BOOL xgi_crt_irq_handler(xgi_info_t * info) { BOOL ret = FALSE; - U8 *mmio_vbase = info->mmio.vbase; - U32 device_status = 0; - U32 hw_status = 0; U8 save_3ce = bReadReg(0x3ce); if (bIn3cf(0x37) & 0x01) // CRT1 interrupt just happened @@ -303,15 +300,6 @@ BOOL xgi_crt_irq_handler(xgi_info_t * info) // What happened? op3cf_37 = bIn3cf(0x37); -#if 0 - if (op3cf_37 & 0x04) - device_status |= GDEVST_CONNECT; - else - device_status &= ~GDEVST_CONNECT; - - device_status |= GDEVST_DEVICE_CHANGED; - hw_status |= HWST_DEVICE_CHANGED; -#endif // Clear CRT interrupt op3cf_3d = bIn3cf(0x3d); bOut3cf(0x3d, (op3cf_3d | 0x04)); @@ -326,9 +314,6 @@ BOOL xgi_crt_irq_handler(xgi_info_t * info) BOOL xgi_dvi_irq_handler(xgi_info_t * info) { BOOL ret = FALSE; - U8 *mmio_vbase = info->mmio.vbase; - U32 device_status = 0; - U32 hw_status = 0; U8 save_3ce = bReadReg(0x3ce); if (bIn3cf(0x38) & 0x20) // DVI interrupt just happened @@ -340,28 +325,13 @@ BOOL xgi_dvi_irq_handler(xgi_info_t * info) // What happened? op3cf_37 = bIn3cf(0x37); -#if 0 - //Also update our internal flag - if (op3cf_37 & 0x10) // Second Monitor plugged In - { - device_status |= GDEVST_CONNECT; - //Because currenly we cannot determine if DVI digital - //or DVI analog is connected according to DVI interrupt - //We should still call BIOS to check it when utility ask us - device_status &= ~GDEVST_CHECKED; - } else { - device_status &= ~GDEVST_CONNECT; - } -#endif + //Notify BIOS that DVI plug/unplug happened op3x5_5a = bIn3x5(0x5a); bOut3x5(0x5a, op3x5_5a & 0xf7); bWriteReg(0x3d4, save_3x4); - //device_status |= GDEVST_DEVICE_CHANGED; - //hw_status |= HWST_DEVICE_CHANGED; - // Clear DVI interrupt op3cf_39 = bIn3cf(0x39); bOut3c5(0x39, (op3cf_39 & ~0x01)); //Set 3cf.39 bit 0 to 0 diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index 9457770a..8b024e4a 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -350,8 +350,10 @@ void xgi_pcie_heap_check(void) struct list_head *useList, *temp; xgi_pcie_block_t *block; unsigned int ownerIndex; +#ifdef XGI_DEBUG char *ownerStr[6] = { "2D", "3D", "3D_CMD", "3D_SCR", "3D_TEX", "ELSE" }; +#endif if (xgi_pcie_heap) { useList = &xgi_pcie_heap->used_list; From 9b9a127ed0fe9a6a8e2fde84739ccff6fa0bc5ac Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Tue, 26 Jun 2007 23:25:40 +0200 Subject: [PATCH 044/437] More 64-bit padding. 
--- shared-core/drm.h | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/shared-core/drm.h b/shared-core/drm.h index 1b0e54e3..e017c023 100644 --- a/shared-core/drm.h +++ b/shared-core/drm.h @@ -676,7 +676,7 @@ typedef struct drm_fence_arg { unsigned int type; unsigned int flags; unsigned int signaled; - unsigned int pad_64; + unsigned int pad64; drm_u64_t expand_pad[3]; /*Future expansion */ } drm_fence_arg_t; @@ -797,6 +797,7 @@ struct drm_bo_info_req { unsigned int handle; unsigned int hint; unsigned int fence_class; + unsigned int pad64; }; struct drm_bo_create_req { @@ -806,6 +807,7 @@ struct drm_bo_create_req { unsigned int hint; unsigned int page_alignment; drm_bo_type_t type; + unsigned int pad64; }; struct drm_bo_op_req { @@ -838,13 +840,14 @@ struct drm_bo_info_rep { unsigned int desired_tile_stride; unsigned int hw_tile_stride; unsigned int tile_info; - unsigned int pad64; + unsigned int pad64; drm_u64_t expand_pad[4]; /*Future expansion */ }; struct drm_bo_arg_rep { struct drm_bo_info_rep bo_info; int ret; + unsigned int pad64; }; struct drm_bo_create_arg { @@ -873,13 +876,13 @@ struct drm_bo_map_wait_idle_arg { }; struct drm_bo_op_arg { - int handled; - unsigned int pad_64; drm_u64_t next; union { struct drm_bo_op_req req; struct drm_bo_arg_rep rep; } d; + int handled; + unsigned int pad64; }; #define DRM_BO_MEM_LOCAL 0 From 4f2dd78ff3b6efeee97b72cca6bbfaef485a08d9 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 28 Jun 2007 02:56:30 +1000 Subject: [PATCH 045/437] nouveau/nv04: Set NV_PFIFO_CACHE1_PUSH1 correctly + small tweaks --- shared-core/nv04_fifo.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/shared-core/nv04_fifo.c b/shared-core/nv04_fifo.c index 34a497b7..783514a7 100644 --- a/shared-core/nv04_fifo.c +++ b/shared-core/nv04_fifo.c @@ -53,10 +53,9 @@ nv04_fifo_create_context(drm_device_t *dev, int channel) RAMFC_WR(DMA_PUT, chan->pushbuf_base); RAMFC_WR(DMA_GET, chan->pushbuf_base); RAMFC_WR(DMA_INSTANCE, nouveau_chip_instance_get(dev, pb->instance)); - /* NOTE: nvidia use TRIG_128/SIZE_128/MAX_REQS_8 */ - RAMFC_WR(DMA_FETCH, (NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES | + RAMFC_WR(DMA_FETCH, (NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | - NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4 | + NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 | #ifdef __BIG_ENDIAN NV_PFIFO_CACHE1_BIG_ENDIAN | #endif @@ -83,8 +82,10 @@ nv04_fifo_load_context(drm_device_t *dev, int channel) int fifoctx = NV04_RAMFC + (channel * NV04_FIFO_CONTEXT_SIZE); uint32_t tmp; - NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT, RAMFC_RD(DMA_PUT)); + NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, (1<<8) | channel); + NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET, RAMFC_RD(DMA_GET)); + NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT, RAMFC_RD(DMA_PUT)); tmp = RAMFC_RD(DMA_INSTANCE); NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE, tmp & 0xFFFF); From 695599f18d907bb277805581bbe208b0e083e7d9 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Sun, 24 Jun 2007 19:03:35 +1000 Subject: [PATCH 046/437] nouveau: Nuke DMA_OBJECT_INIT ioctl (bumps interface to 0.0.7) For various reasons, this ioctl was a bad idea. At channel creation we now automatically create DMA objects covering available VRAM and GART memory, where the client used to do this themselves. However, there is still a need to be able to create DMA objects pointing at specific areas of memory (ie. notifiers). 
Each channel is now allocated a small amount of memory from which a client can suballocate things (such as notifiers), and have a DMA object created which covers the suballocated area. The NOTIFIER_ALLOC ioctl exposes this functionality. --- linux-core/Makefile.kernel | 2 +- linux-core/nouveau_notifier.c | 1 + shared-core/nouveau_drm.h | 28 +++--- shared-core/nouveau_drv.h | 28 +++++- shared-core/nouveau_fifo.c | 45 ++++++++-- shared-core/nouveau_mem.c | 51 +++++++---- shared-core/nouveau_notifier.c | 154 ++++++++++++++++++++++++++++++++ shared-core/nouveau_object.c | 155 +++++++++++---------------------- shared-core/nouveau_state.c | 3 +- 9 files changed, 321 insertions(+), 146 deletions(-) create mode 120000 linux-core/nouveau_notifier.c create mode 100644 shared-core/nouveau_notifier.c diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel index 9427a04b..6ab17a49 100644 --- a/linux-core/Makefile.kernel +++ b/linux-core/Makefile.kernel @@ -21,7 +21,7 @@ i810-objs := i810_drv.o i810_dma.o i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \ i915_buffer.o nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \ - nouveau_object.o nouveau_irq.o \ + nouveau_object.o nouveau_irq.o nouveau_notifier.o \ nv04_timer.o \ nv04_mc.o nv40_mc.o \ nv04_fb.o nv10_fb.o nv40_fb.o \ diff --git a/linux-core/nouveau_notifier.c b/linux-core/nouveau_notifier.c new file mode 120000 index 00000000..285469c5 --- /dev/null +++ b/linux-core/nouveau_notifier.c @@ -0,0 +1 @@ +../shared-core/nouveau_notifier.c \ No newline at end of file diff --git a/shared-core/nouveau_drm.h b/shared-core/nouveau_drm.h index 1e7322e0..0758991a 100644 --- a/shared-core/nouveau_drm.h +++ b/shared-core/nouveau_drm.h @@ -25,9 +25,12 @@ #ifndef __NOUVEAU_DRM_H__ #define __NOUVEAU_DRM_H__ -#define NOUVEAU_DRM_HEADER_PATCHLEVEL 6 +#define NOUVEAU_DRM_HEADER_PATCHLEVEL 7 typedef struct drm_nouveau_fifo_alloc { + uint32_t fb_ctxdma_handle; + uint32_t tt_ctxdma_handle; + int channel; uint32_t put_base; /* FIFO control regs */ @@ -36,29 +39,30 @@ typedef struct drm_nouveau_fifo_alloc { /* DMA command buffer */ drm_handle_t cmdbuf; int cmdbuf_size; + /* Notifier memory */ + drm_handle_t notifier; + int notifier_size; } drm_nouveau_fifo_alloc_t; -typedef struct drm_nouveau_object_init { +typedef struct drm_nouveau_grobj_alloc { int channel; uint32_t handle; int class; } -drm_nouveau_object_init_t; +drm_nouveau_grobj_alloc_t; #define NOUVEAU_MEM_ACCESS_RO 1 #define NOUVEAU_MEM_ACCESS_WO 2 #define NOUVEAU_MEM_ACCESS_RW 3 -typedef struct drm_nouveau_dma_object_init { +typedef struct drm_nouveau_notifier_alloc { int channel; uint32_t handle; - int class; - int access; - int target; + int count; + uint32_t offset; - int size; } -drm_nouveau_dma_object_init_t; +drm_nouveau_notifier_alloc_t; #define NOUVEAU_MEM_FB 0x00000001 #define NOUVEAU_MEM_AGP 0x00000002 @@ -68,7 +72,7 @@ drm_nouveau_dma_object_init_t; #define NOUVEAU_MEM_USER_BACKED 0x00000020 #define NOUVEAU_MEM_MAPPED 0x00000040 #define NOUVEAU_MEM_INSTANCE 0x00000080 /* internal */ - +#define NOUVEAU_MEM_NOTIFIER 0x00000100 /* internal */ typedef struct drm_nouveau_mem_alloc { int flags; int alignment; @@ -141,8 +145,8 @@ typedef struct drm_nouveau_sarea { drm_nouveau_sarea_t; #define DRM_NOUVEAU_FIFO_ALLOC 0x00 -#define DRM_NOUVEAU_OBJECT_INIT 0x01 -#define DRM_NOUVEAU_DMA_OBJECT_INIT 0x02 +#define DRM_NOUVEAU_GROBJ_ALLOC 0x01 +#define DRM_NOUVEAU_NOTIFIER_ALLOC 0x02 #define DRM_NOUVEAU_MEM_ALLOC 0x03 #define DRM_NOUVEAU_MEM_FREE 0x04 
#define DRM_NOUVEAU_GETPARAM 0x05 diff --git a/shared-core/nouveau_drv.h b/shared-core/nouveau_drv.h index b3122d8a..7a1ca3d5 100644 --- a/shared-core/nouveau_drv.h +++ b/shared-core/nouveau_drv.h @@ -34,7 +34,7 @@ #define DRIVER_MAJOR 0 #define DRIVER_MINOR 0 -#define DRIVER_PATCHLEVEL 6 +#define DRIVER_PATCHLEVEL 7 #define NOUVEAU_FAMILY 0x0000FFFF #define NOUVEAU_FLAGS 0xFFFF0000 @@ -84,6 +84,10 @@ struct nouveau_fifo struct mem_block *cmdbuf_mem; struct nouveau_object *cmdbuf_obj; uint32_t pushbuf_base; + /* notifier memory */ + struct mem_block *notifier_block; + struct mem_block *notifier_heap; + drm_local_map_t *notifier_map; /* PGRAPH context, for cards that keep it in RAMIN */ struct mem_block *ramin_grctx; /* objects belonging to this fifo */ @@ -197,6 +201,12 @@ extern void nouveau_wait_for_idle(struct drm_device *dev); extern int nouveau_ioctl_card_init(DRM_IOCTL_ARGS); /* nouveau_mem.c */ +extern int nouveau_mem_init_heap(struct mem_block **, + uint64_t start, uint64_t size); +extern struct mem_block *nouveau_mem_alloc_block(struct mem_block *, + uint64_t size, int align2, + DRMFILE); +extern void nouveau_mem_free_block(struct mem_block *); extern uint64_t nouveau_mem_fb_amount(struct drm_device *dev); extern void nouveau_mem_release(DRMFILE filp, struct mem_block *heap); extern int nouveau_ioctl_mem_alloc(DRM_IOCTL_ARGS); @@ -216,6 +226,13 @@ extern void nouveau_instmem_w32(drm_nouveau_private_t *dev_priv, struct mem_block *mem, int index, uint32_t val); +/* nouveau_notifier.c */ +extern int nouveau_notifier_init_channel(drm_device_t *, int channel, DRMFILE); +extern void nouveau_notifier_takedown_channel(drm_device_t *, int channel); +extern int nouveau_notifier_alloc(drm_device_t *, int channel, + uint32_t handle, int cout, uint32_t *offset); +extern int nouveau_ioctl_notifier_alloc(DRM_IOCTL_ARGS); + /* nouveau_fifo.c */ extern int nouveau_fifo_init(drm_device_t *dev); extern int nouveau_fifo_number(drm_device_t *dev); @@ -225,7 +242,13 @@ extern int nouveau_fifo_owner(drm_device_t *dev, DRMFILE filp, int channel); extern void nouveau_fifo_free(drm_device_t *dev, int channel); /* nouveau_object.c */ +extern int nouveau_object_init_channel(drm_device_t *, int channel, + uint32_t vram_handle, + uint32_t tt_handle); +extern void nouveau_object_takedown_channel(drm_device_t *dev, int channel); extern void nouveau_object_cleanup(drm_device_t *dev, int channel); +extern int nouveau_ht_object_insert(drm_device_t *, int channel, + uint32_t handle, struct nouveau_object *); extern struct nouveau_object * nouveau_object_gr_create(drm_device_t *dev, int channel, int class); extern struct nouveau_object * @@ -233,8 +256,7 @@ nouveau_object_dma_create(drm_device_t *dev, int channel, int class, uint32_t offset, uint32_t size, int access, int target); extern void nouveau_object_free(drm_device_t *dev, struct nouveau_object *obj); -extern int nouveau_ioctl_object_init(DRM_IOCTL_ARGS); -extern int nouveau_ioctl_dma_object_init(DRM_IOCTL_ARGS); +extern int nouveau_ioctl_grobj_alloc(DRM_IOCTL_ARGS); extern uint32_t nouveau_chip_instance_get(drm_device_t *dev, struct mem_block *mem); /* nouveau_irq.c */ diff --git a/shared-core/nouveau_fifo.c b/shared-core/nouveau_fifo.c index 1a06f913..f179af63 100644 --- a/shared-core/nouveau_fifo.c +++ b/shared-core/nouveau_fifo.c @@ -241,7 +241,8 @@ nouveau_fifo_cmdbuf_alloc(struct drm_device *dev, int channel) } /* allocates and initializes a fifo for user space consumption */ -static int nouveau_fifo_alloc(drm_device_t* dev, int *chan_ret, DRMFILE filp) 
+int nouveau_fifo_alloc(drm_device_t* dev, int *chan_ret, DRMFILE filp, + uint32_t vram_handle, uint32_t tt_handle) { int ret; drm_nouveau_private_t *dev_priv = dev->dev_private; @@ -282,6 +283,20 @@ static int nouveau_fifo_alloc(drm_device_t* dev, int *chan_ret, DRMFILE filp) return ret; } + /* Setup channel's default objects */ + ret = nouveau_object_init_channel(dev, channel, vram_handle, tt_handle); + if (ret) { + nouveau_fifo_free(dev, channel); + return ret; + } + + /* Allocate space for per-channel fixed notifier memory */ + ret = nouveau_notifier_init_channel(dev, channel, filp); + if (ret) { + nouveau_fifo_free(dev, channel); + return ret; + } + nouveau_wait_for_idle(dev); /* disable the fifo caches */ @@ -370,6 +385,8 @@ void nouveau_fifo_free(drm_device_t* dev, int channel) if (chan->cmdbuf_mem) nouveau_mem_free(dev, chan->cmdbuf_mem); + nouveau_notifier_takedown_channel(dev, channel); + /* Destroy objects belonging to the channel */ nouveau_object_cleanup(dev, channel); @@ -408,30 +425,42 @@ static int nouveau_ioctl_fifo_alloc(DRM_IOCTL_ARGS) { DRM_DEVICE; drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_fifo *chan; drm_nouveau_fifo_alloc_t init; int res; DRM_COPY_FROM_USER_IOCTL(init, (drm_nouveau_fifo_alloc_t __user *) data, sizeof(init)); - res = nouveau_fifo_alloc(dev, &init.channel, filp); + res = nouveau_fifo_alloc(dev, &init.channel, filp, + init.fb_ctxdma_handle, + init.tt_ctxdma_handle); if (res) return res; + chan = &dev_priv->fifos[init.channel]; - init.put_base = dev_priv->fifos[init.channel].pushbuf_base; + init.put_base = chan->pushbuf_base; /* make the fifo available to user space */ /* first, the fifo control regs */ init.ctrl = dev_priv->mmio->offset + NV03_FIFO_REGS(init.channel); init.ctrl_size = NV03_FIFO_REGS_SIZE; res = drm_addmap(dev, init.ctrl, init.ctrl_size, _DRM_REGISTERS, - 0, &dev_priv->fifos[init.channel].regs); + 0, &chan->regs); if (res != 0) return res; /* pass back FIFO map info to the caller */ - init.cmdbuf = dev_priv->fifos[init.channel].cmdbuf_mem->start; - init.cmdbuf_size = dev_priv->fifos[init.channel].cmdbuf_mem->size; + init.cmdbuf = chan->cmdbuf_mem->start; + init.cmdbuf_size = chan->cmdbuf_mem->size; + + /* and the notifier block */ + init.notifier = chan->notifier_block->start; + init.notifier_size = chan->notifier_block->size; + res = drm_addmap(dev, init.notifier, init.notifier_size, _DRM_REGISTERS, + 0, &chan->notifier_map); + if (res != 0) + return res; DRM_COPY_TO_USER_IOCTL((drm_nouveau_fifo_alloc_t __user *)data, init, sizeof(init)); @@ -444,8 +473,8 @@ static int nouveau_ioctl_fifo_alloc(DRM_IOCTL_ARGS) drm_ioctl_desc_t nouveau_ioctls[] = { [DRM_IOCTL_NR(DRM_NOUVEAU_FIFO_ALLOC)] = {nouveau_ioctl_fifo_alloc, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_NOUVEAU_OBJECT_INIT)] = {nouveau_ioctl_object_init, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_NOUVEAU_DMA_OBJECT_INIT)] = {nouveau_ioctl_dma_object_init, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_NOUVEAU_GROBJ_ALLOC)] = {nouveau_ioctl_grobj_alloc, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_NOUVEAU_NOTIFIER_ALLOC)] = {nouveau_ioctl_notifier_alloc, DRM_AUTH}, [DRM_IOCTL_NR(DRM_NOUVEAU_MEM_ALLOC)] = {nouveau_ioctl_mem_alloc, DRM_AUTH}, [DRM_IOCTL_NR(DRM_NOUVEAU_MEM_FREE)] = {nouveau_ioctl_mem_free, DRM_AUTH}, [DRM_IOCTL_NR(DRM_NOUVEAU_GETPARAM)] = {nouveau_ioctl_getparam, DRM_AUTH}, diff --git a/shared-core/nouveau_mem.c b/shared-core/nouveau_mem.c index a5343b99..edfc9d3f 100644 --- a/shared-core/nouveau_mem.c +++ b/shared-core/nouveau_mem.c @@ -77,8 +77,8 @@ out: return p; } -static struct mem_block 
*alloc_block(struct mem_block *heap, uint64_t size, - int align2, DRMFILE filp) +struct mem_block *nouveau_mem_alloc_block(struct mem_block *heap, uint64_t size, + int align2, DRMFILE filp) { struct mem_block *p; uint64_t mask = (1 << align2) - 1; @@ -106,7 +106,7 @@ static struct mem_block *find_block(struct mem_block *heap, uint64_t start) return NULL; } -static void free_block(struct mem_block *p) +void nouveau_mem_free_block(struct mem_block *p) { p->filp = NULL; @@ -132,7 +132,8 @@ static void free_block(struct mem_block *p) /* Initialize. How to check for an uninitialized heap? */ -static int init_heap(struct mem_block **heap, uint64_t start, uint64_t size) +int nouveau_mem_init_heap(struct mem_block **heap, uint64_t start, + uint64_t size) { struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFS); @@ -331,7 +332,9 @@ int nouveau_mem_init(struct drm_device *dev) goto no_agp; } - if (init_heap(&dev_priv->agp_heap, info.aperture_base, info.aperture_size)) + if (nouveau_mem_init_heap(&dev_priv->agp_heap, + info.aperture_base, + info.aperture_size)) goto no_agp; dev_priv->agp_phys = info.aperture_base; @@ -357,12 +360,19 @@ no_agp: if (fb_size>256*1024*1024) { /* On cards with > 256Mb, you can't map everything. * So we create a second FB heap for that type of memory */ - if (init_heap(&dev_priv->fb_heap, drm_get_resource_start(dev,1), 256*1024*1024)) + if (nouveau_mem_init_heap(&dev_priv->fb_heap, + drm_get_resource_start(dev,1), + 256*1024*1024)) return DRM_ERR(ENOMEM); - if (init_heap(&dev_priv->fb_nomap_heap, drm_get_resource_start(dev,1)+256*1024*1024, fb_size-256*1024*1024)) + if (nouveau_mem_init_heap(&dev_priv->fb_nomap_heap, + drm_get_resource_start(dev,1) + + 256*1024*1024, + fb_size-256*1024*1024)) return DRM_ERR(ENOMEM); } else { - if (init_heap(&dev_priv->fb_heap, drm_get_resource_start(dev,1), fb_size)) + if (nouveau_mem_init_heap(&dev_priv->fb_heap, + drm_get_resource_start(dev,1), + fb_size)) return DRM_ERR(ENOMEM); dev_priv->fb_nomap_heap=NULL; } @@ -397,21 +407,25 @@ struct mem_block* nouveau_mem_alloc(struct drm_device *dev, int alignment, uint6 if (flags&NOUVEAU_MEM_AGP) { type=NOUVEAU_MEM_AGP; - block = alloc_block(dev_priv->agp_heap, size, alignment, filp); + block = nouveau_mem_alloc_block(dev_priv->agp_heap, size, + alignment, filp); if (block) goto alloc_ok; } if (flags&(NOUVEAU_MEM_FB|NOUVEAU_MEM_FB_ACCEPTABLE)) { type=NOUVEAU_MEM_FB; if (!(flags&NOUVEAU_MEM_MAPPED)) { - block = alloc_block(dev_priv->fb_nomap_heap, size, alignment, filp); + block = nouveau_mem_alloc_block(dev_priv->fb_nomap_heap, + size, alignment, filp); if (block) goto alloc_ok; } - block = alloc_block(dev_priv->fb_heap, size, alignment, filp); + block = nouveau_mem_alloc_block(dev_priv->fb_heap, size, + alignment, filp); if (block) goto alloc_ok; } if (flags&NOUVEAU_MEM_AGP_ACCEPTABLE) { type=NOUVEAU_MEM_AGP; - block = alloc_block(dev_priv->agp_heap, size, alignment, filp); + block = nouveau_mem_alloc_block(dev_priv->agp_heap, size, + alignment, filp); if (block) goto alloc_ok; } @@ -432,7 +446,7 @@ alloc_ok: ret = drm_addmap(dev, block->start, block->size, _DRM_FRAME_BUFFER, 0, &block->map); if (ret) { - free_block(block); + nouveau_mem_free_block(block); return NULL; } } @@ -446,7 +460,7 @@ void nouveau_mem_free(struct drm_device* dev, struct mem_block* block) DRM_INFO("freeing 0x%llx\n", block->start); if (block->flags&NOUVEAU_MEM_MAPPED) drm_rmmap(dev, block->map); - free_block(block); + nouveau_mem_free_block(block); } static void @@ -549,8 +563,8 @@ int 
nouveau_instmem_init(struct drm_device *dev) * the space that was reserved for RAMHT/FC/RO. */ offset = dev_priv->ramfc_offset + dev_priv->ramfc_size; - ret = init_heap(&dev_priv->ramin_heap, - offset, dev_priv->ramin_size - offset); + ret = nouveau_mem_init_heap(&dev_priv->ramin_heap, + offset, dev_priv->ramin_size - offset); if (ret) { dev_priv->ramin_heap = NULL; DRM_ERROR("Failed to init RAMIN heap\n"); @@ -570,7 +584,8 @@ struct mem_block *nouveau_instmem_alloc(struct drm_device *dev, return NULL; } - block = alloc_block(dev_priv->ramin_heap, size, align, (DRMFILE)-2); + block = nouveau_mem_alloc_block(dev_priv->ramin_heap, size, align, + (DRMFILE)-2); if (block) { block->flags = NOUVEAU_MEM_INSTANCE; DRM_DEBUG("instance(size=%d, align=%d) alloc'd at 0x%08x\n", @@ -583,7 +598,7 @@ struct mem_block *nouveau_instmem_alloc(struct drm_device *dev, void nouveau_instmem_free(struct drm_device *dev, struct mem_block *block) { if (dev && block) { - free_block(block); + nouveau_mem_free_block(block); } } diff --git a/shared-core/nouveau_notifier.c b/shared-core/nouveau_notifier.c new file mode 100644 index 00000000..ab6f8c2d --- /dev/null +++ b/shared-core/nouveau_notifier.c @@ -0,0 +1,154 @@ +/* + * Copyright (C) 2007 Ben Skeggs. + * + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include "drmP.h" +#include "drm.h" +#include "nouveau_drv.h" + +int +nouveau_notifier_init_channel(drm_device_t *dev, int channel, DRMFILE filp) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + int flags, ret; + + /*TODO: PCI notifier blocks */ + if (dev_priv->agp_heap) + flags = NOUVEAU_MEM_AGP | NOUVEAU_MEM_FB_ACCEPTABLE; + else + flags = NOUVEAU_MEM_FB; + + chan->notifier_block = nouveau_mem_alloc(dev, 0, PAGE_SIZE, flags,filp); + if (!chan->notifier_block) + return DRM_ERR(ENOMEM); + + ret = nouveau_mem_init_heap(&chan->notifier_heap, + 0, chan->notifier_block->size); + if (ret) + return ret; + + return 0; +} + +void +nouveau_notifier_takedown_channel(drm_device_t *dev, int channel) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + + if (chan->notifier_block) { + nouveau_mem_free(dev, chan->notifier_block); + chan->notifier_block = NULL; + } + + /*XXX: heap destroy */ +} + +int +nouveau_notifier_alloc(drm_device_t *dev, int channel, uint32_t handle, + int count, uint32_t *b_offset) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + struct nouveau_object *obj; + struct mem_block *mem; + uint32_t offset; + int target; + + if (!chan->notifier_heap) { + DRM_ERROR("Channel %d doesn't have a notifier heap!\n", + channel); + return DRM_ERR(EINVAL); + } + + mem = nouveau_mem_alloc_block(chan->notifier_heap, 32, 0, chan->filp); + if (!mem) { + DRM_ERROR("Channel %d notifier block full\n", channel); + return DRM_ERR(ENOMEM); + } + mem->flags = NOUVEAU_MEM_NOTIFIER; + + offset = chan->notifier_block->start + mem->start; + if (chan->notifier_block->flags & NOUVEAU_MEM_FB) { + offset -= drm_get_resource_start(dev, 1); + target = NV_DMA_TARGET_VIDMEM; + } else if (chan->notifier_block->flags & NOUVEAU_MEM_AGP) { + offset -= dev_priv->agp_phys; + target = NV_DMA_TARGET_AGP; + } else { + DRM_ERROR("Bad DMA target, flags 0x%08x!\n", + chan->notifier_block->flags); + return DRM_ERR(EINVAL); + } + + obj = nouveau_object_dma_create(dev, channel, NV_CLASS_DMA_IN_MEMORY, + offset, mem->size, NV_DMA_ACCESS_RW, + target); + if (!obj) { + nouveau_mem_free_block(mem); + DRM_ERROR("Error creating notifier ctxdma\n"); + return DRM_ERR(ENOMEM); + } + + obj->handle = handle; + if (nouveau_ht_object_insert(dev, channel, handle, obj)) { + nouveau_object_free(dev, obj); + nouveau_mem_free_block(mem); + DRM_ERROR("Error inserting notifier ctxdma into RAMHT\n"); + return DRM_ERR(ENOMEM); + } + + *b_offset = mem->start; + return 0; +} + +int +nouveau_ioctl_notifier_alloc(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + drm_nouveau_notifier_alloc_t na; + int ret; + + DRM_COPY_FROM_USER_IOCTL(na, (drm_nouveau_notifier_alloc_t __user*)data, + sizeof(na)); + + if (!nouveau_fifo_owner(dev, filp, na.channel)) { + DRM_ERROR("pid %d doesn't own channel %d\n", + DRM_CURRENTPID, na.channel); + return DRM_ERR(EPERM); + } + + ret = nouveau_notifier_alloc(dev, na.channel, na.handle, + na.count, &na.offset); + if (ret) + return ret; + + DRM_COPY_TO_USER_IOCTL((drm_nouveau_notifier_alloc_t __user*)data, + na, sizeof(na)); + return 0; +} + diff --git a/shared-core/nouveau_object.c b/shared-core/nouveau_object.c index e36568c6..e7528e23 100644 --- a/shared-core/nouveau_object.c +++ b/shared-core/nouveau_object.c @@ -153,13 +153,13 @@ nouveau_ht_handle_hash(drm_device_t *dev, int channel, uint32_t handle) return hash << 3; } -static int +int 
nouveau_ht_object_insert(drm_device_t* dev, int channel, uint32_t handle, struct nouveau_object *obj) { drm_nouveau_private_t *dev_priv=dev->dev_private; int ht_base = NV_RAMIN + dev_priv->ramht_offset; - int ht_end = ht_base + dev_priv->ramht_size; +/* int ht_end = ht_base + dev_priv->ramht_size; */ int o_ofs, ofs; obj->handle = handle; @@ -461,6 +461,54 @@ nouveau_object_free(drm_device_t *dev, struct nouveau_object *obj) drm_free(obj, sizeof(struct nouveau_object), DRM_MEM_DRIVER); } +int +nouveau_object_init_channel(drm_device_t *dev, int channel, + uint32_t vram_handle, + uint32_t tt_handle) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_object *gpuobj; + int ret; + + /* VRAM ctxdma */ + gpuobj = nouveau_object_dma_create(dev, channel, NV_CLASS_DMA_IN_MEMORY, + 0, dev_priv->fb_available_size, + NV_DMA_ACCESS_RW, + NV_DMA_TARGET_VIDMEM); + if (!gpuobj) { + DRM_ERROR("Error creating VRAM ctxdma: %d\n", DRM_ERR(ENOMEM)); + return DRM_ERR(ENOMEM); + } + + ret = nouveau_ht_object_insert(dev, channel, vram_handle, gpuobj); + if (ret) { + DRM_ERROR("Error referencing VRAM ctxdma: %d\n", ret); + return ret; + } + + /* non-AGP unimplemented */ + if (dev_priv->agp_heap == NULL) + return 0; + + /* GART ctxdma */ + gpuobj = nouveau_object_dma_create(dev, channel, NV_CLASS_DMA_IN_MEMORY, + 0, dev_priv->agp_available_size, + NV_DMA_ACCESS_RW, + NV_DMA_TARGET_AGP); + if (!gpuobj) { + DRM_ERROR("Error creating TT ctxdma: %d\n", DRM_ERR(ENOMEM)); + return DRM_ERR(ENOMEM); + } + + ret = nouveau_ht_object_insert(dev, channel, tt_handle, gpuobj); + if (ret) { + DRM_ERROR("Error referencing TT ctxdma: %d\n", ret); + return ret; + } + + return 0; +} + void nouveau_object_cleanup(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv=dev->dev_private; @@ -470,13 +518,13 @@ void nouveau_object_cleanup(drm_device_t *dev, int channel) } } -int nouveau_ioctl_object_init(DRM_IOCTL_ARGS) +int nouveau_ioctl_grobj_alloc(DRM_IOCTL_ARGS) { DRM_DEVICE; - drm_nouveau_object_init_t init; + drm_nouveau_grobj_alloc_t init; struct nouveau_object *obj; - DRM_COPY_FROM_USER_IOCTL(init, (drm_nouveau_object_init_t __user *) + DRM_COPY_FROM_USER_IOCTL(init, (drm_nouveau_grobj_alloc_t __user *) data, sizeof(init)); if (!nouveau_fifo_owner(dev, filp, init.channel)) { @@ -505,100 +553,3 @@ int nouveau_ioctl_object_init(DRM_IOCTL_ARGS) return 0; } -static int -nouveau_dma_object_check_access(drm_device_t *dev, - drm_nouveau_dma_object_init_t *init) -{ - drm_nouveau_private_t *dev_priv = dev->dev_private; - uint64_t limit; - - /* Check for known DMA object classes */ - switch (init->class) { - case NV_CLASS_DMA_IN_MEMORY: - case NV_CLASS_DMA_FROM_MEMORY: - case NV_CLASS_DMA_TO_MEMORY: - break; - default: - DRM_ERROR("invalid class = 0x%x\n", init->class); - return DRM_ERR(EPERM); - } - - /* Check access mode, and translate to NV_DMA_ACCESS_* */ - switch (init->access) { - case NOUVEAU_MEM_ACCESS_RO: - init->access = NV_DMA_ACCESS_RO; - break; - case NOUVEAU_MEM_ACCESS_WO: - init->access = NV_DMA_ACCESS_WO; - break; - case NOUVEAU_MEM_ACCESS_RW: - init->access = NV_DMA_ACCESS_RW; - break; - default: - DRM_ERROR("invalid access mode = %d\n", init->access); - return DRM_ERR(EPERM); - } - - /* Check that request is within the allowed limits of "target" */ - switch (init->target) { - case NOUVEAU_MEM_FB: - limit = dev_priv->fb_available_size; - init->target = NV_DMA_TARGET_VIDMEM; - break; - case NOUVEAU_MEM_AGP: - limit = dev_priv->agp_available_size; - init->target = NV_DMA_TARGET_AGP; - break; - 
default: - DRM_ERROR("invalid target = 0x%x\n", init->target); - return DRM_ERR(EPERM); - } - - if ((init->offset > limit) || (init->offset + init->size) > limit) { - DRM_ERROR("access out of allowed range (%d,0x%08x,0x%08x)\n", - init->target, init->offset, init->size); - return DRM_ERR(EPERM); - } - - return 0; -} - -int nouveau_ioctl_dma_object_init(DRM_IOCTL_ARGS) -{ - DRM_DEVICE; - drm_nouveau_dma_object_init_t init; - struct nouveau_object *obj; - - DRM_COPY_FROM_USER_IOCTL(init, (drm_nouveau_dma_object_init_t __user *) - data, sizeof(init)); - - if (!nouveau_fifo_owner(dev, filp, init.channel)) { - DRM_ERROR("pid %d doesn't own channel %d\n", - DRM_CURRENTPID, init.channel); - return DRM_ERR(EINVAL); - } - - if (nouveau_dma_object_check_access(dev, &init)) - return DRM_ERR(EPERM); - - if (nouveau_object_handle_find(dev, init.channel, init.handle)) { - DRM_ERROR("Channel %d: handle 0x%08x already exists\n", - init.channel, init.handle); - return DRM_ERR(EINVAL); - } - - obj = nouveau_object_dma_create(dev, init.channel, init.class, - init.offset, init.size, - init.access, init.target); - if (!obj) - return DRM_ERR(ENOMEM); - - obj->handle = init.handle; - if (nouveau_ht_object_insert(dev, init.channel, init.handle, obj)) { - nouveau_object_free(dev, obj); - return DRM_ERR(ENOMEM); - } - - return 0; -} - diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c index b3562e2f..68392c3a 100644 --- a/shared-core/nouveau_state.c +++ b/shared-core/nouveau_state.c @@ -260,9 +260,9 @@ void nouveau_preclose(drm_device_t * dev, DRMFILE filp) { drm_nouveau_private_t *dev_priv = dev->dev_private; + nouveau_fifo_cleanup(dev, filp); nouveau_mem_release(filp,dev_priv->fb_heap); nouveau_mem_release(filp,dev_priv->agp_heap); - nouveau_fifo_cleanup(dev, filp); } /* first module load, setup the mmio/fb mapping */ @@ -282,7 +282,6 @@ int nouveau_firstopen(struct drm_device *dev) int nouveau_load(struct drm_device *dev, unsigned long flags) { drm_nouveau_private_t *dev_priv; - int ret; if (flags==NV_UNKNOWN) return DRM_ERR(EINVAL); From ce0d528d3ca78348a7c1ad7c402757824fb6cf95 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Sun, 24 Jun 2007 20:49:19 +1000 Subject: [PATCH 047/437] nouveau/nv50: skeletal backend --- linux-core/Makefile.kernel | 6 +-- linux-core/nv50_fifo.c | 1 + linux-core/nv50_graph.c | 1 + linux-core/nv50_mc.c | 1 + shared-core/drm_pciids.txt | 8 ++++ shared-core/nouveau_drv.h | 20 +++++++++ shared-core/nouveau_state.c | 21 ++++++++++ shared-core/nv50_fifo.c | 83 ++++++++++++++++++++++++++++++++++++ shared-core/nv50_graph.c | 84 +++++++++++++++++++++++++++++++++++++ shared-core/nv50_mc.c | 42 +++++++++++++++++++ 10 files changed, 264 insertions(+), 3 deletions(-) create mode 120000 linux-core/nv50_fifo.c create mode 120000 linux-core/nv50_graph.c create mode 120000 linux-core/nv50_mc.c create mode 100644 shared-core/nv50_fifo.c create mode 100644 shared-core/nv50_graph.c create mode 100644 shared-core/nv50_mc.c diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel index 6ab17a49..478c4df0 100644 --- a/linux-core/Makefile.kernel +++ b/linux-core/Makefile.kernel @@ -23,11 +23,11 @@ i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \ nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \ nouveau_object.o nouveau_irq.o nouveau_notifier.o \ nv04_timer.o \ - nv04_mc.o nv40_mc.o \ + nv04_mc.o nv40_mc.o nv50_mc.o \ nv04_fb.o nv10_fb.o nv40_fb.o \ - nv04_fifo.o nv10_fifo.o nv40_fifo.o \ + nv04_fifo.o nv10_fifo.o nv40_fifo.o 
nv50_fifo.o \ nv04_graph.o nv10_graph.o nv20_graph.o nv30_graph.o \ - nv40_graph.o + nv40_graph.o nv50_graph.o radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o sis-objs := sis_drv.o sis_mm.o ffb-objs := ffb_drv.o ffb_context.o diff --git a/linux-core/nv50_fifo.c b/linux-core/nv50_fifo.c new file mode 120000 index 00000000..4c9990a9 --- /dev/null +++ b/linux-core/nv50_fifo.c @@ -0,0 +1 @@ +../shared-core/nv50_fifo.c \ No newline at end of file diff --git a/linux-core/nv50_graph.c b/linux-core/nv50_graph.c new file mode 120000 index 00000000..03f69e68 --- /dev/null +++ b/linux-core/nv50_graph.c @@ -0,0 +1 @@ +../shared-core/nv50_graph.c \ No newline at end of file diff --git a/linux-core/nv50_mc.c b/linux-core/nv50_mc.c new file mode 120000 index 00000000..f4bb369e --- /dev/null +++ b/linux-core/nv50_mc.c @@ -0,0 +1 @@ +../shared-core/nv50_mc.c \ No newline at end of file diff --git a/shared-core/drm_pciids.txt b/shared-core/drm_pciids.txt index ad9994ec..126974d0 100644 --- a/shared-core/drm_pciids.txt +++ b/shared-core/drm_pciids.txt @@ -596,6 +596,9 @@ 0x10de 0x018d NV_17 "GeForce4 448 Go" 0x10de 0x0191 NV_50 "GeForce 8800 GTX" 0x10de 0x0193 NV_50 "GeForce 8800 GTS" +0x10de 0x0194 NV_50 "GeForce 8800 Ultra" +0x10de 0x019d NV_50 "Quadro FX 5600" +0x10de 0x019e NV_50 "Quadro FX 4600" 0x10de 0x01a0 NV_11|NV_NFORCE "GeForce2 MX Integrated Graphics" 0x10de 0x01d1 NV_44 "GeForce 7300 LE" 0x10de 0x01d6 NV_44 "GeForce Go 7200" @@ -702,6 +705,11 @@ 0x10de 0x03d1 NV_44 "GeForce 6100 nForce 405" 0x10de 0x03d2 NV_44 "GeForce 6100 nForce 400" 0x10de 0x03d5 NV_44 "GeForce 6100 nForce 420" +0x10de 0x0400 NV_50 "GeForce 8600 GTS" +0x10de 0x0402 NV_50 "GeForce 8600 GT" +0x10de 0x0421 NV_50 "GeForce 8500 GT" +0x10de 0x0422 NV_50 "GeForce 8400 GS" +0x10de 0x0423 NV_50 "GeForce 8300 GS" 0x12d2 0x0008 NV_03 "NV1" 0x12d2 0x0009 NV_03 "DAC64" 0x12d2 0x0018 NV_03 "Riva128" diff --git a/shared-core/nouveau_drv.h b/shared-core/nouveau_drv.h index 7a1ca3d5..f549e600 100644 --- a/shared-core/nouveau_drv.h +++ b/shared-core/nouveau_drv.h @@ -295,6 +295,14 @@ extern void nv40_fifo_destroy_context(drm_device_t *, int channel); extern int nv40_fifo_load_context(drm_device_t *, int channel); extern int nv40_fifo_save_context(drm_device_t *, int channel); +/* nv50_fifo.c */ +extern int nv50_fifo_init(drm_device_t *); +extern void nv50_fifo_takedown(drm_device_t *); +extern int nv50_fifo_create_context(drm_device_t *, int channel); +extern void nv50_fifo_destroy_context(drm_device_t *, int channel); +extern int nv50_fifo_load_context(drm_device_t *, int channel); +extern int nv50_fifo_save_context(drm_device_t *, int channel); + /* nv04_graph.c */ extern void nouveau_nv04_context_switch(drm_device_t *dev); extern int nv04_graph_init(drm_device_t *dev); @@ -338,6 +346,14 @@ extern void nv40_graph_destroy_context(drm_device_t *, int channel); extern int nv40_graph_load_context(drm_device_t *, int channel); extern int nv40_graph_save_context(drm_device_t *, int channel); +/* nv50_graph.c */ +extern int nv50_graph_init(drm_device_t *); +extern void nv50_graph_takedown(drm_device_t *); +extern int nv50_graph_create_context(drm_device_t *, int channel); +extern void nv50_graph_destroy_context(drm_device_t *, int channel); +extern int nv50_graph_load_context(drm_device_t *, int channel); +extern int nv50_graph_save_context(drm_device_t *, int channel); + /* nv04_mc.c */ extern int nv04_mc_init(drm_device_t *dev); extern void nv04_mc_takedown(drm_device_t *dev); @@ -346,6 +362,10 @@ 
extern void nv04_mc_takedown(drm_device_t *dev); extern int nv40_mc_init(drm_device_t *dev); extern void nv40_mc_takedown(drm_device_t *dev); +/* nv50_mc.c */ +extern int nv50_mc_init(drm_device_t *dev); +extern void nv50_mc_takedown(drm_device_t *dev); + /* nv04_timer.c */ extern int nv04_timer_init(drm_device_t *dev); extern void nv04_timer_takedown(drm_device_t *dev); diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c index 68392c3a..0cb82355 100644 --- a/shared-core/nouveau_state.c +++ b/shared-core/nouveau_state.c @@ -72,6 +72,7 @@ static int nouveau_init_card_mappings(drm_device_t *dev) return 0; } +static int nouveau_stub_init(drm_device_t *dev) { return 0; } static void nouveau_stub_takedown(drm_device_t *dev) {} static int nouveau_init_engine_ptrs(drm_device_t *dev) { @@ -187,6 +188,26 @@ static int nouveau_init_engine_ptrs(drm_device_t *dev) engine->fifo.save_context = nv40_fifo_save_context; break; case 0x50: + case 0x80: /* gotta love NVIDIA's consistency.. */ + engine->mc.init = nv50_mc_init; + engine->mc.takedown = nv50_mc_takedown; + engine->timer.init = nouveau_stub_init; + engine->timer.takedown = nouveau_stub_takedown; + engine->fb.init = nouveau_stub_init; + engine->fb.takedown = nouveau_stub_takedown; + engine->graph.init = nv50_graph_init; + engine->graph.takedown = nv50_graph_takedown; + engine->graph.create_context = nv50_graph_create_context; + engine->graph.destroy_context = nv50_graph_destroy_context; + engine->graph.load_context = nv50_graph_load_context; + engine->graph.save_context = nv50_graph_save_context; + engine->fifo.init = nv50_fifo_init; + engine->fifo.takedown = nv50_fifo_takedown; + engine->fifo.create_context = nv50_fifo_create_context; + engine->fifo.destroy_context = nv50_fifo_destroy_context; + engine->fifo.load_context = nv50_fifo_load_context; + engine->fifo.save_context = nv50_fifo_save_context; + break; default: DRM_ERROR("NV%02x unsupported\n", dev_priv->chipset); return 1; diff --git a/shared-core/nv50_fifo.c b/shared-core/nv50_fifo.c new file mode 100644 index 00000000..e5d37949 --- /dev/null +++ b/shared-core/nv50_fifo.c @@ -0,0 +1,83 @@ +/* + * Copyright (C) 2007 Ben Skeggs. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include "drmP.h" +#include "drm.h" +#include "nouveau_drv.h" + +static void +nv50_fifo_init_reset(drm_device_t *dev) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + uint32_t pmc_e; + + pmc_e = NV_READ(NV03_PMC_ENABLE); + NV_WRITE(NV03_PMC_ENABLE, pmc_e & ~NV_PMC_ENABLE_PFIFO); + pmc_e = NV_READ(NV03_PMC_ENABLE); + NV_WRITE(NV03_PMC_ENABLE, pmc_e | NV_PMC_ENABLE_PFIFO); +} + +int +nv50_fifo_init(drm_device_t *dev) +{ + nv50_fifo_init_reset(dev); + + DRM_ERROR("stub!\n"); + return 0; +} + +void +nv50_fifo_takedown(drm_device_t *dev) +{ + DRM_ERROR("stub!\n"); +} + +int +nv50_fifo_create_context(drm_device_t *dev, int channel) +{ + DRM_ERROR("stub!\n"); + return 0; +} + +void +nv50_fifo_destroy_context(drm_device_t *dev, int channel) +{ +} + +int +nv50_fifo_load_context(drm_device_t *dev, int channel) +{ + DRM_ERROR("stub!\n"); + return 0; +} + +int +nv50_fifo_save_context(drm_device_t *dev, int channel) +{ + DRM_ERROR("stub!\n"); + return 0; +} + diff --git a/shared-core/nv50_graph.c b/shared-core/nv50_graph.c new file mode 100644 index 00000000..8c3e2b9b --- /dev/null +++ b/shared-core/nv50_graph.c @@ -0,0 +1,84 @@ +/* + * Copyright (C) 2007 Ben Skeggs. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include "drmP.h" +#include "drm.h" +#include "nouveau_drv.h" + +static void +nv50_graph_init_reset(drm_device_t *dev) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + uint32_t pmc_e; + + pmc_e = NV_READ(NV03_PMC_ENABLE); + NV_WRITE(NV03_PMC_ENABLE, pmc_e & ~NV_PMC_ENABLE_PGRAPH); + pmc_e = NV_READ(NV03_PMC_ENABLE); + NV_WRITE(NV03_PMC_ENABLE, pmc_e | NV_PMC_ENABLE_PGRAPH); +} + +int +nv50_graph_init(drm_device_t *dev) +{ + nv50_graph_init_reset(dev); + + DRM_ERROR("stub!\n"); + return 0; +} + +void +nv50_graph_takedown(drm_device_t *dev) +{ + DRM_ERROR("stub!\n"); +} + +int +nv50_graph_create_context(drm_device_t *dev, int channel) +{ + DRM_ERROR("stub!\n"); + return 0; +} + +void +nv50_graph_destroy_context(drm_device_t *dev, int channel) +{ + DRM_ERROR("stub!\n"); +} + +int +nv50_graph_load_context(drm_device_t *dev, int channel) +{ + DRM_ERROR("stub!\n"); + return 0; +} + +int +nv50_graph_save_context(drm_device_t *dev, int channel) +{ + DRM_ERROR("stub!\n"); + return 0; +} + diff --git a/shared-core/nv50_mc.c b/shared-core/nv50_mc.c new file mode 100644 index 00000000..7f7537f0 --- /dev/null +++ b/shared-core/nv50_mc.c @@ -0,0 +1,42 @@ +/* + * Copyright (C) 2007 Ben Skeggs. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include "drmP.h" +#include "drm.h" +#include "nouveau_drv.h" + +int +nv50_mc_init(drm_device_t *dev) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + + NV_WRITE(NV03_PMC_ENABLE, 0xFFFFFFFF); + return 0; +} + +void nv50_mc_takedown(drm_device_t *dev) +{ +} From 38617b6a26d893bbd7b235019159e609f6cdd84b Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 25 Jun 2007 03:52:06 +1000 Subject: [PATCH 048/437] nouveau: name some regs --- shared-core/nouveau_reg.h | 15 +++++++++++++++ shared-core/nv30_graph.c | 10 ++++++---- shared-core/nv40_graph.c | 29 +++++++++++++++++------------ 3 files changed, 38 insertions(+), 16 deletions(-) diff --git a/shared-core/nouveau_reg.h b/shared-core/nouveau_reg.h index ba61f997..4c013c53 100644 --- a/shared-core/nouveau_reg.h +++ b/shared-core/nouveau_reg.h @@ -135,6 +135,17 @@ #define NV10_PGRAPH_CTX_CACHE4 0x004001C0 #define NV04_PGRAPH_CTX_CACHE4 0x004001E0 #define NV10_PGRAPH_CTX_CACHE5 0x004001E0 +#define NV40_PGRAPH_CTXCTL_0304 0x00400304 +#define NV40_PGRAPH_CTXCTL_0304_XFER_CTX 0x00000001 +#define NV40_PGRAPH_CTXCTL_0310 0x00400310 +#define NV40_PGRAPH_CTXCTL_0310_XFER_SAVE 0x00000020 +#define NV40_PGRAPH_CTXCTL_0310_XFER_LOAD 0x00000040 +#define NV40_PGRAPH_CTXCTL_030C 0x0040030c +#define NV40_PGRAPH_CTXCTL_UCODE_INDEX 0x00400324 +#define NV40_PGRAPH_CTXCTL_UCODE_DATA 0x00400328 +#define NV40_PGRAPH_CTXCTL_CUR 0x0040032c +#define NV40_PGRAPH_CTXCTL_CUR_LOADED 0x01000000 +#define NV40_PGRAPH_CTXCTL_CUR_INST_MASK 0x000FFFFF #define NV03_PGRAPH_ABS_X_RAM 0x00400400 #define NV03_PGRAPH_ABS_Y_RAM 0x00400480 #define NV03_PGRAPH_X_MISC 0x00400500 @@ -230,7 +241,11 @@ #define NV10_PGRAPH_SCALED_FORMAT 0x00400778 #define NV10_PGRAPH_CHANNEL_CTX_TABLE 0x00400780 #define NV10_PGRAPH_CHANNEL_CTX_SIZE 0x00400784 +#define NV20_PGRAPH_CHANNEL_CTX_POINTER 0x00400784 #define NV10_PGRAPH_CHANNEL_CTX_POINTER 0x00400788 +#define NV20_PGRAPH_CHANNEL_CTX_XFER 0x00400788 +#define NV20_PGRAPH_CHANNEL_CTX_XFER_LOAD 0x00000001 +#define NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE 0x00000002 #define NV04_PGRAPH_PATT_COLOR0 0x00400800 #define NV04_PGRAPH_PATT_COLOR1 0x00400804 #define NV04_PGRAPH_PATTERN 0x00400808 diff --git a/shared-core/nv30_graph.c b/shared-core/nv30_graph.c index 9f064a0a..7a87990a 100644 --- a/shared-core/nv30_graph.c +++ b/shared-core/nv30_graph.c @@ -174,8 +174,9 @@ int nv30_graph_load_context(drm_device_t *dev, int channel) return DRM_ERR(EINVAL); inst = nouveau_chip_instance_get(dev, chan->ramin_grctx); - NV_WRITE(0x400784, inst); - NV_WRITE(0x400788, 1); + NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst); + NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_XFER, + NV20_PGRAPH_CHANNEL_CTX_XFER_LOAD); return nouveau_graph_wait_idle(dev); } @@ -190,8 +191,9 @@ int nv30_graph_save_context(drm_device_t *dev, int channel) return DRM_ERR(EINVAL); inst = nouveau_chip_instance_get(dev, chan->ramin_grctx); - NV_WRITE(0x400784, inst); - NV_WRITE(0x400788, 2); + NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst); + NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_XFER, + NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE); return nouveau_graph_wait_idle(dev); } diff --git a/shared-core/nv40_graph.c b/shared-core/nv40_graph.c index acd0cb0f..6fb575db 100644 --- a/shared-core/nv40_graph.c +++ b/shared-core/nv40_graph.c @@ -1304,20 +1304,23 @@ nv40_graph_transfer_context(drm_device_t *dev, uint32_t inst, int save) uint32_t old_cp, tv = 1000; int i; - old_cp = NV_READ(0x400784); - NV_WRITE(0x400784, inst); - NV_WRITE(0x400310, save ? 
0x20 : 0x40); - NV_WRITE(0x400304, 1); + old_cp = NV_READ(NV20_PGRAPH_CHANNEL_CTX_POINTER); + NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst); + NV_WRITE(NV40_PGRAPH_CTXCTL_0310, + save ? NV40_PGRAPH_CTXCTL_0310_XFER_SAVE : + NV40_PGRAPH_CTXCTL_0310_XFER_LOAD); + NV_WRITE(NV40_PGRAPH_CTXCTL_0304, NV40_PGRAPH_CTXCTL_0304_XFER_CTX); for (i = 0; i < tv; i++) { - if (NV_READ(0x40030c) == 0) + if (NV_READ(NV40_PGRAPH_CTXCTL_030C) == 0) break; } - NV_WRITE(0x400784, old_cp); + NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, old_cp); if (i == tv) { DRM_ERROR("failed: inst=0x%08x save=%d\n", inst, save); - DRM_ERROR("0x40030C = 0x%08x\n", NV_READ(0x40030c)); + DRM_ERROR("0x40030C = 0x%08x\n", + NV_READ(NV40_PGRAPH_CTXCTL_030C)); return DRM_ERR(EBUSY); } @@ -1365,8 +1368,10 @@ nv40_graph_load_context(drm_device_t *dev, int channel) * unknown as to what bit 24 does. The nv ddx has it set, so we will * set it here too. */ - NV_WRITE(0x400784, inst); - NV_WRITE(0x40032C, inst | 0x01000000); + NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst); + NV_WRITE(NV40_PGRAPH_CTXCTL_CUR, + (inst & NV40_PGRAPH_CTXCTL_CUR_INST_MASK) | + NV40_PGRAPH_CTXCTL_CUR_LOADED); /* 0x32E0 records the instance address of the active FIFO's PGRAPH * context. If at any time this doesn't match 0x40032C, you will * recieve PGRAPH_INTR_CONTEXT_SWITCH @@ -1631,15 +1636,15 @@ nv40_graph_init(drm_device_t *dev) DRM_DEBUG("Loading context-switch voodoo\n"); i = 0; - NV_WRITE(0x400324, 0); + NV_WRITE(NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0); while (ctx_voodoo[i] != ~0) { - NV_WRITE(0x400328, ctx_voodoo[i]); + NV_WRITE(NV40_PGRAPH_CTXCTL_UCODE_DATA, ctx_voodoo[i]); i++; } } /* No context present currently */ - NV_WRITE(0x40032C, 0x00000000); + NV_WRITE(NV40_PGRAPH_CTXCTL_CUR, 0x00000000); NV_WRITE(NV03_PGRAPH_INTR_EN, 0x00000000); NV_WRITE(NV03_PGRAPH_INTR , 0xFFFFFFFF); From 18a6d1c9c380b6b19524f654d9173a79e19aa1df Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 25 Jun 2007 15:16:19 +1000 Subject: [PATCH 049/437] nouveau: simplify PRAMIN access --- shared-core/nouveau_drv.h | 18 +++++++++++------- shared-core/nouveau_mem.c | 32 -------------------------------- shared-core/nouveau_state.c | 18 ++++++++++++++++-- 3 files changed, 27 insertions(+), 41 deletions(-) diff --git a/shared-core/nouveau_drv.h b/shared-core/nouveau_drv.h index f549e600..b2ddf0a6 100644 --- a/shared-core/nouveau_drv.h +++ b/shared-core/nouveau_drv.h @@ -220,11 +220,6 @@ extern struct mem_block* nouveau_instmem_alloc(struct drm_device *dev, uint32_t size, uint32_t align); extern void nouveau_instmem_free(struct drm_device *dev, struct mem_block *block); -extern uint32_t nouveau_instmem_r32(drm_nouveau_private_t *dev_priv, - struct mem_block *mem, int index); -extern void nouveau_instmem_w32(drm_nouveau_private_t *dev_priv, - struct mem_block *mem, int index, - uint32_t val); /* nouveau_notifier.c */ extern int nouveau_notifier_init_channel(drm_device_t *, int channel, DRMFILE); @@ -381,8 +376,17 @@ extern long nouveau_compat_ioctl(struct file *filp, unsigned int cmd, #define NV_WRITE(reg,val) DRM_WRITE32( dev_priv->mmio, (reg), (val) ) #endif -#define INSTANCE_WR(mem,ofs,val) nouveau_instmem_w32(dev_priv,(mem),(ofs),(val)) -#define INSTANCE_RD(mem,ofs) nouveau_instmem_r32(dev_priv,(mem),(ofs)) +/* PRAMIN access */ +#if defined(__powerpc__) +#define NV_RI32(o) in_be32((void __iomem *)(dev_priv->ramin)->handle+(o)) +#define NV_WI32(o,v) out_be32((void __iomem*)(dev_priv->ramin)->handle+(o), (v)) +#else +#define NV_RI32(o) DRM_READ32(dev_priv->ramin, (o)) +#define NV_WI32(o,v) 
DRM_WRITE32(dev_priv->ramin, (o), (v)) +#endif + +#define INSTANCE_RD(o,i) NV_RI32((o)->start + ((i)<<2)) +#define INSTANCE_WR(o,i,v) NV_WI32((o)->start + ((i)<<2), (v)) #endif /* __NOUVEAU_DRV_H__ */ diff --git a/shared-core/nouveau_mem.c b/shared-core/nouveau_mem.c index edfc9d3f..4c6d0d5c 100644 --- a/shared-core/nouveau_mem.c +++ b/shared-core/nouveau_mem.c @@ -602,38 +602,6 @@ void nouveau_instmem_free(struct drm_device *dev, struct mem_block *block) } } -uint32_t nouveau_instmem_r32(drm_nouveau_private_t *dev_priv, - struct mem_block *mem, int index) -{ - uint32_t ofs = (uint32_t)mem->start + (index<<2); - - if (dev_priv->ramin) { -#if defined(__powerpc__) - return in_be32((void __iomem *)(dev_priv->ramin)->handle + ofs); -#else - return DRM_READ32(dev_priv->ramin, ofs); -#endif - } else { - return NV_READ(NV_RAMIN+ofs); - } -} - -void nouveau_instmem_w32(drm_nouveau_private_t *dev_priv, - struct mem_block *mem, int index, uint32_t val) -{ - uint32_t ofs = (uint32_t)mem->start + (index<<2); - - if (dev_priv->ramin) { -#if defined(__powerpc__) - out_be32((void __iomem *)(dev_priv->ramin)->handle + ofs, val); -#else - DRM_WRITE32(dev_priv->ramin, ofs, val); -#endif - } else { - NV_WRITE(NV_RAMIN+ofs, val); - } -} - /* * Ioctls */ diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c index 0cb82355..94d8081c 100644 --- a/shared-core/nouveau_state.c +++ b/shared-core/nouveau_state.c @@ -51,6 +51,7 @@ static int nouveau_init_card_mappings(drm_device_t *dev) DRM_DEBUG("regs mapped ok at 0x%lx\n", dev_priv->mmio->offset); /* map larger RAMIN aperture on NV40 cards */ + dev_priv->ramin = NULL; if (dev_priv->card_type >= NV_40) { int ramin_resource = 2; if (drm_get_resource_len(dev, ramin_resource) == 0) @@ -66,8 +67,21 @@ static int nouveau_init_card_mappings(drm_device_t *dev) "limited instance memory available\n"); dev_priv->ramin = NULL; } - } else - dev_priv->ramin = NULL; + } + + /* On older cards (or if the above failed), create a map covering + * the BAR0 PRAMIN aperture */ + if (!dev_priv->ramin) { + ret = drm_addmap(dev, + drm_get_resource_start(dev, 0) + NV_RAMIN, + (1*1024*1024), + _DRM_REGISTERS, _DRM_READ_ONLY, + &dev_priv->ramin); + if (ret) { + DRM_ERROR("Failed to map BAR0 PRAMIN: %d\n", ret); + return ret; + } + } return 0; } From 68ecf61647e9ec16d59cc8f50550d11478eb3118 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 25 Jun 2007 15:42:55 +1000 Subject: [PATCH 050/437] nouveau: never touch PRAMIN with NV_WRITE, cleanup RAMHT code a bit --- shared-core/nouveau_drv.h | 5 +- shared-core/nouveau_mem.c | 9 +-- shared-core/nouveau_notifier.c | 2 +- shared-core/nouveau_object.c | 135 ++++++++++++++++++++------------- shared-core/nv04_fifo.c | 10 +-- shared-core/nv10_fifo.c | 16 ++-- shared-core/nv40_fifo.c | 16 ++-- 7 files changed, 107 insertions(+), 86 deletions(-) diff --git a/shared-core/nouveau_drv.h b/shared-core/nouveau_drv.h index b2ddf0a6..3cca07fc 100644 --- a/shared-core/nouveau_drv.h +++ b/shared-core/nouveau_drv.h @@ -64,7 +64,6 @@ struct nouveau_object int channel; struct mem_block *instance; - uint32_t ht_loc; uint32_t handle; int class; @@ -242,8 +241,8 @@ extern int nouveau_object_init_channel(drm_device_t *, int channel, uint32_t tt_handle); extern void nouveau_object_takedown_channel(drm_device_t *dev, int channel); extern void nouveau_object_cleanup(drm_device_t *dev, int channel); -extern int nouveau_ht_object_insert(drm_device_t *, int channel, - uint32_t handle, struct nouveau_object *); +extern int nouveau_ramht_insert(drm_device_t *, 
int channel, + uint32_t handle, struct nouveau_object *); extern struct nouveau_object * nouveau_object_gr_create(drm_device_t *dev, int channel, int class); extern struct nouveau_object * diff --git a/shared-core/nouveau_mem.c b/shared-core/nouveau_mem.c index 4c6d0d5c..d8ae52b7 100644 --- a/shared-core/nouveau_mem.c +++ b/shared-core/nouveau_mem.c @@ -489,13 +489,8 @@ nouveau_instmem_determine_amount(struct drm_device *dev) DRM_DEBUG("RAMIN size: %dKiB\n", dev_priv->ramin_size>>10); /* Clear all of it, except the BIOS image that's in the first 64KiB */ - if (dev_priv->ramin) { - for (i=(64*1024); iramin_size; i+=4) - DRM_WRITE32(dev_priv->ramin, i, 0x00000000); - } else { - for (i=(64*1024); iramin_size; i+=4) - DRM_WRITE32(dev_priv->mmio, NV_RAMIN + i, 0x00000000); - } + for (i=(64*1024); iramin_size; i+=4) + NV_WI32(i, 0x00000000); } static void diff --git a/shared-core/nouveau_notifier.c b/shared-core/nouveau_notifier.c index ab6f8c2d..0cfe733e 100644 --- a/shared-core/nouveau_notifier.c +++ b/shared-core/nouveau_notifier.c @@ -115,7 +115,7 @@ nouveau_notifier_alloc(drm_device_t *dev, int channel, uint32_t handle, } obj->handle = handle; - if (nouveau_ht_object_insert(dev, channel, handle, obj)) { + if (nouveau_ramht_insert(dev, channel, handle, obj)) { nouveau_object_free(dev, obj); nouveau_mem_free_block(mem); DRM_ERROR("Error inserting notifier ctxdma into RAMHT\n"); diff --git a/shared-core/nouveau_object.c b/shared-core/nouveau_object.c index e7528e23..dac08df4 100644 --- a/shared-core/nouveau_object.c +++ b/shared-core/nouveau_object.c @@ -139,7 +139,7 @@ nouveau_object_handle_find(drm_device_t *dev, int channel, uint32_t handle) is given as: */ static uint32_t -nouveau_ht_handle_hash(drm_device_t *dev, int channel, uint32_t handle) +nouveau_ramht_hash_handle(drm_device_t *dev, int channel, uint32_t handle) { drm_nouveau_private_t *dev_priv=dev->dev_private; uint32_t hash = 0; @@ -153,63 +153,90 @@ nouveau_ht_handle_hash(drm_device_t *dev, int channel, uint32_t handle) return hash << 3; } -int -nouveau_ht_object_insert(drm_device_t* dev, int channel, uint32_t handle, - struct nouveau_object *obj) +static int +nouveau_ramht_entry_valid(drm_device_t *dev, uint32_t ramht, uint32_t offset) { drm_nouveau_private_t *dev_priv=dev->dev_private; - int ht_base = NV_RAMIN + dev_priv->ramht_offset; -/* int ht_end = ht_base + dev_priv->ramht_size; */ - int o_ofs, ofs; + uint32_t ctx = NV_RI32(ramht + offset + 4); - obj->handle = handle; - o_ofs = ofs = nouveau_ht_handle_hash(dev, channel, obj->handle); - - while (NV_READ(ht_base + ofs) || NV_READ(ht_base + ofs + 4)) { - ofs += 8; - if (ofs == dev_priv->ramht_size) ofs = 0; - if (ofs == o_ofs) { - DRM_ERROR("no free hash table entries\n"); - return 1; - } - } - ofs += ht_base; - - DRM_DEBUG("Channel %d - Handle 0x%08x at 0x%08x\n", - channel, obj->handle, ofs); - - NV_WRITE(NV_RAMHT_HANDLE_OFFSET + ofs, obj->handle); - if (dev_priv->card_type >= NV_40) - NV_WRITE(NV_RAMHT_CONTEXT_OFFSET + ofs, - (channel << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) | - (obj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT) | - nouveau_chip_instance_get(dev, obj->instance) - ); - else - NV_WRITE(NV_RAMHT_CONTEXT_OFFSET + ofs, - NV_RAMHT_CONTEXT_VALID | - (channel << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) | - (obj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT) | - nouveau_chip_instance_get(dev, obj->instance) - ); - - obj->ht_loc = ofs; - return 0; + if (dev_priv->card_type < NV_40) + return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0); + return (ctx != 0); } -static void 
nouveau_hash_table_remove(drm_device_t* dev, - struct nouveau_object *obj) +int +nouveau_ramht_insert(drm_device_t* dev, int channel, uint32_t handle, + struct nouveau_object *obj) +{ + drm_nouveau_private_t *dev_priv=dev->dev_private; + uint32_t ramht = dev_priv->ramht_offset; + uint32_t ctx, co, ho; + uint32_t inst; + + inst = nouveau_chip_instance_get(dev, obj->instance); + if (dev_priv->card_type < NV_40) { + ctx = NV_RAMHT_CONTEXT_VALID | inst | + (channel << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) | + (obj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT); + } else + if (dev_priv->card_type < NV_50) { + ctx = inst | + (channel << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) | + (obj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT); + } else { + ctx = inst | + (obj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT); + } + + co = ho = nouveau_ramht_hash_handle(dev, channel, handle); + do { + if (!nouveau_ramht_entry_valid(dev, ramht, co)) { + DRM_DEBUG("insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n", + channel, co, handle, ctx); + NV_WI32(ramht + co + 0, handle); + NV_WI32(ramht + co + 4, ctx); + obj->handle = handle; + return 0; + } + DRM_DEBUG("collision ch%d 0x%08x: h=0x%08x\n", + channel, co, NV_RI32(ramht + co)); + + co += 8; + if (co == dev_priv->ramht_size) + co = 0; + } while (co != ho); + + DRM_ERROR("RAMHT space exhausted. ch=%d\n", channel); + return DRM_ERR(ENOMEM); +} + +static void +nouveau_ramht_remove(drm_device_t* dev, struct nouveau_object *obj) { drm_nouveau_private_t *dev_priv = dev->dev_private; + uint32_t ramht = dev_priv->ramht_offset; + uint32_t co, ho; - DRM_DEBUG("Remove handle 0x%08x at 0x%08x from HT\n", - obj->handle, obj->ht_loc); - if (obj->ht_loc) { - DRM_DEBUG("... HT entry was: 0x%08x/0x%08x\n", - NV_READ(obj->ht_loc), NV_READ(obj->ht_loc+4)); - NV_WRITE(obj->ht_loc , 0x00000000); - NV_WRITE(obj->ht_loc+4, 0x00000000); - } + co = ho = nouveau_ramht_hash_handle(dev, obj->channel, obj->handle); + do { + if (nouveau_ramht_entry_valid(dev, ramht, co) && + (obj->handle == NV_RI32(ramht + co))) { + DRM_DEBUG("remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n", + obj->channel, co, obj->handle, + NV_RI32(ramht + co + 4)); + NV_WI32(ramht + co + 0, 0x00000000); + NV_WI32(ramht + co + 4, 0x00000000); + obj->handle = ~0; + return; + } + + co += 8; + if (co == dev_priv->ramht_size) + co = 0; + } while (co != ho); + + DRM_ERROR("RAMHT entry not found. 
ch=%d, handle=0x%08x\n", + obj->channel, obj->handle); } static struct nouveau_object * @@ -457,7 +484,7 @@ nouveau_object_free(drm_device_t *dev, struct nouveau_object *obj) { nouveau_object_instance_free(dev, obj); if (obj->handle != ~0) - nouveau_hash_table_remove(dev, obj); + nouveau_ramht_remove(dev, obj); drm_free(obj, sizeof(struct nouveau_object), DRM_MEM_DRIVER); } @@ -480,7 +507,7 @@ nouveau_object_init_channel(drm_device_t *dev, int channel, return DRM_ERR(ENOMEM); } - ret = nouveau_ht_object_insert(dev, channel, vram_handle, gpuobj); + ret = nouveau_ramht_insert(dev, channel, vram_handle, gpuobj); if (ret) { DRM_ERROR("Error referencing VRAM ctxdma: %d\n", ret); return ret; @@ -500,7 +527,7 @@ nouveau_object_init_channel(drm_device_t *dev, int channel, return DRM_ERR(ENOMEM); } - ret = nouveau_ht_object_insert(dev, channel, tt_handle, gpuobj); + ret = nouveau_ramht_insert(dev, channel, tt_handle, gpuobj); if (ret) { DRM_ERROR("Error referencing TT ctxdma: %d\n", ret); return ret; @@ -545,7 +572,7 @@ int nouveau_ioctl_grobj_alloc(DRM_IOCTL_ARGS) if (!obj) return DRM_ERR(ENOMEM); - if (nouveau_ht_object_insert(dev, init.channel, init.handle, obj)) { + if (nouveau_ramht_insert(dev, init.channel, init.handle, obj)) { nouveau_object_free(dev, obj); return DRM_ERR(ENOMEM); } diff --git a/shared-core/nv04_fifo.c b/shared-core/nv04_fifo.c index 783514a7..57010182 100644 --- a/shared-core/nv04_fifo.c +++ b/shared-core/nv04_fifo.c @@ -28,9 +28,9 @@ #include "drm.h" #include "nouveau_drv.h" -#define NV04_RAMFC (NV_RAMIN + dev_priv->ramfc_offset) -#define RAMFC_WR(offset, val) NV_WRITE(fifoctx + NV04_RAMFC_##offset, (val)) -#define RAMFC_RD(offset) NV_READ(fifoctx + NV04_RAMFC_##offset) +#define NV04_RAMFC dev_priv->ramfc_offset +#define RAMFC_WR(offset, val) NV_WI32(fifoctx + NV04_RAMFC_##offset, (val)) +#define RAMFC_RD(offset) NV_RI32(fifoctx + NV04_RAMFC_##offset) #define NV04_FIFO_CONTEXT_SIZE 32 int @@ -47,7 +47,7 @@ nv04_fifo_create_context(drm_device_t *dev, int channel) /* Clear RAMFC */ for (i=0; ipushbuf_base); @@ -72,7 +72,7 @@ nv04_fifo_destroy_context(drm_device_t *dev, int channel) fifoctx = NV04_RAMFC + (channel * NV04_FIFO_CONTEXT_SIZE); for (i=0; icmdbuf_obj->instance); - fifoctx = NV_RAMIN + dev_priv->ramfc_offset + channel*64; + fifoctx = dev_priv->ramfc_offset + channel*64; for (i=0; iramfc_offset + channel*64; + fifoctx = dev_priv->ramfc_offset + channel*64; for (i=0; iramfc_offset + channel*64; + fifoctx = dev_priv->ramfc_offset + channel*64; NV_WRITE(NV03_PFIFO_CACHE1_PUSH1 , 0x00000100 | channel); @@ -118,7 +118,7 @@ nv10_fifo_save_context(drm_device_t *dev, int channel) uint32_t fifoctx; uint32_t tmp; - fifoctx = NV_RAMIN + dev_priv->ramfc_offset + channel*64; + fifoctx = dev_priv->ramfc_offset + channel*64; RAMFC_WR(DMA_PUT , NV_READ(NV04_PFIFO_CACHE1_DMA_PUT)); RAMFC_WR(DMA_GET , NV_READ(NV04_PFIFO_CACHE1_DMA_GET)); diff --git a/shared-core/nv40_fifo.c b/shared-core/nv40_fifo.c index 9d7afbe5..945fe228 100644 --- a/shared-core/nv40_fifo.c +++ b/shared-core/nv40_fifo.c @@ -28,8 +28,8 @@ #include "nouveau_drv.h" #include "nouveau_drm.h" -#define RAMFC_WR(offset, val) NV_WRITE(fifoctx + NV40_RAMFC_##offset, (val)) -#define RAMFC_RD(offset) NV_READ (fifoctx + NV40_RAMFC_##offset) +#define RAMFC_WR(offset, val) NV_WI32(fifoctx + NV40_RAMFC_##offset, (val)) +#define RAMFC_RD(offset) NV_RI32(fifoctx + NV40_RAMFC_##offset) int nv40_fifo_create_context(drm_device_t *dev, int channel) @@ -39,9 +39,9 @@ nv40_fifo_create_context(drm_device_t *dev, int channel) uint32_t 
fifoctx, grctx, pushbuf; int i; - fifoctx = NV_RAMIN + dev_priv->ramfc_offset + channel*128; + fifoctx = dev_priv->ramfc_offset + channel*128; for (i=0;i<128;i+=4) - NV_WRITE(fifoctx + i, 0); + NV_WI32(fifoctx + i, 0); grctx = nouveau_chip_instance_get(dev, chan->ramin_grctx); pushbuf = nouveau_chip_instance_get(dev, chan->cmdbuf_obj->instance); @@ -73,9 +73,9 @@ nv40_fifo_destroy_context(drm_device_t *dev, int channel) uint32_t fifoctx; int i; - fifoctx = NV_RAMIN + dev_priv->ramfc_offset + channel*128; + fifoctx = dev_priv->ramfc_offset + channel*128; for (i=0;i<128;i+=4) - NV_WRITE(fifoctx + i, 0); + NV_WI32(fifoctx + i, 0); } int @@ -85,7 +85,7 @@ nv40_fifo_load_context(drm_device_t *dev, int channel) uint32_t fifoctx; uint32_t tmp, tmp2; - fifoctx = NV_RAMIN + dev_priv->ramfc_offset + channel*128; + fifoctx = dev_priv->ramfc_offset + channel*128; NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET , RAMFC_RD(DMA_GET)); NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT , RAMFC_RD(DMA_PUT)); @@ -146,7 +146,7 @@ nv40_fifo_save_context(drm_device_t *dev, int channel) uint32_t fifoctx; uint32_t tmp; - fifoctx = NV_RAMIN + dev_priv->ramfc_offset + channel*128; + fifoctx = dev_priv->ramfc_offset + channel*128; RAMFC_WR(DMA_PUT , NV_READ(NV04_PFIFO_CACHE1_DMA_PUT)); RAMFC_WR(DMA_GET , NV_READ(NV04_PFIFO_CACHE1_DMA_GET)); From 2dd85772aa4e134730f294d77b4ff030a175a4ab Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 28 Jun 2007 04:23:17 +1000 Subject: [PATCH 051/437] nouveau/nv10: Fix earlier NV1x chips Can't use nv04 code for them, since an extra field was inserted into RAMFC after DMA_PUT/GET. --- shared-core/nouveau_state.c | 7 ----- shared-core/nv10_fifo.c | 57 ++++++++++++++++++++++--------------- 2 files changed, 34 insertions(+), 30 deletions(-) diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c index 94d8081c..fa773d28 100644 --- a/shared-core/nouveau_state.c +++ b/shared-core/nouveau_state.c @@ -129,17 +129,10 @@ static int nouveau_init_engine_ptrs(drm_device_t *dev) engine->graph.save_context = nv10_graph_save_context; engine->fifo.init = nouveau_fifo_init; engine->fifo.takedown = nouveau_stub_takedown; - if (dev_priv->chipset < 0x17) { - engine->fifo.create_context = nv04_fifo_create_context; - engine->fifo.destroy_context = nv04_fifo_destroy_context; - engine->fifo.load_context = nv04_fifo_load_context; - engine->fifo.save_context = nv04_fifo_save_context; - } else { engine->fifo.create_context = nv10_fifo_create_context; engine->fifo.destroy_context = nv10_fifo_destroy_context; engine->fifo.load_context = nv10_fifo_load_context; engine->fifo.save_context = nv10_fifo_save_context; - } break; case 0x20: engine->mc.init = nv04_mc_init; diff --git a/shared-core/nv10_fifo.c b/shared-core/nv10_fifo.c index 710a47f7..b84971de 100644 --- a/shared-core/nv10_fifo.c +++ b/shared-core/nv10_fifo.c @@ -30,20 +30,20 @@ #define RAMFC_WR(offset, val) NV_WI32(fifoctx + NV10_RAMFC_##offset, (val)) #define RAMFC_RD(offset) NV_RI32(fifoctx + NV10_RAMFC_##offset) -#define NV10_FIFO_CONTEXT_SIZE 64 +#define NV10_RAMFC(c) (dev_priv->ramfc_offset + NV10_RAMFC__SIZE) +#define NV10_RAMFC__SIZE ((dev_priv->chipset) >= 0x17 ? 
64 : 32) int nv10_fifo_create_context(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = dev->dev_private; struct nouveau_fifo *chan = &dev_priv->fifos[channel]; - uint32_t fifoctx, pushbuf; + uint32_t fifoctx = NV10_RAMFC(channel), pushbuf; int i; pushbuf = nouveau_chip_instance_get(dev, chan->cmdbuf_obj->instance); - fifoctx = dev_priv->ramfc_offset + channel*64; - for (i=0; idev_private; - uint32_t fifoctx; + uint32_t fifoctx = NV10_RAMFC(channel); int i; - fifoctx = dev_priv->ramfc_offset + channel*64; - for (i=0; idev_private; - uint32_t fifoctx; + uint32_t fifoctx = NV10_RAMFC(channel); uint32_t tmp; - fifoctx = dev_priv->ramfc_offset + channel*64; - NV_WRITE(NV03_PFIFO_CACHE1_PUSH1 , 0x00000100 | channel); NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET , RAMFC_RD(DMA_GET)); @@ -98,11 +95,19 @@ nv10_fifo_load_context(drm_device_t *dev, int channel) NV_WRITE(NV04_PFIFO_CACHE1_DMA_FETCH , RAMFC_RD(DMA_FETCH)); NV_WRITE(NV04_PFIFO_CACHE1_ENGINE , RAMFC_RD(ENGINE)); NV_WRITE(NV04_PFIFO_CACHE1_PULL1 , RAMFC_RD(PULL1_ENGINE)); - NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_VALUE , RAMFC_RD(ACQUIRE_VALUE)); - NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP, RAMFC_RD(ACQUIRE_TIMESTAMP)); - NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT , RAMFC_RD(ACQUIRE_TIMEOUT)); - NV_WRITE(NV10_PFIFO_CACHE1_SEMAPHORE , RAMFC_RD(SEMAPHORE)); - NV_WRITE(NV10_PFIFO_CACHE1_DMA_SUBROUTINE , RAMFC_RD(DMA_SUBROUTINE)); + + if (dev_priv->chipset >= 0x17) { + NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_VALUE, + RAMFC_RD(ACQUIRE_VALUE)); + NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP, + RAMFC_RD(ACQUIRE_TIMESTAMP)); + NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT, + RAMFC_RD(ACQUIRE_TIMEOUT)); + NV_WRITE(NV10_PFIFO_CACHE1_SEMAPHORE, + RAMFC_RD(SEMAPHORE)); + NV_WRITE(NV10_PFIFO_CACHE1_DMA_SUBROUTINE, + RAMFC_RD(DMA_SUBROUTINE)); + } /* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */ tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_CTL) & ~(1<<31); @@ -115,11 +120,9 @@ int nv10_fifo_save_context(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = dev->dev_private; - uint32_t fifoctx; + uint32_t fifoctx = NV10_RAMFC(channel); uint32_t tmp; - fifoctx = dev_priv->ramfc_offset + channel*64; - RAMFC_WR(DMA_PUT , NV_READ(NV04_PFIFO_CACHE1_DMA_PUT)); RAMFC_WR(DMA_GET , NV_READ(NV04_PFIFO_CACHE1_DMA_GET)); RAMFC_WR(REF_CNT , NV_READ(NV10_PFIFO_CACHE1_REF_CNT)); @@ -132,11 +135,19 @@ nv10_fifo_save_context(drm_device_t *dev, int channel) RAMFC_WR(DMA_FETCH , NV_READ(NV04_PFIFO_CACHE1_DMA_FETCH)); RAMFC_WR(ENGINE , NV_READ(NV04_PFIFO_CACHE1_ENGINE)); RAMFC_WR(PULL1_ENGINE , NV_READ(NV04_PFIFO_CACHE1_PULL1)); - RAMFC_WR(ACQUIRE_VALUE , NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_VALUE)); - RAMFC_WR(ACQUIRE_TIMESTAMP, NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP)); - RAMFC_WR(ACQUIRE_TIMEOUT , NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT)); - RAMFC_WR(SEMAPHORE , NV_READ(NV10_PFIFO_CACHE1_SEMAPHORE)); - RAMFC_WR(DMA_SUBROUTINE , NV_READ(NV04_PFIFO_CACHE1_DMA_GET)); + + if (dev_priv->chipset >= 0x17) { + RAMFC_WR(ACQUIRE_VALUE, + NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_VALUE)); + RAMFC_WR(ACQUIRE_TIMESTAMP, + NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP)); + RAMFC_WR(ACQUIRE_TIMEOUT, + NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT)); + RAMFC_WR(SEMAPHORE, + NV_READ(NV10_PFIFO_CACHE1_SEMAPHORE)); + RAMFC_WR(DMA_SUBROUTINE, + NV_READ(NV04_PFIFO_CACHE1_DMA_GET)); + } return 0; } From 1c32fecd6d2286af075976167c4887b9096e8312 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 28 Jun 2007 21:01:17 +1000 Subject: [PATCH 052/437] nouveau: Hack around possible Xv blit 
adaptor breakage --- shared-core/nouveau_fifo.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/shared-core/nouveau_fifo.c b/shared-core/nouveau_fifo.c index f179af63..81dbfcda 100644 --- a/shared-core/nouveau_fifo.c +++ b/shared-core/nouveau_fifo.c @@ -341,6 +341,19 @@ int nouveau_fifo_alloc(drm_device_t* dev, int *chan_ret, DRMFILE filp, nouveau_fifo_free(dev, channel); return ret; } + + /* Temporary hack, to avoid breaking Xv on cards where the + * initial context value for 0x400710 doesn't have these bits + * set. Proper fix would be to find which object+method is + * responsible for modifying this state. + */ + if (dev_priv->chipset >= 0x10) { + uint32_t tmp; + tmp = NV_READ(NV10_PGRAPH_SURFACE) & 0x0007ff00; + NV_WRITE(NV10_PGRAPH_SURFACE, tmp); + tmp = NV_READ(NV10_PGRAPH_SURFACE) | 0x00020100; + NV_WRITE(NV10_PGRAPH_SURFACE, tmp); + } } NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001); From e26ec51146e77eec2a45f61c9506e9800fc2fba2 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 29 Jun 2007 13:52:55 +1000 Subject: [PATCH 053/437] nouveau: small RAMFC cleanups --- shared-core/nv04_fifo.c | 17 ++++++++--------- shared-core/nv40_fifo.c | 20 ++++++++------------ 2 files changed, 16 insertions(+), 21 deletions(-) diff --git a/shared-core/nv04_fifo.c b/shared-core/nv04_fifo.c index 57010182..bfae432e 100644 --- a/shared-core/nv04_fifo.c +++ b/shared-core/nv04_fifo.c @@ -28,10 +28,10 @@ #include "drm.h" #include "nouveau_drv.h" -#define NV04_RAMFC dev_priv->ramfc_offset #define RAMFC_WR(offset, val) NV_WI32(fifoctx + NV04_RAMFC_##offset, (val)) #define RAMFC_RD(offset) NV_RI32(fifoctx + NV04_RAMFC_##offset) -#define NV04_FIFO_CONTEXT_SIZE 32 +#define NV04_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV04_RAMFC__SIZE)) +#define NV04_RAMFC__SIZE 32 int nv04_fifo_create_context(drm_device_t *dev, int channel) @@ -39,14 +39,14 @@ nv04_fifo_create_context(drm_device_t *dev, int channel) drm_nouveau_private_t *dev_priv = dev->dev_private; struct nouveau_fifo *chan = &dev_priv->fifos[channel]; struct nouveau_object *pb = chan->cmdbuf_obj; - int fifoctx = NV04_RAMFC + (channel * NV04_FIFO_CONTEXT_SIZE); + uint32_t fifoctx = NV04_RAMFC(channel); int i; if (!pb || !pb->instance) return DRM_ERR(EINVAL); /* Clear RAMFC */ - for (i=0; idev_private; - uint32_t fifoctx; + uint32_t fifoctx = NV04_RAMFC(channel); int i; - fifoctx = NV04_RAMFC + (channel * NV04_FIFO_CONTEXT_SIZE); - for (i=0; idev_private; - int fifoctx = NV04_RAMFC + (channel * NV04_FIFO_CONTEXT_SIZE); + uint32_t fifoctx = NV04_RAMFC(channel); uint32_t tmp; NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, (1<<8) | channel); @@ -107,7 +106,7 @@ int nv04_fifo_save_context(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = dev->dev_private; - int fifoctx = NV04_RAMFC + (channel * NV04_FIFO_CONTEXT_SIZE); + uint32_t fifoctx = NV04_RAMFC(channel); uint32_t tmp; RAMFC_WR(DMA_PUT, NV04_PFIFO_CACHE1_DMA_PUT); diff --git a/shared-core/nv40_fifo.c b/shared-core/nv40_fifo.c index 945fe228..6f25349c 100644 --- a/shared-core/nv40_fifo.c +++ b/shared-core/nv40_fifo.c @@ -30,17 +30,18 @@ #define RAMFC_WR(offset, val) NV_WI32(fifoctx + NV40_RAMFC_##offset, (val)) #define RAMFC_RD(offset) NV_RI32(fifoctx + NV40_RAMFC_##offset) +#define NV40_RAMFC(c) (dev_priv->ramfc_offset + ((c)*NV40_RAMFC__SIZE)) +#define NV40_RAMFC__SIZE 128 int nv40_fifo_create_context(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = dev->dev_private; struct nouveau_fifo *chan = &dev_priv->fifos[channel]; - uint32_t fifoctx, grctx, pushbuf; + 
uint32_t fifoctx = NV40_RAMFC(channel), grctx, pushbuf; int i; - fifoctx = dev_priv->ramfc_offset + channel*128; - for (i=0;i<128;i+=4) + for (i = 0; i < NV40_RAMFC__SIZE; i+=4) NV_WI32(fifoctx + i, 0); grctx = nouveau_chip_instance_get(dev, chan->ramin_grctx); @@ -70,11 +71,10 @@ void nv40_fifo_destroy_context(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = dev->dev_private; - uint32_t fifoctx; + uint32_t fifoctx = NV40_RAMFC(channel); int i; - fifoctx = dev_priv->ramfc_offset + channel*128; - for (i=0;i<128;i+=4) + for (i = 0; i < NV40_RAMFC__SIZE; i+=4) NV_WI32(fifoctx + i, 0); } @@ -82,11 +82,9 @@ int nv40_fifo_load_context(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = dev->dev_private; - uint32_t fifoctx; + uint32_t fifoctx = NV40_RAMFC(channel); uint32_t tmp, tmp2; - fifoctx = dev_priv->ramfc_offset + channel*128; - NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET , RAMFC_RD(DMA_GET)); NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT , RAMFC_RD(DMA_PUT)); NV_WRITE(NV10_PFIFO_CACHE1_REF_CNT , RAMFC_RD(REF_CNT)); @@ -143,11 +141,9 @@ int nv40_fifo_save_context(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = dev->dev_private; - uint32_t fifoctx; + uint32_t fifoctx = NV40_RAMFC(channel); uint32_t tmp; - fifoctx = dev_priv->ramfc_offset + channel*128; - RAMFC_WR(DMA_PUT , NV_READ(NV04_PFIFO_CACHE1_DMA_PUT)); RAMFC_WR(DMA_GET , NV_READ(NV04_PFIFO_CACHE1_DMA_GET)); RAMFC_WR(REF_CNT , NV_READ(NV10_PFIFO_CACHE1_REF_CNT)); From 11ffe4632a097e3d579d084634eeccc63348249b Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Thu, 28 Jun 2007 22:20:13 -0700 Subject: [PATCH 054/437] Convert comment header of xgi_find_pcie_virt to kernel doc format. --- linux-core/xgi_pcie.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index 8b024e4a..b29b083d 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -938,12 +938,13 @@ void *xgi_find_pcie_block(xgi_info_t * info, unsigned long address) return NULL; } -/* - address -- GE HW address - return -- CPU virtual address - - assume the CPU VAddr is continuous in not the same block -*/ +/** + * xgi_find_pcie_virt + * @address: GE HW address + * + * Returns CPU virtual address. Assumes the CPU VAddr is continuous in not + * the same block + */ void *xgi_find_pcie_virt(xgi_info_t * info, unsigned long address) { struct list_head *used_list; From 9c85fb866dc7954092b7ffd0ca9f76eb5354ace8 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Thu, 28 Jun 2007 22:26:39 -0700 Subject: [PATCH 055/437] Clean up debug log messages in xgi_find_pcie_block. 
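The log cleanup below amounts to using the conventional printk-style conversions: %p for pointers and %lu/%lx for unsigned long values, instead of the bogus %ul (which formats an unsigned int and then prints a literal 'l'). A minimal sketch of the corrected pattern, reusing the XGI_INFO macro and the variables already present in this function (the address value itself is made up for illustration):

	struct list_head *used_list = xgi_pcie_heap->used_list.next;
	unsigned long address = 0x1000;	/* hypothetical GE HW address */
	unsigned long offset_in_page = address & (PAGE_SIZE - 1);

	/* %p for the pointer, %lx/%lu for the unsigned longs -- never %ul */
	XGI_INFO("begin (used_list = 0x%p, address = 0x%lx, offset_in_page = %lu)\n",
		 used_list, address, offset_in_page);
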
--- linux-core/xgi_pcie.c | 36 +++++++++++++----------------------- 1 file changed, 13 insertions(+), 23 deletions(-) diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index b29b083d..b449a5fd 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -953,22 +953,18 @@ void *xgi_find_pcie_virt(xgi_info_t * info, unsigned long address) unsigned long loc_in_pagetable; void *ret; - XGI_INFO("Jong_05292006-xgi_find_pcie_virt-Begin\n"); - used_list = xgi_pcie_heap->used_list.next; - XGI_INFO("Jong_05292006-used_list=%ul\n", used_list); - offset_in_page = address & (PAGE_SIZE - 1); - XGI_INFO - ("Jong_05292006-address=0x%px, PAGE_SIZE-1=%ul, offset_in_page=%ul\n", - address, PAGE_SIZE - 1, offset_in_page); + + XGI_INFO("begin (used_list = 0x%p, address = 0x%lx, " + "PAGE_SIZE - 1 = %lu, offset_in_page = %lu)\n", + used_list, address, PAGE_SIZE - 1, offset_in_page); while (used_list != &xgi_pcie_heap->used_list) { block = list_entry(used_list, struct xgi_pcie_block_s, list); - XGI_INFO("Jong_05292006-block=0x%px\n", block); - XGI_INFO("Jong_05292006-block->hw_addr=0x%px\n", - block->hw_addr); - XGI_INFO("Jong_05292006- block->size=%ul\n", block->size); + + XGI_INFO("block = 0x%p (hw_addr = 0x%lx, size=%lu)\n", + block, block->hw_addr, block->size); if ((address >= block->hw_addr) && (address < (block->hw_addr + block->size))) { @@ -978,21 +974,15 @@ void *xgi_find_pcie_virt(xgi_info_t * info, unsigned long address) (void *)(block->page_table[loc_in_pagetable]. virt_addr + offset_in_page); - XGI_INFO("Jong_05292006-PAGE_SHIFT=%d\n", PAGE_SHIFT); - XGI_INFO("Jong_05292006-loc_in_pagetable=0x%px\n", - loc_in_pagetable); - XGI_INFO - ("Jong_05292006-block->page_table[loc_in_pagetable].virt_addr=0x%px\n", - block->page_table[loc_in_pagetable].virt_addr); - XGI_INFO("Jong_05292006-offset_in_page=%d\n", - offset_in_page); - XGI_INFO("Jong_05292006-return(virt_addr)=0x%px\n", - ret); + XGI_INFO("PAGE_SHIFT = %d\n", PAGE_SHIFT); + XGI_INFO("block->page_table[0x%lx].virt_addr = 0x%lx\n", + loc_in_pagetable, + block->page_table[loc_in_pagetable].virt_addr); + XGI_INFO("return 0x%p\n", ret); return ret; } else { - XGI_INFO - ("Jong_05292006-used_list = used_list->next;\n"); + XGI_INFO("used_list = used_list->next;\n"); used_list = used_list->next; } } From 8fa24c53f5851a2d3ad2da31ee56a4fd5abbd543 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Thu, 28 Jun 2007 22:32:11 -0700 Subject: [PATCH 056/437] Minor clean up of variable declarations in xgi_find_pcie_virt. 
--- linux-core/xgi_pcie.c | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index b449a5fd..d9da30e8 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -947,30 +947,25 @@ void *xgi_find_pcie_block(xgi_info_t * info, unsigned long address) */ void *xgi_find_pcie_virt(xgi_info_t * info, unsigned long address) { - struct list_head *used_list; - xgi_pcie_block_t *block; - unsigned long offset_in_page; - unsigned long loc_in_pagetable; - void *ret; - - used_list = xgi_pcie_heap->used_list.next; - offset_in_page = address & (PAGE_SIZE - 1); + struct list_head *used_list = xgi_pcie_heap->used_list.next; + const unsigned long offset_in_page = address & (PAGE_SIZE - 1); XGI_INFO("begin (used_list = 0x%p, address = 0x%lx, " "PAGE_SIZE - 1 = %lu, offset_in_page = %lu)\n", used_list, address, PAGE_SIZE - 1, offset_in_page); while (used_list != &xgi_pcie_heap->used_list) { - block = list_entry(used_list, struct xgi_pcie_block_s, list); + xgi_pcie_block_t *block = + list_entry(used_list, struct xgi_pcie_block_s, list); XGI_INFO("block = 0x%p (hw_addr = 0x%lx, size=%lu)\n", block, block->hw_addr, block->size); if ((address >= block->hw_addr) && (address < (block->hw_addr + block->size))) { - loc_in_pagetable = + const unsigned long loc_in_pagetable = (address - block->hw_addr) >> PAGE_SHIFT; - ret = + void *const ret = (void *)(block->page_table[loc_in_pagetable]. virt_addr + offset_in_page); From 475c1e67bacabb89c568c7482991451d223c53ae Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Thu, 28 Jun 2007 23:40:36 -0700 Subject: [PATCH 057/437] Remove unused type 'struct xgi_pcie_list_s' / xgi_pcie_list_t. --- linux-core/xgi_pcie.h | 5 ----- 1 file changed, 5 deletions(-) diff --git a/linux-core/xgi_pcie.h b/linux-core/xgi_pcie.h index 32c2b584..6e8e45b9 100644 --- a/linux-core/xgi_pcie.h +++ b/linux-core/xgi_pcie.h @@ -58,11 +58,6 @@ typedef struct xgi_pcie_block_s { unsigned long processID; } xgi_pcie_block_t; -typedef struct xgi_pcie_list_s { - xgi_pcie_block_t *head; - xgi_pcie_block_t *tail; -} xgi_pcie_list_t; - typedef struct xgi_pcie_heap_s { struct list_head free_list; struct list_head used_list; From 00f1a66f22d52c212bb9334a0103a4785af69bc1 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Fri, 29 Jun 2007 12:50:12 +0200 Subject: [PATCH 058/437] Fence object reference / dereference cleanup. Buffer object dereference cleanup. Add a struct drm_device member to fence objects: This can simplify code, particularly in drivers. 
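In practice the cleanup below establishes one calling convention for fences: the dereference helpers take a pointer to the caller's reference and clear it, and drm_fence_reference_locked() returns the object so taking and storing a reference is a single assignment. Because fences now carry their device in fence->dev, the signal/flush/wait helpers also lose their dev argument. A minimal sketch of the resulting caller pattern (example_release_fence is a made-up name; the helpers are the ones changed in this patch):

	static void example_release_fence(drm_buffer_object_t *bo)
	{
		/* no dev argument needed any more; the fence knows its device */
		if (bo->fence &&
		    drm_fence_object_signaled(bo->fence, bo->fence_type, 0)) {
			/* passing &bo->fence: the helper drops the reference
			 * and sets bo->fence = NULL, so a stale pointer can
			 * never be used after the last unref */
			drm_fence_usage_deref_unlocked(&bo->fence);
		}
	}
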
--- linux-core/drm_bo.c | 109 +++++++++++++++------------------- linux-core/drm_bo_move.c | 6 +- linux-core/drm_fence.c | 122 +++++++++++++++++++++++---------------- linux-core/drm_objects.h | 21 ++++--- linux-core/drm_vm.c | 3 +- 5 files changed, 134 insertions(+), 127 deletions(-) diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index f1ca0b44..ab257825 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -269,31 +269,25 @@ static int drm_bo_handle_move_mem(drm_buffer_object_t * bo, int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals, int no_wait) { - - drm_fence_object_t *fence = bo->fence; int ret; DRM_ASSERT_LOCKED(&bo->mutex); - if (fence) { - drm_device_t *dev = bo->dev; - if (drm_fence_object_signaled(dev, fence, bo->fence_type, 0)) { - drm_fence_usage_deref_unlocked(dev, fence); - bo->fence = NULL; + if (bo->fence) { + if (drm_fence_object_signaled(bo->fence, bo->fence_type, 0)) { + drm_fence_usage_deref_unlocked(&bo->fence); return 0; } if (no_wait) { return -EBUSY; } ret = - drm_fence_object_wait(dev, fence, lazy, ignore_signals, + drm_fence_object_wait(bo->fence, lazy, ignore_signals, bo->fence_type); if (ret) return ret; - drm_fence_usage_deref_unlocked(dev, fence); - bo->fence = NULL; - + drm_fence_usage_deref_unlocked(&bo->fence); } return 0; } @@ -321,10 +315,8 @@ static int drm_bo_expire_fence(drm_buffer_object_t * bo, int allow_errors) "Evicting buffer.\n"); } } - if (bo->fence) { - drm_fence_usage_deref_unlocked(dev, bo->fence); - bo->fence = NULL; - } + if (bo->fence) + drm_fence_usage_deref_unlocked(&bo->fence); } return 0; } @@ -348,11 +340,9 @@ static void drm_bo_cleanup_refs(drm_buffer_object_t * bo, int remove_all) DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); - if (bo->fence && drm_fence_object_signaled(dev, bo->fence, - bo->fence_type, 0)) { - drm_fence_usage_deref_unlocked(dev, bo->fence); - bo->fence = NULL; - } + if (bo->fence && drm_fence_object_signaled(bo->fence, + bo->fence_type, 0)) + drm_fence_usage_deref_unlocked(&bo->fence); if (bo->fence && remove_all) (void)drm_bo_expire_fence(bo, 0); @@ -383,7 +373,7 @@ static void drm_bo_cleanup_refs(drm_buffer_object_t * bo, int remove_all) } if (list_empty(&bo->ddestroy)) { - drm_fence_object_flush(dev, bo->fence, bo->fence_type); + drm_fence_object_flush(bo->fence, bo->fence_type); list_add_tail(&bo->ddestroy, &bm->ddestroy); schedule_delayed_work(&bm->wq, ((DRM_HZ / 100) < 1) ? 
1 : DRM_HZ / 100); @@ -503,12 +493,15 @@ static void drm_bo_delayed_workqueue(struct work_struct *work) mutex_unlock(&dev->struct_mutex); } -void drm_bo_usage_deref_locked(drm_buffer_object_t * bo) +void drm_bo_usage_deref_locked(drm_buffer_object_t ** bo) { - DRM_ASSERT_LOCKED(&bo->dev->struct_mutex); + struct drm_buffer_object *tmp_bo = *bo; + bo = NULL; - if (atomic_dec_and_test(&bo->usage)) { - drm_bo_destroy_locked(bo); + DRM_ASSERT_LOCKED(&tmp_bo->dev->struct_mutex); + + if (atomic_dec_and_test(&tmp_bo->usage)) { + drm_bo_destroy_locked(tmp_bo); } } @@ -520,17 +513,19 @@ static void drm_bo_base_deref_locked(drm_file_t * priv, drm_user_object_t * uo) DRM_ASSERT_LOCKED(&bo->dev->struct_mutex); drm_bo_takedown_vm_locked(bo); - drm_bo_usage_deref_locked(bo); + drm_bo_usage_deref_locked(&bo); } -static void drm_bo_usage_deref_unlocked(drm_buffer_object_t * bo) +static void drm_bo_usage_deref_unlocked(drm_buffer_object_t ** bo) { - drm_device_t *dev = bo->dev; + struct drm_buffer_object *tmp_bo = *bo; + drm_device_t *dev = tmp_bo->dev; - if (atomic_dec_and_test(&bo->usage)) { + *bo = NULL; + if (atomic_dec_and_test(&tmp_bo->usage)) { mutex_lock(&dev->struct_mutex); - if (atomic_read(&bo->usage) == 0) - drm_bo_destroy_locked(bo); + if (atomic_read(&tmp_bo->usage) == 0) + drm_bo_destroy_locked(tmp_bo); mutex_unlock(&dev->struct_mutex); } } @@ -616,16 +611,15 @@ int drm_fence_buffer_objects(drm_file_t * priv, if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) { count++; if (entry->fence) - drm_fence_usage_deref_locked(dev, entry->fence); - entry->fence = fence; - atomic_inc(&fence->usage); + drm_fence_usage_deref_locked(&entry->fence); + entry->fence = drm_fence_reference_locked(fence); DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); DRM_WAKEUP(&entry->event_queue); drm_bo_add_to_lru(entry); } mutex_unlock(&entry->mutex); - drm_bo_usage_deref_locked(entry); + drm_bo_usage_deref_locked(&entry); l = f_list.next; } DRM_DEBUG("Fenced %d buffers\n", count); @@ -742,7 +736,7 @@ static int drm_bo_mem_force_space(drm_device_t * dev, ret = drm_bo_evict(entry, mem_type, no_wait); mutex_unlock(&entry->mutex); - drm_bo_usage_deref_unlocked(entry); + drm_bo_usage_deref_unlocked(&entry); if (ret) return ret; mutex_lock(&dev->struct_mutex); @@ -962,10 +956,8 @@ static int drm_bo_quick_busy(drm_buffer_object_t * bo) BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED); if (fence) { - drm_device_t *dev = bo->dev; - if (drm_fence_object_signaled(dev, fence, bo->fence_type, 0)) { - drm_fence_usage_deref_unlocked(dev, fence); - bo->fence = NULL; + if (drm_fence_object_signaled(fence, bo->fence_type, 0)) { + drm_fence_usage_deref_unlocked(&bo->fence); return 0; } return 1; @@ -984,16 +976,13 @@ static int drm_bo_busy(drm_buffer_object_t * bo) BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED); if (fence) { - drm_device_t *dev = bo->dev; - if (drm_fence_object_signaled(dev, fence, bo->fence_type, 0)) { - drm_fence_usage_deref_unlocked(dev, fence); - bo->fence = NULL; + if (drm_fence_object_signaled(fence, bo->fence_type, 0)) { + drm_fence_usage_deref_unlocked(&bo->fence); return 0; } - drm_fence_object_flush(dev, fence, DRM_FENCE_TYPE_EXE); - if (drm_fence_object_signaled(dev, fence, bo->fence_type, 0)) { - drm_fence_usage_deref_unlocked(dev, fence); - bo->fence = NULL; + drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE); + if (drm_fence_object_signaled(fence, bo->fence_type, 0)) { + drm_fence_usage_deref_unlocked(&bo->fence); return 0; } return 1; @@ -1190,7 +1179,7 @@ static int drm_buffer_object_map(drm_file_t 
* priv, uint32_t handle, drm_bo_fill_rep_arg(bo, rep); out: mutex_unlock(&bo->mutex); - drm_bo_usage_deref_unlocked(bo); + drm_bo_usage_deref_unlocked(&bo); return ret; } @@ -1216,7 +1205,7 @@ static int drm_buffer_object_unmap(drm_file_t * priv, uint32_t handle) } drm_remove_ref_object(priv, ro); - drm_bo_usage_deref_locked(bo); + drm_bo_usage_deref_locked(&bo); out: mutex_unlock(&dev->struct_mutex); return ret; @@ -1512,7 +1501,7 @@ static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle, mutex_unlock(&bo->mutex); - drm_bo_usage_deref_unlocked(bo); + drm_bo_usage_deref_unlocked(&bo); return ret; } @@ -1534,7 +1523,7 @@ static int drm_bo_handle_info(drm_file_t * priv, uint32_t handle, (void)drm_bo_busy(bo); drm_bo_fill_rep_arg(bo, rep); mutex_unlock(&bo->mutex); - drm_bo_usage_deref_unlocked(bo); + drm_bo_usage_deref_unlocked(&bo); return 0; } @@ -1566,7 +1555,7 @@ static int drm_bo_handle_wait(drm_file_t * priv, uint32_t handle, out: mutex_unlock(&bo->mutex); - drm_bo_usage_deref_unlocked(bo); + drm_bo_usage_deref_unlocked(&bo); return ret; } @@ -1651,7 +1640,7 @@ int drm_buffer_object_create(drm_device_t *dev, out_err: mutex_unlock(&bo->mutex); - drm_bo_usage_deref_unlocked(bo); + drm_bo_usage_deref_unlocked(&bo); return ret; } @@ -1728,7 +1717,7 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS) mask & DRM_BO_FLAG_SHAREABLE); if (rep.ret) - drm_bo_usage_deref_unlocked(entry); + drm_bo_usage_deref_unlocked(&entry); if (rep.ret) break; @@ -1957,7 +1946,7 @@ restart: allow_errors); mutex_lock(&dev->struct_mutex); - drm_bo_usage_deref_locked(entry); + drm_bo_usage_deref_locked(&entry); if (ret) return ret; @@ -1967,10 +1956,8 @@ restart: do_restart = ((next->prev != list) && (next->prev != prev)); - if (nentry != NULL && do_restart) { - drm_bo_usage_deref_locked(nentry); - nentry = NULL; - } + if (nentry != NULL && do_restart) + drm_bo_usage_deref_locked(&nentry); if (do_restart) goto restart; @@ -2365,7 +2352,7 @@ static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo) drm_ctl_free(map, sizeof(*map), DRM_MEM_BUFOBJ); list->map = NULL; list->user_token = 0ULL; - drm_bo_usage_deref_locked(bo); + drm_bo_usage_deref_locked(&bo); } static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo) diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c index 4f752065..8ef2a8ff 100644 --- a/linux-core/drm_bo_move.c +++ b/linux-core/drm_bo_move.c @@ -306,7 +306,7 @@ int drm_buffer_object_transfer(drm_buffer_object_t * bo, INIT_LIST_HEAD(&fbo->p_mm_list); #endif - atomic_inc(&bo->fence->usage); + drm_fence_reference_unlocked(&fbo->fence, bo->fence); fbo->pinned_node = NULL; fbo->mem.mm_node->private = (void *)fbo; atomic_set(&fbo->usage, 1); @@ -339,7 +339,7 @@ int drm_bo_move_accel_cleanup(drm_buffer_object_t * bo, drm_buffer_object_t *old_obj; if (bo->fence) - drm_fence_usage_deref_unlocked(dev, bo->fence); + drm_fence_usage_deref_unlocked(&bo->fence); ret = drm_fence_object_create(dev, fence_class, fence_type, fence_flags | DRM_FENCE_FLAG_EMIT, &bo->fence); @@ -396,7 +396,7 @@ int drm_bo_move_accel_cleanup(drm_buffer_object_t * bo, DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); drm_bo_add_to_lru(old_obj); - drm_bo_usage_deref_locked(old_obj); + drm_bo_usage_deref_locked(&old_obj); mutex_unlock(&dev->struct_mutex); } diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c index b5fc2235..ace70d51 100644 --- a/linux-core/drm_fence.c +++ b/linux-core/drm_fence.c @@ -124,56 +124,76 @@ static void drm_fence_unring(drm_device_t * dev, struct list_head *ring) 
write_unlock_irqrestore(&fm->lock, flags); } -void drm_fence_usage_deref_locked(drm_device_t * dev, - drm_fence_object_t * fence) +void drm_fence_usage_deref_locked(drm_fence_object_t ** fence) { + struct drm_fence_object *tmp_fence = *fence; + struct drm_device *dev = tmp_fence->dev; drm_fence_manager_t *fm = &dev->fm; DRM_ASSERT_LOCKED(&dev->struct_mutex); - - if (atomic_dec_and_test(&fence->usage)) { - drm_fence_unring(dev, &fence->ring); + *fence = NULL; + if (atomic_dec_and_test(&tmp_fence->usage)) { + drm_fence_unring(dev, &tmp_fence->ring); DRM_DEBUG("Destroyed a fence object 0x%08lx\n", - fence->base.hash.key); + tmp_fence->base.hash.key); atomic_dec(&fm->count); - BUG_ON(!list_empty(&fence->base.list)); - drm_ctl_free(fence, sizeof(*fence), DRM_MEM_FENCE); + BUG_ON(!list_empty(&tmp_fence->base.list)); + drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE); } } -void drm_fence_usage_deref_unlocked(drm_device_t * dev, - drm_fence_object_t * fence) +void drm_fence_usage_deref_unlocked(drm_fence_object_t ** fence) { + struct drm_fence_object *tmp_fence = *fence; + struct drm_device *dev = tmp_fence->dev; drm_fence_manager_t *fm = &dev->fm; - if (atomic_dec_and_test(&fence->usage)) { + *fence = NULL; + if (atomic_dec_and_test(&tmp_fence->usage)) { mutex_lock(&dev->struct_mutex); - if (atomic_read(&fence->usage) == 0) { - drm_fence_unring(dev, &fence->ring); + if (atomic_read(&tmp_fence->usage) == 0) { + drm_fence_unring(dev, &tmp_fence->ring); atomic_dec(&fm->count); - BUG_ON(!list_empty(&fence->base.list)); - drm_ctl_free(fence, sizeof(*fence), DRM_MEM_FENCE); + BUG_ON(!list_empty(&tmp_fence->base.list)); + drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE); } mutex_unlock(&dev->struct_mutex); } } -static void drm_fence_object_destroy(drm_file_t * priv, - drm_user_object_t * base) +struct drm_fence_object +*drm_fence_reference_locked(struct drm_fence_object *src) +{ + DRM_ASSERT_LOCKED(&src->dev->struct_mutex); + + atomic_inc(&src->usage); + return src; +} + +void drm_fence_reference_unlocked(struct drm_fence_object **dst, + struct drm_fence_object *src) +{ + mutex_lock(&src->dev->struct_mutex); + *dst = src; + atomic_inc(&src->usage); + mutex_unlock(&src->dev->struct_mutex); +} + + +static void drm_fence_object_destroy(drm_file_t *priv, drm_user_object_t * base) { - drm_device_t *dev = priv->head->dev; drm_fence_object_t *fence = drm_user_object_entry(base, drm_fence_object_t, base); - drm_fence_usage_deref_locked(dev, fence); + drm_fence_usage_deref_locked(&fence); } -int drm_fence_object_signaled(drm_device_t * dev, - drm_fence_object_t * fence, - uint32_t mask, int poke_flush) +int drm_fence_object_signaled(drm_fence_object_t * fence, + uint32_t mask, int poke_flush) { unsigned long flags; int signaled; + struct drm_device *dev = fence->dev; drm_fence_manager_t *fm = &dev->fm; drm_fence_driver_t *driver = dev->driver->fence_driver; @@ -204,10 +224,10 @@ static void drm_fence_flush_exe(drm_fence_class_manager_t * fc, } } -int drm_fence_object_flush(drm_device_t * dev, - drm_fence_object_t * fence, +int drm_fence_object_flush(drm_fence_object_t * fence, uint32_t type) { + struct drm_device *dev = fence->dev; drm_fence_manager_t *fm = &dev->fm; drm_fence_class_manager_t *fc = &fm->class[fence->class]; drm_fence_driver_t *driver = dev->driver->fence_driver; @@ -270,24 +290,23 @@ void drm_fence_flush_old(drm_device_t * dev, uint32_t class, uint32_t sequence) mutex_unlock(&dev->struct_mutex); return; } - fence = list_entry(fc->ring.next, drm_fence_object_t, ring); - 
atomic_inc(&fence->usage); + fence = drm_fence_reference_locked(list_entry(fc->ring.next, drm_fence_object_t, ring)); mutex_unlock(&dev->struct_mutex); diff = (old_sequence - fence->sequence) & driver->sequence_mask; read_unlock_irqrestore(&fm->lock, flags); if (diff < driver->wrap_diff) { - drm_fence_object_flush(dev, fence, fence->type); + drm_fence_object_flush(fence, fence->type); } - drm_fence_usage_deref_unlocked(dev, fence); + drm_fence_usage_deref_unlocked(&fence); } EXPORT_SYMBOL(drm_fence_flush_old); -static int drm_fence_lazy_wait(drm_device_t *dev, - drm_fence_object_t *fence, +static int drm_fence_lazy_wait(drm_fence_object_t *fence, int ignore_signals, uint32_t mask) { + struct drm_device *dev = fence->dev; drm_fence_manager_t *fm = &dev->fm; drm_fence_class_manager_t *fc = &fm->class[fence->class]; int signaled; @@ -296,13 +315,13 @@ static int drm_fence_lazy_wait(drm_device_t *dev, do { DRM_WAIT_ON(ret, fc->fence_queue, 3 * DRM_HZ, - (signaled = drm_fence_object_signaled(dev, fence, mask, 1))); + (signaled = drm_fence_object_signaled(fence, mask, 1))); if (signaled) return 0; if (time_after_eq(jiffies, _end)) break; } while (ret == -EINTR && ignore_signals); - if (drm_fence_object_signaled(dev, fence, mask, 0)) + if (drm_fence_object_signaled(fence, mask, 0)) return 0; if (time_after_eq(jiffies, _end)) ret = -EBUSY; @@ -317,10 +336,10 @@ static int drm_fence_lazy_wait(drm_device_t *dev, return 0; } -int drm_fence_object_wait(drm_device_t * dev, - drm_fence_object_t * fence, +int drm_fence_object_wait(drm_fence_object_t * fence, int lazy, int ignore_signals, uint32_t mask) { + struct drm_device *dev = fence->dev; drm_fence_driver_t *driver = dev->driver->fence_driver; int ret = 0; unsigned long _end; @@ -332,16 +351,16 @@ int drm_fence_object_wait(drm_device_t * dev, return -EINVAL; } - if (drm_fence_object_signaled(dev, fence, mask, 0)) + if (drm_fence_object_signaled(fence, mask, 0)) return 0; _end = jiffies + 3 * DRM_HZ; - drm_fence_object_flush(dev, fence, mask); + drm_fence_object_flush(fence, mask); if (lazy && driver->lazy_capable) { - ret = drm_fence_lazy_wait(dev, fence, ignore_signals, mask); + ret = drm_fence_lazy_wait(fence, ignore_signals, mask); if (ret) return ret; @@ -349,7 +368,7 @@ int drm_fence_object_wait(drm_device_t * dev, if (driver->has_irq(dev, fence->class, DRM_FENCE_TYPE_EXE)) { - ret = drm_fence_lazy_wait(dev, fence, ignore_signals, + ret = drm_fence_lazy_wait(fence, ignore_signals, DRM_FENCE_TYPE_EXE); if (ret) return ret; @@ -357,13 +376,13 @@ int drm_fence_object_wait(drm_device_t * dev, if (driver->has_irq(dev, fence->class, mask & ~DRM_FENCE_TYPE_EXE)) { - ret = drm_fence_lazy_wait(dev, fence, ignore_signals, + ret = drm_fence_lazy_wait(fence, ignore_signals, mask); if (ret) return ret; } } - if (drm_fence_object_signaled(dev, fence, mask, 0)) + if (drm_fence_object_signaled(fence, mask, 0)) return 0; /* @@ -375,7 +394,7 @@ int drm_fence_object_wait(drm_device_t * dev, #endif do { schedule(); - signaled = drm_fence_object_signaled(dev, fence, mask, 1); + signaled = drm_fence_object_signaled(fence, mask, 1); } while (!signaled && !time_after_eq(jiffies, _end)); if (!signaled) @@ -384,9 +403,10 @@ int drm_fence_object_wait(drm_device_t * dev, return 0; } -int drm_fence_object_emit(drm_device_t * dev, drm_fence_object_t * fence, +int drm_fence_object_emit(drm_fence_object_t * fence, uint32_t fence_flags, uint32_t class, uint32_t type) { + struct drm_device *dev = fence->dev; drm_fence_manager_t *fm = &dev->fm; drm_fence_driver_t *driver = 
dev->driver->fence_driver; drm_fence_class_manager_t *fc = &fm->class[fence->class]; @@ -436,9 +456,10 @@ static int drm_fence_object_init(drm_device_t * dev, uint32_t class, fence->submitted_flush = 0; fence->signaled = 0; fence->sequence = 0; + fence->dev = dev; write_unlock_irqrestore(&fm->lock, flags); if (fence_flags & DRM_FENCE_FLAG_EMIT) { - ret = drm_fence_object_emit(dev, fence, fence_flags, + ret = drm_fence_object_emit(fence, fence_flags, fence->class, type); } return ret; @@ -476,7 +497,7 @@ int drm_fence_object_create(drm_device_t * dev, uint32_t class, uint32_t type, return -ENOMEM; ret = drm_fence_object_init(dev, class, type, flags, fence); if (ret) { - drm_fence_usage_deref_unlocked(dev, fence); + drm_fence_usage_deref_unlocked(&fence); return ret; } *c_fence = fence; @@ -533,8 +554,7 @@ drm_fence_object_t *drm_lookup_fence_object(drm_file_t * priv, uint32_t handle) mutex_unlock(&dev->struct_mutex); return NULL; } - fence = drm_user_object_entry(uo, drm_fence_object_t, base); - atomic_inc(&fence->usage); + fence = drm_fence_reference_locked(drm_user_object_entry(uo, drm_fence_object_t, base)); mutex_unlock(&dev->struct_mutex); return fence; } @@ -568,7 +588,7 @@ int drm_fence_ioctl(DRM_IOCTL_ARGS) arg.flags & DRM_FENCE_FLAG_SHAREABLE); if (ret) { - drm_fence_usage_deref_unlocked(dev, fence); + drm_fence_usage_deref_unlocked(&fence); return ret; } arg.handle = fence->base.hash.key; @@ -603,14 +623,14 @@ int drm_fence_ioctl(DRM_IOCTL_ARGS) fence = drm_lookup_fence_object(priv, arg.handle); if (!fence) return -EINVAL; - ret = drm_fence_object_flush(dev, fence, arg.type); + ret = drm_fence_object_flush(fence, arg.type); break; case drm_fence_wait: fence = drm_lookup_fence_object(priv, arg.handle); if (!fence) return -EINVAL; ret = - drm_fence_object_wait(dev, fence, + drm_fence_object_wait(fence, arg.flags & DRM_FENCE_FLAG_WAIT_LAZY, 0, arg.type); break; @@ -619,7 +639,7 @@ int drm_fence_ioctl(DRM_IOCTL_ARGS) fence = drm_lookup_fence_object(priv, arg.handle); if (!fence) return -EINVAL; - ret = drm_fence_object_emit(dev, fence, arg.flags, arg.class, + ret = drm_fence_object_emit(fence, arg.flags, arg.class, arg.type); break; case drm_fence_buffers: @@ -647,7 +667,7 @@ int drm_fence_ioctl(DRM_IOCTL_ARGS) arg.type = fence->type; arg.signaled = fence->signaled; read_unlock_irqrestore(&fm->lock, flags); - drm_fence_usage_deref_unlocked(dev, fence); + drm_fence_usage_deref_unlocked(&fence); DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); return ret; diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index 59c8902d..f82d6628 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -141,6 +141,7 @@ extern int drm_user_object_unref(drm_file_t * priv, uint32_t user_token, typedef struct drm_fence_object { drm_user_object_t base; + struct drm_device *dev; atomic_t usage; /* @@ -196,17 +197,15 @@ extern void drm_fence_manager_init(struct drm_device *dev); extern void drm_fence_manager_takedown(struct drm_device *dev); extern void drm_fence_flush_old(struct drm_device *dev, uint32_t class, uint32_t sequence); -extern int drm_fence_object_flush(struct drm_device *dev, - drm_fence_object_t * fence, uint32_t type); -extern int drm_fence_object_signaled(struct drm_device *dev, - drm_fence_object_t * fence, +extern int drm_fence_object_flush(drm_fence_object_t * fence, uint32_t type); +extern int drm_fence_object_signaled(drm_fence_object_t * fence, uint32_t type, int flush); -extern void drm_fence_usage_deref_locked(struct drm_device *dev, - 
drm_fence_object_t * fence); -extern void drm_fence_usage_deref_unlocked(struct drm_device *dev, - drm_fence_object_t * fence); -extern int drm_fence_object_wait(struct drm_device *dev, - drm_fence_object_t * fence, +extern void drm_fence_usage_deref_locked(drm_fence_object_t ** fence); +extern void drm_fence_usage_deref_unlocked(drm_fence_object_t ** fence); +extern struct drm_fence_object *drm_fence_reference_locked(struct drm_fence_object *src); +extern void drm_fence_reference_unlocked(struct drm_fence_object **dst, + struct drm_fence_object *src); +extern int drm_fence_object_wait(drm_fence_object_t * fence, int lazy, int ignore_signals, uint32_t mask); extern int drm_fence_object_create(struct drm_device *dev, uint32_t type, uint32_t fence_flags, uint32_t class, @@ -441,7 +440,7 @@ extern int drm_bo_pci_offset(struct drm_device *dev, unsigned long *bus_size); extern int drm_mem_reg_is_pci(struct drm_device *dev, drm_bo_mem_reg_t * mem); -extern void drm_bo_usage_deref_locked(drm_buffer_object_t * bo); +extern void drm_bo_usage_deref_locked(drm_buffer_object_t ** bo); extern int drm_fence_buffer_objects(drm_file_t * priv, struct list_head *list, uint32_t fence_flags, diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c index f2c43508..72d63c10 100644 --- a/linux-core/drm_vm.c +++ b/linux-core/drm_vm.c @@ -840,7 +840,8 @@ static void drm_bo_vm_close(struct vm_area_struct *vma) #ifdef DRM_ODD_MM_COMPAT drm_bo_delete_vma(bo, vma); #endif - drm_bo_usage_deref_locked(bo); + drm_bo_usage_deref_locked((struct drm_buffer_object **) + &vma->vm_private_data); mutex_unlock(&dev->struct_mutex); } return; From a27af4c4a665864df09123f177ca7269e48f6171 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Fri, 29 Jun 2007 15:22:28 +0200 Subject: [PATCH 059/437] Avoid hitting BUG() for kernel-only fence objects. --- linux-core/drm_fence.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c index ace70d51..5215feb6 100644 --- a/linux-core/drm_fence.c +++ b/linux-core/drm_fence.c @@ -450,6 +450,12 @@ static int drm_fence_object_init(drm_device_t * dev, uint32_t class, write_lock_irqsave(&fm->lock, flags); INIT_LIST_HEAD(&fence->ring); + + /* + * Avoid hitting BUG() for kernel-only fence objects. + */ + + INIT_LIST_HEAD(&fence->base.list); fence->class = class; fence->type = type; fence->flush_mask = 0; From 33b8476dfb0f9b5045103c3a9781ba82bcae4a9d Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Fri, 29 Jun 2007 09:30:02 -0700 Subject: [PATCH 060/437] Fix return type of xgi_find_pcie_block. This function used to return 'void *', which was then cast to 'xgi_pcie_block_t *' at the only caller. I changed the return type to 'struct xgi_pcie_block_s *' and removed the explicit cast. 
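In isolation the pattern looks like this; the sketch below is a standalone illustration with placeholder names (find_block_old/find_block_new and a stub struct body), not code from this driver:

    /* Standalone sketch: typed return vs. 'void *' plus a cast at the caller. */
    #include <stddef.h>

    struct xgi_pcie_block_s { unsigned long offset; };

    /* Before: the prototype says 'void *', so every caller must cast. */
    static void *find_block_old(unsigned long address)
    {
            (void) address;
            return NULL;            /* placeholder body */
    }

    /* After: the prototype carries the real type; no cast, and the
     * compiler can reject an assignment to the wrong pointer type. */
    static struct xgi_pcie_block_s *find_block_new(unsigned long address)
    {
            (void) address;
            return NULL;            /* placeholder body */
    }

    int main(void)
    {
            struct xgi_pcie_block_s *a =
                    (struct xgi_pcie_block_s *) find_block_old(0x1000);
            struct xgi_pcie_block_s *b = find_block_new(0x1000);

            return (a == NULL && b == NULL) ? 0 : 1;
    }

In the patch itself only the prototype in xgi_drv.h, the definition in xgi_pcie.c, and the single call site in xgi_kern_mmap() change; the lookup logic stays as it was.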
--- linux-core/xgi_drv.c | 5 +---- linux-core/xgi_drv.h | 3 ++- linux-core/xgi_pcie.c | 3 ++- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index 75204283..a01b3c22 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -730,10 +730,7 @@ int xgi_kern_mmap(struct file *filp, struct vm_area_struct *vma) (info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start)) { xgi_down(info->pcie_sem); - block = - (xgi_pcie_block_t *) xgi_find_pcie_block(info, - XGI_VMA_OFFSET - (vma)); + block = xgi_find_pcie_block(info, XGI_VMA_OFFSET(vma)); if (block == NULL) { XGI_ERROR("couldn't find pre-allocated PCIE memory!\n"); diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 429719a7..5d76b632 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -353,7 +353,8 @@ extern void xgi_pcie_alloc(xgi_info_t * info, unsigned long size, enum PcieOwner owner, xgi_mem_alloc_t * alloc); extern void xgi_pcie_free(xgi_info_t * info, unsigned long offset); extern void xgi_pcie_heap_check(void); -extern void *xgi_find_pcie_block(xgi_info_t * info, unsigned long address); +extern struct xgi_pcie_block_s *xgi_find_pcie_block(xgi_info_t * info, + unsigned long address); extern void *xgi_find_pcie_virt(xgi_info_t * info, unsigned long address); extern void xgi_read_pcie_mem(xgi_info_t * info, xgi_mem_req_t * req); diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index d9da30e8..1a4d8e12 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -906,7 +906,8 @@ void xgi_pcie_free(xgi_info_t * info, unsigned long bus_addr) * given a bus address, fid the pcie mem block * uses the bus address as the key. */ -void *xgi_find_pcie_block(xgi_info_t * info, unsigned long address) +struct xgi_pcie_block_s *xgi_find_pcie_block(xgi_info_t * info, + unsigned long address) { struct list_head *used_list; xgi_pcie_block_t *block; From 88328d4ef007c781874aafedfef59aae0d21a37c Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Fri, 29 Jun 2007 15:27:38 -0700 Subject: [PATCH 061/437] Eliminate structure typedefs Documentation/CodingStyle says that 'typedef struct foo foo_t' is evil. I tend to agree. Eliminate all uses of such construct.
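The rule is easiest to see side by side; the sketch below uses a hypothetical 'foo' structure purely for illustration:

    #include <stdio.h>

    /* Discouraged: the typedef hides the fact that 'foo_t' is a struct. */
    typedef struct foo_s {
            int value;
    } foo_t;

    /* Preferred: spell out 'struct foo' at every declaration and use. */
    struct foo {
            int value;
    };

    int main(void)
    {
            foo_t old_style = { .value = 1 };
            struct foo new_style = { .value = 2 };

            printf("%d %d\n", old_style.value, new_style.value);
            return 0;
    }

The conversion in this patch is otherwise mechanical: the typedef names are dropped and each use site spells out struct (or enum) plus the tag.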
--- linux-core/xgi_cmdlist.c | 26 ++++---- linux-core/xgi_cmdlist.h | 12 ++-- linux-core/xgi_drv.c | 108 ++++++++++++++++---------------- linux-core/xgi_drv.h | 130 +++++++++++++++++++-------------------- linux-core/xgi_fb.c | 110 ++++++++++++++++----------------- linux-core/xgi_fb.h | 31 ++-------- linux-core/xgi_linux.h | 20 +++--- linux-core/xgi_misc.c | 32 +++++----- linux-core/xgi_misc.h | 26 ++++---- linux-core/xgi_pcie.c | 116 +++++++++++++++++----------------- linux-core/xgi_pcie.h | 18 +++--- linux-core/xgi_regs.h | 96 ++++++++++++++--------------- 12 files changed, 349 insertions(+), 376 deletions(-) diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index 99be2145..2cdf714f 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -47,17 +47,17 @@ U32 s_flush2D[AGPCMDLIST_FLUSH_CMD_LEN] = { FLUSH_2D }; -xgi_cmdring_info_t s_cmdring; +struct xgi_cmdring_info s_cmdring; -static void addFlush2D(xgi_info_t * info); -static U32 getCurBatchBeginPort(xgi_cmd_info_t * pCmdInfo); -static void triggerHWCommandList(xgi_info_t * info, U32 triggerCounter); +static void addFlush2D(struct xgi_info * info); +static U32 getCurBatchBeginPort(struct xgi_cmd_info * pCmdInfo); +static void triggerHWCommandList(struct xgi_info * info, U32 triggerCounter); static void xgi_cmdlist_reset(void); -int xgi_cmdlist_initialize(xgi_info_t * info, U32 size) +int xgi_cmdlist_initialize(struct xgi_info * info, U32 size) { - //xgi_mem_req_t mem_req; - xgi_mem_alloc_t mem_alloc; + //struct xgi_mem_req mem_req; + struct xgi_mem_alloc mem_alloc; //mem_req.size = size; @@ -76,7 +76,7 @@ int xgi_cmdlist_initialize(xgi_info_t * info, U32 size) return 1; } -void xgi_submit_cmdlist(xgi_info_t * info, xgi_cmd_info_t * pCmdInfo) +void xgi_submit_cmdlist(struct xgi_info * info, struct xgi_cmd_info * pCmdInfo) { U32 beginPort; /** XGI_INFO("Jong-xgi_submit_cmdlist-Begin \n"); **/ @@ -238,7 +238,7 @@ void xgi_submit_cmdlist(xgi_info_t * info, xgi_cmd_info_t * pCmdInfo) 2 - fb 3 - logout */ -void xgi_state_change(xgi_info_t * info, xgi_state_info_t * pStateInfo) +void xgi_state_change(struct xgi_info * info, struct xgi_state_info * pStateInfo) { #define STATE_CONSOLE 0 #define STATE_GRAPHIC 1 @@ -273,7 +273,7 @@ void xgi_cmdlist_reset(void) s_cmdring._cmdRingOffset = 0; } -void xgi_cmdlist_cleanup(xgi_info_t * info) +void xgi_cmdlist_cleanup(struct xgi_info * info) { if (s_cmdring._cmdRingBuffer != 0) { xgi_pcie_free(info, s_cmdring._cmdRingBusAddr); @@ -283,7 +283,7 @@ void xgi_cmdlist_cleanup(xgi_info_t * info) } } -static void triggerHWCommandList(xgi_info_t * info, U32 triggerCounter) +static void triggerHWCommandList(struct xgi_info * info, U32 triggerCounter) { static U32 s_triggerID = 1; @@ -295,7 +295,7 @@ static void triggerHWCommandList(xgi_info_t * info, U32 triggerCounter) } } -static U32 getCurBatchBeginPort(xgi_cmd_info_t * pCmdInfo) +static U32 getCurBatchBeginPort(struct xgi_cmd_info * pCmdInfo) { // Convert the batch type to begin port ID switch (pCmdInfo->_firstBeginType) { @@ -313,7 +313,7 @@ static U32 getCurBatchBeginPort(xgi_cmd_info_t * pCmdInfo) } } -static void addFlush2D(xgi_info_t * info) +static void addFlush2D(struct xgi_info * info) { U32 *flushBatchVirtAddr; U32 flushBatchHWAddr; diff --git a/linux-core/xgi_cmdlist.h b/linux-core/xgi_cmdlist.h index 5fe1de71..b11511ff 100644 --- a/linux-core/xgi_cmdlist.h +++ b/linux-core/xgi_cmdlist.h @@ -57,20 +57,20 @@ typedef enum { AGPCMDLIST_DUMY_END_BATCH_LEN = AGPCMDLIST_BEGIN_SIZE } CMD_SIZE; -typedef struct xgi_cmdring_info_s 
{ +struct xgi_cmdring_info { U32 _cmdRingSize; U32 _cmdRingBuffer; U32 _cmdRingBusAddr; U32 _lastBatchStartAddr; U32 _cmdRingOffset; -} xgi_cmdring_info_t; +}; -extern int xgi_cmdlist_initialize(xgi_info_t * info, U32 size); +extern int xgi_cmdlist_initialize(struct xgi_info * info, U32 size); -extern void xgi_submit_cmdlist(xgi_info_t * info, xgi_cmd_info_t * pCmdInfo); +extern void xgi_submit_cmdlist(struct xgi_info * info, struct xgi_cmd_info * pCmdInfo); -extern void xgi_state_change(xgi_info_t * info, xgi_state_info_t * pStateInfo); +extern void xgi_state_change(struct xgi_info * info, struct xgi_state_info * pStateInfo); -extern void xgi_cmdlist_cleanup(xgi_info_t * info); +extern void xgi_cmdlist_cleanup(struct xgi_info * info); #endif /* _XGI_CMDLIST_H_ */ diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index a01b3c22..44b003a8 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -53,14 +53,14 @@ int xgi_major = XGI_DEV_MAJOR; /* xgi reserved major device number. */ static int xgi_num_devices = 0; -xgi_info_t xgi_devices[XGI_MAX_DEVICES]; +struct xgi_info xgi_devices[XGI_MAX_DEVICES]; #if defined(XGI_PM_SUPPORT_APM) static struct pm_dev *apm_xgi_dev[XGI_MAX_DEVICES] = { 0 }; #endif /* add one for the control device */ -xgi_info_t xgi_ctl_device; +struct xgi_info xgi_ctl_device; wait_queue_head_t xgi_ctl_waitqueue; #ifdef CONFIG_PROC_FS @@ -74,7 +74,7 @@ devfs_handle_t xgi_devfs_handles[XGI_MAX_DEVICES]; struct list_head xgi_mempid_list; /* xgi_ functions.. do not take a state device parameter */ -static int xgi_post_vbios(xgi_ioctl_post_vbios_t * info); +static int xgi_post_vbios(struct xgi_ioctl_post_vbios * info); static void xgi_proc_create(void); static void xgi_proc_remove_all(struct proc_dir_entry *); static void xgi_proc_remove(void); @@ -110,7 +110,7 @@ unsigned int xgi_kern_ctl_poll(struct file *, poll_table *); void xgi_kern_isr_bh(unsigned long); irqreturn_t xgi_kern_isr(int, void *, struct pt_regs *); -static void xgi_lock_init(xgi_info_t * info); +static void xgi_lock_init(struct xgi_info * info); #if defined(XGI_PM_SUPPORT_ACPI) int xgi_kern_acpi_standby(struct pci_dev *, u32); @@ -128,7 +128,7 @@ int xgi_kern_acpi_resume(struct pci_dev *); #define XGI_CHECK_PCI_CONFIG(xgi) \ xgi_check_pci_config(xgi, __LINE__) -static inline void xgi_check_pci_config(xgi_info_t * info, int line) +static inline void xgi_check_pci_config(struct xgi_info * info, int line) { unsigned short cmd, flag = 0; @@ -208,7 +208,7 @@ static struct pci_driver xgi_pci_driver = { */ int xgi_kern_probe(struct pci_dev *dev, const struct pci_device_id *id_table) { - xgi_info_t *info; + struct xgi_info *info; if ((dev->vendor != PCI_VENDOR_ID_XGI) || (dev->class != (PCI_CLASS_DISPLAY_VGA << 8))) { @@ -361,8 +361,8 @@ void xgi_kern_vma_open(struct vm_area_struct *vma) vma->vm_start, vma->vm_end, XGI_VMA_OFFSET(vma)); if (XGI_VMA_PRIVATE(vma)) { - xgi_pcie_block_t *block = - (xgi_pcie_block_t *) XGI_VMA_PRIVATE(vma); + struct xgi_pcie_block *block = + (struct xgi_pcie_block *) XGI_VMA_PRIVATE(vma); XGI_ATOMIC_INC(block->use_count); } } @@ -373,8 +373,8 @@ void xgi_kern_vma_release(struct vm_area_struct *vma) vma->vm_start, vma->vm_end, XGI_VMA_OFFSET(vma)); if (XGI_VMA_PRIVATE(vma)) { - xgi_pcie_block_t *block = - (xgi_pcie_block_t *) XGI_VMA_PRIVATE(vma); + struct xgi_pcie_block *block = + (struct xgi_pcie_block *) XGI_VMA_PRIVATE(vma); XGI_ATOMIC_DEC(block->use_count); /* @@ -393,7 +393,7 @@ void xgi_kern_vma_release(struct vm_area_struct *vma) struct page *xgi_kern_vma_nopage(struct 
vm_area_struct *vma, unsigned long address, int *type) { - xgi_pcie_block_t *block = (xgi_pcie_block_t *) XGI_VMA_PRIVATE(vma); + struct xgi_pcie_block *block = (struct xgi_pcie_block *) XGI_VMA_PRIVATE(vma); struct page *page = NOPAGE_SIGBUS; unsigned long offset = 0; unsigned long page_addr = 0; @@ -436,7 +436,7 @@ struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma, struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma, unsigned long address, int write_access) { - xgi_pcie_block_t *block = (xgi_pcie_block_t *) XGI_VMA_PRIVATE(vma); + struct xgi_pcie_block *block = (struct xgi_pcie_block *) XGI_VMA_PRIVATE(vma); struct page *page = NOPAGE_SIGBUS; unsigned long offset = 0; unsigned long page_addr = 0; @@ -496,15 +496,15 @@ static struct file_operations xgi_fops = { .release = xgi_kern_release, }; -static xgi_file_private_t *xgi_alloc_file_private(void) +static struct xgi_file_private *xgi_alloc_file_private(void) { - xgi_file_private_t *fp; + struct xgi_file_private *fp; - XGI_KMALLOC(fp, sizeof(xgi_file_private_t)); + XGI_KMALLOC(fp, sizeof(struct xgi_file_private)); if (!fp) return NULL; - memset(fp, 0, sizeof(xgi_file_private_t)); + memset(fp, 0, sizeof(struct xgi_file_private)); /* initialize this file's event queue */ init_waitqueue_head(&fp->wait_queue); @@ -514,17 +514,17 @@ static xgi_file_private_t *xgi_alloc_file_private(void) return fp; } -static void xgi_free_file_private(xgi_file_private_t * fp) +static void xgi_free_file_private(struct xgi_file_private * fp) { if (fp == NULL) return; - XGI_KFREE(fp, sizeof(xgi_file_private_t)); + XGI_KFREE(fp, sizeof(struct xgi_file_private)); } int xgi_kern_open(struct inode *inode, struct file *filp) { - xgi_info_t *info = NULL; + struct xgi_info *info = NULL; int dev_num; int result = 0, status; @@ -621,7 +621,7 @@ int xgi_kern_open(struct inode *inode, struct file *filp) int xgi_kern_release(struct inode *inode, struct file *filp) { - xgi_info_t *info = XGI_INFO_FROM_FP(filp); + struct xgi_info *info = XGI_INFO_FROM_FP(filp); XGI_CHECK_PCI_CONFIG(info); @@ -674,8 +674,8 @@ int xgi_kern_release(struct inode *inode, struct file *filp) int xgi_kern_mmap(struct file *filp, struct vm_area_struct *vma) { //struct inode *inode = INODE_FROM_FP(filp); - xgi_info_t *info = XGI_INFO_FROM_FP(filp); - xgi_pcie_block_t *block; + struct xgi_info *info = XGI_INFO_FROM_FP(filp); + struct xgi_pcie_block *block; int pages = 0; unsigned long prot; @@ -792,8 +792,8 @@ int xgi_kern_mmap(struct file *filp, struct vm_area_struct *vma) unsigned int xgi_kern_poll(struct file *filp, struct poll_table_struct *wait) { - xgi_file_private_t *fp; - xgi_info_t *info; + struct xgi_file_private *fp; + struct xgi_info *info; unsigned int mask = 0; unsigned long eflags; @@ -828,8 +828,8 @@ unsigned int xgi_kern_poll(struct file *filp, struct poll_table_struct *wait) int xgi_kern_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - xgi_info_t *info; - xgi_mem_alloc_t *alloc = NULL; + struct xgi_info *info; + struct xgi_mem_alloc *alloc = NULL; int status = 0; void *arg_copy; @@ -880,21 +880,21 @@ int xgi_kern_ioctl(struct inode *inode, struct file *filp, } else XGI_INFO("Jong-copy_from_user-OK! 
\n"); - alloc = (xgi_mem_alloc_t *) arg_copy; + alloc = (struct xgi_mem_alloc *) arg_copy; XGI_INFO("Jong-succeeded in copy_from_user 0x%lx, 0x%x bytes.\n", arg, arg_size); switch (_IOC_NR(cmd)) { case XGI_ESC_DEVICE_INFO: XGI_INFO("Jong-xgi_ioctl_get_device_info \n"); - xgi_get_device_info(info, (struct xgi_chip_info_s *)arg_copy); + xgi_get_device_info(info, (struct xgi_chip_info *)arg_copy); break; case XGI_ESC_POST_VBIOS: XGI_INFO("Jong-xgi_ioctl_post_vbios \n"); break; case XGI_ESC_FB_ALLOC: XGI_INFO("Jong-xgi_ioctl_fb_alloc \n"); - xgi_fb_alloc(info, (struct xgi_mem_req_s *)arg_copy, alloc); + xgi_fb_alloc(info, (struct xgi_mem_req *)arg_copy, alloc); break; case XGI_ESC_FB_FREE: XGI_INFO("Jong-xgi_ioctl_fb_free \n"); @@ -906,8 +906,8 @@ int xgi_kern_ioctl(struct inode *inode, struct file *filp, break; case XGI_ESC_PCIE_ALLOC: XGI_INFO("Jong-xgi_ioctl_pcie_alloc \n"); - xgi_pcie_alloc(info, ((xgi_mem_req_t *) arg_copy)->size, - ((xgi_mem_req_t *) arg_copy)->owner, alloc); + xgi_pcie_alloc(info, ((struct xgi_mem_req *) arg_copy)->size, + ((struct xgi_mem_req *) arg_copy)->owner, alloc); break; case XGI_ESC_PCIE_FREE: XGI_INFO("Jong-xgi_ioctl_pcie_free: bus_addr = 0x%lx \n", @@ -920,15 +920,15 @@ int xgi_kern_ioctl(struct inode *inode, struct file *filp, break; case XGI_ESC_GET_SCREEN_INFO: XGI_INFO("Jong-xgi_get_screen_info \n"); - xgi_get_screen_info(info, (struct xgi_screen_info_s *)arg_copy); + xgi_get_screen_info(info, (struct xgi_screen_info *)arg_copy); break; case XGI_ESC_PUT_SCREEN_INFO: XGI_INFO("Jong-xgi_put_screen_info \n"); - xgi_put_screen_info(info, (struct xgi_screen_info_s *)arg_copy); + xgi_put_screen_info(info, (struct xgi_screen_info *)arg_copy); break; case XGI_ESC_MMIO_INFO: XGI_INFO("Jong-xgi_ioctl_get_mmio_info \n"); - xgi_get_mmio_info(info, (struct xgi_mmio_info_s *)arg_copy); + xgi_get_mmio_info(info, (struct xgi_mmio_info *)arg_copy); break; case XGI_ESC_GE_RESET: XGI_INFO("Jong-xgi_ioctl_ge_reset \n"); @@ -936,7 +936,7 @@ int xgi_kern_ioctl(struct inode *inode, struct file *filp, break; case XGI_ESC_SAREA_INFO: XGI_INFO("Jong-xgi_ioctl_sarea_info \n"); - xgi_sarea_info(info, (struct xgi_sarea_info_s *)arg_copy); + xgi_sarea_info(info, (struct xgi_sarea_info *)arg_copy); break; case XGI_ESC_DUMP_REGISTER: XGI_INFO("Jong-xgi_ioctl_dump_register \n"); @@ -945,12 +945,12 @@ int xgi_kern_ioctl(struct inode *inode, struct file *filp, case XGI_ESC_DEBUG_INFO: XGI_INFO("Jong-xgi_ioctl_restore_registers \n"); xgi_restore_registers(info); - //xgi_write_pcie_mem(info, (struct xgi_mem_req_s *) arg_copy); - //xgi_read_pcie_mem(info, (struct xgi_mem_req_s *) arg_copy); + //xgi_write_pcie_mem(info, (struct xgi_mem_req *) arg_copy); + //xgi_read_pcie_mem(info, (struct xgi_mem_req *) arg_copy); break; case XGI_ESC_SUBMIT_CMDLIST: XGI_INFO("Jong-xgi_ioctl_submit_cmdlist \n"); - xgi_submit_cmdlist(info, (xgi_cmd_info_t *) arg_copy); + xgi_submit_cmdlist(info, (struct xgi_cmd_info *) arg_copy); break; case XGI_ESC_TEST_RWINKERNEL: XGI_INFO("Jong-xgi_test_rwinkernel \n"); @@ -958,11 +958,11 @@ int xgi_kern_ioctl(struct inode *inode, struct file *filp, break; case XGI_ESC_STATE_CHANGE: XGI_INFO("Jong-xgi_state_change \n"); - xgi_state_change(info, (xgi_state_info_t *) arg_copy); + xgi_state_change(info, (struct xgi_state_info *) arg_copy); break; case XGI_ESC_CPUID: XGI_INFO("Jong-XGI_ESC_CPUID \n"); - xgi_get_cpu_id((struct cpu_info_s *)arg_copy); + xgi_get_cpu_id((struct cpu_info *)arg_copy); break; default: XGI_INFO("Jong-xgi_ioctl_default \n"); @@ -985,7 +985,7 @@ int 
xgi_kern_ioctl(struct inode *inode, struct file *filp, */ int xgi_kern_ctl_open(struct inode *inode, struct file *filp) { - xgi_info_t *info = &xgi_ctl_device; + struct xgi_info *info = &xgi_ctl_device; int rc = 0; @@ -1011,7 +1011,7 @@ int xgi_kern_ctl_open(struct inode *inode, struct file *filp) int xgi_kern_ctl_close(struct inode *inode, struct file *filp) { - xgi_info_t *info = XGI_INFO_FROM_FP(filp); + struct xgi_info *info = XGI_INFO_FROM_FP(filp); XGI_INFO("Jong-xgi_kern_ctl_close\n"); @@ -1031,7 +1031,7 @@ int xgi_kern_ctl_close(struct inode *inode, struct file *filp) unsigned int xgi_kern_ctl_poll(struct file *filp, poll_table * wait) { - //xgi_info_t *info = XGI_INFO_FROM_FP(filp);; + //struct xgi_info *info = XGI_INFO_FROM_FP(filp);; unsigned int ret = 0; if (!(filp->f_flags & O_NONBLOCK)) { @@ -1073,7 +1073,7 @@ static u8 xgi_find_pcie_capability(struct pci_dev *dev) return 0; } -static struct pci_dev *xgi_get_pci_device(xgi_info_t * info) +static struct pci_dev *xgi_get_pci_device(struct xgi_info * info) { struct pci_dev *dev; @@ -1095,8 +1095,8 @@ int xgi_kern_read_card_info(char *page, char **start, off_t off, char *type; int len = 0; - xgi_info_t *info; - info = (xgi_info_t *) data; + struct xgi_info *info; + info = (struct xgi_info *) data; dev = xgi_get_pci_device(info); if (!dev) @@ -1143,8 +1143,8 @@ static void xgi_proc_create(void) struct proc_dir_entry *entry; struct proc_dir_entry *proc_xgi_pcie, *proc_xgi_cards; - xgi_info_t *info; - xgi_info_t *xgi_max_devices; + struct xgi_info *info; + struct xgi_info *xgi_max_devices; /* world readable directory */ int flags = S_IFDIR | S_IRUGO | S_IXUGO; @@ -1268,7 +1268,7 @@ static void xgi_proc_remove(void) */ irqreturn_t xgi_kern_isr(int irq, void *dev_id, struct pt_regs *regs) { - xgi_info_t *info = (xgi_info_t *) dev_id; + struct xgi_info *info = (struct xgi_info *) dev_id; u32 need_to_run_bottom_half = 0; //XGI_INFO("xgi_kern_isr \n"); @@ -1286,7 +1286,7 @@ irqreturn_t xgi_kern_isr(int irq, void *dev_id, struct pt_regs *regs) void xgi_kern_isr_bh(unsigned long data) { - xgi_info_t *info = (xgi_info_t *) data; + struct xgi_info *info = (struct xgi_info *) data; XGI_INFO("xgi_kern_isr_bh \n"); @@ -1295,7 +1295,7 @@ void xgi_kern_isr_bh(unsigned long data) XGI_CHECK_PCI_CONFIG(info); } -static void xgi_lock_init(xgi_info_t * info) +static void xgi_lock_init(struct xgi_info * info) { if (info == NULL) return; @@ -1309,7 +1309,7 @@ static void xgi_lock_init(xgi_info_t * info) XGI_ATOMIC_SET(info->use_count, 0); } -static void xgi_dev_init(xgi_info_t * info) +static void xgi_dev_init(struct xgi_info * info) { struct pci_dev *pdev = NULL; struct xgi_dev *dev; @@ -1354,7 +1354,7 @@ static void xgi_dev_init(xgi_info_t * info) static int __init xgi_init_module(void) { - xgi_info_t *info = &xgi_devices[xgi_num_devices]; + struct xgi_info *info = &xgi_devices[xgi_num_devices]; int i, result; XGI_INFO("Jong-xgi kernel driver %s initializing\n", XGI_DRV_VERSION); @@ -1421,7 +1421,7 @@ static int __init xgi_init_module(void) /* init the xgi control device */ { - xgi_info_t *info_ctl = &xgi_ctl_device; + struct xgi_info *info_ctl = &xgi_ctl_device; xgi_lock_init(info_ctl); } diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 5d76b632..32ee5e81 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -93,26 +93,26 @@ /* need a fake device number for control device; just to flag it for msgs */ #define XGI_CONTROL_DEVICE_NUMBER 100 -typedef struct { +struct xgi_aperture { U32 base; // pcie base is different from fb base 
U32 size; U8 *vbase; -} xgi_aperture_t; +}; -typedef struct xgi_screen_info_s { +struct xgi_screen_info { U32 scrn_start; U32 scrn_xres; U32 scrn_yres; U32 scrn_bpp; U32 scrn_pitch; -} xgi_screen_info_t; +}; -typedef struct xgi_sarea_info_s { +struct xgi_sarea_info { U32 bus_addr; U32 size; -} xgi_sarea_info_t; +}; -typedef struct xgi_info_s { +struct xgi_info { struct pci_dev *dev; int flags; int device_number; @@ -123,11 +123,11 @@ typedef struct xgi_info_s { U8 revision_id; /* physical characteristics */ - xgi_aperture_t mmio; - xgi_aperture_t fb; - xgi_aperture_t pcie; - xgi_screen_info_t scrn_info; - xgi_sarea_info_t sarea_info; + struct xgi_aperture mmio; + struct xgi_aperture fb; + struct xgi_aperture pcie; + struct xgi_screen_info scrn_info; + struct xgi_sarea_info sarea_info; /* look up table parameters */ U32 *lut_base; @@ -150,18 +150,18 @@ typedef struct xgi_info_s { struct semaphore info_sem; struct semaphore fb_sem; struct semaphore pcie_sem; -} xgi_info_t; +}; -typedef struct xgi_ioctl_post_vbios { +struct xgi_ioctl_post_vbios { U32 bus; U32 slot; -} xgi_ioctl_post_vbios_t; +}; -typedef enum xgi_mem_location_s { +enum xgi_mem_location { NON_LOCAL = 0, LOCAL = 1, INVALID = 0x7fffffff -} xgi_mem_location_t; +}; enum PcieOwner { PCIE_2D = 0, @@ -176,23 +176,23 @@ enum PcieOwner { PCIE_INVALID = 0x7fffffff }; -typedef struct xgi_mem_req_s { - xgi_mem_location_t location; +struct xgi_mem_req { + enum xgi_mem_location location; unsigned long size; unsigned long is_front; enum PcieOwner owner; unsigned long pid; -} xgi_mem_req_t; +}; -typedef struct xgi_mem_alloc_s { - xgi_mem_location_t location; +struct xgi_mem_alloc { + enum xgi_mem_location location; unsigned long size; unsigned long bus_addr; unsigned long hw_addr; unsigned long pid; -} xgi_mem_alloc_t; +}; -typedef struct xgi_chip_info_s { +struct xgi_chip_info { U32 device_id; char device_name[32]; U32 vendor_id; @@ -200,17 +200,17 @@ typedef struct xgi_chip_info_s { U32 fb_size; U32 sarea_bus_addr; U32 sarea_size; -} xgi_chip_info_t; +}; -typedef struct xgi_opengl_cmd_s { +struct xgi_opengl_cmd { U32 cmd; -} xgi_opengl_cmd_t; +}; -typedef struct xgi_mmio_info_s { - xgi_opengl_cmd_t cmd_head; +struct xgi_mmio_info { + struct xgi_opengl_cmd cmd_head; void *mmioBase; int size; -} xgi_mmio_info_t; +}; typedef enum { BTYPE_2D = 0, @@ -220,33 +220,33 @@ typedef enum { BTYPE_NONE = 0x7fffffff } BATCH_TYPE; -typedef struct xgi_cmd_info_s { +struct xgi_cmd_info { BATCH_TYPE _firstBeginType; U32 _firstBeginAddr; U32 _firstSize; U32 _curDebugID; U32 _lastBeginAddr; U32 _beginCount; -} xgi_cmd_info_t; +}; -typedef struct xgi_state_info_s { +struct xgi_state_info { U32 _fromState; U32 _toState; -} xgi_state_info_t; +}; -typedef struct cpu_info_s { +struct cpu_info { U32 _eax; U32 _ebx; U32 _ecx; U32 _edx; -} cpu_info_t; +}; -typedef struct xgi_mem_pid_s { +struct xgi_mem_pid { struct list_head list; - xgi_mem_location_t location; + enum xgi_mem_location location; unsigned long bus_addr; unsigned long pid; -} xgi_mem_pid_t; +}; /* * Ioctl definitions @@ -278,32 +278,32 @@ typedef struct xgi_mem_pid_s { #define XGI_ESC_CPUID (XGI_IOCTL_BASE + 20) #define XGI_ESC_MEM_COLLECT (XGI_IOCTL_BASE + 21) -#define XGI_IOCTL_DEVICE_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_DEVICE_INFO, xgi_chip_info_t) +#define XGI_IOCTL_DEVICE_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_DEVICE_INFO, struct xgi_chip_info) #define XGI_IOCTL_POST_VBIOS _IO(XGI_IOCTL_MAGIC, XGI_ESC_POST_VBIOS) #define XGI_IOCTL_FB_INIT _IO(XGI_IOCTL_MAGIC, XGI_ESC_FB_INIT) -#define XGI_IOCTL_FB_ALLOC 
_IOWR(XGI_IOCTL_MAGIC, XGI_ESC_FB_ALLOC, xgi_mem_req_t) +#define XGI_IOCTL_FB_ALLOC _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_FB_ALLOC, struct xgi_mem_req) #define XGI_IOCTL_FB_FREE _IOW(XGI_IOCTL_MAGIC, XGI_ESC_FB_FREE, unsigned long) #define XGI_IOCTL_PCIE_INIT _IO(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_INIT) -#define XGI_IOCTL_PCIE_ALLOC _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_ALLOC, xgi_mem_req_t) +#define XGI_IOCTL_PCIE_ALLOC _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_ALLOC, struct xgi_mem_req) #define XGI_IOCTL_PCIE_FREE _IOW(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_FREE, unsigned long) -#define XGI_IOCTL_PUT_SCREEN_INFO _IOW(XGI_IOCTL_MAGIC, XGI_ESC_PUT_SCREEN_INFO, xgi_screen_info_t) -#define XGI_IOCTL_GET_SCREEN_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_GET_SCREEN_INFO, xgi_screen_info_t) +#define XGI_IOCTL_PUT_SCREEN_INFO _IOW(XGI_IOCTL_MAGIC, XGI_ESC_PUT_SCREEN_INFO, struct xgi_screen_info) +#define XGI_IOCTL_GET_SCREEN_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_GET_SCREEN_INFO, struct xgi_screen_info) #define XGI_IOCTL_GE_RESET _IO(XGI_IOCTL_MAGIC, XGI_ESC_GE_RESET) -#define XGI_IOCTL_SAREA_INFO _IOW(XGI_IOCTL_MAGIC, XGI_ESC_SAREA_INFO, xgi_sarea_info_t) +#define XGI_IOCTL_SAREA_INFO _IOW(XGI_IOCTL_MAGIC, XGI_ESC_SAREA_INFO, struct xgi_sarea_info) #define XGI_IOCTL_DUMP_REGISTER _IO(XGI_IOCTL_MAGIC, XGI_ESC_DUMP_REGISTER) #define XGI_IOCTL_DEBUG_INFO _IO(XGI_IOCTL_MAGIC, XGI_ESC_DEBUG_INFO) -#define XGI_IOCTL_MMIO_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_MMIO_INFO, xgi_mmio_info_t) +#define XGI_IOCTL_MMIO_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_MMIO_INFO, struct xgi_mmio_info) -#define XGI_IOCTL_SUBMIT_CMDLIST _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_SUBMIT_CMDLIST, xgi_cmd_info_t) +#define XGI_IOCTL_SUBMIT_CMDLIST _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_SUBMIT_CMDLIST, struct xgi_cmd_info) #define XGI_IOCTL_TEST_RWINKERNEL _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_TEST_RWINKERNEL, unsigned long) -#define XGI_IOCTL_STATE_CHANGE _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_STATE_CHANGE, xgi_state_info_t) +#define XGI_IOCTL_STATE_CHANGE _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_STATE_CHANGE, struct xgi_state_info) #define XGI_IOCTL_PCIE_CHECK _IO(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_CHECK) -#define XGI_IOCTL_CPUID _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_CPUID, cpu_info_t) +#define XGI_IOCTL_CPUID _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_CPUID, struct cpu_info) #define XGI_IOCTL_MAXNR 30 /* @@ -338,28 +338,28 @@ typedef struct xgi_mem_pid_s { (((offset) >= (info)->pcie.base) \ && (((offset) + (length)) <= (info)->pcie.base + (info)->pcie.size)) -extern int xgi_fb_heap_init(xgi_info_t * info); -extern void xgi_fb_heap_cleanup(xgi_info_t * info); +extern int xgi_fb_heap_init(struct xgi_info * info); +extern void xgi_fb_heap_cleanup(struct xgi_info * info); -extern void xgi_fb_alloc(xgi_info_t * info, xgi_mem_req_t * req, - xgi_mem_alloc_t * alloc); -extern void xgi_fb_free(xgi_info_t * info, unsigned long offset); -extern void xgi_mem_collect(xgi_info_t * info, unsigned int *pcnt); +extern void xgi_fb_alloc(struct xgi_info * info, struct xgi_mem_req * req, + struct xgi_mem_alloc * alloc); +extern void xgi_fb_free(struct xgi_info * info, unsigned long offset); +extern void xgi_mem_collect(struct xgi_info * info, unsigned int *pcnt); -extern int xgi_pcie_heap_init(xgi_info_t * info); -extern void xgi_pcie_heap_cleanup(xgi_info_t * info); +extern int xgi_pcie_heap_init(struct xgi_info * info); +extern void xgi_pcie_heap_cleanup(struct xgi_info * info); -extern void xgi_pcie_alloc(xgi_info_t * info, unsigned long size, - enum PcieOwner owner, xgi_mem_alloc_t * alloc); -extern void xgi_pcie_free(xgi_info_t * info, unsigned long 
offset); +extern void xgi_pcie_alloc(struct xgi_info * info, unsigned long size, + enum PcieOwner owner, struct xgi_mem_alloc * alloc); +extern void xgi_pcie_free(struct xgi_info * info, unsigned long offset); extern void xgi_pcie_heap_check(void); -extern struct xgi_pcie_block_s *xgi_find_pcie_block(xgi_info_t * info, +extern struct xgi_pcie_block *xgi_find_pcie_block(struct xgi_info * info, unsigned long address); -extern void *xgi_find_pcie_virt(xgi_info_t * info, unsigned long address); +extern void *xgi_find_pcie_virt(struct xgi_info * info, unsigned long address); -extern void xgi_read_pcie_mem(xgi_info_t * info, xgi_mem_req_t * req); -extern void xgi_write_pcie_mem(xgi_info_t * info, xgi_mem_req_t * req); +extern void xgi_read_pcie_mem(struct xgi_info * info, struct xgi_mem_req * req); +extern void xgi_write_pcie_mem(struct xgi_info * info, struct xgi_mem_req * req); -extern void xgi_test_rwinkernel(xgi_info_t * info, unsigned long address); +extern void xgi_test_rwinkernel(struct xgi_info * info, unsigned long address); #endif diff --git a/linux-core/xgi_fb.c b/linux-core/xgi_fb.c index fab99ae2..56cc589b 100644 --- a/linux-core/xgi_fb.c +++ b/linux-core/xgi_fb.c @@ -33,19 +33,19 @@ #define XGI_FB_HEAP_START 0x1000000 -static xgi_mem_heap_t *xgi_fb_heap; -static kmem_cache_t *xgi_fb_cache_block = NULL; +static struct xgi_mem_heap *xgi_fb_heap; +static struct kmem_cache *xgi_fb_cache_block = NULL; extern struct list_head xgi_mempid_list; -static xgi_mem_block_t *xgi_mem_new_node(void); -static xgi_mem_block_t *xgi_mem_alloc(xgi_info_t * info, unsigned long size); -static xgi_mem_block_t *xgi_mem_free(xgi_info_t * info, unsigned long offset); +static struct xgi_mem_block *xgi_mem_new_node(void); +static struct xgi_mem_block *xgi_mem_alloc(struct xgi_info * info, unsigned long size); +static struct xgi_mem_block *xgi_mem_free(struct xgi_info * info, unsigned long offset); -void xgi_fb_alloc(xgi_info_t * info, - xgi_mem_req_t * req, xgi_mem_alloc_t * alloc) +void xgi_fb_alloc(struct xgi_info * info, + struct xgi_mem_req * req, struct xgi_mem_alloc * alloc) { - xgi_mem_block_t *block; - xgi_mem_pid_t *mempid_block; + struct xgi_mem_block *block; + struct xgi_mem_pid *mempid_block; if (req->is_front) { alloc->location = LOCAL; @@ -74,7 +74,7 @@ void xgi_fb_alloc(xgi_info_t * info, /* manage mempid */ mempid_block = - kmalloc(sizeof(xgi_mem_pid_t), GFP_KERNEL); + kmalloc(sizeof(struct xgi_mem_pid), GFP_KERNEL); mempid_block->location = LOCAL; mempid_block->bus_addr = alloc->bus_addr; mempid_block->pid = alloc->pid; @@ -90,12 +90,12 @@ void xgi_fb_alloc(xgi_info_t * info, } } -void xgi_fb_free(xgi_info_t * info, unsigned long bus_addr) +void xgi_fb_free(struct xgi_info * info, unsigned long bus_addr) { - xgi_mem_block_t *block; + struct xgi_mem_block *block; unsigned long offset = bus_addr - info->fb.base; - xgi_mem_pid_t *mempid_block; - xgi_mem_pid_t *mempid_freeblock = NULL; + struct xgi_mem_pid *mempid_block; + struct xgi_mem_pid *mempid_freeblock = NULL; struct list_head *mempid_list; if (offset < 0) { @@ -114,7 +114,7 @@ void xgi_fb_free(xgi_info_t * info, unsigned long bus_addr) mempid_list = xgi_mempid_list.next; while (mempid_list != &xgi_mempid_list) { mempid_block = - list_entry(mempid_list, struct xgi_mem_pid_s, list); + list_entry(mempid_list, struct xgi_mem_pid, list); if (mempid_block->location == LOCAL && mempid_block->bus_addr == bus_addr) { mempid_freeblock = mempid_block; @@ -132,11 +132,11 @@ void xgi_fb_free(xgi_info_t * info, unsigned long bus_addr) } } -int 
xgi_fb_heap_init(xgi_info_t * info) +int xgi_fb_heap_init(struct xgi_info * info) { - xgi_mem_block_t *block; + struct xgi_mem_block *block; - xgi_fb_heap = kmalloc(sizeof(xgi_mem_heap_t), GFP_KERNEL); + xgi_fb_heap = kmalloc(sizeof(struct xgi_mem_heap), GFP_KERNEL); if (!xgi_fb_heap) { XGI_ERROR("xgi_fb_heap alloc failed\n"); return 0; @@ -147,7 +147,7 @@ int xgi_fb_heap_init(xgi_info_t * info) INIT_LIST_HEAD(&xgi_fb_heap->sort_list); xgi_fb_cache_block = - kmem_cache_create("xgi_fb_block", sizeof(xgi_mem_block_t), 0, + kmem_cache_create("xgi_fb_block", sizeof(struct xgi_mem_block), 0, SLAB_HWCACHE_ALIGN, NULL, NULL); if (NULL == xgi_fb_cache_block) { @@ -156,7 +156,7 @@ int xgi_fb_heap_init(xgi_info_t * info) } block = - (xgi_mem_block_t *) kmem_cache_alloc(xgi_fb_cache_block, + (struct xgi_mem_block *) kmem_cache_alloc(xgi_fb_cache_block, GFP_KERNEL); if (!block) { XGI_ERROR("kmem_cache_alloc failed\n"); @@ -190,10 +190,10 @@ int xgi_fb_heap_init(xgi_info_t * info) return 0; } -void xgi_fb_heap_cleanup(xgi_info_t * info) +void xgi_fb_heap_cleanup(struct xgi_info * info) { struct list_head *free_list, *temp; - xgi_mem_block_t *block; + struct xgi_mem_block *block; int i; if (xgi_fb_heap) { @@ -202,7 +202,7 @@ void xgi_fb_heap_cleanup(xgi_info_t * info) temp = free_list->next; while (temp != free_list) { block = - list_entry(temp, struct xgi_mem_block_s, + list_entry(temp, struct xgi_mem_block, list); temp = temp->next; @@ -225,12 +225,12 @@ void xgi_fb_heap_cleanup(xgi_info_t * info) } } -static xgi_mem_block_t *xgi_mem_new_node(void) +static struct xgi_mem_block *xgi_mem_new_node(void) { - xgi_mem_block_t *block; + struct xgi_mem_block *block; block = - (xgi_mem_block_t *) kmem_cache_alloc(xgi_fb_cache_block, + (struct xgi_mem_block *) kmem_cache_alloc(xgi_fb_cache_block, GFP_KERNEL); if (!block) { XGI_ERROR("kmem_cache_alloc failed\n"); @@ -241,23 +241,23 @@ static xgi_mem_block_t *xgi_mem_new_node(void) } #if 0 -static void xgi_mem_insert_node_after(xgi_mem_list_t * list, - xgi_mem_block_t * current, - xgi_mem_block_t * block); -static void xgi_mem_insert_node_before(xgi_mem_list_t * list, - xgi_mem_block_t * current, - xgi_mem_block_t * block); -static void xgi_mem_insert_node_head(xgi_mem_list_t * list, - xgi_mem_block_t * block); -static void xgi_mem_insert_node_tail(xgi_mem_list_t * list, - xgi_mem_block_t * block); -static void xgi_mem_delete_node(xgi_mem_list_t * list, xgi_mem_block_t * block); +static void xgi_mem_insert_node_after(struct xgi_mem_list * list, + struct xgi_mem_block * current, + struct xgi_mem_block * block); +static void xgi_mem_insert_node_before(struct xgi_mem_list * list, + struct xgi_mem_block * current, + struct xgi_mem_block * block); +static void xgi_mem_insert_node_head(struct xgi_mem_list * list, + struct xgi_mem_block * block); +static void xgi_mem_insert_node_tail(struct xgi_mem_list * list, + struct xgi_mem_block * block); +static void xgi_mem_delete_node(struct xgi_mem_list * list, struct xgi_mem_block * block); /* * insert node:block after node:current */ -static void xgi_mem_insert_node_after(xgi_mem_list_t * list, - xgi_mem_block_t * current, - xgi_mem_block_t * block) +static void xgi_mem_insert_node_after(struct xgi_mem_list * list, + struct xgi_mem_block * current, + struct xgi_mem_block * block) { block->prev = current; block->next = current->next; @@ -273,9 +273,9 @@ static void xgi_mem_insert_node_after(xgi_mem_list_t * list, /* * insert node:block before node:current */ -static void xgi_mem_insert_node_before(xgi_mem_list_t * list, - 
xgi_mem_block_t * current, - xgi_mem_block_t * block) +static void xgi_mem_insert_node_before(struct xgi_mem_list * list, + struct xgi_mem_block * current, + struct xgi_mem_block * block) { block->prev = current->prev; block->next = current; @@ -286,7 +286,7 @@ static void xgi_mem_insert_node_before(xgi_mem_list_t * list, block->prev->next = block; } } -void xgi_mem_insert_node_head(xgi_mem_list_t * list, xgi_mem_block_t * block) +void xgi_mem_insert_node_head(struct xgi_mem_list * list, struct xgi_mem_block * block) { block->next = list->head; block->prev = NULL; @@ -299,8 +299,8 @@ void xgi_mem_insert_node_head(xgi_mem_list_t * list, xgi_mem_block_t * block) list->head = block; } -static void xgi_mem_insert_node_tail(xgi_mem_list_t * list, - xgi_mem_block_t * block) +static void xgi_mem_insert_node_tail(struct xgi_mem_list * list, + struct xgi_mem_block * block) { block->next = NULL; block->prev = list->tail; @@ -312,7 +312,7 @@ static void xgi_mem_insert_node_tail(xgi_mem_list_t * list, list->tail = block; } -static void xgi_mem_delete_node(xgi_mem_list_t * list, xgi_mem_block_t * block) +static void xgi_mem_delete_node(struct xgi_mem_list * list, struct xgi_mem_block * block) { if (block == list->head) { list->head = block->next; @@ -331,11 +331,11 @@ static void xgi_mem_delete_node(xgi_mem_list_t * list, xgi_mem_block_t * block) block->next = block->prev = NULL; } #endif -static xgi_mem_block_t *xgi_mem_alloc(xgi_info_t * info, +static struct xgi_mem_block *xgi_mem_alloc(struct xgi_info * info, unsigned long originalSize) { struct list_head *free_list; - xgi_mem_block_t *block, *free_block, *used_block; + struct xgi_mem_block *block, *free_block, *used_block; unsigned long size = (originalSize + PAGE_SIZE - 1) & PAGE_MASK; @@ -358,7 +358,7 @@ static xgi_mem_block_t *xgi_mem_alloc(xgi_info_t * info, while (free_list != &xgi_fb_heap->free_list) { XGI_INFO("free_list: 0x%px \n", free_list); - block = list_entry(free_list, struct xgi_mem_block_s, list); + block = list_entry(free_list, struct xgi_mem_block, list); if (size <= block->size) { break; } @@ -406,18 +406,18 @@ static xgi_mem_block_t *xgi_mem_alloc(xgi_info_t * info, return (used_block); } -static xgi_mem_block_t *xgi_mem_free(xgi_info_t * info, unsigned long offset) +static struct xgi_mem_block *xgi_mem_free(struct xgi_info * info, unsigned long offset) { struct list_head *free_list, *used_list; - xgi_mem_block_t *used_block = NULL, *block = NULL; - xgi_mem_block_t *prev, *next; + struct xgi_mem_block *used_block = NULL, *block = NULL; + struct xgi_mem_block *prev, *next; unsigned long upper; unsigned long lower; used_list = xgi_fb_heap->used_list.next; while (used_list != &xgi_fb_heap->used_list) { - block = list_entry(used_list, struct xgi_mem_block_s, list); + block = list_entry(used_list, struct xgi_mem_block, list); if (block->offset == offset) { break; } @@ -441,7 +441,7 @@ static xgi_mem_block_t *xgi_mem_free(xgi_info_t * info, unsigned long offset) free_list = xgi_fb_heap->free_list.next; while (free_list != &xgi_fb_heap->free_list) { - block = list_entry(free_list, struct xgi_mem_block_s, list); + block = list_entry(free_list, struct xgi_mem_block, list); if (block->offset == upper) { next = block; diff --git a/linux-core/xgi_fb.h b/linux-core/xgi_fb.h index ae078ae0..363c8bc8 100644 --- a/linux-core/xgi_fb.h +++ b/linux-core/xgi_fb.h @@ -29,42 +29,19 @@ #ifndef _XGI_FB_H_ #define _XGI_FB_H_ -typedef struct xgi_mem_block_s { +struct xgi_mem_block { struct list_head list; unsigned long offset; unsigned long size; 
atomic_t use_count; -} xgi_mem_block_t; +}; -typedef struct xgi_mem_heap_s { +struct xgi_mem_heap { struct list_head free_list; struct list_head used_list; struct list_head sort_list; unsigned long max_freesize; spinlock_t lock; -} xgi_mem_heap_t; - -#if 0 -typedef struct xgi_mem_block_s { - struct xgi_mem_block_s *next; - struct xgi_mem_block_s *prev; - unsigned long offset; - unsigned long size; - atomic_t use_count; -} xgi_mem_block_t; - -typedef struct xgi_mem_list_s { - xgi_mem_block_t *head; - xgi_mem_block_t *tail; -} xgi_mem_list_t; - -typedef struct xgi_mem_heap_s { - xgi_mem_list_t *free_list; - xgi_mem_list_t *used_list; - xgi_mem_list_t *sort_list; - unsigned long max_freesize; - spinlock_t lock; -} xgi_mem_heap_t; -#endif +}; #endif diff --git a/linux-core/xgi_linux.h b/linux-core/xgi_linux.h index 465feb3c..2602b0f5 100644 --- a/linux-core/xgi_linux.h +++ b/linux-core/xgi_linux.h @@ -415,10 +415,10 @@ static inline pgprot_t pgprot_writecombined(pgprot_t old_prot) free_pages(ptr, order); \ } -typedef struct xgi_pte_s { +struct xgi_pte { unsigned long phys_addr; unsigned long virt_addr; -} xgi_pte_t; +}; /* * AMD Athlon processors expose a subtle bug in the Linux @@ -427,12 +427,12 @@ typedef struct xgi_pte_s { * 2.4.20 is the first kernel to address it properly. The * page_attr API provides the means to solve the problem. */ -static inline void XGI_SET_PAGE_ATTRIB_UNCACHED(xgi_pte_t * page_ptr) +static inline void XGI_SET_PAGE_ATTRIB_UNCACHED(struct xgi_pte * page_ptr) { struct page *page = virt_to_page(__va(page_ptr->phys_addr)); change_page_attr(page, 1, PAGE_KERNEL_NOCACHE); } -static inline void XGI_SET_PAGE_ATTRIB_CACHED(xgi_pte_t * page_ptr) +static inline void XGI_SET_PAGE_ATTRIB_CACHED(struct xgi_pte * page_ptr) { struct page *page = virt_to_page(__va(page_ptr->phys_addr)); change_page_attr(page, 1, PAGE_KERNEL); @@ -453,20 +453,16 @@ static inline void XGI_SET_PAGE_ATTRIB_CACHED(xgi_pte_t * page_ptr) #define XGILockPage(page) SetPageLocked(page) #define XGIUnlockPage(page) ClearPageLocked(page) -/* - * hide a pointer to struct xgi_info_t in a file-private info - */ - -typedef struct { - void *info; +struct xgi_file_private { + struct xgi_info *info; U32 num_events; spinlock_t fp_lock; wait_queue_head_t wait_queue; -} xgi_file_private_t; +}; #define FILE_PRIVATE(filp) ((filp)->private_data) -#define XGI_GET_FP(filp) ((xgi_file_private_t *) FILE_PRIVATE(filp)) +#define XGI_GET_FP(filp) ((struct xgi_file_private *) FILE_PRIVATE(filp)) /* for the card devices */ #define XGI_INFO_FROM_FP(filp) (XGI_GET_FP(filp)->info) diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index 8d0e81b6..68c5ca20 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -32,7 +32,7 @@ #include "xgi_regs.h" #include "xgi_pcie.h" -void xgi_get_device_info(xgi_info_t * info, xgi_chip_info_t * req) +void xgi_get_device_info(struct xgi_info * info, struct xgi_chip_info * req) { req->device_id = info->device_id; req->device_name[0] = 'x'; @@ -46,13 +46,13 @@ void xgi_get_device_info(xgi_info_t * info, xgi_chip_info_t * req) req->sarea_size = info->sarea_info.size; } -void xgi_get_mmio_info(xgi_info_t * info, xgi_mmio_info_t * req) +void xgi_get_mmio_info(struct xgi_info * info, struct xgi_mmio_info * req) { req->mmioBase = (void *)info->mmio.base; req->size = info->mmio.size; } -void xgi_put_screen_info(xgi_info_t * info, xgi_screen_info_t * req) +void xgi_put_screen_info(struct xgi_info * info, struct xgi_screen_info * req) { info->scrn_info.scrn_start = req->scrn_start; 
info->scrn_info.scrn_xres = req->scrn_xres; @@ -71,7 +71,7 @@ void xgi_put_screen_info(xgi_info_t * info, xgi_screen_info_t * req) info->scrn_info.scrn_bpp, info->scrn_info.scrn_pitch); } -void xgi_get_screen_info(xgi_info_t * info, xgi_screen_info_t * req) +void xgi_get_screen_info(struct xgi_info * info, struct xgi_screen_info * req) { req->scrn_start = info->scrn_info.scrn_start; req->scrn_xres = info->scrn_info.scrn_xres; @@ -89,13 +89,13 @@ void xgi_get_screen_info(xgi_info_t * info, xgi_screen_info_t * req) req->scrn_yres, req->scrn_bpp, req->scrn_pitch); } -void xgi_ge_reset(xgi_info_t * info) +void xgi_ge_reset(struct xgi_info * info) { xgi_disable_ge(info); xgi_enable_ge(info); } -void xgi_sarea_info(xgi_info_t * info, xgi_sarea_info_t * req) +void xgi_sarea_info(struct xgi_info * info, struct xgi_sarea_info * req) { info->sarea_info.bus_addr = req->bus_addr; info->sarea_info.size = req->size; @@ -111,7 +111,7 @@ void xgi_sarea_info(xgi_info_t * info, xgi_sarea_info_t * req) static U32 s_invalid_begin = 0; -BOOL xgi_ge_irq_handler(xgi_info_t * info) +BOOL xgi_ge_irq_handler(struct xgi_info * info) { volatile U8 *mmio_vbase = info->mmio.vbase; volatile U32 *ge_3d_status = (volatile U32 *)(mmio_vbase + 0x2800); @@ -287,7 +287,7 @@ BOOL xgi_ge_irq_handler(xgi_info_t * info) return FALSE; } -BOOL xgi_crt_irq_handler(xgi_info_t * info) +BOOL xgi_crt_irq_handler(struct xgi_info * info) { BOOL ret = FALSE; U8 save_3ce = bReadReg(0x3ce); @@ -311,7 +311,7 @@ BOOL xgi_crt_irq_handler(xgi_info_t * info) return (ret); } -BOOL xgi_dvi_irq_handler(xgi_info_t * info) +BOOL xgi_dvi_irq_handler(struct xgi_info * info) { BOOL ret = FALSE; U8 save_3ce = bReadReg(0x3ce); @@ -344,7 +344,7 @@ BOOL xgi_dvi_irq_handler(xgi_info_t * info) return (ret); } -void xgi_dump_register(xgi_info_t * info) +void xgi_dump_register(struct xgi_info * info) { int i, j; unsigned char temp; @@ -518,13 +518,13 @@ void xgi_dump_register(xgi_info_t * info) } } -void xgi_restore_registers(xgi_info_t * info) +void xgi_restore_registers(struct xgi_info * info) { bOut3x5(0x13, 0); bOut3x5(0x8b, 2); } -void xgi_waitfor_pci_idle(xgi_info_t * info) +void xgi_waitfor_pci_idle(struct xgi_info * info) { #define WHOLD_GE_STATUS 0x2800 #define IDLE_MASK ~0x90200000 @@ -539,7 +539,7 @@ void xgi_waitfor_pci_idle(xgi_info_t * info) } } -int xgi_get_cpu_id(struct cpu_info_s *arg) +int xgi_get_cpu_id(struct cpu_info *arg) { int op = arg->_eax; __asm__("cpuid":"=a"(arg->_eax), @@ -554,9 +554,9 @@ int xgi_get_cpu_id(struct cpu_info_s *arg) /*memory collect function*/ extern struct list_head xgi_mempid_list; -void xgi_mem_collect(xgi_info_t * info, unsigned int *pcnt) +void xgi_mem_collect(struct xgi_info * info, unsigned int *pcnt) { - xgi_mem_pid_t *mempid_block; + struct xgi_mem_pid *mempid_block; struct list_head *mempid_list; struct task_struct *p, *find; unsigned int cnt = 0; @@ -565,7 +565,7 @@ void xgi_mem_collect(xgi_info_t * info, unsigned int *pcnt) while (mempid_list != &xgi_mempid_list) { mempid_block = - list_entry(mempid_list, struct xgi_mem_pid_s, list); + list_entry(mempid_list, struct xgi_mem_pid, list); mempid_list = mempid_list->next; find = NULL; diff --git a/linux-core/xgi_misc.h b/linux-core/xgi_misc.h index 37120aaa..0ebbe7e8 100644 --- a/linux-core/xgi_misc.h +++ b/linux-core/xgi_misc.h @@ -29,19 +29,19 @@ #ifndef _XGI_MISC_H_ #define _XGI_MISC_H_ -extern void xgi_dump_register(xgi_info_t * info); -extern void xgi_get_device_info(xgi_info_t * info, xgi_chip_info_t * req); -extern void xgi_get_mmio_info(xgi_info_t * 
info, xgi_mmio_info_t * req); -extern void xgi_get_screen_info(xgi_info_t * info, xgi_screen_info_t * req); -extern void xgi_put_screen_info(xgi_info_t * info, xgi_screen_info_t * req); -extern void xgi_ge_reset(xgi_info_t * info); -extern void xgi_sarea_info(xgi_info_t * info, xgi_sarea_info_t * req); -extern int xgi_get_cpu_id(struct cpu_info_s *arg); +extern void xgi_dump_register(struct xgi_info * info); +extern void xgi_get_device_info(struct xgi_info * info, struct xgi_chip_info * req); +extern void xgi_get_mmio_info(struct xgi_info * info, struct xgi_mmio_info * req); +extern void xgi_get_screen_info(struct xgi_info * info, struct xgi_screen_info * req); +extern void xgi_put_screen_info(struct xgi_info * info, struct xgi_screen_info * req); +extern void xgi_ge_reset(struct xgi_info * info); +extern void xgi_sarea_info(struct xgi_info * info, struct xgi_sarea_info * req); +extern int xgi_get_cpu_id(struct cpu_info *arg); -extern void xgi_restore_registers(xgi_info_t * info); -extern BOOL xgi_ge_irq_handler(xgi_info_t * info); -extern BOOL xgi_crt_irq_handler(xgi_info_t * info); -extern BOOL xgi_dvi_irq_handler(xgi_info_t * info); -extern void xgi_waitfor_pci_idle(xgi_info_t * info); +extern void xgi_restore_registers(struct xgi_info * info); +extern BOOL xgi_ge_irq_handler(struct xgi_info * info); +extern BOOL xgi_crt_irq_handler(struct xgi_info * info); +extern BOOL xgi_dvi_irq_handler(struct xgi_info * info); +extern void xgi_waitfor_pci_idle(struct xgi_info * info); #endif diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index 1a4d8e12..a81dbe8b 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -33,11 +33,11 @@ #include "xgi_pcie.h" #include "xgi_misc.h" -static xgi_pcie_heap_t *xgi_pcie_heap = NULL; -static kmem_cache_t *xgi_pcie_cache_block = NULL; -static xgi_pcie_block_t *xgi_pcie_vertex_block = NULL; -static xgi_pcie_block_t *xgi_pcie_cmdlist_block = NULL; -static xgi_pcie_block_t *xgi_pcie_scratchpad_block = NULL; +static struct xgi_pcie_heap *xgi_pcie_heap = NULL; +static struct kmem_cache *xgi_pcie_cache_block = NULL; +static struct xgi_pcie_block *xgi_pcie_vertex_block = NULL; +static struct xgi_pcie_block *xgi_pcie_cmdlist_block = NULL; +static struct xgi_pcie_block *xgi_pcie_scratchpad_block = NULL; extern struct list_head xgi_mempid_list; static unsigned long xgi_pcie_lut_alloc(unsigned long page_order) @@ -85,7 +85,7 @@ static void xgi_pcie_lut_free(unsigned long page_addr, unsigned long page_order) free_pages(page_addr, page_order); } -static int xgi_pcie_lut_init(xgi_info_t * info) +static int xgi_pcie_lut_init(struct xgi_info * info) { unsigned char *page_addr = NULL; unsigned long pciePageCount, lutEntryNum, lutPageCount, lutPageOrder; @@ -214,7 +214,7 @@ static int xgi_pcie_lut_init(xgi_info_t * info) return 0; } -static void xgi_pcie_lut_cleanup(xgi_info_t * info) +static void xgi_pcie_lut_cleanup(struct xgi_info * info) { if (info->lut_base) { XGI_INFO("info->lut_base: 0x%p info->lutPageOrder: 0x%lx \n", @@ -225,10 +225,10 @@ static void xgi_pcie_lut_cleanup(xgi_info_t * info) } } -static xgi_pcie_block_t *xgi_pcie_new_node(void) +static struct xgi_pcie_block *xgi_pcie_new_node(void) { - xgi_pcie_block_t *block = - (xgi_pcie_block_t *) kmem_cache_alloc(xgi_pcie_cache_block, + struct xgi_pcie_block *block = + (struct xgi_pcie_block *) kmem_cache_alloc(xgi_pcie_cache_block, GFP_KERNEL); if (block == NULL) { return NULL; @@ -247,11 +247,11 @@ static xgi_pcie_block_t *xgi_pcie_new_node(void) return block; } -static void 
xgi_pcie_block_stuff_free(xgi_pcie_block_t * block) +static void xgi_pcie_block_stuff_free(struct xgi_pcie_block * block) { struct page *page; - xgi_page_block_t *page_block = block->page_block; - xgi_page_block_t *free_block; + struct xgi_page_block *page_block = block->page_block; + struct xgi_page_block *free_block; unsigned long page_count = 0; int i; @@ -285,9 +285,9 @@ static void xgi_pcie_block_stuff_free(xgi_pcie_block_t * block) } } -int xgi_pcie_heap_init(xgi_info_t * info) +int xgi_pcie_heap_init(struct xgi_info * info) { - xgi_pcie_block_t *block; + struct xgi_pcie_block *block; if (!xgi_pcie_lut_init(info)) { XGI_ERROR("xgi_pcie_lut_init failed\n"); @@ -295,7 +295,7 @@ int xgi_pcie_heap_init(xgi_info_t * info) } xgi_pcie_heap = - (xgi_pcie_heap_t *) kmalloc(sizeof(xgi_pcie_heap_t), GFP_KERNEL); + (struct xgi_pcie_heap *) kmalloc(sizeof(struct xgi_pcie_heap), GFP_KERNEL); if (!xgi_pcie_heap) { XGI_ERROR("xgi_pcie_heap alloc failed\n"); goto fail1; @@ -307,7 +307,7 @@ int xgi_pcie_heap_init(xgi_info_t * info) xgi_pcie_heap->max_freesize = info->pcie.size; xgi_pcie_cache_block = - kmem_cache_create("xgi_pcie_block", sizeof(xgi_pcie_block_t), 0, + kmem_cache_create("xgi_pcie_block", sizeof(struct xgi_pcie_block), 0, SLAB_HWCACHE_ALIGN, NULL, NULL); if (NULL == xgi_pcie_cache_block) { @@ -315,7 +315,7 @@ int xgi_pcie_heap_init(xgi_info_t * info) goto fail2; } - block = (xgi_pcie_block_t *) xgi_pcie_new_node(); + block = (struct xgi_pcie_block *) xgi_pcie_new_node(); if (!block) { XGI_ERROR("xgi_pcie_new_node failed\n"); goto fail3; @@ -348,7 +348,7 @@ int xgi_pcie_heap_init(xgi_info_t * info) void xgi_pcie_heap_check(void) { struct list_head *useList, *temp; - xgi_pcie_block_t *block; + struct xgi_pcie_block *block; unsigned int ownerIndex; #ifdef XGI_DEBUG char *ownerStr[6] = @@ -360,7 +360,7 @@ void xgi_pcie_heap_check(void) temp = useList->next; XGI_INFO("pcie freemax = 0x%lx\n", xgi_pcie_heap->max_freesize); while (temp != useList) { - block = list_entry(temp, struct xgi_pcie_block_s, list); + block = list_entry(temp, struct xgi_pcie_block, list); if (block->owner == PCIE_2D) ownerIndex = 0; else if (block->owner > PCIE_3D_TEXTURE @@ -378,10 +378,10 @@ void xgi_pcie_heap_check(void) } } -void xgi_pcie_heap_cleanup(xgi_info_t * info) +void xgi_pcie_heap_cleanup(struct xgi_info * info) { struct list_head *free_list, *temp; - xgi_pcie_block_t *block; + struct xgi_pcie_block *block; int j; xgi_pcie_lut_cleanup(info); @@ -394,7 +394,7 @@ void xgi_pcie_heap_cleanup(xgi_info_t * info) while (temp != free_list) { block = - list_entry(temp, struct xgi_pcie_block_s, + list_entry(temp, struct xgi_pcie_block, list); XGI_INFO ("No. 
%d block->offset: 0x%lx block->size: 0x%lx \n", @@ -421,13 +421,13 @@ void xgi_pcie_heap_cleanup(xgi_info_t * info) } } -static xgi_pcie_block_t *xgi_pcie_mem_alloc(xgi_info_t * info, +static struct xgi_pcie_block *xgi_pcie_mem_alloc(struct xgi_info * info, unsigned long originalSize, enum PcieOwner owner) { struct list_head *free_list; - xgi_pcie_block_t *block, *used_block, *free_block; - xgi_page_block_t *page_block, *prev_page_block; + struct xgi_pcie_block *block, *used_block, *free_block; + struct xgi_page_block *page_block, *prev_page_block; struct page *page; unsigned long page_order = 0, count = 0, index = 0; unsigned long page_addr = 0; @@ -482,7 +482,7 @@ static xgi_pcie_block_t *xgi_pcie_mem_alloc(xgi_info_t * info, free_list = xgi_pcie_heap->free_list.next; while (free_list != &xgi_pcie_heap->free_list) { //XGI_INFO("free_list: 0x%px \n", free_list); - block = list_entry(free_list, struct xgi_pcie_block_s, list); + block = list_entry(free_list, struct xgi_pcie_block, list); if (size <= block->size) { break; } @@ -543,12 +543,12 @@ static xgi_pcie_block_t *xgi_pcie_mem_alloc(xgi_info_t * info, used_block->page_order); used_block->page_block = NULL; - //used_block->page_block = (xgi_pages_block_t *)kmalloc(sizeof(xgi_pages_block_t), GFP_KERNEL); - //if (!used_block->page_block) return NULL; + //used_block->page_block = (struct xgi_pages_block *)kmalloc(sizeof(struct xgi_pages_block), GFP_KERNEL); + //if (!used_block->page_block) return NULL;_t //used_block->page_block->next = NULL; used_block->page_table = - (xgi_pte_t *) kmalloc(sizeof(xgi_pte_t) * used_block->page_count, + (struct xgi_pte *) kmalloc(sizeof(struct xgi_pte) * used_block->page_count, GFP_KERNEL); if (used_block->page_table == NULL) { goto fail; @@ -595,8 +595,8 @@ static xgi_pcie_block_t *xgi_pcie_mem_alloc(xgi_info_t * info, if (page_block == NULL) { page_block = - (xgi_page_block_t *) - kmalloc(sizeof(xgi_page_block_t), GFP_KERNEL); + (struct xgi_page_block *) + kmalloc(sizeof(struct xgi_page_block), GFP_KERNEL); if (!page_block) { XGI_ERROR ("Can't get memory for page_block! 
\n"); @@ -697,17 +697,17 @@ static xgi_pcie_block_t *xgi_pcie_mem_alloc(xgi_info_t * info, return NULL; } -static xgi_pcie_block_t *xgi_pcie_mem_free(xgi_info_t * info, +static struct xgi_pcie_block *xgi_pcie_mem_free(struct xgi_info * info, unsigned long offset) { struct list_head *free_list, *used_list; - xgi_pcie_block_t *used_block, *block = NULL; - xgi_pcie_block_t *prev, *next; + struct xgi_pcie_block *used_block, *block = NULL; + struct xgi_pcie_block *prev, *next; unsigned long upper, lower; used_list = xgi_pcie_heap->used_list.next; while (used_list != &xgi_pcie_heap->used_list) { - block = list_entry(used_list, struct xgi_pcie_block_s, list); + block = list_entry(used_list, struct xgi_pcie_block, list); if (block->offset == offset) { break; } @@ -737,7 +737,7 @@ static xgi_pcie_block_t *xgi_pcie_mem_free(xgi_info_t * info, free_list = xgi_pcie_heap->free_list.next; while (free_list != &xgi_pcie_heap->free_list) { - block = list_entry(free_list, struct xgi_pcie_block_s, list); + block = list_entry(free_list, struct xgi_pcie_block, list); if (block->offset == upper) { next = block; } else if ((block->offset + block->size) == lower) { @@ -787,11 +787,11 @@ static xgi_pcie_block_t *xgi_pcie_mem_free(xgi_info_t * info, return (used_block); } -void xgi_pcie_alloc(xgi_info_t * info, unsigned long size, - enum PcieOwner owner, xgi_mem_alloc_t * alloc) +void xgi_pcie_alloc(struct xgi_info * info, unsigned long size, + enum PcieOwner owner, struct xgi_mem_alloc * alloc) { - xgi_pcie_block_t *block; - xgi_mem_pid_t *mempid_block; + struct xgi_pcie_block *block; + struct xgi_mem_pid *mempid_block; xgi_down(info->pcie_sem); block = xgi_pcie_mem_alloc(info, size, owner); @@ -819,7 +819,7 @@ void xgi_pcie_alloc(xgi_info_t * info, unsigned long size, */ if (owner == PCIE_3D || owner == PCIE_3D_TEXTURE) { mempid_block = - kmalloc(sizeof(xgi_mem_pid_t), GFP_KERNEL); + kmalloc(sizeof(struct xgi_mem_pid), GFP_KERNEL); if (!mempid_block) XGI_ERROR("mempid_block alloc failed\n"); mempid_block->location = NON_LOCAL; @@ -837,12 +837,12 @@ void xgi_pcie_alloc(xgi_info_t * info, unsigned long size, } } -void xgi_pcie_free(xgi_info_t * info, unsigned long bus_addr) +void xgi_pcie_free(struct xgi_info * info, unsigned long bus_addr) { - xgi_pcie_block_t *block; + struct xgi_pcie_block *block; unsigned long offset = bus_addr - info->pcie.base; - xgi_mem_pid_t *mempid_block; - xgi_mem_pid_t *mempid_freeblock = NULL; + struct xgi_mem_pid *mempid_block; + struct xgi_mem_pid *mempid_freeblock = NULL; struct list_head *mempid_list; char isvertex = 0; int processcnt; @@ -857,7 +857,7 @@ void xgi_pcie_free(xgi_info_t * info, unsigned long bus_addr) mempid_list = xgi_mempid_list.next; while (mempid_list != &xgi_mempid_list) { mempid_block = - list_entry(mempid_list, struct xgi_mem_pid_s, list); + list_entry(mempid_list, struct xgi_mem_pid, list); if (mempid_block->location == NON_LOCAL && mempid_block->bus_addr == 0xFFFFFFFF) { ++processcnt; @@ -884,7 +884,7 @@ void xgi_pcie_free(xgi_info_t * info, unsigned long bus_addr) mempid_list = xgi_mempid_list.next; while (mempid_list != &xgi_mempid_list) { mempid_block = - list_entry(mempid_list, struct xgi_mem_pid_s, list); + list_entry(mempid_list, struct xgi_mem_pid, list); if (mempid_block->location == NON_LOCAL && ((isvertex && mempid_block->bus_addr == 0xFFFFFFFF) || (!isvertex && mempid_block->bus_addr == bus_addr))) { @@ -906,17 +906,17 @@ void xgi_pcie_free(xgi_info_t * info, unsigned long bus_addr) * given a bus address, fid the pcie mem block * uses the bus 
address as the key. */ -struct xgi_pcie_block_s *xgi_find_pcie_block(xgi_info_t * info, - unsigned long address) +struct xgi_pcie_block *xgi_find_pcie_block(struct xgi_info * info, + unsigned long address) { struct list_head *used_list; - xgi_pcie_block_t *block; + struct xgi_pcie_block *block; int i; used_list = xgi_pcie_heap->used_list.next; while (used_list != &xgi_pcie_heap->used_list) { - block = list_entry(used_list, struct xgi_pcie_block_s, list); + block = list_entry(used_list, struct xgi_pcie_block, list); if (block->bus_addr == address) { return block; @@ -946,7 +946,7 @@ struct xgi_pcie_block_s *xgi_find_pcie_block(xgi_info_t * info, * Returns CPU virtual address. Assumes the CPU VAddr is continuous in not * the same block */ -void *xgi_find_pcie_virt(xgi_info_t * info, unsigned long address) +void *xgi_find_pcie_virt(struct xgi_info * info, unsigned long address) { struct list_head *used_list = xgi_pcie_heap->used_list.next; const unsigned long offset_in_page = address & (PAGE_SIZE - 1); @@ -956,8 +956,8 @@ void *xgi_find_pcie_virt(xgi_info_t * info, unsigned long address) used_list, address, PAGE_SIZE - 1, offset_in_page); while (used_list != &xgi_pcie_heap->used_list) { - xgi_pcie_block_t *block = - list_entry(used_list, struct xgi_pcie_block_s, list); + struct xgi_pcie_block *block = + list_entry(used_list, struct xgi_pcie_block, list); XGI_INFO("block = 0x%p (hw_addr = 0x%lx, size=%lu)\n", block, block->hw_addr, block->size); @@ -987,19 +987,19 @@ void *xgi_find_pcie_virt(xgi_info_t * info, unsigned long address) return NULL; } -void xgi_read_pcie_mem(xgi_info_t * info, xgi_mem_req_t * req) +void xgi_read_pcie_mem(struct xgi_info * info, struct xgi_mem_req * req) { } -void xgi_write_pcie_mem(xgi_info_t * info, xgi_mem_req_t * req) +void xgi_write_pcie_mem(struct xgi_info * info, struct xgi_mem_req * req) { } /* address -- GE hw address */ -void xgi_test_rwinkernel(xgi_info_t * info, unsigned long address) +void xgi_test_rwinkernel(struct xgi_info * info, unsigned long address) { unsigned long *virtaddr = 0; if (address == 0) { diff --git a/linux-core/xgi_pcie.h b/linux-core/xgi_pcie.h index 6e8e45b9..b66d6a28 100644 --- a/linux-core/xgi_pcie.h +++ b/linux-core/xgi_pcie.h @@ -33,15 +33,15 @@ #define XGI_PCIE_ALLOC_MAX_ORDER 1 /* 8K in Kernel 2.4.* */ #endif -typedef struct xgi_page_block_s { - struct xgi_page_block_s *next; +struct xgi_page_block { + struct xgi_page_block *next; unsigned long phys_addr; unsigned long virt_addr; unsigned long page_count; unsigned long page_order; -} xgi_page_block_t; +}; -typedef struct xgi_pcie_block_s { +struct xgi_pcie_block { struct list_head list; unsigned long offset; /* block's offset in pcie memory, begin from 0 */ unsigned long size; /* The block size. 
*/ @@ -50,19 +50,19 @@ typedef struct xgi_pcie_block_s { unsigned long page_count; unsigned long page_order; - xgi_page_block_t *page_block; - xgi_pte_t *page_table; /* list of physical pages allocated */ + struct xgi_page_block *page_block; + struct xgi_pte *page_table; /* list of physical pages allocated */ atomic_t use_count; enum PcieOwner owner; unsigned long processID; -} xgi_pcie_block_t; +}; -typedef struct xgi_pcie_heap_s { +struct xgi_pcie_heap { struct list_head free_list; struct list_head used_list; struct list_head sort_list; unsigned long max_freesize; -} xgi_pcie_heap_t; +}; #endif diff --git a/linux-core/xgi_regs.h b/linux-core/xgi_regs.h index 487a7e15..0e54e7d8 100644 --- a/linux-core/xgi_regs.h +++ b/linux-core/xgi_regs.h @@ -50,25 +50,25 @@ #endif /* Hardware access functions */ -static inline void OUT3C5B(xgi_info_t * info, u8 index, u8 data) +static inline void OUT3C5B(struct xgi_info * info, u8 index, u8 data) { OUTB(0x3C4, index); OUTB(0x3C5, data); } -static inline void OUT3X5B(xgi_info_t * info, u8 index, u8 data) +static inline void OUT3X5B(struct xgi_info * info, u8 index, u8 data) { OUTB(0x3D4, index); OUTB(0x3D5, data); } -static inline void OUT3CFB(xgi_info_t * info, u8 index, u8 data) +static inline void OUT3CFB(struct xgi_info * info, u8 index, u8 data) { OUTB(0x3CE, index); OUTB(0x3CF, data); } -static inline u8 IN3C5B(xgi_info_t * info, u8 index) +static inline u8 IN3C5B(struct xgi_info * info, u8 index) { volatile u8 data = 0; OUTB(0x3C4, index); @@ -76,7 +76,7 @@ static inline u8 IN3C5B(xgi_info_t * info, u8 index) return data; } -static inline u8 IN3X5B(xgi_info_t * info, u8 index) +static inline u8 IN3X5B(struct xgi_info * info, u8 index) { volatile u8 data = 0; OUTB(0x3D4, index); @@ -84,7 +84,7 @@ static inline u8 IN3X5B(xgi_info_t * info, u8 index) return data; } -static inline u8 IN3CFB(xgi_info_t * info, u8 index) +static inline u8 IN3CFB(struct xgi_info * info, u8 index) { volatile u8 data = 0; OUTB(0x3CE, index); @@ -92,25 +92,25 @@ static inline u8 IN3CFB(xgi_info_t * info, u8 index) return data; } -static inline void OUT3C5W(xgi_info_t * info, u8 index, u16 data) +static inline void OUT3C5W(struct xgi_info * info, u8 index, u16 data) { OUTB(0x3C4, index); OUTB(0x3C5, data); } -static inline void OUT3X5W(xgi_info_t * info, u8 index, u16 data) +static inline void OUT3X5W(struct xgi_info * info, u8 index, u16 data) { OUTB(0x3D4, index); OUTB(0x3D5, data); } -static inline void OUT3CFW(xgi_info_t * info, u8 index, u8 data) +static inline void OUT3CFW(struct xgi_info * info, u8 index, u8 data) { OUTB(0x3CE, index); OUTB(0x3CF, data); } -static inline u8 IN3C5W(xgi_info_t * info, u8 index) +static inline u8 IN3C5W(struct xgi_info * info, u8 index) { volatile u8 data = 0; OUTB(0x3C4, index); @@ -118,7 +118,7 @@ static inline u8 IN3C5W(xgi_info_t * info, u8 index) return data; } -static inline u8 IN3X5W(xgi_info_t * info, u8 index) +static inline u8 IN3X5W(struct xgi_info * info, u8 index) { volatile u8 data = 0; OUTB(0x3D4, index); @@ -126,7 +126,7 @@ static inline u8 IN3X5W(xgi_info_t * info, u8 index) return data; } -static inline u8 IN3CFW(xgi_info_t * info, u8 index) +static inline u8 IN3CFW(struct xgi_info * info, u8 index) { volatile u8 data = 0; OUTB(0x3CE, index); @@ -134,14 +134,14 @@ static inline u8 IN3CFW(xgi_info_t * info, u8 index) return data; } -static inline u8 readAttr(xgi_info_t * info, u8 index) +static inline u8 readAttr(struct xgi_info * info, u8 index) { INB(0x3DA); /* flip-flop to index */ OUTB(0x3C0, index); return 
INB(0x3C1); } -static inline void writeAttr(xgi_info_t * info, u8 index, u8 value) +static inline void writeAttr(struct xgi_info * info, u8 index, u8 value) { INB(0x3DA); /* flip-flop to index */ OUTB(0x3C0, index); @@ -151,7 +151,7 @@ static inline void writeAttr(xgi_info_t * info, u8 index, u8 value) /* * Graphic engine register (2d/3d) acessing interface */ -static inline void WriteRegDWord(xgi_info_t * info, u32 addr, u32 data) +static inline void WriteRegDWord(struct xgi_info * info, u32 addr, u32 data) { /* Jong 05/25/2006 */ XGI_INFO("Jong-WriteRegDWord()-Begin \n"); @@ -165,31 +165,31 @@ static inline void WriteRegDWord(xgi_info_t * info, u32 addr, u32 data) XGI_INFO("Jong-WriteRegDWord()-End \n"); } -static inline void WriteRegWord(xgi_info_t * info, u32 addr, u16 data) +static inline void WriteRegWord(struct xgi_info * info, u32 addr, u16 data) { *(volatile u16 *)(info->mmio.vbase + addr) = (data); } -static inline void WriteRegByte(xgi_info_t * info, u32 addr, u8 data) +static inline void WriteRegByte(struct xgi_info * info, u32 addr, u8 data) { *(volatile u8 *)(info->mmio.vbase + addr) = (data); } -static inline u32 ReadRegDWord(xgi_info_t * info, u32 addr) +static inline u32 ReadRegDWord(struct xgi_info * info, u32 addr) { volatile u32 data; data = *(volatile u32 *)(info->mmio.vbase + addr); return data; } -static inline u16 ReadRegWord(xgi_info_t * info, u32 addr) +static inline u16 ReadRegWord(struct xgi_info * info, u32 addr) { volatile u16 data; data = *(volatile u16 *)(info->mmio.vbase + addr); return data; } -static inline u8 ReadRegByte(xgi_info_t * info, u32 addr) +static inline u8 ReadRegByte(struct xgi_info * info, u32 addr) { volatile u8 data; data = *(volatile u8 *)(info->mmio.vbase + addr); @@ -197,25 +197,25 @@ static inline u8 ReadRegByte(xgi_info_t * info, u32 addr) } #if 0 -extern void OUT3C5B(xgi_info_t * info, u8 index, u8 data); -extern void OUT3X5B(xgi_info_t * info, u8 index, u8 data); -extern void OUT3CFB(xgi_info_t * info, u8 index, u8 data); -extern u8 IN3C5B(xgi_info_t * info, u8 index); -extern u8 IN3X5B(xgi_info_t * info, u8 index); -extern u8 IN3CFB(xgi_info_t * info, u8 index); -extern void OUT3C5W(xgi_info_t * info, u8 index, u8 data); -extern void OUT3X5W(xgi_info_t * info, u8 index, u8 data); -extern void OUT3CFW(xgi_info_t * info, u8 index, u8 data); -extern u8 IN3C5W(xgi_info_t * info, u8 index); -extern u8 IN3X5W(xgi_info_t * info, u8 index); -extern u8 IN3CFW(xgi_info_t * info, u8 index); +extern void OUT3C5B(struct xgi_info * info, u8 index, u8 data); +extern void OUT3X5B(struct xgi_info * info, u8 index, u8 data); +extern void OUT3CFB(struct xgi_info * info, u8 index, u8 data); +extern u8 IN3C5B(struct xgi_info * info, u8 index); +extern u8 IN3X5B(struct xgi_info * info, u8 index); +extern u8 IN3CFB(struct xgi_info * info, u8 index); +extern void OUT3C5W(struct xgi_info * info, u8 index, u8 data); +extern void OUT3X5W(struct xgi_info * info, u8 index, u8 data); +extern void OUT3CFW(struct xgi_info * info, u8 index, u8 data); +extern u8 IN3C5W(struct xgi_info * info, u8 index); +extern u8 IN3X5W(struct xgi_info * info, u8 index); +extern u8 IN3CFW(struct xgi_info * info, u8 index); -extern void WriteRegDWord(xgi_info_t * info, u32 addr, u32 data); -extern void WriteRegWord(xgi_info_t * info, u32 addr, u16 data); -extern void WriteRegByte(xgi_info_t * info, u32 addr, u8 data); -extern u32 ReadRegDWord(xgi_info_t * info, u32 addr); -extern u16 ReadRegWord(xgi_info_t * info, u32 addr); -extern u8 ReadRegByte(xgi_info_t * info, u32 addr); 
+extern void WriteRegDWord(struct xgi_info * info, u32 addr, u32 data); +extern void WriteRegWord(struct xgi_info * info, u32 addr, u16 data); +extern void WriteRegByte(struct xgi_info * info, u32 addr, u8 data); +extern u32 ReadRegDWord(struct xgi_info * info, u32 addr); +extern u16 ReadRegWord(struct xgi_info * info, u32 addr); +extern u8 ReadRegByte(struct xgi_info * info, u32 addr); extern void EnableProtect(); extern void DisableProtect(); @@ -262,19 +262,19 @@ extern void DisableProtect(); #define wReadReg(addr) ReadRegWord(info, addr) #define bReadReg(addr) ReadRegByte(info, addr) -static inline void xgi_protect_all(xgi_info_t * info) +static inline void xgi_protect_all(struct xgi_info * info) { OUTB(0x3C4, 0x11); OUTB(0x3C5, 0x92); } -static inline void xgi_unprotect_all(xgi_info_t * info) +static inline void xgi_unprotect_all(struct xgi_info * info) { OUTB(0x3C4, 0x11); OUTB(0x3C5, 0x92); } -static inline void xgi_enable_mmio(xgi_info_t * info) +static inline void xgi_enable_mmio(struct xgi_info * info) { u8 protect = 0; @@ -294,7 +294,7 @@ static inline void xgi_enable_mmio(xgi_info_t * info) OUTB(0x3C5, protect); } -static inline void xgi_disable_mmio(xgi_info_t * info) +static inline void xgi_disable_mmio(struct xgi_info * info) { u8 protect = 0; @@ -312,7 +312,7 @@ static inline void xgi_disable_mmio(xgi_info_t * info) outb(protect, 0x3C5); } -static inline void xgi_enable_ge(xgi_info_t * info) +static inline void xgi_enable_ge(struct xgi_info * info) { unsigned char bOld3cf2a = 0; int wait = 0; @@ -350,7 +350,7 @@ static inline void xgi_enable_ge(xgi_info_t * info) bOut3cf(0x2a, bOld3cf2a); } -static inline void xgi_disable_ge(xgi_info_t * info) +static inline void xgi_disable_ge(struct xgi_info * info) { int wait = 0; @@ -378,25 +378,25 @@ static inline void xgi_disable_ge(xgi_info_t * info) bOut3x5(0x36, 0); } -static inline void xgi_enable_dvi_interrupt(xgi_info_t * info) +static inline void xgi_enable_dvi_interrupt(struct xgi_info * info) { Out3cf(0x39, In3cf(0x39) & ~0x01); //Set 3cf.39 bit 0 to 0 Out3cf(0x39, In3cf(0x39) | 0x01); //Set 3cf.39 bit 0 to 1 Out3cf(0x39, In3cf(0x39) | 0x02); } -static inline void xgi_disable_dvi_interrupt(xgi_info_t * info) +static inline void xgi_disable_dvi_interrupt(struct xgi_info * info) { Out3cf(0x39, In3cf(0x39) & ~0x02); } -static inline void xgi_enable_crt1_interrupt(xgi_info_t * info) +static inline void xgi_enable_crt1_interrupt(struct xgi_info * info) { Out3cf(0x3d, In3cf(0x3d) | 0x04); Out3cf(0x3d, In3cf(0x3d) & ~0x04); Out3cf(0x3d, In3cf(0x3d) | 0x08); } -static inline void xgi_disable_crt1_interrupt(xgi_info_t * info) +static inline void xgi_disable_crt1_interrupt(struct xgi_info * info) { Out3cf(0x3d, In3cf(0x3d) & ~0x08); } From ba3173fa39e236eee9ce9abb60f1151492378811 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Fri, 29 Jun 2007 16:35:36 -0700 Subject: [PATCH 062/437] Eliminate unused integer and float typedefs. 
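The hunks above finish converting the driver-private `*_s`/`*_t` typedef pairs (xgi_info_t, xgi_pcie_block_t and friends) to plain struct tags, which is the usual kernel style. A rough sketch of the mechanical pattern, using a made-up example_node type rather than any real XGI structure:

/* Before: a private typedef hides the struct keyword. */
typedef struct example_node_s {
	struct example_node_s *next;
	unsigned long size;
} example_node_t;

static void example_use_old(example_node_t *node);

/* After: one plain tag; every user spells out "struct example_node". */
struct example_node {
	struct example_node *next;
	unsigned long size;
};

static void example_use_new(struct example_node *node);

Only the spelling at the call sites changes; the layout and behaviour of the structures are untouched.
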
--- linux-core/xgi_types.h | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/linux-core/xgi_types.h b/linux-core/xgi_types.h index 65ec498b..89804667 100644 --- a/linux-core/xgi_types.h +++ b/linux-core/xgi_types.h @@ -33,27 +33,17 @@ * Typedefs * ***************************************************************************/ -typedef unsigned char V8; /* "void": enumerated or multiple fields */ -typedef unsigned short V16; /* "void": enumerated or multiple fields */ typedef unsigned char U8; /* 0 to 255 */ typedef unsigned short U16; /* 0 to 65535 */ -typedef signed char S8; /* -128 to 127 */ -typedef signed short S16; /* -32768 to 32767 */ -typedef float F32; /* IEEE Single Precision (S1E8M23) */ -typedef double F64; /* IEEE Double Precision (S1E11M52) */ typedef unsigned long BOOL; /* * mainly for 64-bit linux, where long is 64 bits * and win9x, where int is 16 bit. */ #if defined(vxworks) -typedef unsigned int V32; /* "void": enumerated or multiple fields */ typedef unsigned int U32; /* 0 to 4294967295 */ -typedef signed int S32; /* -2147483648 to 2147483647 */ #else -typedef unsigned long V32; /* "void": enumerated or multiple fields */ typedef unsigned long U32; /* 0 to 4294967295 */ -typedef signed long S32; /* -2147483648 to 2147483647 */ #endif #ifndef TRUE From 5da2a3c2d488983efed6f8433a304096e2bb75e8 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Fri, 29 Jun 2007 16:37:01 -0700 Subject: [PATCH 063/437] Replace BOOL with bool. --- linux-core/xgi_misc.c | 14 +++++++------- linux-core/xgi_misc.h | 6 +++--- linux-core/xgi_types.h | 1 - 3 files changed, 10 insertions(+), 11 deletions(-) diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index 68c5ca20..280e69f1 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -111,13 +111,13 @@ void xgi_sarea_info(struct xgi_info * info, struct xgi_sarea_info * req) static U32 s_invalid_begin = 0; -BOOL xgi_ge_irq_handler(struct xgi_info * info) +bool xgi_ge_irq_handler(struct xgi_info * info) { volatile U8 *mmio_vbase = info->mmio.vbase; volatile U32 *ge_3d_status = (volatile U32 *)(mmio_vbase + 0x2800); U32 int_status = ge_3d_status[4]; // interrupt status U32 auto_reset_count = 0; - BOOL is_support_auto_reset = FALSE; + bool is_support_auto_reset = FALSE; // Check GE on/off if (0 == (0xffffc0f0 & int_status)) { @@ -128,7 +128,7 @@ BOOL xgi_ge_irq_handler(struct xgi_info * info) ge_3d_status[0x04] = int_status | 0x04000000; if (TRUE == is_support_auto_reset) { - BOOL is_wrong_signal = FALSE; + bool is_wrong_signal = FALSE; static U32 last_int_tick_low, last_int_tick_high; static U32 new_int_tick_low; @@ -287,9 +287,9 @@ BOOL xgi_ge_irq_handler(struct xgi_info * info) return FALSE; } -BOOL xgi_crt_irq_handler(struct xgi_info * info) +bool xgi_crt_irq_handler(struct xgi_info * info) { - BOOL ret = FALSE; + bool ret = FALSE; U8 save_3ce = bReadReg(0x3ce); if (bIn3cf(0x37) & 0x01) // CRT1 interrupt just happened @@ -311,9 +311,9 @@ BOOL xgi_crt_irq_handler(struct xgi_info * info) return (ret); } -BOOL xgi_dvi_irq_handler(struct xgi_info * info) +bool xgi_dvi_irq_handler(struct xgi_info * info) { - BOOL ret = FALSE; + bool ret = FALSE; U8 save_3ce = bReadReg(0x3ce); if (bIn3cf(0x38) & 0x20) // DVI interrupt just happened diff --git a/linux-core/xgi_misc.h b/linux-core/xgi_misc.h index 0ebbe7e8..4b944c4c 100644 --- a/linux-core/xgi_misc.h +++ b/linux-core/xgi_misc.h @@ -39,9 +39,9 @@ extern void xgi_sarea_info(struct xgi_info * info, struct xgi_sarea_info * req); extern int xgi_get_cpu_id(struct cpu_info *arg); extern 
void xgi_restore_registers(struct xgi_info * info); -extern BOOL xgi_ge_irq_handler(struct xgi_info * info); -extern BOOL xgi_crt_irq_handler(struct xgi_info * info); -extern BOOL xgi_dvi_irq_handler(struct xgi_info * info); +extern bool xgi_ge_irq_handler(struct xgi_info * info); +extern bool xgi_crt_irq_handler(struct xgi_info * info); +extern bool xgi_dvi_irq_handler(struct xgi_info * info); extern void xgi_waitfor_pci_idle(struct xgi_info * info); #endif diff --git a/linux-core/xgi_types.h b/linux-core/xgi_types.h index 89804667..6d941abe 100644 --- a/linux-core/xgi_types.h +++ b/linux-core/xgi_types.h @@ -35,7 +35,6 @@ typedef unsigned char U8; /* 0 to 255 */ typedef unsigned short U16; /* 0 to 65535 */ -typedef unsigned long BOOL; /* * mainly for 64-bit linux, where long is 64 bits * and win9x, where int is 16 bit. From ec7730e5ba6ac1d60f90af483b3966d863cb5400 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Fri, 29 Jun 2007 16:37:39 -0700 Subject: [PATCH 064/437] Eliminate unnecessary defines of TRUE and FALSE. --- linux-core/xgi_types.h | 8 -------- 1 file changed, 8 deletions(-) diff --git a/linux-core/xgi_types.h b/linux-core/xgi_types.h index 6d941abe..724f5f86 100644 --- a/linux-core/xgi_types.h +++ b/linux-core/xgi_types.h @@ -45,12 +45,4 @@ typedef unsigned int U32; /* 0 to 4294967295 */ typedef unsigned long U32; /* 0 to 4294967295 */ #endif -#ifndef TRUE -#define TRUE 1UL -#endif - -#ifndef FALSE -#define FALSE 0UL -#endif - #endif From 406ded3816300f6b3e945c932c44350b22f43bd9 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Fri, 29 Jun 2007 16:41:32 -0700 Subject: [PATCH 065/437] Replace U(8|16) with u(8|16). --- linux-core/xgi_drv.h | 4 ++-- linux-core/xgi_misc.c | 38 +++++++++++++++++++------------------- linux-core/xgi_types.h | 2 -- 3 files changed, 21 insertions(+), 23 deletions(-) diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 32ee5e81..8431eb16 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -96,7 +96,7 @@ struct xgi_aperture { U32 base; // pcie base is different from fb base U32 size; - U8 *vbase; + u8 *vbase; }; struct xgi_screen_info { @@ -120,7 +120,7 @@ struct xgi_info { int slot; int vendor_id; U32 device_id; - U8 revision_id; + u8 revision_id; /* physical characteristics */ struct xgi_aperture mmio; diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index 280e69f1..96ad12ee 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -113,7 +113,7 @@ static U32 s_invalid_begin = 0; bool xgi_ge_irq_handler(struct xgi_info * info) { - volatile U8 *mmio_vbase = info->mmio.vbase; + volatile u8 *mmio_vbase = info->mmio.vbase; volatile U32 *ge_3d_status = (volatile U32 *)(mmio_vbase + 0x2800); U32 int_status = ge_3d_status[4]; // interrupt status U32 auto_reset_count = 0; @@ -135,11 +135,11 @@ bool xgi_ge_irq_handler(struct xgi_info * info) static U32 continoue_int_count = 0; // OE II is busy. 
while (old_ge_status & 0x001c0000) { - U16 check; + u16 check; // Check Read back status *(mmio_vbase + 0x235c) = 0x80; check = - *((volatile U16 *)(mmio_vbase + + *((volatile u16 *)(mmio_vbase + 0x2360)); if ((check & 0x3f) != ((check & 0x3f00) >> 8)) { @@ -149,7 +149,7 @@ bool xgi_ge_irq_handler(struct xgi_info * info) // Check RO channel *(mmio_vbase + 0x235c) = 0x83; check = - *((volatile U16 *)(mmio_vbase + + *((volatile u16 *)(mmio_vbase + 0x2360)); if ((check & 0x0f) != ((check & 0xf0) >> 4)) { @@ -159,7 +159,7 @@ bool xgi_ge_irq_handler(struct xgi_info * info) // Check RW channel *(mmio_vbase + 0x235c) = 0x88; check = - *((volatile U16 *)(mmio_vbase + + *((volatile u16 *)(mmio_vbase + 0x2360)); if ((check & 0x0f) != ((check & 0xf0) >> 4)) { @@ -169,7 +169,7 @@ bool xgi_ge_irq_handler(struct xgi_info * info) // Check RO channel outstanding *(mmio_vbase + 0x235c) = 0x8f; check = - *((volatile U16 *)(mmio_vbase + + *((volatile u16 *)(mmio_vbase + 0x2360)); if (0 != (check & 0x3ff)) { is_wrong_signal = TRUE; @@ -178,7 +178,7 @@ bool xgi_ge_irq_handler(struct xgi_info * info) // Check RW channel outstanding *(mmio_vbase + 0x235c) = 0x90; check = - *((volatile U16 *)(mmio_vbase + + *((volatile u16 *)(mmio_vbase + 0x2360)); if (0 != (check & 0x3ff)) { is_wrong_signal = TRUE; @@ -216,10 +216,10 @@ bool xgi_ge_irq_handler(struct xgi_info * info) ((--time_out) & 0xfff)) ; if (0 == time_out) { - U8 old_3ce; - U8 old_3cf; - U8 old_index; - U8 old_36; + u8 old_3ce; + u8 old_3cf; + u8 old_index; + u8 old_36; XGI_INFO ("Can not reset back 0x%lx!\n", @@ -290,12 +290,12 @@ bool xgi_ge_irq_handler(struct xgi_info * info) bool xgi_crt_irq_handler(struct xgi_info * info) { bool ret = FALSE; - U8 save_3ce = bReadReg(0x3ce); + u8 save_3ce = bReadReg(0x3ce); if (bIn3cf(0x37) & 0x01) // CRT1 interrupt just happened { - U8 op3cf_3d; - U8 op3cf_37; + u8 op3cf_3d; + u8 op3cf_37; // What happened? op3cf_37 = bIn3cf(0x37); @@ -314,14 +314,14 @@ bool xgi_crt_irq_handler(struct xgi_info * info) bool xgi_dvi_irq_handler(struct xgi_info * info) { bool ret = FALSE; - U8 save_3ce = bReadReg(0x3ce); + u8 save_3ce = bReadReg(0x3ce); if (bIn3cf(0x38) & 0x20) // DVI interrupt just happened { - U8 op3cf_39; - U8 op3cf_37; - U8 op3x5_5a; - U8 save_3x4 = bReadReg(0x3d4);; + u8 op3cf_39; + u8 op3cf_37; + u8 op3x5_5a; + u8 save_3x4 = bReadReg(0x3d4);; // What happened? op3cf_37 = bIn3cf(0x37); diff --git a/linux-core/xgi_types.h b/linux-core/xgi_types.h index 724f5f86..f9a3360c 100644 --- a/linux-core/xgi_types.h +++ b/linux-core/xgi_types.h @@ -33,8 +33,6 @@ * Typedefs * ***************************************************************************/ -typedef unsigned char U8; /* 0 to 255 */ -typedef unsigned short U16; /* 0 to 65535 */ /* * mainly for 64-bit linux, where long is 64 bits * and win9x, where int is 16 bit. From 37733786582d04f072178949cc9e31225abf5577 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Fri, 29 Jun 2007 20:49:21 -0700 Subject: [PATCH 066/437] Delete unused arrays s_emptyBegin and s_flush2D. 
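Patches 062 through 065 above retire the driver's private scalar typedefs in favour of the kernel's own types: the unused V*/S*/F* names are deleted outright, BOOL becomes bool, and U8/U16 become u8/u16 (U32 follows later in the series). A minimal sketch of the resulting mapping, assuming a hypothetical example_state structure rather than the real xgi_info:

#include <linux/types.h>

struct example_state {
	u8   revision;   /* was U8  (0 to 255)          */
	u16  readback;   /* was U16 (0 to 65535)        */
	bool busy;       /* was BOOL (an unsigned long) */
};

Fixed-width u8/u16/u32 are worth keeping only where the width is dictated by hardware; fields that are just flags or counts can use bool or unsigned int, which is exactly what the later patches in this series do.
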
--- linux-core/xgi_cmdlist.c | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index 2cdf714f..b67a40f6 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -33,20 +33,6 @@ #include "xgi_misc.h" #include "xgi_cmdlist.h" -U32 s_emptyBegin[AGPCMDLIST_BEGIN_SIZE] = { - 0x10000000, // 3D Type Begin, Invalid - 0x80000004, // Length = 4; - 0x00000000, - 0x00000000 -}; - -U32 s_flush2D[AGPCMDLIST_FLUSH_CMD_LEN] = { - FLUSH_2D, - FLUSH_2D, - FLUSH_2D, - FLUSH_2D -}; - struct xgi_cmdring_info s_cmdring; static void addFlush2D(struct xgi_info * info); From e206c4c59da0e81ed65796d543c311fc7e30b19a Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Fri, 29 Jun 2007 21:00:50 -0700 Subject: [PATCH 067/437] Convert some PCI-e GART related variable to generic types. A few of the PCI-e GART related fields in struct xgi_info were hardcoded to u32. None of them need to be. Convert them to either unsigned int or bool. --- linux-core/xgi_drv.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 8431eb16..3cb6dc7f 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -131,10 +131,10 @@ struct xgi_info { /* look up table parameters */ U32 *lut_base; - U32 lutPageSize; - U32 lutPageOrder; - U32 isLUTInLFB; - U32 sdfbPageSize; + unsigned int lutPageSize; + unsigned int lutPageOrder; + bool isLUTInLFB; + unsigned int sdfbPageSize; U32 pcie_config; U32 pcie_status; From 4c4780bc8e5bf01b2b920c6b8de4ddbd0256c81f Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Fri, 29 Jun 2007 21:05:16 -0700 Subject: [PATCH 068/437] Stop-gap fix in xgi_submit_cmdlist Comment in the code explains it. Basically, I put an if-statement around a block of code to prevent a NULL pointer dereference that should never happen in the first place. Eventually, this will need to come out. --- linux-core/xgi_cmdlist.c | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index b67a40f6..f8aacea2 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -198,17 +198,24 @@ void xgi_submit_cmdlist(struct xgi_info * info, struct xgi_cmd_info * pCmdInfo) (U32 *) xgi_find_pcie_virt(info, s_cmdring._lastBatchStartAddr); - lastBatchVirtAddr[1] = - BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize; - lastBatchVirtAddr[2] = pCmdInfo->_firstBeginAddr >> 4; - lastBatchVirtAddr[3] = 0; - //barrier(); - lastBatchVirtAddr[0] = - (beginPort << 22) + (BEGIN_VALID_MASK) + - (0xffff & pCmdInfo->_curDebugID); + /* lastBatchVirtAddr should *never* be NULL. However, there + * are currently some bugs that cause this to happen. The + * if-statement here prevents some fatal (i.e., hard lock + * requiring the reset button) oopses. 
+ */ + if (lastBatchVirtAddr) { + lastBatchVirtAddr[1] = + BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize; + lastBatchVirtAddr[2] = pCmdInfo->_firstBeginAddr >> 4; + lastBatchVirtAddr[3] = 0; + //barrier(); + lastBatchVirtAddr[0] = + (beginPort << 22) + (BEGIN_VALID_MASK) + + (0xffff & pCmdInfo->_curDebugID); - /* Jong 06/12/2006; system hang; marked for test */ - triggerHWCommandList(info, pCmdInfo->_beginCount); + /* Jong 06/12/2006; system hang; marked for test */ + triggerHWCommandList(info, pCmdInfo->_beginCount); + } XGI_INFO ("Jong-xgi_submit_cmdlist-s_cmdring._lastBatchStartAddr != 0 - End\n"); From 4403540776c8ed3c2e28f26b6dacaab0b9e40e05 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Fri, 29 Jun 2007 21:15:33 -0700 Subject: [PATCH 069/437] Clean up xgi_pcie_heap_check The whole purpose of xgi_pcie_heap_check is to log information about entries on the used_list. If XGI_DEBUG is not set, it doesn't print anything. Therefore we can #ifdef the whole function body. Convert open-code list iteration to use list_for_each_entry. --- linux-core/xgi_pcie.c | 44 ++++++++++++++++++++----------------------- 1 file changed, 20 insertions(+), 24 deletions(-) diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index a81dbe8b..dd758013 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -347,35 +347,31 @@ int xgi_pcie_heap_init(struct xgi_info * info) void xgi_pcie_heap_check(void) { - struct list_head *useList, *temp; +#ifdef XGI_DEBUG struct xgi_pcie_block *block; unsigned int ownerIndex; -#ifdef XGI_DEBUG - char *ownerStr[6] = + static const char *const ownerStr[6] = { "2D", "3D", "3D_CMD", "3D_SCR", "3D_TEX", "ELSE" }; -#endif - - if (xgi_pcie_heap) { - useList = &xgi_pcie_heap->used_list; - temp = useList->next; - XGI_INFO("pcie freemax = 0x%lx\n", xgi_pcie_heap->max_freesize); - while (temp != useList) { - block = list_entry(temp, struct xgi_pcie_block, list); - if (block->owner == PCIE_2D) - ownerIndex = 0; - else if (block->owner > PCIE_3D_TEXTURE - || block->owner < PCIE_2D - || block->owner < PCIE_3D) - ownerIndex = 5; - else - ownerIndex = block->owner - PCIE_3D + 1; - XGI_INFO - ("Allocated by %s, block->offset: 0x%lx block->size: 0x%lx \n", - ownerStr[ownerIndex], block->offset, block->size); - temp = temp->next; - } + if (!xgi_pcie_heap) { + return; } + + XGI_INFO("pcie freemax = 0x%lx\n", xgi_pcie_heap->max_freesize); + list_for_each_entry(block, &xgi_pcie_heap->used_list, list) { + if (block->owner == PCIE_2D) + ownerIndex = 0; + else if (block->owner > PCIE_3D_TEXTURE + || block->owner < PCIE_2D + || block->owner < PCIE_3D) + ownerIndex = 5; + else + ownerIndex = block->owner - PCIE_3D + 1; + + XGI_INFO("Allocated by %s, block offset: 0x%lx, size: 0x%lx \n", + ownerStr[ownerIndex], block->offset, block->size); + } +#endif } void xgi_pcie_heap_cleanup(struct xgi_info * info) From 32584d94e6ef7c0b463794a40541eb8183c7fb02 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Fri, 29 Jun 2007 21:35:27 -0700 Subject: [PATCH 070/437] Convert open coded list iterators to either list_for_each_entry or list_for_each_entry_safe --- linux-core/xgi_fb.c | 43 ++++++------------------- linux-core/xgi_misc.c | 39 ++++++++++------------ linux-core/xgi_pcie.c | 75 +++++++++++-------------------------------- 3 files changed, 45 insertions(+), 112 deletions(-) diff --git a/linux-core/xgi_fb.c b/linux-core/xgi_fb.c index 56cc589b..32fde5ab 100644 --- a/linux-core/xgi_fb.c +++ b/linux-core/xgi_fb.c @@ -96,7 +96,6 @@ void xgi_fb_free(struct xgi_info * info, unsigned long bus_addr) 
unsigned long offset = bus_addr - info->fb.base; struct xgi_mem_pid *mempid_block; struct xgi_mem_pid *mempid_freeblock = NULL; - struct list_head *mempid_list; if (offset < 0) { XGI_INFO("free onscreen frame buffer successfully !\n"); @@ -111,16 +110,12 @@ void xgi_fb_free(struct xgi_info * info, unsigned long bus_addr) } /* manage mempid */ - mempid_list = xgi_mempid_list.next; - while (mempid_list != &xgi_mempid_list) { - mempid_block = - list_entry(mempid_list, struct xgi_mem_pid, list); + list_for_each_entry(mempid_block, &xgi_mempid_list, list) { if (mempid_block->location == LOCAL && mempid_block->bus_addr == bus_addr) { mempid_freeblock = mempid_block; break; } - mempid_list = mempid_list->next; } if (mempid_freeblock) { list_del(&mempid_freeblock->list); @@ -192,20 +187,15 @@ int xgi_fb_heap_init(struct xgi_info * info) void xgi_fb_heap_cleanup(struct xgi_info * info) { - struct list_head *free_list, *temp; + struct list_head *free_list; struct xgi_mem_block *block; + struct xgi_mem_block *next; int i; if (xgi_fb_heap) { free_list = &xgi_fb_heap->free_list; for (i = 0; i < 3; i++, free_list++) { - temp = free_list->next; - while (temp != free_list) { - block = - list_entry(temp, struct xgi_mem_block, - list); - temp = temp->next; - + list_for_each_entry_safe(block, next, free_list, list) { XGI_INFO ("No. %d block->offset: 0x%lx block->size: 0x%lx \n", i, block->offset, block->size); @@ -334,7 +324,6 @@ static void xgi_mem_delete_node(struct xgi_mem_list * list, struct xgi_mem_block static struct xgi_mem_block *xgi_mem_alloc(struct xgi_info * info, unsigned long originalSize) { - struct list_head *free_list; struct xgi_mem_block *block, *free_block, *used_block; unsigned long size = (originalSize + PAGE_SIZE - 1) & PAGE_MASK; @@ -354,18 +343,14 @@ static struct xgi_mem_block *xgi_mem_alloc(struct xgi_info * info, return (NULL); } - free_list = xgi_fb_heap->free_list.next; - - while (free_list != &xgi_fb_heap->free_list) { + list_for_each_entry(block, &xgi_fb_heap->free_list, list) { XGI_INFO("free_list: 0x%px \n", free_list); - block = list_entry(free_list, struct xgi_mem_block, list); if (size <= block->size) { break; } - free_list = free_list->next; } - if (free_list == &xgi_fb_heap->free_list) { + if (&block->list == &xgi_fb_heap->free_list) { XGI_ERROR ("Can't allocate %ldk size from frame buffer memory !\n", size / 1024); @@ -408,23 +393,19 @@ static struct xgi_mem_block *xgi_mem_alloc(struct xgi_info * info, static struct xgi_mem_block *xgi_mem_free(struct xgi_info * info, unsigned long offset) { - struct list_head *free_list, *used_list; - struct xgi_mem_block *used_block = NULL, *block = NULL; + struct xgi_mem_block *used_block = NULL, *block; struct xgi_mem_block *prev, *next; unsigned long upper; unsigned long lower; - used_list = xgi_fb_heap->used_list.next; - while (used_list != &xgi_fb_heap->used_list) { - block = list_entry(used_list, struct xgi_mem_block, list); + list_for_each_entry(block, &xgi_fb_heap->used_list, list) { if (block->offset == offset) { break; } - used_list = used_list->next; } - if (used_list == &xgi_fb_heap->used_list) { + if (&block->list == &xgi_fb_heap->used_list) { XGI_ERROR("can't find block: 0x%lx to free!\n", offset); return (NULL); } @@ -439,16 +420,12 @@ static struct xgi_mem_block *xgi_mem_free(struct xgi_info * info, unsigned long upper = used_block->offset + used_block->size; lower = used_block->offset; - free_list = xgi_fb_heap->free_list.next; - while (free_list != &xgi_fb_heap->free_list) { - block = list_entry(free_list, struct 
xgi_mem_block, list); - + list_for_each_entry(block, &xgi_fb_heap->free_list, list) { if (block->offset == upper) { next = block; } else if ((block->offset + block->size) == lower) { prev = block; } - free_list = free_list->next; } XGI_INFO("next = 0x%p, prev = 0x%p\n", next, prev); diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index 96ad12ee..eecd717b 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -556,50 +556,45 @@ int xgi_get_cpu_id(struct cpu_info *arg) extern struct list_head xgi_mempid_list; void xgi_mem_collect(struct xgi_info * info, unsigned int *pcnt) { - struct xgi_mem_pid *mempid_block; - struct list_head *mempid_list; + struct xgi_mem_pid *block; + struct xgi_mem_pid *next; struct task_struct *p, *find; unsigned int cnt = 0; - mempid_list = xgi_mempid_list.next; - - while (mempid_list != &xgi_mempid_list) { - mempid_block = - list_entry(mempid_list, struct xgi_mem_pid, list); - mempid_list = mempid_list->next; + list_for_each_entry_safe(block, next, &xgi_mempid_list, list) { find = NULL; XGI_SCAN_PROCESS(p) { - if (p->pid == mempid_block->pid) { + if (p->pid == block->pid) { XGI_INFO ("[!]Find active pid:%ld state:%ld location:%d addr:0x%lx! \n", - mempid_block->pid, p->state, - mempid_block->location, - mempid_block->bus_addr); + block->pid, p->state, + block->location, + block->bus_addr); find = p; - if (mempid_block->bus_addr == 0xFFFFFFFF) + if (block->bus_addr == 0xFFFFFFFF) ++cnt; break; } } if (!find) { - if (mempid_block->location == LOCAL) { + if (block->location == LOCAL) { XGI_INFO ("Memory ProcessID free fb and delete one block pid:%ld addr:0x%lx successfully! \n", - mempid_block->pid, mempid_block->bus_addr); - xgi_fb_free(info, mempid_block->bus_addr); - } else if (mempid_block->bus_addr != 0xFFFFFFFF) { + block->pid, block->bus_addr); + xgi_fb_free(info, block->bus_addr); + } else if (block->bus_addr != 0xFFFFFFFF) { XGI_INFO ("Memory ProcessID free pcie and delete one block pid:%ld addr:0x%lx successfully! \n", - mempid_block->pid, mempid_block->bus_addr); - xgi_pcie_free(info, mempid_block->bus_addr); + block->pid, block->bus_addr); + xgi_pcie_free(info, block->bus_addr); } else { /*only delete the memory block */ - list_del(&mempid_block->list); + list_del(&block->list); XGI_INFO ("Memory ProcessID delete one pcie block pid:%ld successfully! \n", - mempid_block->pid); - kfree(mempid_block); + block->pid); + kfree(block); } } } diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index dd758013..e451ebd5 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -376,8 +376,9 @@ void xgi_pcie_heap_check(void) void xgi_pcie_heap_cleanup(struct xgi_info * info) { - struct list_head *free_list, *temp; + struct list_head *free_list; struct xgi_pcie_block *block; + struct xgi_pcie_block *next; int j; xgi_pcie_lut_cleanup(info); @@ -386,23 +387,16 @@ void xgi_pcie_heap_cleanup(struct xgi_info * info) if (xgi_pcie_heap) { free_list = &xgi_pcie_heap->free_list; for (j = 0; j < 3; j++, free_list++) { - temp = free_list->next; - - while (temp != free_list) { - block = - list_entry(temp, struct xgi_pcie_block, - list); + list_for_each_entry_safe(block, next, free_list, list) { XGI_INFO - ("No. %d block->offset: 0x%lx block->size: 0x%lx \n", + ("No. %d block offset: 0x%lx size: 0x%lx\n", j, block->offset, block->size); xgi_pcie_block_stuff_free(block); block->bus_addr = 0; block->hw_addr = 0; - temp = temp->next; //XGI_INFO("No. 
%d free block: 0x%p \n", j, block); kmem_cache_free(xgi_pcie_cache_block, block); - block = NULL; } } @@ -421,7 +415,6 @@ static struct xgi_pcie_block *xgi_pcie_mem_alloc(struct xgi_info * info, unsigned long originalSize, enum PcieOwner owner) { - struct list_head *free_list; struct xgi_pcie_block *block, *used_block, *free_block; struct xgi_page_block *page_block, *prev_page_block; struct page *page; @@ -475,17 +468,13 @@ static struct xgi_pcie_block *xgi_pcie_mem_alloc(struct xgi_info * info, } /* Jong 05/30/2006; find next free list which has enough space */ - free_list = xgi_pcie_heap->free_list.next; - while (free_list != &xgi_pcie_heap->free_list) { - //XGI_INFO("free_list: 0x%px \n", free_list); - block = list_entry(free_list, struct xgi_pcie_block, list); + list_for_each_entry(block, &xgi_pcie_heap->free_list, list) { if (size <= block->size) { break; } - free_list = free_list->next; } - if (free_list == &xgi_pcie_heap->free_list) { + if (&block->list == &xgi_pcie_heap->free_list) { XGI_ERROR("Can't allocate %ldk size from PCIE memory !\n", size / 1024); return (NULL); @@ -696,21 +685,17 @@ static struct xgi_pcie_block *xgi_pcie_mem_alloc(struct xgi_info * info, static struct xgi_pcie_block *xgi_pcie_mem_free(struct xgi_info * info, unsigned long offset) { - struct list_head *free_list, *used_list; - struct xgi_pcie_block *used_block, *block = NULL; + struct xgi_pcie_block *used_block, *block; struct xgi_pcie_block *prev, *next; unsigned long upper, lower; - used_list = xgi_pcie_heap->used_list.next; - while (used_list != &xgi_pcie_heap->used_list) { - block = list_entry(used_list, struct xgi_pcie_block, list); + list_for_each_entry(block, &xgi_pcie_heap->used_list, list) { if (block->offset == offset) { break; } - used_list = used_list->next; } - if (used_list == &xgi_pcie_heap->used_list) { + if (&block->list == &xgi_pcie_heap->used_list) { XGI_ERROR("can't find block: 0x%lx to free!\n", offset); return (NULL); } @@ -730,16 +715,12 @@ static struct xgi_pcie_block *xgi_pcie_mem_free(struct xgi_info * info, upper = used_block->offset + used_block->size; lower = used_block->offset; - free_list = xgi_pcie_heap->free_list.next; - - while (free_list != &xgi_pcie_heap->free_list) { - block = list_entry(free_list, struct xgi_pcie_block, list); + list_for_each_entry(block, &xgi_pcie_heap->free_list, list) { if (block->offset == upper) { next = block; } else if ((block->offset + block->size) == lower) { prev = block; } - free_list = free_list->next; } XGI_INFO("next = 0x%p, prev = 0x%p\n", next, prev); @@ -839,7 +820,6 @@ void xgi_pcie_free(struct xgi_info * info, unsigned long bus_addr) unsigned long offset = bus_addr - info->pcie.base; struct xgi_mem_pid *mempid_block; struct xgi_mem_pid *mempid_freeblock = NULL; - struct list_head *mempid_list; char isvertex = 0; int processcnt; @@ -850,15 +830,12 @@ void xgi_pcie_free(struct xgi_info * info, unsigned long bus_addr) if (isvertex) { /*check is there any other process using vertex */ processcnt = 0; - mempid_list = xgi_mempid_list.next; - while (mempid_list != &xgi_mempid_list) { - mempid_block = - list_entry(mempid_list, struct xgi_mem_pid, list); + + list_for_each_entry(mempid_block, &xgi_mempid_list, list) { if (mempid_block->location == NON_LOCAL && mempid_block->bus_addr == 0xFFFFFFFF) { ++processcnt; } - mempid_list = mempid_list->next; } if (processcnt > 1) { return; @@ -877,17 +854,13 @@ void xgi_pcie_free(struct xgi_info * info, unsigned long bus_addr) xgi_pcie_vertex_block = NULL; /* manage mempid */ - mempid_list = 
xgi_mempid_list.next; - while (mempid_list != &xgi_mempid_list) { - mempid_block = - list_entry(mempid_list, struct xgi_mem_pid, list); + list_for_each_entry(mempid_block, &xgi_mempid_list, list) { if (mempid_block->location == NON_LOCAL && ((isvertex && mempid_block->bus_addr == 0xFFFFFFFF) || (!isvertex && mempid_block->bus_addr == bus_addr))) { mempid_freeblock = mempid_block; break; } - mempid_list = mempid_list->next; } if (mempid_freeblock) { list_del(&mempid_freeblock->list); @@ -905,15 +878,11 @@ void xgi_pcie_free(struct xgi_info * info, unsigned long bus_addr) struct xgi_pcie_block *xgi_find_pcie_block(struct xgi_info * info, unsigned long address) { - struct list_head *used_list; struct xgi_pcie_block *block; int i; - used_list = xgi_pcie_heap->used_list.next; - - while (used_list != &xgi_pcie_heap->used_list) { - block = list_entry(used_list, struct xgi_pcie_block, list); + list_for_each_entry(block, &xgi_pcie_heap->used_list, list) { if (block->bus_addr == address) { return block; } @@ -927,7 +896,6 @@ struct xgi_pcie_block *xgi_find_pcie_block(struct xgi_info * info, } } } - used_list = used_list->next; } XGI_ERROR("could not find map for vm 0x%lx\n", address); @@ -944,17 +912,13 @@ struct xgi_pcie_block *xgi_find_pcie_block(struct xgi_info * info, */ void *xgi_find_pcie_virt(struct xgi_info * info, unsigned long address) { - struct list_head *used_list = xgi_pcie_heap->used_list.next; + struct xgi_pcie_block *block; const unsigned long offset_in_page = address & (PAGE_SIZE - 1); - XGI_INFO("begin (used_list = 0x%p, address = 0x%lx, " - "PAGE_SIZE - 1 = %lu, offset_in_page = %lu)\n", - used_list, address, PAGE_SIZE - 1, offset_in_page); - - while (used_list != &xgi_pcie_heap->used_list) { - struct xgi_pcie_block *block = - list_entry(used_list, struct xgi_pcie_block, list); + XGI_INFO("begin (address = 0x%lx, offset_in_page = %lu)\n", + address, offset_in_page); + list_for_each_entry(block, &xgi_pcie_heap->used_list, list) { XGI_INFO("block = 0x%p (hw_addr = 0x%lx, size=%lu)\n", block, block->hw_addr, block->size); @@ -973,9 +937,6 @@ void *xgi_find_pcie_virt(struct xgi_info * info, unsigned long address) XGI_INFO("return 0x%p\n", ret); return ret; - } else { - XGI_INFO("used_list = used_list->next;\n"); - used_list = used_list->next; } } From 49ccec1b0845ea14ab2cfd2f53704fe26e38fbef Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Fri, 29 Jun 2007 21:38:48 -0700 Subject: [PATCH 071/437] Convert xgi_mem_location enum values to less generic names. 
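Patches 069 and 070 above replace the hand-rolled `head->next` walks with the standard helpers from <linux/list.h>: list_for_each_entry() when the loop only reads the list, and list_for_each_entry_safe() when the body may unlink or free the node it is visiting. A minimal sketch of both forms, with a made-up `item` structure standing in for the XGI block types:

#include <linux/list.h>
#include <linux/slab.h>

struct item {
	struct list_head list;
	unsigned long offset;
};

static struct item *example_find(struct list_head *head, unsigned long offset)
{
	struct item *it;

	/* Read-only walk; the cursor is the containing struct, not the list_head. */
	list_for_each_entry(it, head, list) {
		if (it->offset == offset)
			return it;
	}
	return NULL;
}

static void example_free_all(struct list_head *head)
{
	struct item *it, *tmp;

	/* The body deletes the current node, so the _safe variant caches
	 * the next entry before the current one is freed. */
	list_for_each_entry_safe(it, tmp, head, list) {
		list_del(&it->list);
		kfree(it);
	}
}

One wrinkle visible in the converted XGI code: after a plain list_for_each_entry() search the cursor never becomes NULL, so "not found" is detected by comparing &block->list against the list head once the loop falls through.
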
--- linux-core/xgi_drv.h | 6 +++--- linux-core/xgi_fb.c | 10 +++++----- linux-core/xgi_misc.c | 2 +- linux-core/xgi_pcie.c | 10 +++++----- 4 files changed, 14 insertions(+), 14 deletions(-) diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 3cb6dc7f..360e7120 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -158,9 +158,9 @@ struct xgi_ioctl_post_vbios { }; enum xgi_mem_location { - NON_LOCAL = 0, - LOCAL = 1, - INVALID = 0x7fffffff + XGI_MEMLOC_NON_LOCAL = 0, + XGI_MEMLOC_LOCAL = 1, + XGI_MEMLOC_INVALID = 0x7fffffff }; enum PcieOwner { diff --git a/linux-core/xgi_fb.c b/linux-core/xgi_fb.c index 32fde5ab..d7e9285d 100644 --- a/linux-core/xgi_fb.c +++ b/linux-core/xgi_fb.c @@ -48,7 +48,7 @@ void xgi_fb_alloc(struct xgi_info * info, struct xgi_mem_pid *mempid_block; if (req->is_front) { - alloc->location = LOCAL; + alloc->location = XGI_MEMLOC_LOCAL; alloc->bus_addr = info->fb.base; alloc->hw_addr = 0; XGI_INFO @@ -59,7 +59,7 @@ void xgi_fb_alloc(struct xgi_info * info, xgi_up(info->fb_sem); if (block == NULL) { - alloc->location = LOCAL; + alloc->location = XGI_MEMLOC_LOCAL; alloc->size = 0; alloc->bus_addr = 0; alloc->hw_addr = 0; @@ -67,7 +67,7 @@ void xgi_fb_alloc(struct xgi_info * info, } else { XGI_INFO("Video RAM allocation succeeded: 0x%p\n", (char *)block->offset); - alloc->location = LOCAL; + alloc->location = XGI_MEMLOC_LOCAL; alloc->size = block->size; alloc->bus_addr = info->fb.base + block->offset; alloc->hw_addr = block->offset; @@ -75,7 +75,7 @@ void xgi_fb_alloc(struct xgi_info * info, /* manage mempid */ mempid_block = kmalloc(sizeof(struct xgi_mem_pid), GFP_KERNEL); - mempid_block->location = LOCAL; + mempid_block->location = XGI_MEMLOC_LOCAL; mempid_block->bus_addr = alloc->bus_addr; mempid_block->pid = alloc->pid; @@ -111,7 +111,7 @@ void xgi_fb_free(struct xgi_info * info, unsigned long bus_addr) /* manage mempid */ list_for_each_entry(mempid_block, &xgi_mempid_list, list) { - if (mempid_block->location == LOCAL + if (mempid_block->location == XGI_MEMLOC_LOCAL && mempid_block->bus_addr == bus_addr) { mempid_freeblock = mempid_block; break; diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index eecd717b..b7923228 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -578,7 +578,7 @@ void xgi_mem_collect(struct xgi_info * info, unsigned int *pcnt) } } if (!find) { - if (block->location == LOCAL) { + if (block->location == XGI_MEMLOC_LOCAL) { XGI_INFO ("Memory ProcessID free fb and delete one block pid:%ld addr:0x%lx successfully! 
\n", block->pid, block->bus_addr); diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index e451ebd5..82111249 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -775,7 +775,7 @@ void xgi_pcie_alloc(struct xgi_info * info, unsigned long size, xgi_up(info->pcie_sem); if (block == NULL) { - alloc->location = INVALID; + alloc->location = XGI_MEMLOC_INVALID; alloc->size = 0; alloc->bus_addr = 0; alloc->hw_addr = 0; @@ -784,7 +784,7 @@ void xgi_pcie_alloc(struct xgi_info * info, unsigned long size, XGI_INFO ("PCIE RAM allocation succeeded: offset = 0x%lx, bus_addr = 0x%lx\n", block->offset, block->bus_addr); - alloc->location = NON_LOCAL; + alloc->location = XGI_MEMLOC_NON_LOCAL; alloc->size = block->size; alloc->bus_addr = block->bus_addr; alloc->hw_addr = block->hw_addr; @@ -799,7 +799,7 @@ void xgi_pcie_alloc(struct xgi_info * info, unsigned long size, kmalloc(sizeof(struct xgi_mem_pid), GFP_KERNEL); if (!mempid_block) XGI_ERROR("mempid_block alloc failed\n"); - mempid_block->location = NON_LOCAL; + mempid_block->location = XGI_MEMLOC_NON_LOCAL; if (owner == PCIE_3D) mempid_block->bus_addr = 0xFFFFFFFF; /*xgi_pcie_vertex_block has the address */ else @@ -832,7 +832,7 @@ void xgi_pcie_free(struct xgi_info * info, unsigned long bus_addr) processcnt = 0; list_for_each_entry(mempid_block, &xgi_mempid_list, list) { - if (mempid_block->location == NON_LOCAL + if (mempid_block->location == XGI_MEMLOC_NON_LOCAL && mempid_block->bus_addr == 0xFFFFFFFF) { ++processcnt; } @@ -855,7 +855,7 @@ void xgi_pcie_free(struct xgi_info * info, unsigned long bus_addr) /* manage mempid */ list_for_each_entry(mempid_block, &xgi_mempid_list, list) { - if (mempid_block->location == NON_LOCAL + if (mempid_block->location == XGI_MEMLOC_NON_LOCAL && ((isvertex && mempid_block->bus_addr == 0xFFFFFFFF) || (!isvertex && mempid_block->bus_addr == bus_addr))) { mempid_freeblock = mempid_block; From fc37781dd30b53815dd71ce576eb2147d23f0914 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Fri, 29 Jun 2007 21:48:31 -0700 Subject: [PATCH 072/437] Convert a few more U32 variables to more appropriate, generic types. 
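The rename in patch 071 above is purely about namespacing: enum constants share the global identifier space in C, so generic names such as LOCAL or INVALID are one header include away from a collision. Prefixing them with the subsystem name changes nothing but the spelling:

enum xgi_mem_location {
	XGI_MEMLOC_NON_LOCAL = 0,          /* was NON_LOCAL */
	XGI_MEMLOC_LOCAL     = 1,          /* was LOCAL     */
	XGI_MEMLOC_INVALID   = 0x7fffffff  /* was INVALID   */
};

The numeric values stay the same, so every use site is updated mechanically with no change in behaviour.
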
--- linux-core/xgi_cmdlist.c | 14 +++++++------- linux-core/xgi_cmdlist.h | 2 +- linux-core/xgi_linux.h | 2 +- linux-core/xgi_misc.c | 2 +- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index f8aacea2..04ee6e82 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -36,11 +36,11 @@ struct xgi_cmdring_info s_cmdring; static void addFlush2D(struct xgi_info * info); -static U32 getCurBatchBeginPort(struct xgi_cmd_info * pCmdInfo); +static unsigned int getCurBatchBeginPort(struct xgi_cmd_info * pCmdInfo); static void triggerHWCommandList(struct xgi_info * info, U32 triggerCounter); static void xgi_cmdlist_reset(void); -int xgi_cmdlist_initialize(struct xgi_info * info, U32 size) +int xgi_cmdlist_initialize(struct xgi_info * info, size_t size) { //struct xgi_mem_req mem_req; struct xgi_mem_alloc mem_alloc; @@ -64,7 +64,7 @@ int xgi_cmdlist_initialize(struct xgi_info * info, U32 size) void xgi_submit_cmdlist(struct xgi_info * info, struct xgi_cmd_info * pCmdInfo) { - U32 beginPort; + unsigned int beginPort; /** XGI_INFO("Jong-xgi_submit_cmdlist-Begin \n"); **/ /* Jong 05/25/2006 */ @@ -77,7 +77,7 @@ void xgi_submit_cmdlist(struct xgi_info * info, struct xgi_cmd_info * pCmdInfo) /* return; */ if (s_cmdring._lastBatchStartAddr == 0) { - U32 portOffset; + unsigned int portOffset; /* Jong 06/13/2006; remove marked for system hang test */ /* xgi_waitfor_pci_idle(info); */ @@ -278,17 +278,17 @@ void xgi_cmdlist_cleanup(struct xgi_info * info) static void triggerHWCommandList(struct xgi_info * info, U32 triggerCounter) { - static U32 s_triggerID = 1; + static unsigned int s_triggerID = 1; //Fix me, currently we just trigger one time while (triggerCounter--) { dwWriteReg(BASE_3D_ENG + M2REG_PCI_TRIGGER_REGISTER_ADDRESS, - 0x05000000 + (0xffff & s_triggerID++)); + 0x05000000 + (0x0ffff & s_triggerID++)); // xgi_waitfor_pci_idle(info); } } -static U32 getCurBatchBeginPort(struct xgi_cmd_info * pCmdInfo) +static unsigned int getCurBatchBeginPort(struct xgi_cmd_info * pCmdInfo) { // Convert the batch type to begin port ID switch (pCmdInfo->_firstBeginType) { diff --git a/linux-core/xgi_cmdlist.h b/linux-core/xgi_cmdlist.h index b11511ff..c6221511 100644 --- a/linux-core/xgi_cmdlist.h +++ b/linux-core/xgi_cmdlist.h @@ -65,7 +65,7 @@ struct xgi_cmdring_info { U32 _cmdRingOffset; }; -extern int xgi_cmdlist_initialize(struct xgi_info * info, U32 size); +extern int xgi_cmdlist_initialize(struct xgi_info * info, size_t size); extern void xgi_submit_cmdlist(struct xgi_info * info, struct xgi_cmd_info * pCmdInfo); diff --git a/linux-core/xgi_linux.h b/linux-core/xgi_linux.h index 2602b0f5..99bf2d04 100644 --- a/linux-core/xgi_linux.h +++ b/linux-core/xgi_linux.h @@ -455,7 +455,7 @@ static inline void XGI_SET_PAGE_ATTRIB_CACHED(struct xgi_pte * page_ptr) struct xgi_file_private { struct xgi_info *info; - U32 num_events; + unsigned int num_events; spinlock_t fp_lock; wait_queue_head_t wait_queue; }; diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index b7923228..9bf8205b 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -109,7 +109,7 @@ void xgi_sarea_info(struct xgi_info * info, struct xgi_sarea_info * req) */ #define STALL_INTERRUPT_RESET_THRESHOLD 0xffff -static U32 s_invalid_begin = 0; +static unsigned int s_invalid_begin = 0; bool xgi_ge_irq_handler(struct xgi_info * info) { From b323ab52aa9ccbfb06dd723ece361a5242d067b0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kristian=20H=C3=B8gsberg?= Date: Thu, 28 
Jun 2007 14:45:26 -0400 Subject: [PATCH 073/437] Drop drm_drawable_list and add drm_drawable_info directly to the idr. --- linux-core/drmP.h | 4 ---- linux-core/drm_drawable.c | 41 ++++++++++++++++++++------------------- 2 files changed, 21 insertions(+), 24 deletions(-) diff --git a/linux-core/drmP.h b/linux-core/drmP.h index dd3a69df..7bcd095a 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -598,10 +598,6 @@ typedef struct ati_pcigart_info { int table_size; } drm_ati_pcigart_info; -struct drm_drawable_list { - drm_drawable_info_t info; -}; - #include "drm_objects.h" /** diff --git a/linux-core/drm_drawable.c b/linux-core/drm_drawable.c index eb44a189..74f0bb5d 100644 --- a/linux-core/drm_drawable.c +++ b/linux-core/drm_drawable.c @@ -44,19 +44,19 @@ int drm_adddraw(DRM_IOCTL_ARGS) { DRM_DEVICE; unsigned long irqflags; - struct drm_drawable_list *draw_info; + struct drm_drawable_info *draw_info; drm_draw_t draw; int new_id = 0; int ret; - draw_info = drm_calloc(1, sizeof(struct drm_drawable_list), DRM_MEM_BUFS); + draw_info = drm_calloc(1, sizeof(*draw_info), DRM_MEM_BUFS); if (!draw_info) return -ENOMEM; again: if (idr_pre_get(&dev->drw_idr, GFP_KERNEL) == 0) { DRM_ERROR("Out of memory expanding drawable idr\n"); - drm_free(draw_info, sizeof(struct drm_drawable_list), DRM_MEM_BUFS); + drm_free(draw_info, sizeof(struct drm_drawable_info), DRM_MEM_BUFS); return -ENOMEM; } @@ -86,7 +86,7 @@ int drm_rmdraw(DRM_IOCTL_ARGS) DRM_DEVICE; drm_draw_t draw; unsigned long irqflags; - struct drm_drawable_list *draw_info; + struct drm_drawable_info *draw_info; DRM_COPY_FROM_USER_IOCTL(draw, (drm_draw_t __user *) data, sizeof(draw)); @@ -100,33 +100,31 @@ int drm_rmdraw(DRM_IOCTL_ARGS) spin_lock_irqsave(&dev->drw_lock, irqflags); idr_remove(&dev->drw_idr, draw.handle); - drm_free(draw_info, sizeof(struct drm_drawable_list), DRM_MEM_BUFS); + drm_free(draw_info, sizeof(struct drm_drawable_info), DRM_MEM_BUFS); spin_unlock_irqrestore(&dev->drw_lock, irqflags); DRM_DEBUG("%d\n", draw.handle); return 0; } -int drm_update_drawable_info(DRM_IOCTL_ARGS) { +int drm_update_drawable_info(DRM_IOCTL_ARGS) +{ DRM_DEVICE; drm_update_draw_t update; unsigned long irqflags; - drm_drawable_info_t *info; drm_clip_rect_t *rects; - struct drm_drawable_list *draw_info; + struct drm_drawable_info *info; int err; DRM_COPY_FROM_USER_IOCTL(update, (drm_update_draw_t __user *) data, sizeof(update)); - draw_info = idr_find(&dev->drw_idr, update.handle); - if (!draw_info) { + info = idr_find(&dev->drw_idr, update.handle); + if (!info) { DRM_ERROR("No such drawable %d\n", update.handle); return DRM_ERR(EINVAL); } - info = &draw_info->info; - switch (update.type) { case DRM_DRAWABLE_CLIPRECTS: if (update.num != info->num_rects) { @@ -184,24 +182,27 @@ error: /** * Caller must hold the drawable spinlock! 
*/ -drm_drawable_info_t *drm_get_drawable_info(drm_device_t *dev, drm_drawable_t id) { - struct drm_drawable_list *draw_info; - draw_info = idr_find(&dev->drw_idr, id); - if (!draw_info) { +drm_drawable_info_t *drm_get_drawable_info(drm_device_t *dev, drm_drawable_t id) +{ + struct drm_drawable_info *info; + + info = idr_find(&dev->drw_idr, id); + if (!info) { DRM_DEBUG("No such drawable %d\n", id); return NULL; } - return &draw_info->info; + return info; } EXPORT_SYMBOL(drm_get_drawable_info); static int drm_drawable_free(int idr, void *p, void *data) { - struct drm_drawable_list *drw_entry = p; - drm_free(drw_entry->info.rects, drw_entry->info.num_rects * + struct drm_drawable_info *info = p; + + drm_free(info->rects, info->num_rects * sizeof(drm_clip_rect_t), DRM_MEM_BUFS); - drm_free(drw_entry, sizeof(struct drm_drawable_list), DRM_MEM_BUFS); + drm_free(info, sizeof(struct drm_drawable_info), DRM_MEM_BUFS); return 0; } From c9d752ff4fb2b6eee2fef636193fc9ca29abba37 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kristian=20H=C3=B8gsberg?= Date: Mon, 2 Jul 2007 17:52:07 -0400 Subject: [PATCH 074/437] Fix must-check warnings and implement a few error paths. --- linux-core/drm_drv.c | 2 +- linux-core/drm_stub.c | 26 +++++++++++++++++--------- linux-core/drm_sysfs.c | 31 +++++++++++++++++++++++++------ 3 files changed, 43 insertions(+), 16 deletions(-) diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index d5eb9713..6bbe7fca 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -311,7 +311,7 @@ int drm_init(struct drm_driver *driver, } if (!drm_fb_loaded) - pci_register_driver(&driver->pci_driver); + return pci_register_driver(&driver->pci_driver); else { for (i = 0; pciidlist[i].vendor != 0; i++) { pid = &pciidlist[i]; diff --git a/linux-core/drm_stub.c b/linux-core/drm_stub.c index f57ed9cc..b96408ab 100644 --- a/linux-core/drm_stub.c +++ b/linux-core/drm_stub.c @@ -232,18 +232,22 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent, if (!drm_fb_loaded) { pci_set_drvdata(pdev, dev); - pci_request_regions(pdev, driver->pci_driver.name); + ret = pci_request_regions(pdev, driver->pci_driver.name); + if (ret) + goto err_g1; } - pci_enable_device(pdev); + ret = pci_enable_device(pdev); + if (ret) + goto err_g2; pci_set_master(pdev); if ((ret = drm_fill_in_dev(dev, pdev, ent, driver))) { printk(KERN_ERR "DRM: fill_in_dev failed\n"); - goto err_g1; + goto err_g3; } if ((ret = drm_get_head(dev, &dev->primary))) - goto err_g1; + goto err_g3; DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", driver->name, driver->major, driver->minor, driver->patchlevel, @@ -251,12 +255,16 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent, return 0; -err_g1: - if (!drm_fb_loaded) { - pci_set_drvdata(pdev, NULL); - pci_release_regions(pdev); + err_g3: + if (!drm_fb_loaded) pci_disable_device(pdev); - } + err_g2: + if (!drm_fb_loaded) + pci_release_regions(pdev); + err_g1: + if (!drm_fb_loaded) + pci_set_drvdata(pdev, NULL); + drm_free(dev, sizeof(*dev), DRM_MEM_STUB); printk(KERN_ERR "DRM: drm_get_dev failed.\n"); return ret; diff --git a/linux-core/drm_sysfs.c b/linux-core/drm_sysfs.c index ace0778b..9b2f5dce 100644 --- a/linux-core/drm_sysfs.c +++ b/linux-core/drm_sysfs.c @@ -93,11 +93,15 @@ struct drm_sysfs_class *drm_sysfs_create(struct module *owner, char *name) retval = class_register(&cs->class); if (retval) goto error; - class_create_file(&cs->class, &class_attr_version); + retval = class_create_file(&cs->class, &class_attr_version); + if (retval) + 
goto error_with_class; return cs; - error: + error_with_class: + class_unregister(&cs->class); + error: kfree(cs); return ERR_PTR(retval); } @@ -170,16 +174,31 @@ struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs, if (retval) goto error; - class_device_create_file(&s_dev->class_dev, &cs->attr); + retval = class_device_create_file(&s_dev->class_dev, &cs->attr); + if (retval) + goto error_with_device; + class_set_devdata(&s_dev->class_dev, head); - for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++) - class_device_create_file(&s_dev->class_dev, &class_device_attrs[i]); + for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++) { + retval = class_device_create_file(&s_dev->class_dev, + &class_device_attrs[i]); + if (retval) + goto error_with_files; + } return &s_dev->class_dev; -error: + error_with_files: + while (i > 0) + class_device_remove_file(&s_dev->class_dev, + &class_device_attrs[--i]); + class_device_remove_file(&s_dev->class_dev, &cs->attr); + error_with_device: + class_device_unregister(&s_dev->class_dev); + error: kfree(s_dev); + return ERR_PTR(retval); } From 8d96ba9805316b29e948d7594344feebb17042f7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michel=20D=C3=A4nzer?= Date: Tue, 3 Jul 2007 11:41:44 +0200 Subject: [PATCH 075/437] Restore pre-idr semantics for drawable information. There's a difference between a drawable ID not having valid drawable information and not being allocated at all. Not making the distinction would break i915 DRM swap scheduling with older X servers that don't push drawable cliprect information to the DRM. --- linux-core/drm_drawable.c | 43 +++++++++++++++++++++++---------------- 1 file changed, 25 insertions(+), 18 deletions(-) diff --git a/linux-core/drm_drawable.c b/linux-core/drm_drawable.c index 74f0bb5d..7657e954 100644 --- a/linux-core/drm_drawable.c +++ b/linux-core/drm_drawable.c @@ -37,6 +37,8 @@ #include "drmP.h" +#define NO_DRW_INFO (void*)1 + /** * Allocate drawable ID and memory to store information about it. 
*/ @@ -44,24 +46,18 @@ int drm_adddraw(DRM_IOCTL_ARGS) { DRM_DEVICE; unsigned long irqflags; - struct drm_drawable_info *draw_info; drm_draw_t draw; int new_id = 0; int ret; - draw_info = drm_calloc(1, sizeof(*draw_info), DRM_MEM_BUFS); - if (!draw_info) - return -ENOMEM; - again: if (idr_pre_get(&dev->drw_idr, GFP_KERNEL) == 0) { DRM_ERROR("Out of memory expanding drawable idr\n"); - drm_free(draw_info, sizeof(struct drm_drawable_info), DRM_MEM_BUFS); return -ENOMEM; } spin_lock_irqsave(&dev->drw_lock, irqflags); - ret = idr_get_new_above(&dev->drw_idr, draw_info, 1, &new_id); + ret = idr_get_new_above(&dev->drw_idr, NO_DRW_INFO, 1, &new_id); if (ret == -EAGAIN) { spin_unlock_irqrestore(&dev->drw_lock, irqflags); goto again; @@ -86,21 +82,16 @@ int drm_rmdraw(DRM_IOCTL_ARGS) DRM_DEVICE; drm_draw_t draw; unsigned long irqflags; - struct drm_drawable_info *draw_info; DRM_COPY_FROM_USER_IOCTL(draw, (drm_draw_t __user *) data, sizeof(draw)); - draw_info = idr_find(&dev->drw_idr, draw.handle); - if (!draw_info) { - DRM_DEBUG("No such drawable %d\n", draw.handle); - return -EINVAL; - } - spin_lock_irqsave(&dev->drw_lock, irqflags); + drm_free(drm_get_drawable_info(dev, draw.handle), + sizeof(struct drm_drawable_info), DRM_MEM_BUFS); + idr_remove(&dev->drw_idr, draw.handle); - drm_free(draw_info, sizeof(struct drm_drawable_info), DRM_MEM_BUFS); spin_unlock_irqrestore(&dev->drw_lock, irqflags); DRM_DEBUG("%d\n", draw.handle); @@ -125,6 +116,13 @@ int drm_update_drawable_info(DRM_IOCTL_ARGS) return DRM_ERR(EINVAL); } + if (info == NO_DRW_INFO) { + info = drm_calloc(1, sizeof(*info), DRM_MEM_BUFS); + if (!info) + return -ENOMEM; + idr_replace(&dev->drw_idr, info, update.handle); + } + switch (update.type) { case DRM_DRAWABLE_CLIPRECTS: if (update.num != info->num_rects) { @@ -187,11 +185,17 @@ drm_drawable_info_t *drm_get_drawable_info(drm_device_t *dev, drm_drawable_t id) struct drm_drawable_info *info; info = idr_find(&dev->drw_idr, id); + if (!info) { DRM_DEBUG("No such drawable %d\n", id); return NULL; } + if (info == NO_DRW_INFO) { + DRM_DEBUG("No information for drawable %d\n", id); + return NULL; + } + return info; } EXPORT_SYMBOL(drm_get_drawable_info); @@ -200,9 +204,12 @@ static int drm_drawable_free(int idr, void *p, void *data) { struct drm_drawable_info *info = p; - drm_free(info->rects, info->num_rects * - sizeof(drm_clip_rect_t), DRM_MEM_BUFS); - drm_free(info, sizeof(struct drm_drawable_info), DRM_MEM_BUFS); + if (info != NO_DRW_INFO) { + drm_free(info->rects, info->num_rects * + sizeof(drm_clip_rect_t), DRM_MEM_BUFS); + drm_free(info, sizeof(*info), DRM_MEM_BUFS); + } + return 0; } From ea832a8e555c9e1f90830b55cbd970d0eca0e2cf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michel=20D=C3=A4nzer?= Date: Tue, 3 Jul 2007 12:15:15 +0200 Subject: [PATCH 076/437] Simplification for previous commit. Dave Airlie pointed out on IRC that idr_replace lets us know if the ID hasn't been allocated, so we don't need a special pointer value for allocated IDs that don't have valid information yet. --- linux-core/drm_drawable.c | 34 +++++++++------------------------- 1 file changed, 9 insertions(+), 25 deletions(-) diff --git a/linux-core/drm_drawable.c b/linux-core/drm_drawable.c index 7657e954..57b62ca4 100644 --- a/linux-core/drm_drawable.c +++ b/linux-core/drm_drawable.c @@ -37,8 +37,6 @@ #include "drmP.h" -#define NO_DRW_INFO (void*)1 - /** * Allocate drawable ID and memory to store information about it. 
*/ @@ -57,7 +55,7 @@ again: } spin_lock_irqsave(&dev->drw_lock, irqflags); - ret = idr_get_new_above(&dev->drw_idr, NO_DRW_INFO, 1, &new_id); + ret = idr_get_new_above(&dev->drw_idr, NULL, 1, &new_id); if (ret == -EAGAIN) { spin_unlock_irqrestore(&dev->drw_lock, irqflags); goto again; @@ -112,15 +110,15 @@ int drm_update_drawable_info(DRM_IOCTL_ARGS) info = idr_find(&dev->drw_idr, update.handle); if (!info) { - DRM_ERROR("No such drawable %d\n", update.handle); - return DRM_ERR(EINVAL); - } - - if (info == NO_DRW_INFO) { info = drm_calloc(1, sizeof(*info), DRM_MEM_BUFS); if (!info) return -ENOMEM; - idr_replace(&dev->drw_idr, info, update.handle); + if (idr_replace(&dev->drw_idr, info, update.handle) == + (void*)-ENOENT) { + DRM_ERROR("No such drawable %d\n", update.handle); + drm_free(info, sizeof(*info), DRM_MEM_BUFS); + return -EINVAL; + } } switch (update.type) { @@ -182,21 +180,7 @@ error: */ drm_drawable_info_t *drm_get_drawable_info(drm_device_t *dev, drm_drawable_t id) { - struct drm_drawable_info *info; - - info = idr_find(&dev->drw_idr, id); - - if (!info) { - DRM_DEBUG("No such drawable %d\n", id); - return NULL; - } - - if (info == NO_DRW_INFO) { - DRM_DEBUG("No information for drawable %d\n", id); - return NULL; - } - - return info; + return idr_find(&dev->drw_idr, id); } EXPORT_SYMBOL(drm_get_drawable_info); @@ -204,7 +188,7 @@ static int drm_drawable_free(int idr, void *p, void *data) { struct drm_drawable_info *info = p; - if (info != NO_DRW_INFO) { + if (info) { drm_free(info->rects, info->num_rects * sizeof(drm_clip_rect_t), DRM_MEM_BUFS); drm_free(info, sizeof(*info), DRM_MEM_BUFS); From 91990946fa3f7e8e725af18d1f3a63e0c7892308 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michel=20D=C3=A4nzer?= Date: Tue, 3 Jul 2007 12:33:51 +0200 Subject: [PATCH 077/437] One more spinlock initializer cleanup. --- shared-core/i915_irq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/shared-core/i915_irq.c b/shared-core/i915_irq.c index dc00f983..2f6a6b95 100644 --- a/shared-core/i915_irq.c +++ b/shared-core/i915_irq.c @@ -719,7 +719,7 @@ void i915_driver_irq_postinstall(drm_device_t * dev) INIT_LIST_HEAD(&dev_priv->vbl_swaps.head); dev_priv->swaps_pending = 0; - dev_priv->user_irq_lock = SPIN_LOCK_UNLOCKED; + spin_lock_init(&dev_priv->user_irq_lock); dev_priv->user_irq_refcount = 0; i915_enable_interrupt(dev); From 1814a829eb65ee53a14fa9b53fc6f3a4196dcaa5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kristian=20H=C3=B8gsberg?= Date: Tue, 3 Jul 2007 10:31:46 -0400 Subject: [PATCH 078/437] Don't take dev->struct_mutex twice in drm_setsareactx. --- linux-core/drm_context.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/linux-core/drm_context.c b/linux-core/drm_context.c index 101a298c..ff08e809 100644 --- a/linux-core/drm_context.c +++ b/linux-core/drm_context.c @@ -245,8 +245,6 @@ int drm_setsareactx(struct inode *inode, struct file *filp, if (!map) goto bad; - mutex_lock(&dev->struct_mutex); - ctx_sarea = idr_find(&dev->ctx_idr, request.ctx_id); if (!ctx_sarea) goto bad; From d57b7f02d2e525e5600e5d77370d7ad2b4c9b265 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kristian=20H=C3=B8gsberg?= Date: Tue, 3 Jul 2007 10:41:48 -0400 Subject: [PATCH 079/437] Use idr_replace trick to eliminate struct drm_ctx_sarea_list. 
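
The pattern, roughly sketched (a hypothetical helper illustrating the calls used in the diff below, not a verbatim excerpt): the context idr now stores the SAREA map pointer directly as its payload, and idr_replace() returns an ERR_PTR when the id was never allocated, so the one-member wrapper struct and the separate lookup-then-assign step go away.

static int ctx_sarea_bind_sketch(drm_device_t *dev, int ctx_id, drm_map_t *map)
{
	/* the id was allocated earlier with a NULL payload:
	 *   idr_get_new_above(&dev->ctx_idr, NULL, DRM_RESERVED_CONTEXTS, &new_id);
	 *
	 * idr_replace() hands back an error pointer if ctx_id was never
	 * allocated, so no wrapper struct and no extra existence check
	 * are needed. */
	if (IS_ERR(idr_replace(&dev->ctx_idr, map, ctx_id)))
		return -EINVAL;

	/* readers simply get the map pointer back from idr_find() */
	return 0;
}
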
--- linux-core/drmP.h | 4 ---- linux-core/drm_context.c | 41 +++++++-------------------------------- linux-core/drm_drawable.c | 3 +-- 3 files changed, 8 insertions(+), 40 deletions(-) diff --git a/linux-core/drmP.h b/linux-core/drmP.h index 7bcd095a..c992c8d9 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -570,10 +570,6 @@ typedef struct drm_ctx_list { drm_file_t *tag; /**< associated fd private data */ } drm_ctx_list_t; -struct drm_ctx_sarea_list { - drm_map_t *map; -}; - typedef struct drm_vbl_sig { struct list_head head; unsigned int sequence; diff --git a/linux-core/drm_context.c b/linux-core/drm_context.c index ff08e809..195c7fb5 100644 --- a/linux-core/drm_context.c +++ b/linux-core/drm_context.c @@ -58,17 +58,9 @@ */ void drm_ctxbitmap_free(drm_device_t * dev, int ctx_handle) { - struct drm_ctx_sarea_list *ctx; - mutex_lock(&dev->struct_mutex); - ctx = idr_find(&dev->ctx_idr, ctx_handle); - if (ctx) { - idr_remove(&dev->ctx_idr, ctx_handle); - drm_free(ctx, sizeof(struct drm_ctx_sarea_list), DRM_MEM_CTXLIST); - } else - DRM_ERROR("Attempt to free invalid context handle: %d\n", ctx_handle); + idr_remove(&dev->ctx_idr, ctx_handle); mutex_unlock(&dev->struct_mutex); - return; } /** @@ -84,20 +76,15 @@ static int drm_ctxbitmap_next(drm_device_t * dev) { int new_id; int ret; - struct drm_ctx_sarea_list *new_ctx; - - new_ctx = drm_calloc(1, sizeof(struct drm_ctx_sarea_list), DRM_MEM_CTXLIST); - if (!new_ctx) - return -1; again: if (idr_pre_get(&dev->ctx_idr, GFP_KERNEL) == 0) { DRM_ERROR("Out of memory expanding drawable idr\n"); - drm_free(new_ctx, sizeof(struct drm_ctx_sarea_list), DRM_MEM_CTXLIST); return -ENOMEM; } mutex_lock(&dev->struct_mutex); - ret = idr_get_new_above(&dev->ctx_idr, new_ctx, DRM_RESERVED_CONTEXTS, &new_id); + ret = idr_get_new_above(&dev->ctx_idr, NULL, + DRM_RESERVED_CONTEXTS, &new_id); if (ret == -EAGAIN) { mutex_unlock(&dev->struct_mutex); goto again; @@ -120,15 +107,6 @@ int drm_ctxbitmap_init(drm_device_t * dev) return 0; } - - -static int drm_ctx_sarea_free(int id, void *p, void *data) -{ - struct drm_ctx_sarea_list *ctx_entry = p; - drm_free(ctx_entry, sizeof(struct drm_ctx_sarea_list), DRM_MEM_CTXLIST); - return 0; -} - /** * Context bitmap cleanup. 
* @@ -140,7 +118,6 @@ static int drm_ctx_sarea_free(int id, void *p, void *data) void drm_ctxbitmap_cleanup(drm_device_t * dev) { mutex_lock(&dev->struct_mutex); - idr_for_each(&dev->ctx_idr, drm_ctx_sarea_free, NULL); idr_remove_all(&dev->ctx_idr); mutex_unlock(&dev->struct_mutex); } @@ -172,19 +149,17 @@ int drm_getsareactx(struct inode *inode, struct file *filp, drm_ctx_priv_map_t request; drm_map_t *map; drm_map_list_t *_entry; - struct drm_ctx_sarea_list *ctx_sarea; if (copy_from_user(&request, argp, sizeof(request))) return -EFAULT; mutex_lock(&dev->struct_mutex); - ctx_sarea = idr_find(&dev->ctx_idr, request.ctx_id); - if (!ctx_sarea) { + map = idr_find(&dev->ctx_idr, request.ctx_id); + if (!map) { mutex_unlock(&dev->struct_mutex); return -EINVAL; } - map = ctx_sarea->map; mutex_unlock(&dev->struct_mutex); @@ -224,7 +199,6 @@ int drm_setsareactx(struct inode *inode, struct file *filp, drm_ctx_priv_map_t request; drm_map_t *map = NULL; drm_map_list_t *r_list = NULL; - struct drm_ctx_sarea_list *ctx_sarea; if (copy_from_user(&request, (drm_ctx_priv_map_t __user *) arg, sizeof(request))) @@ -245,12 +219,11 @@ int drm_setsareactx(struct inode *inode, struct file *filp, if (!map) goto bad; - ctx_sarea = idr_find(&dev->ctx_idr, request.ctx_id); - if (!ctx_sarea) + if (IS_ERR(idr_replace(&dev->ctx_idr, map, request.ctx_id))) goto bad; - ctx_sarea->map = map; mutex_unlock(&dev->struct_mutex); + return 0; } diff --git a/linux-core/drm_drawable.c b/linux-core/drm_drawable.c index 57b62ca4..7129980b 100644 --- a/linux-core/drm_drawable.c +++ b/linux-core/drm_drawable.c @@ -113,8 +113,7 @@ int drm_update_drawable_info(DRM_IOCTL_ARGS) info = drm_calloc(1, sizeof(*info), DRM_MEM_BUFS); if (!info) return -ENOMEM; - if (idr_replace(&dev->drw_idr, info, update.handle) == - (void*)-ENOENT) { + if (IS_ERR(idr_replace(&dev->drw_idr, info, update.handle))) { DRM_ERROR("No such drawable %d\n", update.handle); drm_free(info, sizeof(*info), DRM_MEM_BUFS); return -EINVAL; From 2695e8e209228dfc2e6a9b10bc118d0794602b37 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Thu, 5 Jul 2007 17:18:12 -0700 Subject: [PATCH 080/437] Convert weird rtdsc usage to get_cycles. I'm not convinced that get_cycles is the right approach here, but it's better than the weird way that rtdsc was being used. --- linux-core/xgi_misc.c | 24 ++++++++++-------------- 1 file changed, 10 insertions(+), 14 deletions(-) diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index 9bf8205b..a0ed18c2 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -127,12 +127,10 @@ bool xgi_ge_irq_handler(struct xgi_info * info) // We got GE stall interrupt. ge_3d_status[0x04] = int_status | 0x04000000; - if (TRUE == is_support_auto_reset) { + if (is_support_auto_reset) { bool is_wrong_signal = FALSE; - static U32 last_int_tick_low, - last_int_tick_high; - static U32 new_int_tick_low; - static U32 continoue_int_count = 0; + static cycles_t last_tick; + static unsigned continue_int_count = 0; // OE II is busy. while (old_ge_status & 0x001c0000) { u16 check; @@ -190,19 +188,17 @@ bool xgi_ge_irq_handler(struct xgi_info * info) if (is_wrong_signal) { // Nothing but skip. 
- } else if (0 == continoue_int_count++) { - rdtsc(last_int_tick_low, - last_int_tick_high); + } else if (0 == continue_int_count++) { + last_tick = get_cycles(); } else { - rdtscl(new_int_tick_low); - if ((new_int_tick_low - - last_int_tick_low) > + const cycles_t new_tick = get_cycles(); + if ((new_tick - last_tick) > STALL_INTERRUPT_RESET_THRESHOLD) { - continoue_int_count = 0; - } else if (continoue_int_count >= 3) { + continue_int_count = 0; + } else if (continue_int_count >= 3) { int time_out; - continoue_int_count = 0; + continue_int_count = 0; // GE Hung up, need reset. XGI_INFO("Reset GE!\n"); From 8b18276458e93263d5d554f779227a906592ac74 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Thu, 5 Jul 2007 17:45:44 -0700 Subject: [PATCH 081/437] Major clean up of xgi_ge_irq_handler Two large blocks of code were moved out of this function into separate functions. This brought some much needed sanity to the indentation. Some dead varaibles were removed. --- linux-core/xgi_misc.c | 260 ++++++++++++++++++++---------------------- 1 file changed, 123 insertions(+), 137 deletions(-) diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index a0ed18c2..6cc0f107 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -111,83 +111,136 @@ void xgi_sarea_info(struct xgi_info * info, struct xgi_sarea_info * req) static unsigned int s_invalid_begin = 0; +static bool xgi_validate_signal(volatile u8 *mmio_vbase) +{ + volatile u32 *const ge_3d_status = + (volatile u32 *)(mmio_vbase + 0x2800); + const u32 old_ge_status = ge_3d_status[0x00]; + + if (old_ge_status & 0x001c0000) { + u16 check; + + /* Check Read back status */ + *(mmio_vbase + 0x235c) = 0x80; + check = *((volatile u16 *)(mmio_vbase + 0x2360)); + + if ((check & 0x3f) != ((check & 0x3f00) >> 8)) { + return FALSE; + } + + /* Check RO channel */ + *(mmio_vbase + 0x235c) = 0x83; + check = *((volatile u16 *)(mmio_vbase + 0x2360)); + if ((check & 0x0f) != ((check & 0xf0) >> 4)) { + return FALSE; + } + + /* Check RW channel */ + *(mmio_vbase + 0x235c) = 0x88; + check = *((volatile u16 *)(mmio_vbase + 0x2360)); + if ((check & 0x0f) != ((check & 0xf0) >> 4)) { + return FALSE; + } + + /* Check RO channel outstanding */ + *(mmio_vbase + 0x235c) = 0x8f; + check = *((volatile u16 *)(mmio_vbase + 0x2360)); + if (0 != (check & 0x3ff)) { + return FALSE; + } + + /* Check RW channel outstanding */ + *(mmio_vbase + 0x235c) = 0x90; + check = *((volatile u16 *)(mmio_vbase + 0x2360)); + if (0 != (check & 0x3ff)) { + return FALSE; + } + + /* No pending PCIE request. GE stall. */ + } + + return TRUE; +} + + +static void xgi_ge_hang_reset(volatile u8 *mmio_vbase) +{ + volatile u32 *const ge_3d_status = + (volatile u32 *)(mmio_vbase + 0x2800); + int time_out = 0xffff; + + *(mmio_vbase + 0xb057) = 8; + while (0 != (ge_3d_status[0x00] & 0xf0000000)) { + while (0 != ((--time_out) & 0xfff)) + /* empty */ ; + + if (0 == time_out) { + u8 old_3ce; + u8 old_3cf; + u8 old_index; + u8 old_36; + + XGI_INFO("Can not reset back 0x%x!\n", + ge_3d_status[0x00]); + + *(mmio_vbase + 0xb057) = 0; + + /* Have to use 3x5.36 to reset. 
*/ + /* Save and close dynamic gating */ + + old_3ce = *(mmio_vbase + 0x3ce); + *(mmio_vbase + 0x3ce) = 0x2a; + old_3cf = *(mmio_vbase + 0x3cf); + *(mmio_vbase + 0x3cf) = old_3cf & 0xfe; + + /* Reset GE */ + old_index = *(mmio_vbase + 0x3d4); + *(mmio_vbase + 0x3d4) = 0x36; + old_36 = *(mmio_vbase + 0x3d5); + *(mmio_vbase + 0x3d5) = old_36 | 0x10; + + while (0 != ((--time_out) & 0xfff)) + /* empty */ ; + + *(mmio_vbase + 0x3d5) = old_36; + *(mmio_vbase + 0x3d4) = old_index; + + /* Restore dynamic gating */ + *(mmio_vbase + 0x3cf) = old_3cf; + *(mmio_vbase + 0x3ce) = old_3ce; + break; + } + } + + *(mmio_vbase + 0xb057) = 0; +} + + bool xgi_ge_irq_handler(struct xgi_info * info) { - volatile u8 *mmio_vbase = info->mmio.vbase; - volatile U32 *ge_3d_status = (volatile U32 *)(mmio_vbase + 0x2800); - U32 int_status = ge_3d_status[4]; // interrupt status - U32 auto_reset_count = 0; + volatile u8 *const mmio_vbase = info->mmio.vbase; + volatile u32 *const ge_3d_status = + (volatile u32 *)(mmio_vbase + 0x2800); + const u32 int_status = ge_3d_status[4]; bool is_support_auto_reset = FALSE; - // Check GE on/off + /* Check GE on/off */ if (0 == (0xffffc0f0 & int_status)) { - U32 old_ge_status = ge_3d_status[0x00]; - U32 old_pcie_cmd_fetch_Addr = ge_3d_status[0x0a]; + u32 old_pcie_cmd_fetch_Addr = ge_3d_status[0x0a]; + if (0 != (0x1000 & int_status)) { - // We got GE stall interrupt. + /* We got GE stall interrupt. + */ ge_3d_status[0x04] = int_status | 0x04000000; if (is_support_auto_reset) { - bool is_wrong_signal = FALSE; static cycles_t last_tick; static unsigned continue_int_count = 0; - // OE II is busy. - while (old_ge_status & 0x001c0000) { - u16 check; - // Check Read back status - *(mmio_vbase + 0x235c) = 0x80; - check = - *((volatile u16 *)(mmio_vbase + - 0x2360)); - if ((check & 0x3f) != - ((check & 0x3f00) >> 8)) { - is_wrong_signal = TRUE; - break; - } - // Check RO channel - *(mmio_vbase + 0x235c) = 0x83; - check = - *((volatile u16 *)(mmio_vbase + - 0x2360)); - if ((check & 0x0f) != - ((check & 0xf0) >> 4)) { - is_wrong_signal = TRUE; - break; - } - // Check RW channel - *(mmio_vbase + 0x235c) = 0x88; - check = - *((volatile u16 *)(mmio_vbase + - 0x2360)); - if ((check & 0x0f) != - ((check & 0xf0) >> 4)) { - is_wrong_signal = TRUE; - break; - } - // Check RO channel outstanding - *(mmio_vbase + 0x235c) = 0x8f; - check = - *((volatile u16 *)(mmio_vbase + - 0x2360)); - if (0 != (check & 0x3ff)) { - is_wrong_signal = TRUE; - break; - } - // Check RW channel outstanding - *(mmio_vbase + 0x235c) = 0x90; - check = - *((volatile u16 *)(mmio_vbase + - 0x2360)); - if (0 != (check & 0x3ff)) { - is_wrong_signal = TRUE; - break; - } - // No pending PCIE request. GE stall. - break; - } - if (is_wrong_signal) { - // Nothing but skip. + /* OE II is busy. */ + + if (!xgi_validate_signal(mmio_vbase)) { + /* Nothing but skip. */ } else if (0 == continue_int_count++) { last_tick = get_cycles(); } else { @@ -196,90 +249,23 @@ bool xgi_ge_irq_handler(struct xgi_info * info) STALL_INTERRUPT_RESET_THRESHOLD) { continue_int_count = 0; } else if (continue_int_count >= 3) { - int time_out; - continue_int_count = 0; - // GE Hung up, need reset. + /* GE Hung up, need reset. 
*/ XGI_INFO("Reset GE!\n"); - *(mmio_vbase + 0xb057) = 8; - time_out = 0xffff; - while (0 != - (ge_3d_status[0x00] & - 0xf0000000)) { - while (0 != - ((--time_out) & - 0xfff)) ; - if (0 == time_out) { - u8 old_3ce; - u8 old_3cf; - u8 old_index; - u8 old_36; - - XGI_INFO - ("Can not reset back 0x%lx!\n", - ge_3d_status - [0x00]); - *(mmio_vbase + - 0xb057) = 0; - // Have to use 3x5.36 to reset. - // Save and close dynamic gating - old_3ce = - *(mmio_vbase - + 0x3ce); - *(mmio_vbase + - 0x3ce) = 0x2a; - old_3cf = - *(mmio_vbase - + 0x3cf); - *(mmio_vbase + - 0x3cf) = - old_3cf & 0xfe; - // Reset GE - old_index = - *(mmio_vbase - + 0x3d4); - *(mmio_vbase + - 0x3d4) = 0x36; - old_36 = - *(mmio_vbase - + 0x3d5); - *(mmio_vbase + - 0x3d5) = - old_36 | 0x10; - while (0 != - ((--time_out) & 0xfff)) ; - *(mmio_vbase + - 0x3d5) = - old_36; - *(mmio_vbase + - 0x3d4) = - old_index; - // Restore dynamic gating - *(mmio_vbase + - 0x3cf) = - old_3cf; - *(mmio_vbase + - 0x3ce) = - old_3ce; - break; - } - } - *(mmio_vbase + 0xb057) = 0; - - // Increase Reset counter - auto_reset_count++; + xgi_ge_hang_reset(mmio_vbase); } } } - return TRUE; } else if (0 != (0x1 & int_status)) { s_invalid_begin++; ge_3d_status[0x04] = (int_status & ~0x01) | 0x04000000; - return TRUE; } + + return TRUE; } + return FALSE; } From 86e75b7f7f64643c6ef2c0fef353b38753df8239 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Thu, 5 Jul 2007 17:49:13 -0700 Subject: [PATCH 082/437] Remove XGI_IOCTL_CPUID and associated cruft. --- linux-core/xgi_drv.c | 4 ---- linux-core/xgi_drv.h | 15 +++------------ linux-core/xgi_misc.c | 12 ------------ linux-core/xgi_misc.h | 1 - 4 files changed, 3 insertions(+), 29 deletions(-) diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index 44b003a8..081db19e 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -960,10 +960,6 @@ int xgi_kern_ioctl(struct inode *inode, struct file *filp, XGI_INFO("Jong-xgi_state_change \n"); xgi_state_change(info, (struct xgi_state_info *) arg_copy); break; - case XGI_ESC_CPUID: - XGI_INFO("Jong-XGI_ESC_CPUID \n"); - xgi_get_cpu_id((struct cpu_info *)arg_copy); - break; default: XGI_INFO("Jong-xgi_ioctl_default \n"); status = -EINVAL; diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 360e7120..248377aa 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -234,13 +234,6 @@ struct xgi_state_info { U32 _toState; }; -struct cpu_info { - U32 _eax; - U32 _ebx; - U32 _ecx; - U32 _edx; -}; - struct xgi_mem_pid { struct list_head list; enum xgi_mem_location location; @@ -275,8 +268,7 @@ struct xgi_mem_pid { #define XGI_ESC_STATE_CHANGE (XGI_IOCTL_BASE + 17) #define XGI_ESC_MMIO_INFO (XGI_IOCTL_BASE + 18) #define XGI_ESC_PCIE_CHECK (XGI_IOCTL_BASE + 19) -#define XGI_ESC_CPUID (XGI_IOCTL_BASE + 20) -#define XGI_ESC_MEM_COLLECT (XGI_IOCTL_BASE + 21) +#define XGI_ESC_MEM_COLLECT (XGI_IOCTL_BASE + 20) #define XGI_IOCTL_DEVICE_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_DEVICE_INFO, struct xgi_chip_info) #define XGI_IOCTL_POST_VBIOS _IO(XGI_IOCTL_MAGIC, XGI_ESC_POST_VBIOS) @@ -298,12 +290,11 @@ struct xgi_mem_pid { #define XGI_IOCTL_DEBUG_INFO _IO(XGI_IOCTL_MAGIC, XGI_ESC_DEBUG_INFO) #define XGI_IOCTL_MMIO_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_MMIO_INFO, struct xgi_mmio_info) -#define XGI_IOCTL_SUBMIT_CMDLIST _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_SUBMIT_CMDLIST, struct xgi_cmd_info) -#define XGI_IOCTL_TEST_RWINKERNEL _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_TEST_RWINKERNEL, unsigned long) +#define XGI_IOCTL_SUBMIT_CMDLIST _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_SUBMIT_CMDLIST, 
struct xgi_cmd_info) +#define XGI_IOCTL_TEST_RWINKERNEL _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_TEST_RWINKERNEL, unsigned long) #define XGI_IOCTL_STATE_CHANGE _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_STATE_CHANGE, struct xgi_state_info) #define XGI_IOCTL_PCIE_CHECK _IO(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_CHECK) -#define XGI_IOCTL_CPUID _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_CPUID, struct cpu_info) #define XGI_IOCTL_MAXNR 30 /* diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index 6cc0f107..9712241f 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -521,18 +521,6 @@ void xgi_waitfor_pci_idle(struct xgi_info * info) } } -int xgi_get_cpu_id(struct cpu_info *arg) -{ - int op = arg->_eax; - __asm__("cpuid":"=a"(arg->_eax), - "=b"(arg->_ebx), - "=c"(arg->_ecx), "=d"(arg->_edx) - : "0"(op)); - - XGI_INFO - ("opCode = 0x%x, eax = 0x%x, ebx = 0x%x, ecx = 0x%x, edx = 0x%x \n", - op, arg->_eax, arg->_ebx, arg->_ecx, arg->_edx); -} /*memory collect function*/ extern struct list_head xgi_mempid_list; diff --git a/linux-core/xgi_misc.h b/linux-core/xgi_misc.h index 4b944c4c..85cfbf2b 100644 --- a/linux-core/xgi_misc.h +++ b/linux-core/xgi_misc.h @@ -36,7 +36,6 @@ extern void xgi_get_screen_info(struct xgi_info * info, struct xgi_screen_info * extern void xgi_put_screen_info(struct xgi_info * info, struct xgi_screen_info * req); extern void xgi_ge_reset(struct xgi_info * info); extern void xgi_sarea_info(struct xgi_info * info, struct xgi_sarea_info * req); -extern int xgi_get_cpu_id(struct cpu_info *arg); extern void xgi_restore_registers(struct xgi_info * info); extern bool xgi_ge_irq_handler(struct xgi_info * info); From 163f8526123ffa38783fc911b5f7a19debce7f73 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 2 Jul 2007 19:31:18 +1000 Subject: [PATCH 083/437] nouveau: rewrite gpu object code Allows multiple references to a single object, needed to support PCI(E)GART scatter-gather DMA objects which would quickly fill PRAMIN if each channel had its own. Handle per-channel private instmem areas. This is needed to support NV50, but might be something we want to do on earlier chipsets at some point? Everything that touches PRAMIN is a GPU object. 
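
Rough usage sketch of the new reference API (assembled from the calls introduced below and mirroring what nouveau_fifo_cmdbuf_alloc now does for the push buffer ctxdma; the helper name and the offset/size/channel arguments are placeholders, not code from this patch):

static int example_ctxdma_sketch(drm_device_t *dev, int channel,
				 uint64_t offset, uint64_t size)
{
	nouveau_gpuobj_t *obj = NULL;
	nouveau_gpuobj_ref_t *ref = NULL;
	int ret;

	/* build the ctxdma; instance memory comes from the channel's
	 * private PRAMIN heap if it has one, else the global heap */
	ret = nouveau_gpuobj_dma_new(dev, channel, NV_CLASS_DMA_IN_MEMORY,
				     offset, size, NV_DMA_ACCESS_RO,
				     NV_DMA_TARGET_VIDMEM, &obj);
	if (ret)
		return ret;

	/* take a reference; obj->refcount is bumped, and passing a handle
	 * instead of &ref would also insert the object into the channel's
	 * RAMHT */
	ret = nouveau_gpuobj_ref_add(dev, channel, 0, obj, &ref);
	if (ret) {
		nouveau_gpuobj_del(dev, &obj);
		return ret;
	}

	/* ... dropping the last reference frees the underlying object */
	return nouveau_gpuobj_ref_del(dev, &ref);
}
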
--- shared-core/nouveau_drv.h | 120 +++-- shared-core/nouveau_fifo.c | 66 +-- shared-core/nouveau_mem.c | 9 +- shared-core/nouveau_notifier.c | 24 +- shared-core/nouveau_object.c | 847 ++++++++++++++++++++++----------- shared-core/nouveau_state.c | 20 +- shared-core/nv04_fifo.c | 35 +- shared-core/nv10_fifo.c | 33 +- shared-core/nv20_graph.c | 51 +- shared-core/nv30_graph.c | 51 +- shared-core/nv40_fifo.c | 36 +- shared-core/nv40_graph.c | 58 ++- 12 files changed, 853 insertions(+), 497 deletions(-) diff --git a/shared-core/nouveau_drv.h b/shared-core/nouveau_drv.h index 3cca07fc..73793b34 100644 --- a/shared-core/nouveau_drv.h +++ b/shared-core/nouveau_drv.h @@ -57,18 +57,38 @@ enum nouveau_flags { NV_NFORCE2 =0x20000000 }; -struct nouveau_object -{ - struct nouveau_object *next; - struct nouveau_object *prev; +#define NVOBJ_ENGINE_SW 0 +#define NVOBJ_ENGINE_GR 1 +#define NVOBJ_ENGINE_INT 0xdeadbeef + +#define NVOBJ_FLAG_ALLOW_NO_REFS (1 << 0) +#define NVOBJ_FLAG_ZERO_ALLOC (1 << 1) +#define NVOBJ_FLAG_ZERO_FREE (1 << 2) +#define NVOBJ_FLAG_FAKE (1 << 3) +typedef struct nouveau_gpuobj { + struct nouveau_gpuobj *next; + struct nouveau_gpuobj *prev; + + int im_channel; + struct mem_block *im_pramin; + struct mem_block *im_backing; + + uint32_t flags; + int refcount; + + uint32_t engine; + uint32_t class; +} nouveau_gpuobj_t; + +typedef struct nouveau_gpuobj_ref { + struct nouveau_gpuobj_ref *next; + + nouveau_gpuobj_t *gpuobj; + uint32_t instance; + int channel; - - struct mem_block *instance; - - uint32_t handle; - int class; - int engine; -}; + int handle; +} nouveau_gpuobj_ref_t; struct nouveau_fifo { @@ -79,21 +99,29 @@ struct nouveau_fifo drm_local_map_t *map; /* mapping of the regs controling the fifo */ drm_local_map_t *regs; - /* dma object for the command buffer itself */ - struct mem_block *cmdbuf_mem; - struct nouveau_object *cmdbuf_obj; - uint32_t pushbuf_base; - /* notifier memory */ + + /* DMA push buffer */ + struct mem_block *cmdbuf_mem; + nouveau_gpuobj_ref_t *pushbuf; + uint32_t pushbuf_base; + + /* Notifier memory */ struct mem_block *notifier_block; struct mem_block *notifier_heap; drm_local_map_t *notifier_map; - /* PGRAPH context, for cards that keep it in RAMIN */ - struct mem_block *ramin_grctx; - /* objects belonging to this fifo */ - struct nouveau_object *objs; - /* XXX dynamic alloc ? */ - uint32_t pgraph_ctx [340]; + /* PFIFO context */ + nouveau_gpuobj_ref_t *ramfc; + + /* PGRAPH context */ + nouveau_gpuobj_ref_t *ramin_grctx; + uint32_t pgraph_ctx [340]; /* XXX dynamic alloc ? 
*/ + + /* Objects */ + nouveau_gpuobj_ref_t *ramin; /* Private instmem */ + struct mem_block *ramin_heap; /* Private PRAMIN heap */ + nouveau_gpuobj_ref_t *ramht; /* Hash table */ + nouveau_gpuobj_ref_t *ramht_refs; /* Objects referenced by RAMHT */ }; struct nouveau_config { @@ -157,6 +185,7 @@ typedef struct drm_nouveau_private { struct nouveau_engine_func Engine; /* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */ + nouveau_gpuobj_t *ramht; uint32_t ramin_size; uint32_t ramht_offset; uint32_t ramht_size; @@ -182,9 +211,11 @@ typedef struct drm_nouveau_private { /* context table pointed to be NV_PGRAPH_CHANNEL_CTX_TABLE (0x400780) */ uint32_t ctx_table_size; - struct mem_block *ctx_table; + nouveau_gpuobj_ref_t *ctx_table; struct nouveau_config config; + + nouveau_gpuobj_t *gpuobj_all; } drm_nouveau_private_t; @@ -205,6 +236,7 @@ extern int nouveau_mem_init_heap(struct mem_block **, extern struct mem_block *nouveau_mem_alloc_block(struct mem_block *, uint64_t size, int align2, DRMFILE); +extern void nouveau_mem_takedown(struct mem_block **heap); extern void nouveau_mem_free_block(struct mem_block *); extern uint64_t nouveau_mem_fb_amount(struct drm_device *dev); extern void nouveau_mem_release(DRMFILE filp, struct mem_block *heap); @@ -236,22 +268,28 @@ extern int nouveau_fifo_owner(drm_device_t *dev, DRMFILE filp, int channel); extern void nouveau_fifo_free(drm_device_t *dev, int channel); /* nouveau_object.c */ -extern int nouveau_object_init_channel(drm_device_t *, int channel, - uint32_t vram_handle, - uint32_t tt_handle); -extern void nouveau_object_takedown_channel(drm_device_t *dev, int channel); -extern void nouveau_object_cleanup(drm_device_t *dev, int channel); -extern int nouveau_ramht_insert(drm_device_t *, int channel, - uint32_t handle, struct nouveau_object *); -extern struct nouveau_object * -nouveau_object_gr_create(drm_device_t *dev, int channel, int class); -extern struct nouveau_object * -nouveau_object_dma_create(drm_device_t *dev, int channel, int class, - uint32_t offset, uint32_t size, - int access, int target); -extern void nouveau_object_free(drm_device_t *dev, struct nouveau_object *obj); -extern int nouveau_ioctl_grobj_alloc(DRM_IOCTL_ARGS); -extern uint32_t nouveau_chip_instance_get(drm_device_t *dev, struct mem_block *mem); +extern void nouveau_gpuobj_takedown(drm_device_t *dev); +extern int nouveau_gpuobj_channel_init(drm_device_t *, int channel, + uint32_t vram_h, uint32_t tt_h); +extern void nouveau_gpuobj_channel_takedown(drm_device_t *, int channel); +extern int nouveau_gpuobj_new(drm_device_t *, int channel, int size, int align, + uint32_t flags, nouveau_gpuobj_t **); +extern int nouveau_gpuobj_del(drm_device_t *, nouveau_gpuobj_t **); +extern int nouveau_gpuobj_ref_add(drm_device_t *, int channel, uint32_t handle, + nouveau_gpuobj_t *, nouveau_gpuobj_ref_t **); +extern int nouveau_gpuobj_ref_del(drm_device_t *, nouveau_gpuobj_ref_t **); +extern int nouveau_gpuobj_new_ref(drm_device_t *, int chan_obj, int chan_ref, + uint32_t handle, int size, int align, + uint32_t flags, nouveau_gpuobj_ref_t **); +extern int nouveau_gpuobj_new_fake(drm_device_t *, uint32_t offset, + uint32_t size, uint32_t flags, + nouveau_gpuobj_t**, nouveau_gpuobj_ref_t**); +extern int nouveau_gpuobj_dma_new(drm_device_t *, int channel, int class, + uint64_t offset, uint64_t size, + int access, int target, nouveau_gpuobj_t **); +extern int nouveau_gpuobj_gr_new(drm_device_t *, int channel, int class, + nouveau_gpuobj_t **); +extern int 
nouveau_ioctl_grobj_alloc(DRM_IOCTL_ARGS); /* nouveau_irq.c */ extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS); @@ -384,8 +422,8 @@ extern long nouveau_compat_ioctl(struct file *filp, unsigned int cmd, #define NV_WI32(o,v) DRM_WRITE32(dev_priv->ramin, (o), (v)) #endif -#define INSTANCE_RD(o,i) NV_RI32((o)->start + ((i)<<2)) -#define INSTANCE_WR(o,i,v) NV_WI32((o)->start + ((i)<<2), (v)) +#define INSTANCE_RD(o,i) NV_RI32((o)->im_pramin->start + ((i)<<2)) +#define INSTANCE_WR(o,i,v) NV_WI32((o)->im_pramin->start + ((i)<<2), (v)) #endif /* __NOUVEAU_DRV_H__ */ diff --git a/shared-core/nouveau_fifo.c b/shared-core/nouveau_fifo.c index 81dbfcda..9f916307 100644 --- a/shared-core/nouveau_fifo.c +++ b/shared-core/nouveau_fifo.c @@ -186,10 +186,12 @@ static int nouveau_fifo_cmdbuf_alloc(struct drm_device *dev, int channel) { drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_fifo *chan = &dev_priv->fifos[channel]; struct nouveau_config *config = &dev_priv->config; struct mem_block *cb; - struct nouveau_object *cb_dma = NULL; int cb_min_size = max(NV03_FIFO_SIZE,PAGE_SIZE); + nouveau_gpuobj_t *pushbuf = NULL; + int ret; /* Defaults for unconfigured values */ if (!config->cmdbuf.location) @@ -206,37 +208,42 @@ nouveau_fifo_cmdbuf_alloc(struct drm_device *dev, int channel) } if (cb->flags & NOUVEAU_MEM_AGP) { - cb_dma = nouveau_object_dma_create(dev, channel, - NV_CLASS_DMA_IN_MEMORY, - cb->start - dev_priv->agp_phys, - cb->size, - NV_DMA_ACCESS_RO, NV_DMA_TARGET_AGP); + ret = nouveau_gpuobj_dma_new + (dev, channel, NV_CLASS_DMA_IN_MEMORY, + cb->start - dev_priv->agp_phys, + cb->size, NV_DMA_ACCESS_RO, NV_DMA_TARGET_AGP, + &pushbuf); } else if (dev_priv->card_type != NV_04) { - cb_dma = nouveau_object_dma_create(dev, channel, - NV_CLASS_DMA_IN_MEMORY, - cb->start - drm_get_resource_start(dev, 1), - cb->size, - NV_DMA_ACCESS_RO, NV_DMA_TARGET_VIDMEM); + ret = nouveau_gpuobj_dma_new + (dev, channel, NV_CLASS_DMA_IN_MEMORY, + cb->start - drm_get_resource_start(dev, 1), + cb->size, NV_DMA_ACCESS_RO, NV_DMA_TARGET_VIDMEM, + &pushbuf); } else { /* NV04 cmdbuf hack, from original ddx.. not sure of it's * exact reason for existing :) PCI access to cmdbuf in * VRAM. 
*/ - cb_dma = nouveau_object_dma_create(dev, channel, - NV_CLASS_DMA_IN_MEMORY, - cb->start, cb->size, - NV_DMA_ACCESS_RO, NV_DMA_TARGET_PCI); + ret = nouveau_gpuobj_dma_new + (dev, channel, NV_CLASS_DMA_IN_MEMORY, + cb->start, cb->size, NV_DMA_ACCESS_RO, + NV_DMA_TARGET_PCI, &pushbuf); } - if (!cb_dma) { + if (ret) { nouveau_mem_free(dev, cb); - DRM_ERROR("Failed to alloc DMA object for command buffer\n"); - return DRM_ERR(ENOMEM); + DRM_ERROR("Error creating push buffer ctxdma: %d\n", ret); + return ret; + } + + if ((ret = nouveau_gpuobj_ref_add(dev, channel, 0, pushbuf, + &chan->pushbuf))) { + DRM_ERROR("Error referencing push buffer ctxdma: %d\n", ret); + return ret; } dev_priv->fifos[channel].pushbuf_base = 0; dev_priv->fifos[channel].cmdbuf_mem = cb; - dev_priv->fifos[channel].cmdbuf_obj = cb_dma; return 0; } @@ -266,6 +273,7 @@ int nouveau_fifo_alloc(drm_device_t* dev, int *chan_ret, DRMFILE filp, return DRM_ERR(EINVAL); (*chan_ret) = channel; chan = &dev_priv->fifos[channel]; + memset(chan, sizeof(*chan), 0); DRM_INFO("Allocating FIFO number %d\n", channel); @@ -273,18 +281,15 @@ int nouveau_fifo_alloc(drm_device_t* dev, int *chan_ret, DRMFILE filp, chan->used = 1; chan->filp = filp; - /* FIFO has no objects yet */ - chan->objs = NULL; - - /* allocate a command buffer, and create a dma object for the gpu */ - ret = nouveau_fifo_cmdbuf_alloc(dev, channel); + /* Setup channel's default objects */ + ret = nouveau_gpuobj_channel_init(dev, channel, vram_handle, tt_handle); if (ret) { nouveau_fifo_free(dev, channel); return ret; } - /* Setup channel's default objects */ - ret = nouveau_object_init_channel(dev, channel, vram_handle, tt_handle); + /* allocate a command buffer, and create a dma object for the gpu */ + ret = nouveau_fifo_cmdbuf_alloc(dev, channel); if (ret) { nouveau_fifo_free(dev, channel); return ret; @@ -395,13 +400,18 @@ void nouveau_fifo_free(drm_device_t* dev, int channel) NV_WRITE(NV03_PFIFO_CACHES, 0x00000001); /* Deallocate command buffer */ - if (chan->cmdbuf_mem) + if (chan->pushbuf) + nouveau_gpuobj_ref_del(dev, &chan->pushbuf); + + if (chan->cmdbuf_mem) { nouveau_mem_free(dev, chan->cmdbuf_mem); + chan->cmdbuf_mem = NULL; + } nouveau_notifier_takedown_channel(dev, channel); /* Destroy objects belonging to the channel */ - nouveau_object_cleanup(dev, channel); + nouveau_gpuobj_channel_takedown(dev, channel); dev_priv->fifo_alloc_count--; } diff --git a/shared-core/nouveau_mem.c b/shared-core/nouveau_mem.c index d8ae52b7..49041862 100644 --- a/shared-core/nouveau_mem.c +++ b/shared-core/nouveau_mem.c @@ -189,7 +189,7 @@ void nouveau_mem_release(DRMFILE filp, struct mem_block *heap) /* * Cleanup everything */ -static void nouveau_mem_takedown(struct mem_block **heap) +void nouveau_mem_takedown(struct mem_block **heap) { struct mem_block *p; @@ -554,6 +554,13 @@ int nouveau_instmem_init(struct drm_device *dev) nouveau_instmem_determine_amount(dev); nouveau_instmem_configure_fixed_tables(dev); + if ((ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramht_offset, + dev_priv->ramht_size, + NVOBJ_FLAG_ZERO_ALLOC | + NVOBJ_FLAG_ALLOW_NO_REFS, + &dev_priv->ramht, NULL))) + return ret; + /* Create a heap to manage RAMIN allocations, we don't allocate * the space that was reserved for RAMHT/FC/RO. 
*/ diff --git a/shared-core/nouveau_notifier.c b/shared-core/nouveau_notifier.c index 0cfe733e..4d5e26ab 100644 --- a/shared-core/nouveau_notifier.c +++ b/shared-core/nouveau_notifier.c @@ -74,10 +74,10 @@ nouveau_notifier_alloc(drm_device_t *dev, int channel, uint32_t handle, { drm_nouveau_private_t *dev_priv = dev->dev_private; struct nouveau_fifo *chan = &dev_priv->fifos[channel]; - struct nouveau_object *obj; + nouveau_gpuobj_t *nobj = NULL; struct mem_block *mem; uint32_t offset; - int target; + int target, ret; if (!chan->notifier_heap) { DRM_ERROR("Channel %d doesn't have a notifier heap!\n", @@ -105,21 +105,19 @@ nouveau_notifier_alloc(drm_device_t *dev, int channel, uint32_t handle, return DRM_ERR(EINVAL); } - obj = nouveau_object_dma_create(dev, channel, NV_CLASS_DMA_IN_MEMORY, - offset, mem->size, NV_DMA_ACCESS_RW, - target); - if (!obj) { + if ((ret = nouveau_gpuobj_dma_new(dev, channel, NV_CLASS_DMA_IN_MEMORY, + offset, mem->size, + NV_DMA_ACCESS_RW, target, &nobj))) { nouveau_mem_free_block(mem); - DRM_ERROR("Error creating notifier ctxdma\n"); - return DRM_ERR(ENOMEM); + DRM_ERROR("Error creating notifier ctxdma: %d\n", ret); + return ret; } - obj->handle = handle; - if (nouveau_ramht_insert(dev, channel, handle, obj)) { - nouveau_object_free(dev, obj); + if ((ret = nouveau_gpuobj_ref_add(dev, channel, handle, nobj, NULL))) { + nouveau_gpuobj_del(dev, &nobj); nouveau_mem_free_block(mem); - DRM_ERROR("Error inserting notifier ctxdma into RAMHT\n"); - return DRM_ERR(ENOMEM); + DRM_ERROR("Error referencing notifier ctxdma: %d\n", ret); + return ret; } *b_offset = mem->start; diff --git a/shared-core/nouveau_object.c b/shared-core/nouveau_object.c index dac08df4..79875ca1 100644 --- a/shared-core/nouveau_object.c +++ b/shared-core/nouveau_object.c @@ -35,79 +35,6 @@ #include "nouveau_drv.h" #include "nouveau_drm.h" -/* TODO - * - Check object class, deny unsafe objects (add card-specific versioning?) - * - Get rid of DMA object creation, this should be wrapped by MM routines. 
- */ - -/* Translate a RAMIN offset into a value the card understands, will be useful - * in the future when we can access more instance ram which isn't mapped into - * the PRAMIN aperture - */ -uint32_t -nouveau_chip_instance_get(drm_device_t *dev, struct mem_block *mem) -{ - uint32_t inst = (uint32_t)mem->start >> 4; - DRM_DEBUG("****** on-chip instance for 0x%016llx = 0x%08x\n", - mem->start, inst); - return inst; -} - -static void -nouveau_object_link(drm_device_t *dev, struct nouveau_object *obj) -{ - drm_nouveau_private_t *dev_priv=dev->dev_private; - struct nouveau_fifo *chan = &dev_priv->fifos[obj->channel]; - - if (!chan->objs) { - chan->objs = obj; - return; - } - - obj->prev = NULL; - obj->next = chan->objs; - - chan->objs->prev = obj; - chan->objs = obj; -} - -static void -nouveau_object_unlink(drm_device_t *dev, struct nouveau_object *obj) -{ - drm_nouveau_private_t *dev_priv=dev->dev_private; - struct nouveau_fifo *chan = &dev_priv->fifos[obj->channel]; - - if (obj->prev == NULL) { - if (obj->next) - obj->next->prev = NULL; - chan->objs = obj->next; - } else if (obj->next == NULL) { - if (obj->prev) - obj->prev->next = NULL; - } else { - obj->prev->next = obj->next; - obj->next->prev = obj->prev; - } -} - -static struct nouveau_object * -nouveau_object_handle_find(drm_device_t *dev, int channel, uint32_t handle) -{ - drm_nouveau_private_t *dev_priv=dev->dev_private; - struct nouveau_fifo *chan = &dev_priv->fifos[channel]; - struct nouveau_object *obj = chan->objs; - - DRM_DEBUG("Looking for handle 0x%08x\n", handle); - while (obj) { - if (obj->handle == handle) - return obj; - obj = obj->next; - } - - DRM_DEBUG("...couldn't find handle\n"); - return NULL; -} - /* NVidia uses context objects to drive drawing operations. Context objects can be selected into 8 subchannels in the FIFO, @@ -150,146 +77,439 @@ nouveau_ramht_hash_handle(drm_device_t *dev, int channel, uint32_t handle) handle >>= dev_priv->ramht_bits; } hash ^= channel << (dev_priv->ramht_bits - 4); - return hash << 3; + hash <<= 3; + + DRM_DEBUG("ch%d handle=0x%08x hash=0x%08x\n", channel, handle, hash); + return hash; } static int -nouveau_ramht_entry_valid(drm_device_t *dev, uint32_t ramht, uint32_t offset) +nouveau_ramht_entry_valid(drm_device_t *dev, nouveau_gpuobj_t *ramht, + uint32_t offset) { drm_nouveau_private_t *dev_priv=dev->dev_private; - uint32_t ctx = NV_RI32(ramht + offset + 4); + uint32_t ctx = INSTANCE_RD(ramht, (offset + 4)/4); if (dev_priv->card_type < NV_40) return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0); return (ctx != 0); } -int -nouveau_ramht_insert(drm_device_t* dev, int channel, uint32_t handle, - struct nouveau_object *obj) +static int +nouveau_ramht_insert(drm_device_t* dev, nouveau_gpuobj_ref_t *ref) { drm_nouveau_private_t *dev_priv=dev->dev_private; - uint32_t ramht = dev_priv->ramht_offset; + struct nouveau_fifo *chan = &dev_priv->fifos[ref->channel]; + nouveau_gpuobj_t *ramht = chan->ramht ? 
chan->ramht->gpuobj : NULL; + nouveau_gpuobj_t *gpuobj = ref->gpuobj; uint32_t ctx, co, ho; - uint32_t inst; - inst = nouveau_chip_instance_get(dev, obj->instance); - if (dev_priv->card_type < NV_40) { - ctx = NV_RAMHT_CONTEXT_VALID | inst | - (channel << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) | - (obj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT); - } else - if (dev_priv->card_type < NV_50) { - ctx = inst | - (channel << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) | - (obj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT); - } else { - ctx = inst | - (obj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT); + if (!ramht) { + DRM_ERROR("No hash table!\n"); + return DRM_ERR(EINVAL); } - co = ho = nouveau_ramht_hash_handle(dev, channel, handle); + if (dev_priv->card_type < NV_40) { + ctx = NV_RAMHT_CONTEXT_VALID | (ref->instance >> 4) | + (ref->channel << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) | + (gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT); + } else + if (dev_priv->card_type < NV_50) { + ctx = (ref->instance >> 4) | + (ref->channel << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) | + (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT); + } else { + ctx = (ref->instance >> 4) | + (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT); + } + + co = ho = nouveau_ramht_hash_handle(dev, ref->channel, ref->handle); do { if (!nouveau_ramht_entry_valid(dev, ramht, co)) { DRM_DEBUG("insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n", - channel, co, handle, ctx); - NV_WI32(ramht + co + 0, handle); - NV_WI32(ramht + co + 4, ctx); - obj->handle = handle; + ref->channel, co, ref->handle, ctx); + INSTANCE_WR(ramht, (co + 0)/4, ref->handle); + INSTANCE_WR(ramht, (co + 4)/4, ctx); return 0; } DRM_DEBUG("collision ch%d 0x%08x: h=0x%08x\n", - channel, co, NV_RI32(ramht + co)); + ref->channel, co, INSTANCE_RD(ramht, co/4)); co += 8; - if (co == dev_priv->ramht_size) + if (co >= dev_priv->ramht_size) co = 0; } while (co != ho); - DRM_ERROR("RAMHT space exhausted. ch=%d\n", channel); + DRM_ERROR("RAMHT space exhausted. ch=%d\n", ref->channel); return DRM_ERR(ENOMEM); } static void -nouveau_ramht_remove(drm_device_t* dev, struct nouveau_object *obj) +nouveau_ramht_remove(drm_device_t* dev, nouveau_gpuobj_ref_t *ref) { drm_nouveau_private_t *dev_priv = dev->dev_private; - uint32_t ramht = dev_priv->ramht_offset; + struct nouveau_fifo *chan = &dev_priv->fifos[ref->channel]; + nouveau_gpuobj_t *ramht = chan->ramht ? chan->ramht->gpuobj : NULL; uint32_t co, ho; - co = ho = nouveau_ramht_hash_handle(dev, obj->channel, obj->handle); + if (!ramht) { + DRM_ERROR("No hash table!\n"); + return; + } + + co = ho = nouveau_ramht_hash_handle(dev, ref->channel, ref->handle); do { if (nouveau_ramht_entry_valid(dev, ramht, co) && - (obj->handle == NV_RI32(ramht + co))) { + (ref->handle == INSTANCE_RD(ramht, (co/4)))) { DRM_DEBUG("remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n", - obj->channel, co, obj->handle, - NV_RI32(ramht + co + 4)); - NV_WI32(ramht + co + 0, 0x00000000); - NV_WI32(ramht + co + 4, 0x00000000); - obj->handle = ~0; + ref->channel, co, ref->handle, + INSTANCE_RD(ramht, (co + 4))); + INSTANCE_WR(ramht, (co + 0)/4, 0x00000000); + INSTANCE_WR(ramht, (co + 4)/4, 0x00000000); return; } co += 8; - if (co == dev_priv->ramht_size) + if (co >= dev_priv->ramht_size) co = 0; } while (co != ho); DRM_ERROR("RAMHT entry not found. 
ch=%d, handle=0x%08x\n", - obj->channel, obj->handle); + ref->channel, ref->handle); } -static struct nouveau_object * -nouveau_object_instance_alloc(drm_device_t* dev, int channel) +int +nouveau_gpuobj_new(drm_device_t *dev, int channel, int size, int align, + uint32_t flags, nouveau_gpuobj_t **gpuobj_ret) { - drm_nouveau_private_t *dev_priv=dev->dev_private; - struct nouveau_object *obj; + drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_fifo *chan = NULL; + nouveau_gpuobj_t *gpuobj; + struct mem_block *pramin = NULL; - /* Create object struct */ - obj = drm_calloc(1, sizeof(struct nouveau_object), DRM_MEM_DRIVER); - if (!obj) { - DRM_ERROR("couldn't alloc memory for object\n"); - return NULL; + DRM_DEBUG("ch%d size=%d align=%d flags=0x%08x\n", + channel, size, align, flags); + + if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL) + return DRM_ERR(EINVAL); + + if (channel >= 0) { + if (channel > nouveau_fifo_number(dev)) + return DRM_ERR(EINVAL); + chan = &dev_priv->fifos[channel]; } - /* Allocate instance memory */ - obj->instance = nouveau_instmem_alloc(dev, - (dev_priv->card_type >= NV_40 ? 32 : 16), 4); - if (!obj->instance) { - DRM_ERROR("couldn't alloc RAMIN for object\n"); - drm_free(obj, sizeof(struct nouveau_object), DRM_MEM_DRIVER); - return NULL; + gpuobj = drm_calloc(1, sizeof(*gpuobj), DRM_MEM_DRIVER); + if (!gpuobj) + return DRM_ERR(ENOMEM); + DRM_DEBUG("gpuobj %p\n", gpuobj); + gpuobj->flags = flags; + gpuobj->im_channel = channel; + + /* Choose between global instmem heap, and per-channel private + * instmem heap. On ramin_heap) { + DRM_DEBUG("private heap\n"); + pramin = chan->ramin_heap; + } else + if (dev_priv->card_type < NV_50) { + DRM_DEBUG("global heap fallback\n"); + pramin = dev_priv->ramin_heap; + } + } else { + DRM_DEBUG("global heap\n"); + pramin = dev_priv->ramin_heap; } - /* Bind object to channel */ - obj->channel = channel; - obj->handle = ~0; - nouveau_object_link(dev, obj); + if (!pramin) { + DRM_ERROR("No PRAMIN heap!\n"); + return DRM_ERR(EINVAL); + } - return obj; + /* Allocate a chunk of the PRAMIN aperture */ + gpuobj->im_pramin = nouveau_mem_alloc_block(pramin, size, + drm_order(align), + (DRMFILE)-2); + if (!gpuobj->im_pramin) { + nouveau_gpuobj_del(dev, &gpuobj); + return DRM_ERR(ENOMEM); + } + gpuobj->im_pramin->flags = NOUVEAU_MEM_INSTANCE; + + /* On NV50 the PRAMIN aperture is paged. When allocating from the + * global instmem heap, alloc and bind VRAM pages into the PRAMIN + * aperture. 
+ */ + if (!chan && dev_priv->card_type >= NV_50) { + DRM_ERROR("back aperture with vram pages\n"); + nouveau_gpuobj_del(dev, &gpuobj); + return DRM_ERR(EINVAL); + } + + if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { + int i; + + for (i = 0; i < gpuobj->im_pramin->size; i += 4) + INSTANCE_WR(gpuobj, i/4, 0); + } + + if (dev_priv->gpuobj_all) { + gpuobj->next = dev_priv->gpuobj_all; + gpuobj->next->prev = gpuobj; + } + dev_priv->gpuobj_all = gpuobj; + + *gpuobj_ret = gpuobj; + return 0; } -static void -nouveau_object_instance_free(drm_device_t *dev, struct nouveau_object *obj) +void nouveau_gpuobj_takedown(drm_device_t *dev) { - drm_nouveau_private_t *dev_priv=dev->dev_private; + drm_nouveau_private_t *dev_priv = dev->dev_private; + nouveau_gpuobj_t *gpuobj = NULL; + + DRM_DEBUG("\n"); + + while ((gpuobj = dev_priv->gpuobj_all)) { + DRM_ERROR("gpuobj %p still exists at takedown, refs=%d\n", + gpuobj, gpuobj->refcount); + gpuobj->refcount = 0; + nouveau_gpuobj_del(dev, &gpuobj); + } +} + +int nouveau_gpuobj_del(drm_device_t *dev, nouveau_gpuobj_t **pgpuobj) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + nouveau_gpuobj_t *gpuobj; + + DRM_DEBUG("gpuobj %p\n", pgpuobj ? *pgpuobj : NULL); + + if (!dev_priv || !pgpuobj || !(*pgpuobj)) + return DRM_ERR(EINVAL); + gpuobj = *pgpuobj; + + if (gpuobj->refcount != 0) { + DRM_ERROR("gpuobj refcount is %d\n", gpuobj->refcount); + return DRM_ERR(EINVAL); + } + + if (gpuobj->im_pramin) { + if (gpuobj->flags & NVOBJ_FLAG_FAKE) + drm_free(gpuobj->im_pramin, sizeof(*gpuobj->im_pramin), + DRM_MEM_DRIVER); + else + nouveau_mem_free_block(gpuobj->im_pramin); + } + + if (gpuobj->im_backing) + nouveau_mem_free(dev, gpuobj->im_backing); + + if (gpuobj->next) + gpuobj->next->prev = gpuobj->prev; + if (gpuobj->prev) + gpuobj->prev->next = gpuobj->next; + else + dev_priv->gpuobj_all = gpuobj->next; + + *pgpuobj = NULL; + drm_free(gpuobj, sizeof(*gpuobj), DRM_MEM_DRIVER); + return 0; +} + +static int +nouveau_gpuobj_instance_get(drm_device_t *dev, int channel, + nouveau_gpuobj_t *gpuobj, uint32_t *inst) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + nouveau_gpuobj_t *cpramin; + + if ((channel > 0) && gpuobj->im_channel != channel) { + DRM_ERROR("Channel mismatch: obj %d, ref %d\n", + gpuobj->im_channel, channel); + return DRM_ERR(EINVAL); + } + + /* card_type < NV_50) { + *inst = gpuobj->im_pramin->start; + return 0; + } + + /* NV50 channel-local instance */ + if (channel > 0) { + cpramin = dev_priv->fifos[channel].ramin->gpuobj; + *inst = gpuobj->im_pramin->start - cpramin->im_pramin->start; + return 0; + } + + /* NV50 global (VRAM) instance */ + if (gpuobj->im_channel < 0) { + /* ...from global heap */ + if (!gpuobj->im_backing) { + DRM_ERROR("AII, no VRAM backing gpuobj\n"); + return DRM_ERR(EINVAL); + } + *inst = gpuobj->im_backing->start - dev_priv->fb_phys; + return 0; + } else { + /* ...from local heap */ + cpramin = dev_priv->fifos[gpuobj->im_channel].ramin->gpuobj; + *inst = (cpramin->im_backing->start - dev_priv->fb_phys) + + (gpuobj->im_pramin->start - cpramin->im_pramin->start); + return 0; + } + + return DRM_ERR(EINVAL); +} + +int +nouveau_gpuobj_ref_add(drm_device_t *dev, int channel, uint32_t handle, + nouveau_gpuobj_t *gpuobj, nouveau_gpuobj_ref_t **ref_ret) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_fifo *chan = NULL; + nouveau_gpuobj_ref_t *ref; + uint32_t instance; + int ret; + + DRM_DEBUG("ch%d h=0x%08x gpuobj=%p\n", channel, handle, gpuobj); + + if (!dev_priv || !gpuobj || (ref_ret && *ref_ret != 
NULL)) + return DRM_ERR(EINVAL); + + if (channel >= 0) { + if (channel > nouveau_fifo_number(dev)) + return DRM_ERR(EINVAL); + chan = &dev_priv->fifos[channel]; + } else + if (!ref_ret) + return DRM_ERR(EINVAL); + + ret = nouveau_gpuobj_instance_get(dev, channel, gpuobj, &instance); + if (ret) + return ret; + + ref = drm_calloc(1, sizeof(*ref), DRM_MEM_DRIVER); + if (!ref) + return DRM_ERR(ENOMEM); + ref->gpuobj = gpuobj; + ref->channel = channel; + ref->instance = instance; + + if (!ref_ret) { + ref->handle = handle; + + ret = nouveau_ramht_insert(dev, ref); + if (ret) { + drm_free(ref, sizeof(*ref), DRM_MEM_DRIVER); + return ret; + } + + ref->next = chan->ramht_refs; + chan->ramht_refs = ref; + } else { + ref->handle = ~0; + *ref_ret = ref; + } + + ref->gpuobj->refcount++; + return 0; +} + +int nouveau_gpuobj_ref_del(drm_device_t *dev, nouveau_gpuobj_ref_t **pref) +{ + nouveau_gpuobj_ref_t *ref; + + DRM_DEBUG("ref %p\n", pref ? *pref : NULL); + + if (!dev || !pref || *pref == NULL) + return DRM_ERR(EINVAL); + ref = *pref; + + if (ref->handle != ~0) + nouveau_ramht_remove(dev, ref); + + if (ref->gpuobj) { + ref->gpuobj->refcount--; + + if (ref->gpuobj->refcount == 0) { + if (!(ref->gpuobj->flags & NVOBJ_FLAG_ALLOW_NO_REFS)) + nouveau_gpuobj_del(dev, &ref->gpuobj); + } + } + + *pref = NULL; + drm_free(ref, sizeof(ref), DRM_MEM_DRIVER); + return 0; +} + +int +nouveau_gpuobj_new_ref(drm_device_t *dev, int oc, int rc, uint32_t handle, + int size, int align, uint32_t flags, + nouveau_gpuobj_ref_t **ref) +{ + nouveau_gpuobj_t *gpuobj = NULL; + int ret; + + if ((ret = nouveau_gpuobj_new(dev, oc, size, align, flags, &gpuobj))) + return ret; + + if ((ret = nouveau_gpuobj_ref_add(dev, rc, handle, gpuobj, ref))) { + nouveau_gpuobj_del(dev, &gpuobj); + return ret; + } + + return 0; +} + +int +nouveau_gpuobj_new_fake(drm_device_t *dev, uint32_t offset, uint32_t size, + uint32_t flags, nouveau_gpuobj_t **pgpuobj, + nouveau_gpuobj_ref_t **pref) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + nouveau_gpuobj_t *gpuobj = NULL; int i; - /* Unbind object from channel */ - nouveau_object_unlink(dev, obj); + DRM_DEBUG("offset=0x%08x size=0x%08x flags=0x%08x\n", + offset, size, flags); - /* Clean RAMIN entry */ - DRM_DEBUG("Instance entry for 0x%08x" - "(engine %d, class 0x%x) before destroy:\n", - obj->handle, obj->engine, obj->class); - for (i=0; i<(obj->instance->size/4); i++) { - DRM_DEBUG(" +0x%02x: 0x%08x\n", (i*4), - INSTANCE_RD(obj->instance, i)); - INSTANCE_WR(obj->instance, i, 0x00000000); + gpuobj = drm_calloc(1, sizeof(*gpuobj), DRM_MEM_DRIVER); + if (!gpuobj) + return DRM_ERR(ENOMEM); + DRM_DEBUG("gpuobj %p\n", gpuobj); + gpuobj->im_channel = -1; + gpuobj->flags = flags | NVOBJ_FLAG_FAKE; + + gpuobj->im_pramin = drm_calloc(1, sizeof(struct mem_block), + DRM_MEM_DRIVER); + if (!gpuobj->im_pramin) { + nouveau_gpuobj_del(dev, &gpuobj); + return DRM_ERR(ENOMEM); + } + gpuobj->im_pramin->start = offset; + gpuobj->im_pramin->size = size; + + if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { + for (i = 0; i < gpuobj->im_pramin->size; i += 4) + INSTANCE_WR(gpuobj, i/4, 0); } - /* Free RAMIN */ - nouveau_instmem_free(dev, obj->instance); + if (pref) { + if ((i = nouveau_gpuobj_ref_add(dev, -1, 0, gpuobj, pref))) { + nouveau_gpuobj_del(dev, &gpuobj); + return i; + } + } + + if (pgpuobj) + *pgpuobj = gpuobj; + return 0; } /* @@ -317,63 +537,69 @@ nouveau_object_instance_free(drm_device_t *dev, struct nouveau_object *obj) to it that can be used to set up context objects. 
*/ -struct nouveau_object * -nouveau_object_dma_create(drm_device_t* dev, int channel, int class, - uint32_t offset, uint32_t size, - int access, int target) +static int +nouveau_gpuobj_class_instmem_size(drm_device_t *dev, int class) { - drm_nouveau_private_t *dev_priv=dev->dev_private; - struct nouveau_object *obj; - uint32_t frame, adjust; - uint32_t pte_flags = 0; + drm_nouveau_private_t *dev_priv = dev->dev_private; - DRM_DEBUG("offset:0x%08x, size:0x%08x, target:%d, access:%d\n", - offset, size, target, access); - - switch (target) { - case NV_DMA_TARGET_AGP: - offset += dev_priv->agp_phys; - break; - default: - break; - } - - switch (access) { - case NV_DMA_ACCESS_RO: - break; - case NV_DMA_ACCESS_WO: - case NV_DMA_ACCESS_RW: - pte_flags |= (1 << 1); - break; - default: - DRM_ERROR("invalid access mode=%d\n", access); - return NULL; - } - - frame = offset & ~0x00000FFF; - adjust = offset & 0x00000FFF; - - obj = nouveau_object_instance_alloc(dev, channel); - if (!obj) { - DRM_ERROR("couldn't allocate DMA object\n"); - return obj; - } - - obj->engine = 0; - obj->class = class; - - INSTANCE_WR(obj->instance, 0, ((1<<12) | (1<<13) | - (adjust << 20) | - (access << 14) | - (target << 16) | - class)); - INSTANCE_WR(obj->instance, 1, size-1); - INSTANCE_WR(obj->instance, 2, frame | pte_flags); - INSTANCE_WR(obj->instance, 3, frame | pte_flags); - - return obj; + /*XXX: dodgy hack for now */ + if (dev_priv->card_type >= NV_50) + return 24; + if (dev_priv->card_type >= NV_40) + return 32; + return 16; } +int +nouveau_gpuobj_dma_new(drm_device_t *dev, int channel, int class, + uint64_t offset, uint64_t size, int access, int target, + nouveau_gpuobj_t **gpuobj) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + int ret; + + DRM_DEBUG("ch%d class=0x%04x offset=0x%llx size=0x%llx\n", + channel, class, offset, size); + DRM_DEBUG("access=%d target=%d\n", access, target); + + ret = nouveau_gpuobj_new(dev, channel, + nouveau_gpuobj_class_instmem_size(dev, class), + 16, + NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, + gpuobj); + if (ret) { + DRM_ERROR("Error creating gpuobj: %d\n", ret); + return ret; + } + + if (dev_priv->card_type < NV_50) { + uint32_t frame, adjust, pte_flags = 0; + + if (target == NV_DMA_TARGET_AGP) + offset += dev_priv->agp_phys; + if (access != NV_DMA_ACCESS_RO) + pte_flags |= (1<<1); + frame = offset & ~0x00000fff; + adjust = offset & 0x00000fff; + + INSTANCE_WR(*gpuobj, 0, ((1<<12) | (1<<13) | + (adjust << 20) | + (access << 14) | + (target << 16) | + class)); + INSTANCE_WR(*gpuobj, 1, size - 1); + INSTANCE_WR(*gpuobj, 2, frame | pte_flags); + INSTANCE_WR(*gpuobj, 3, frame | pte_flags); + } else { + nouveau_gpuobj_del(dev, gpuobj); + DRM_ERROR("stub\n"); + return DRM_ERR(EINVAL); + } + + (*gpuobj)->engine = NVOBJ_ENGINE_SW; + (*gpuobj)->class = class; + return 0; +} /* Context objects in the instance RAM have the following structure. * On NV40 they are 32 byte long, on NV30 and smaller 16 bytes. @@ -426,89 +652,142 @@ nouveau_object_dma_create(drm_device_t* dev, int channel, int class, entry[5]: set to 0? 
*/ -struct nouveau_object * -nouveau_object_gr_create(drm_device_t* dev, int channel, int class) +int +nouveau_gpuobj_gr_new(drm_device_t *dev, int channel, int class, + nouveau_gpuobj_t **gpuobj) { - drm_nouveau_private_t *dev_priv=dev->dev_private; - struct nouveau_object *obj; + drm_nouveau_private_t *dev_priv = dev->dev_private; + int ret; - DRM_DEBUG("class=%x\n", class); + DRM_DEBUG("ch%d class=0x%04x\n", channel, class); - obj = nouveau_object_instance_alloc(dev, channel); - if (!obj) { - DRM_ERROR("couldn't allocate context object\n"); - return obj; + ret = nouveau_gpuobj_new(dev, channel, + nouveau_gpuobj_class_instmem_size(dev, class), + 16, + NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, + gpuobj); + if (ret) { + DRM_ERROR("Error creating gpuobj: %d\n", ret); + return ret; } - obj->engine = 1; - obj->class = class; + if (dev_priv->card_type >= NV_50) { + nouveau_gpuobj_del(dev, gpuobj); + DRM_ERROR("stub!\n"); + return DRM_ERR(EINVAL); + } switch (class) { case NV_CLASS_NULL: - INSTANCE_WR(obj->instance, 0, 0x00001030); - INSTANCE_WR(obj->instance, 1, 0xFFFFFFFF); - INSTANCE_WR(obj->instance, 2, 0x00000000); - INSTANCE_WR(obj->instance, 2, 0x00000000); + INSTANCE_WR(*gpuobj, 0, 0x00001030); + INSTANCE_WR(*gpuobj, 1, 0xFFFFFFFF); break; default: if (dev_priv->card_type >= NV_40) { - INSTANCE_WR(obj->instance, 0, obj->class); - INSTANCE_WR(obj->instance, 1, 0x00000000); + INSTANCE_WR(*gpuobj, 0, class); #ifdef __BIG_ENDIAN - INSTANCE_WR(obj->instance, 2, 0x01000000); -#else - INSTANCE_WR(obj->instance, 2, 0x00000000); + INSTANCE_WR(*gpuobj, 2, 0x01000000); #endif - INSTANCE_WR(obj->instance, 3, 0x00000000); - INSTANCE_WR(obj->instance, 4, 0x00000000); - INSTANCE_WR(obj->instance, 5, 0x00000000); - INSTANCE_WR(obj->instance, 6, 0x00000000); - INSTANCE_WR(obj->instance, 7, 0x00000000); } else { #ifdef __BIG_ENDIAN - INSTANCE_WR(obj->instance, 0, obj->class | 0x00080000); + INSTANCE_WR(*gpuobj, 0, class | 0x00080000); #else - INSTANCE_WR(obj->instance, 0, obj->class); + INSTANCE_WR(*gpuobj, 0, class); #endif - INSTANCE_WR(obj->instance, 1, 0x00000000); - INSTANCE_WR(obj->instance, 2, 0x00000000); - INSTANCE_WR(obj->instance, 3, 0x00000000); } } - return obj; + (*gpuobj)->engine = NVOBJ_ENGINE_GR; + (*gpuobj)->class = class; + return 0; } -void -nouveau_object_free(drm_device_t *dev, struct nouveau_object *obj) +static int +nouveau_gpuobj_channel_init_pramin(drm_device_t *dev, int channel) { - nouveau_object_instance_free(dev, obj); - if (obj->handle != ~0) - nouveau_ramht_remove(dev, obj); - drm_free(obj, sizeof(struct nouveau_object), DRM_MEM_DRIVER); + drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + nouveau_gpuobj_t *pramin = NULL; + int size, base, ret; + + DRM_DEBUG("ch%d\n", channel); + + /* Base amount for object storage (4KiB enough?) 
*/
+	size = 0x1000;
+	base = 0;
+
+	/* PGRAPH context */
+
+	if (dev_priv->card_type == NV_50) {
+		/* RAMHT, RAMFC, PD, funny header thingo */
+	}
+
+	DRM_DEBUG("ch%d PRAMIN size: 0x%08x bytes, base alloc=0x%08x\n",
+		  channel, size, base);
+	ret = nouveau_gpuobj_new_ref(dev, -1, -1, 0, size, 0x1000, 0,
+				     &chan->ramin);
+	if (ret) {
+		DRM_ERROR("Error allocating channel PRAMIN: %d\n", ret);
+		return ret;
+	}
+	pramin = chan->ramin->gpuobj;
+
+	ret = nouveau_mem_init_heap(&chan->ramin_heap,
+				    pramin->im_pramin->start + base, size);
+	if (ret) {
+		DRM_ERROR("Error creating PRAMIN heap: %d\n", ret);
+		nouveau_gpuobj_ref_del(dev, &chan->ramin);
+		return ret;
+	}
+
+	return 0;
 }
 
 int
-nouveau_object_init_channel(drm_device_t *dev, int channel,
-			    uint32_t vram_handle,
-			    uint32_t tt_handle)
+nouveau_gpuobj_channel_init(drm_device_t *dev, int channel,
+			    uint32_t vram_h, uint32_t tt_h)
 {
 	drm_nouveau_private_t *dev_priv = dev->dev_private;
-	struct nouveau_object *gpuobj;
+	struct nouveau_fifo *chan = &dev_priv->fifos[channel];
+	nouveau_gpuobj_t *vram = NULL, *tt = NULL;
 	int ret;
 
-	/* VRAM ctxdma */
-	gpuobj = nouveau_object_dma_create(dev, channel, NV_CLASS_DMA_IN_MEMORY,
-					   0, dev_priv->fb_available_size,
-					   NV_DMA_ACCESS_RW,
-					   NV_DMA_TARGET_VIDMEM);
-	if (!gpuobj) {
-		DRM_ERROR("Error creating VRAM ctxdma: %d\n", DRM_ERR(ENOMEM));
-		return DRM_ERR(ENOMEM);
+	DRM_DEBUG("ch%d vram=0x%08x tt=0x%08x\n", channel, vram_h, tt_h);
+
+	/* Reserve a block of PRAMIN for the channel
+	 *XXX: maybe on <NV50 too at some point
+	 */
+	if (dev_priv->card_type == NV_50) {
+		ret = nouveau_gpuobj_channel_init_pramin(dev, channel);
+		if (ret)
+			return ret;
 	}
 
-	ret = nouveau_ramht_insert(dev, channel, vram_handle, gpuobj);
-	if (ret) {
+	/* RAMHT */
+	if (dev_priv->card_type < NV_50) {
+		ret = nouveau_gpuobj_ref_add(dev, -1, 0, dev_priv->ramht,
+					     &chan->ramht);
+		if (ret)
+			return ret;
+	} else {
+		ret = nouveau_gpuobj_new_ref(dev, channel, channel, 0,
+					     0x8000, 16,
+					     NVOBJ_FLAG_ZERO_ALLOC,
+					     &chan->ramht);
+		if (ret)
+			return ret;
+	}
+
+	/* VRAM ctxdma */
+	if ((ret = nouveau_gpuobj_dma_new(dev, channel, NV_CLASS_DMA_IN_MEMORY,
+					  0, dev_priv->fb_available_size,
+					  NV_DMA_ACCESS_RW,
+					  NV_DMA_TARGET_VIDMEM, &vram))) {
+		DRM_ERROR("Error creating VRAM ctxdma: %d\n", ret);
+		return ret;
+	}
+
+	if ((ret = nouveau_gpuobj_ref_add(dev, channel, vram_h, vram, NULL))) {
 		DRM_ERROR("Error referencing VRAM ctxdma: %d\n", ret);
 		return ret;
 	}
@@ -518,17 +797,15 @@ nouveau_object_init_channel(drm_device_t *dev, int channel,
 	return 0;
 
 	/* GART ctxdma */
-	gpuobj = nouveau_object_dma_create(dev, channel, NV_CLASS_DMA_IN_MEMORY,
-					   0, dev_priv->agp_available_size,
-					   NV_DMA_ACCESS_RW,
-					   NV_DMA_TARGET_AGP);
-	if (!gpuobj) {
-		DRM_ERROR("Error creating TT ctxdma: %d\n", DRM_ERR(ENOMEM));
-		return DRM_ERR(ENOMEM);
+	if ((ret = nouveau_gpuobj_dma_new(dev, channel, NV_CLASS_DMA_IN_MEMORY,
+					  0, dev_priv->agp_available_size,
+					  NV_DMA_ACCESS_RW, NV_DMA_TARGET_AGP,
+					  &tt))) {
+		DRM_ERROR("Error creating TT ctxdma: %d\n", ret);
+		return ret;
 	}
 
-	ret = nouveau_ramht_insert(dev, channel, tt_handle, gpuobj);
-	if (ret) {
+	if ((ret = nouveau_gpuobj_ref_add(dev, channel, tt_h, tt, NULL))) {
 		DRM_ERROR("Error referencing TT ctxdma: %d\n", ret);
 		return ret;
 	}
@@ -536,20 +813,34 @@ nouveau_object_init_channel(drm_device_t *dev, int channel,
 	return 0;
 }
 
-void nouveau_object_cleanup(drm_device_t *dev, int channel)
+void
+nouveau_gpuobj_channel_takedown(drm_device_t *dev, int channel)
 {
-	drm_nouveau_private_t *dev_priv=dev->dev_private;
+	drm_nouveau_private_t *dev_priv = dev->dev_private;
+	struct 
nouveau_fifo *chan = &dev_priv->fifos[channel]; + nouveau_gpuobj_ref_t *ref; - while (dev_priv->fifos[channel].objs) { - nouveau_object_free(dev, dev_priv->fifos[channel].objs); + DRM_DEBUG("ch%d\n", channel); + + while ((ref = chan->ramht_refs)) { + chan->ramht_refs = ref->next; + nouveau_gpuobj_ref_del(dev, &ref); } + nouveau_gpuobj_ref_del(dev, &chan->ramht); + + if (chan->ramin_heap) + nouveau_mem_takedown(&chan->ramin_heap); + if (chan->ramin) + nouveau_gpuobj_ref_del(dev, &chan->ramin); + } int nouveau_ioctl_grobj_alloc(DRM_IOCTL_ARGS) { DRM_DEVICE; drm_nouveau_grobj_alloc_t init; - struct nouveau_object *obj; + nouveau_gpuobj_t *gr = NULL; + int ret; DRM_COPY_FROM_USER_IOCTL(init, (drm_nouveau_grobj_alloc_t __user *) data, sizeof(init)); @@ -561,20 +852,20 @@ int nouveau_ioctl_grobj_alloc(DRM_IOCTL_ARGS) } //FIXME: check args, only allow trusted objects to be created + //FIXME: check for pre-existing handle - if (nouveau_object_handle_find(dev, init.channel, init.handle)) { - DRM_ERROR("Channel %d: handle 0x%08x already exists\n", - init.channel, init.handle); - return DRM_ERR(EINVAL); + if ((ret = nouveau_gpuobj_gr_new(dev, init.channel, init.class, &gr))) { + DRM_ERROR("Error creating gr object: %d (%d/0x%08x)\n", + ret, init.channel, init.handle); + return ret; } - obj = nouveau_object_gr_create(dev, init.channel, init.class); - if (!obj) - return DRM_ERR(ENOMEM); - - if (nouveau_ramht_insert(dev, init.channel, init.handle, obj)) { - nouveau_object_free(dev, obj); - return DRM_ERR(ENOMEM); + if ((ret = nouveau_gpuobj_ref_add(dev, init.channel, init.handle, + gr, NULL))) { + DRM_ERROR("Error referencing gr object: %d (%d/0x%08x\n)", + ret, init.channel, init.handle); + nouveau_gpuobj_del(dev, &gr); + return ret; } return 0; diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c index fa773d28..13bc930a 100644 --- a/shared-core/nouveau_state.c +++ b/shared-core/nouveau_state.c @@ -283,6 +283,20 @@ static int nouveau_card_init(drm_device_t *dev) return 0; } +static void nouveau_card_takedown(drm_device_t *dev) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + nouveau_engine_func_t *engine = &dev_priv->Engine; + + engine->fifo.takedown(dev); + engine->graph.takedown(dev); + engine->fb.takedown(dev); + engine->timer.takedown(dev); + engine->mc.takedown(dev); + nouveau_gpuobj_takedown(dev); + nouveau_mem_close(dev); +} + /* here a client dies, release the stuff that was allocated for its filp */ void nouveau_preclose(drm_device_t * dev, DRMFILE filp) { @@ -314,11 +328,10 @@ int nouveau_load(struct drm_device *dev, unsigned long flags) if (flags==NV_UNKNOWN) return DRM_ERR(EINVAL); - dev_priv = drm_alloc(sizeof(drm_nouveau_private_t), DRM_MEM_DRIVER); + dev_priv = drm_calloc(1, sizeof(*dev_priv), DRM_MEM_DRIVER); if (!dev_priv) return DRM_ERR(ENOMEM); - memset(dev_priv, 0, sizeof(drm_nouveau_private_t)); dev_priv->card_type=flags&NOUVEAU_FAMILY; dev_priv->flags=flags&NOUVEAU_FLAGS; @@ -338,6 +351,9 @@ int nouveau_load(struct drm_device *dev, unsigned long flags) void nouveau_lastclose(struct drm_device *dev) { drm_nouveau_private_t *dev_priv = dev->dev_private; + + nouveau_card_takedown(dev); + if(dev_priv->fb_mtrr>0) { drm_mtrr_del(dev_priv->fb_mtrr, drm_get_resource_start(dev, 1),nouveau_mem_fb_amount(dev), DRM_MTRR_WC); diff --git a/shared-core/nv04_fifo.c b/shared-core/nv04_fifo.c index bfae432e..b84f74c1 100644 --- a/shared-core/nv04_fifo.c +++ b/shared-core/nv04_fifo.c @@ -28,8 +28,10 @@ #include "drm.h" #include "nouveau_drv.h" -#define RAMFC_WR(offset, 
val) NV_WI32(fifoctx + NV04_RAMFC_##offset, (val))
-#define RAMFC_RD(offset)      NV_RI32(fifoctx + NV04_RAMFC_##offset)
+#define RAMFC_WR(offset,val) INSTANCE_WR(chan->ramfc->gpuobj, \
+					 NV04_RAMFC_##offset/4, (val))
+#define RAMFC_RD(offset)     INSTANCE_RD(chan->ramfc->gpuobj, \
+					 NV04_RAMFC_##offset/4)
 #define NV04_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV04_RAMFC__SIZE))
 #define NV04_RAMFC__SIZE 32
@@ -38,21 +40,19 @@ nv04_fifo_create_context(drm_device_t *dev, int channel)
 {
 	drm_nouveau_private_t *dev_priv = dev->dev_private;
 	struct nouveau_fifo *chan = &dev_priv->fifos[channel];
-	struct nouveau_object *pb = chan->cmdbuf_obj;
-	uint32_t fifoctx = NV04_RAMFC(channel);
-	int i;
+	int ret;
 
-	if (!pb || !pb->instance)
-		return DRM_ERR(EINVAL);
+	if ((ret = nouveau_gpuobj_new_fake(dev, NV04_RAMFC(channel),
+					   NV04_RAMFC__SIZE,
+					   NVOBJ_FLAG_ZERO_ALLOC |
+					   NVOBJ_FLAG_ZERO_FREE,
+					   NULL, &chan->ramfc)))
+		return ret;
 
-	/* Clear RAMFC */
-	for (i=0; i<NV04_RAMFC__SIZE; i+=4)
-		NV_WI32(fifoctx + i, 0);
 
 	RAMFC_WR(DMA_PUT, chan->pushbuf_base);
 	RAMFC_WR(DMA_GET, chan->pushbuf_base);
-	RAMFC_WR(DMA_INSTANCE, nouveau_chip_instance_get(dev, pb->instance));
+	RAMFC_WR(DMA_INSTANCE, chan->pushbuf->instance >> 4);
 	RAMFC_WR(DMA_FETCH, (NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
 			     NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
 			     NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
@@ -67,18 +67,17 @@ void
 nv04_fifo_destroy_context(drm_device_t *dev, int channel)
 {
 	drm_nouveau_private_t *dev_priv = dev->dev_private;
-	uint32_t fifoctx = NV04_RAMFC(channel);
-	int i;
+	struct nouveau_fifo *chan = &dev_priv->fifos[channel];
 
-	for (i=0; i<NV04_RAMFC__SIZE; i+=4)
-		NV_WI32(fifoctx + i, 0);
+	if (chan->ramfc)
+		nouveau_gpuobj_ref_del(dev, &chan->ramfc);
 }
 
 int
 nv04_fifo_load_context(drm_device_t *dev, int channel)
 {
 	drm_nouveau_private_t *dev_priv = dev->dev_private;
-	uint32_t fifoctx = NV04_RAMFC(channel);
+	struct nouveau_fifo *chan = &dev_priv->fifos[channel];
 	uint32_t tmp;
 
 	NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, (1<<8) | channel);
@@ -106,7 +105,7 @@ int
 nv04_fifo_save_context(drm_device_t *dev, int channel)
 {
 	drm_nouveau_private_t *dev_priv = dev->dev_private;
-	uint32_t fifoctx = NV04_RAMFC(channel);
+	struct nouveau_fifo *chan = &dev_priv->fifos[channel];
 	uint32_t tmp;
 
 	RAMFC_WR(DMA_PUT, NV04_PFIFO_CACHE1_DMA_PUT);
diff --git a/shared-core/nv10_fifo.c b/shared-core/nv10_fifo.c
index b84971de..07ec4635 100644
--- a/shared-core/nv10_fifo.c
+++ b/shared-core/nv10_fifo.c
@@ -28,8 +28,11 @@
 #include "drm.h"
 #include "nouveau_drv.h"
 
-#define RAMFC_WR(offset, val)	NV_WI32(fifoctx + NV10_RAMFC_##offset, (val))
-#define RAMFC_RD(offset)	NV_RI32(fifoctx + NV10_RAMFC_##offset)
+
+#define RAMFC_WR(offset,val) INSTANCE_WR(chan->ramfc->gpuobj, \
+					 NV10_RAMFC_##offset/4, (val))
+#define RAMFC_RD(offset)     INSTANCE_RD(chan->ramfc->gpuobj, \
+					 NV10_RAMFC_##offset/4)
 #define NV10_RAMFC(c) (dev_priv->ramfc_offset + NV10_RAMFC__SIZE)
 #define NV10_RAMFC__SIZE ((dev_priv->chipset) >= 0x17 ? 
64 : 32)
@@ -38,20 +41,21 @@ nv10_fifo_create_context(drm_device_t *dev, int channel)
 {
 	drm_nouveau_private_t *dev_priv = dev->dev_private;
 	struct nouveau_fifo *chan = &dev_priv->fifos[channel];
-	uint32_t fifoctx = NV10_RAMFC(channel), pushbuf;
-	int i;
+	int ret;
 
-	pushbuf = nouveau_chip_instance_get(dev, chan->cmdbuf_obj->instance);
-
-	for (i=0; i<NV10_RAMFC__SIZE; i+=4)
-		NV_WI32(fifoctx + i, 0);
+	if ((ret = nouveau_gpuobj_new_fake(dev, NV10_RAMFC(channel),
+					   NV10_RAMFC__SIZE,
+					   NVOBJ_FLAG_ZERO_ALLOC |
+					   NVOBJ_FLAG_ZERO_FREE,
+					   NULL, &chan->ramfc)))
+		return ret;
 
 	/* Fill entries that are seen filled in dumps of nvidia driver just
 	 * after channel's is put into DMA mode
 	 */
 	RAMFC_WR(DMA_PUT       , chan->pushbuf_base);
 	RAMFC_WR(DMA_GET       , chan->pushbuf_base);
-	RAMFC_WR(DMA_INSTANCE  , pushbuf);
+	RAMFC_WR(DMA_INSTANCE  , chan->pushbuf->instance >> 4);
 	RAMFC_WR(DMA_FETCH     , NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
 				 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
 				 NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
@@ -67,18 +71,17 @@ void
 nv10_fifo_destroy_context(drm_device_t *dev, int channel)
 {
 	drm_nouveau_private_t *dev_priv = dev->dev_private;
-	uint32_t fifoctx = NV10_RAMFC(channel);
-	int i;
+	struct nouveau_fifo *chan = &dev_priv->fifos[channel];
 
-	for (i=0; i<NV10_RAMFC__SIZE; i+=4)
-		NV_WI32(fifoctx + i, 0);
+	if (chan->ramfc)
+		nouveau_gpuobj_ref_del(dev, &chan->ramfc);
 }
 
 int
 nv10_fifo_load_context(drm_device_t *dev, int channel)
 {
 	drm_nouveau_private_t *dev_priv = dev->dev_private;
-	uint32_t fifoctx = NV10_RAMFC(channel);
+	struct nouveau_fifo *chan = &dev_priv->fifos[channel];
 	uint32_t tmp;
 
 	NV_WRITE(NV03_PFIFO_CACHE1_PUSH1 , 0x00000100 | channel);
@@ -120,7 +123,7 @@ int
 nv10_fifo_save_context(drm_device_t *dev, int channel)
 {
 	drm_nouveau_private_t *dev_priv = dev->dev_private;
-	uint32_t fifoctx = NV10_RAMFC(channel);
+	struct nouveau_fifo *chan = &dev_priv->fifos[channel];
 	uint32_t tmp;
 
 	RAMFC_WR(DMA_PUT          , NV_READ(NV04_PFIFO_CACHE1_DMA_PUT));
diff --git a/shared-core/nv20_graph.c b/shared-core/nv20_graph.c
index 1b8a6727..13271051 100644
--- a/shared-core/nv20_graph.c
+++ b/shared-core/nv20_graph.c
@@ -34,19 +34,18 @@ int nv20_graph_create_context(drm_device_t *dev, int channel) {
 	(drm_nouveau_private_t *)dev->dev_private;
 	struct nouveau_fifo *chan = &dev_priv->fifos[channel];
 	unsigned int ctx_size = NV20_GRCTX_SIZE;
-	int i;
+	int ret;
 
-	/* Alloc and clear RAMIN to store the context */
-	chan->ramin_grctx = nouveau_instmem_alloc(dev, ctx_size, 4);
-	if (!chan->ramin_grctx)
-		return DRM_ERR(ENOMEM);
-	for (i=0; i<ctx_size; i+=4)
-		INSTANCE_WR(chan->ramin_grctx, i/4, 0x00000000);
+	if ((ret = nouveau_gpuobj_new_ref(dev, channel, -1, 0, ctx_size, 16,
+					  NVOBJ_FLAG_ZERO_ALLOC,
+					  &chan->ramin_grctx)))
+		return ret;
 
 	/* Initialise default context values */
-	INSTANCE_WR(chan->ramin_grctx, 10, channel << 24); /* CTX_USER */
+	INSTANCE_WR(chan->ramin_grctx->gpuobj, 10, channel<<24); /* CTX_USER */
 
-	INSTANCE_WR(dev_priv->ctx_table, channel, nouveau_chip_instance_get(dev, chan->ramin_grctx));
+	INSTANCE_WR(dev_priv->ctx_table->gpuobj, channel,
+		    chan->ramin_grctx->instance >> 4);
 	return 0;
 }
 
@@ -54,12 +53,10 @@ void nv20_graph_destroy_context(drm_device_t *dev, int channel) {
 	drm_nouveau_private_t *dev_priv = dev->dev_private;
 	struct nouveau_fifo *chan = &dev_priv->fifos[channel];
 
-	if (chan->ramin_grctx) {
-		nouveau_instmem_free(dev, chan->ramin_grctx);
-		chan->ramin_grctx = NULL;
-	}
+	if (chan->ramin_grctx)
+		nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx);
 
-	INSTANCE_WR(dev_priv->ctx_table, channel, 0);
+	INSTANCE_WR(dev_priv->ctx_table->gpuobj, channel, 0);
 }
 
 static void nv20_graph_rdi(drm_device_t *dev) {
@@ -79,13 +76,14 @@ static void nv20_graph_rdi(drm_device_t *dev) {
 int nv20_graph_save_context(drm_device_t *dev, int channel) {
 	drm_nouveau_private_t *dev_priv = (drm_nouveau_private_t 
*)dev->dev_private; + struct nouveau_fifo *chan = &dev_priv->fifos[channel]; uint32_t instance; - instance = INSTANCE_RD(dev_priv->ctx_table, channel); + instance = INSTANCE_RD(dev_priv->ctx_table->gpuobj, channel); if (!instance) { return DRM_ERR(EINVAL); } - if (instance != nouveau_chip_instance_get(dev, dev_priv->fifos[channel].ramin_grctx)) + if (instance != (chan->ramin_grctx->instance >> 4)) DRM_ERROR("nv20_graph_save_context : bad instance\n"); NV_WRITE(NV10_PGRAPH_CHANNEL_CTX_SIZE, instance); @@ -99,13 +97,14 @@ int nv20_graph_save_context(drm_device_t *dev, int channel) { int nv20_graph_load_context(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = (drm_nouveau_private_t *)dev->dev_private; + struct nouveau_fifo *chan = &dev_priv->fifos[channel]; uint32_t instance; - instance = INSTANCE_RD(dev_priv->ctx_table, channel); + instance = INSTANCE_RD(dev_priv->ctx_table->gpuobj, channel); if (!instance) { return DRM_ERR(EINVAL); } - if (instance != nouveau_chip_instance_get(dev, dev_priv->fifos[channel].ramin_grctx)) + if (instance != (chan->ramin_grctx->instance >> 4)) DRM_ERROR("nv20_graph_load_context_current : bad instance\n"); NV_WRITE(NV10_PGRAPH_CTX_USER, channel << 24); @@ -148,8 +147,8 @@ void nouveau_nv20_context_switch(drm_device_t *dev) int nv20_graph_init(drm_device_t *dev) { drm_nouveau_private_t *dev_priv = (drm_nouveau_private_t *)dev->dev_private; - int i; uint32_t tmp, vramsz; + int ret, i; NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH); @@ -158,14 +157,14 @@ int nv20_graph_init(drm_device_t *dev) { /* Create Context Pointer Table */ dev_priv->ctx_table_size = 32 * 4; - dev_priv->ctx_table = nouveau_instmem_alloc(dev, dev_priv->ctx_table_size, 4); - if (!dev_priv->ctx_table) - return DRM_ERR(ENOMEM); + if ((ret = nouveau_gpuobj_new_ref(dev, -1, -1, 0, + dev_priv->ctx_table_size, 16, + NVOBJ_FLAG_ZERO_ALLOC, + &dev_priv->ctx_table))) + return ret; - for (i=0; i< dev_priv->ctx_table_size; i+=4) - INSTANCE_WR(dev_priv->ctx_table, i/4, 0x00000000); - - NV_WRITE(NV10_PGRAPH_CHANNEL_CTX_TABLE, nouveau_chip_instance_get(dev, dev_priv->ctx_table)); + NV_WRITE(NV10_PGRAPH_CHANNEL_CTX_TABLE, + dev_priv->ctx_table->instance >> 4); //XXX need to be done and save/restore for each fifo ??? nv20_graph_rdi(dev); diff --git a/shared-core/nv30_graph.c b/shared-core/nv30_graph.c index 7a87990a..65f4f868 100644 --- a/shared-core/nv30_graph.c +++ b/shared-core/nv30_graph.c @@ -16,7 +16,7 @@ * contexts are taken from dumps just after the 3D object is * created. 
*/
-static void nv30_graph_context_init(drm_device_t *dev, struct mem_block *ctx)
+static void nv30_graph_context_init(drm_device_t *dev, nouveau_gpuobj_t *ctx)
 {
 	drm_nouveau_private_t *dev_priv = dev->dev_private;
 	int i;
@@ -105,9 +105,9 @@ int nv30_graph_create_context(drm_device_t *dev, int channel)
 	drm_nouveau_private_t *dev_priv = (drm_nouveau_private_t *)dev->dev_private;
 	struct nouveau_fifo *chan = &dev_priv->fifos[channel];
-	void (*ctx_init)(drm_device_t *, struct mem_block *);
+	void (*ctx_init)(drm_device_t *, nouveau_gpuobj_t *);
 	unsigned int ctx_size;
-	int i;
+	int ret;
 
 	switch (dev_priv->chipset) {
 	default:
@@ -116,18 +116,17 @@ int nv30_graph_create_context(drm_device_t *dev, int channel)
 		break;
 	}
 
-	/* Alloc and clear RAMIN to store the context */
-	chan->ramin_grctx = nouveau_instmem_alloc(dev, ctx_size, 4);
-	if (!chan->ramin_grctx)
-		return DRM_ERR(ENOMEM);
-	for (i=0; i<ctx_size; i+=4)
-		INSTANCE_WR(chan->ramin_grctx, i/4, 0x00000000);
+	if ((ret = nouveau_gpuobj_new_ref(dev, channel, -1, 0, ctx_size, 16,
+					  NVOBJ_FLAG_ZERO_ALLOC,
+					  &chan->ramin_grctx)))
+		return ret;
 
 	/* Initialise default context values */
-	ctx_init(dev, chan->ramin_grctx);
+	ctx_init(dev, chan->ramin_grctx->gpuobj);
 
-	INSTANCE_WR(chan->ramin_grctx, 10, channel << 24); /* CTX_USER */
-	INSTANCE_WR(dev_priv->ctx_table, channel, nouveau_chip_instance_get(dev, chan->ramin_grctx));
+	INSTANCE_WR(chan->ramin_grctx->gpuobj, 10, channel<<24); /* CTX_USER */
+	INSTANCE_WR(dev_priv->ctx_table->gpuobj, channel,
+		    chan->ramin_grctx->instance >> 4);
 	return 0;
 }
 
@@ -138,12 +137,10 @@ void nv30_graph_destroy_context(drm_device_t *dev, int channel)
 	(drm_nouveau_private_t *)dev->dev_private;
 	struct nouveau_fifo *chan = &dev_priv->fifos[channel];
 
-	if (chan->ramin_grctx) {
-		nouveau_instmem_free(dev, chan->ramin_grctx);
-		chan->ramin_grctx = NULL;
-	}
+	if (chan->ramin_grctx)
+		nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx);
 
-	INSTANCE_WR(dev_priv->ctx_table, channel, 0);
+	INSTANCE_WR(dev_priv->ctx_table->gpuobj, channel, 0);
 }
 
 static int
@@ -172,7 +169,7 @@ int nv30_graph_load_context(drm_device_t *dev, int channel)
 	if (!chan->ramin_grctx)
 		return DRM_ERR(EINVAL);
-	inst = nouveau_chip_instance_get(dev, chan->ramin_grctx);
+	inst = chan->ramin_grctx->instance >> 4;
 
 	NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
 	NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_XFER,
@@ -189,7 +186,7 @@ int nv30_graph_save_context(drm_device_t *dev, int channel)
 	if (!chan->ramin_grctx)
 		return DRM_ERR(EINVAL);
-	inst = nouveau_chip_instance_get(dev, chan->ramin_grctx);
+	inst = chan->ramin_grctx->instance >> 4;
 
 	NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
 	NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_XFER,
@@ -203,7 +200,7 @@ int nv30_graph_init(drm_device_t *dev)
 	drm_nouveau_private_t *dev_priv = (drm_nouveau_private_t *)dev->dev_private;
 	uint32_t vramsz, tmp;
-	int i;
+	int ret, i;
 
 	NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH);
@@ -212,14 +209,14 @@ int nv30_graph_init(drm_device_t *dev)
 
 	/* Create Context Pointer Table */
 	dev_priv->ctx_table_size = 32 * 4;
-	dev_priv->ctx_table = nouveau_instmem_alloc(dev, dev_priv->ctx_table_size, 4);
-	if (!dev_priv->ctx_table)
-		return DRM_ERR(ENOMEM);
+	if ((ret = nouveau_gpuobj_new_ref(dev, -1, -1, 0,
+					  dev_priv->ctx_table_size, 16,
+					  NVOBJ_FLAG_ZERO_ALLOC,
+					  &dev_priv->ctx_table)))
+		return ret;
 
-	for (i=0; i< dev_priv->ctx_table_size; i+=4)
-		INSTANCE_WR(dev_priv->ctx_table, i/4, 0x00000000);
-
-	NV_WRITE(NV10_PGRAPH_CHANNEL_CTX_TABLE, nouveau_chip_instance_get(dev, dev_priv->ctx_table));
+ 
NV_WRITE(NV10_PGRAPH_CHANNEL_CTX_TABLE, + dev_priv->ctx_table->instance >> 4); NV_WRITE(NV03_PGRAPH_INTR_EN, 0x00000000); NV_WRITE(NV03_PGRAPH_INTR , 0xFFFFFFFF); diff --git a/shared-core/nv40_fifo.c b/shared-core/nv40_fifo.c index 6f25349c..eed3e45b 100644 --- a/shared-core/nv40_fifo.c +++ b/shared-core/nv40_fifo.c @@ -28,8 +28,11 @@ #include "nouveau_drv.h" #include "nouveau_drm.h" -#define RAMFC_WR(offset, val) NV_WI32(fifoctx + NV40_RAMFC_##offset, (val)) -#define RAMFC_RD(offset) NV_RI32(fifoctx + NV40_RAMFC_##offset) + +#define RAMFC_WR(offset,val) INSTANCE_WR(chan->ramfc->gpuobj, \ + NV40_RAMFC_##offset/4, (val)) +#define RAMFC_RD(offset) INSTANCE_RD(chan->ramfc->gpuobj, \ + NV40_RAMFC_##offset/4) #define NV40_RAMFC(c) (dev_priv->ramfc_offset + ((c)*NV40_RAMFC__SIZE)) #define NV40_RAMFC__SIZE 128 @@ -38,21 +41,21 @@ nv40_fifo_create_context(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = dev->dev_private; struct nouveau_fifo *chan = &dev_priv->fifos[channel]; - uint32_t fifoctx = NV40_RAMFC(channel), grctx, pushbuf; - int i; + int ret; - for (i = 0; i < NV40_RAMFC__SIZE; i+=4) - NV_WI32(fifoctx + i, 0); - - grctx = nouveau_chip_instance_get(dev, chan->ramin_grctx); - pushbuf = nouveau_chip_instance_get(dev, chan->cmdbuf_obj->instance); + if ((ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(channel), + NV40_RAMFC__SIZE, + NVOBJ_FLAG_ZERO_ALLOC | + NVOBJ_FLAG_ZERO_FREE, + NULL, &chan->ramfc))) + return ret; /* Fill entries that are seen filled in dumps of nvidia driver just * after channel's is put into DMA mode */ RAMFC_WR(DMA_PUT , chan->pushbuf_base); RAMFC_WR(DMA_GET , chan->pushbuf_base); - RAMFC_WR(DMA_INSTANCE , pushbuf); + RAMFC_WR(DMA_INSTANCE , chan->pushbuf->instance >> 4); RAMFC_WR(DMA_FETCH , NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 | @@ -61,7 +64,7 @@ nv40_fifo_create_context(drm_device_t *dev, int channel) #endif 0x30000000 /* no idea.. */); RAMFC_WR(DMA_SUBROUTINE, 0); - RAMFC_WR(GRCTX_INSTANCE, grctx); + RAMFC_WR(GRCTX_INSTANCE, chan->ramin_grctx->instance >> 4); RAMFC_WR(DMA_TIMESLICE , 0x0001FFFF); return 0; @@ -71,18 +74,17 @@ void nv40_fifo_destroy_context(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = dev->dev_private; - uint32_t fifoctx = NV40_RAMFC(channel); - int i; + struct nouveau_fifo *chan = &dev_priv->fifos[channel]; - for (i = 0; i < NV40_RAMFC__SIZE; i+=4) - NV_WI32(fifoctx + i, 0); + if (chan->ramfc) + nouveau_gpuobj_ref_del(dev, &chan->ramfc); } int nv40_fifo_load_context(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = dev->dev_private; - uint32_t fifoctx = NV40_RAMFC(channel); + struct nouveau_fifo *chan = &dev_priv->fifos[channel]; uint32_t tmp, tmp2; NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET , RAMFC_RD(DMA_GET)); @@ -141,7 +143,7 @@ int nv40_fifo_save_context(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = dev->dev_private; - uint32_t fifoctx = NV40_RAMFC(channel); + struct nouveau_fifo *chan = &dev_priv->fifos[channel]; uint32_t tmp; RAMFC_WR(DMA_PUT , NV_READ(NV04_PFIFO_CACHE1_DMA_PUT)); diff --git a/shared-core/nv40_graph.c b/shared-core/nv40_graph.c index 6fb575db..3f33cee6 100644 --- a/shared-core/nv40_graph.c +++ b/shared-core/nv40_graph.c @@ -47,13 +47,13 @@ * created. 
*/ static void -nv40_graph_context_init(drm_device_t *dev, struct mem_block *ctx) +nv40_graph_context_init(drm_device_t *dev, nouveau_gpuobj_t *ctx) { drm_nouveau_private_t *dev_priv = dev->dev_private; int i; /* Always has the "instance address" of itself at offset 0 */ - INSTANCE_WR(ctx, 0x00000/4, nouveau_chip_instance_get(dev, ctx)); + INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start); /* unknown */ INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff); INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff); @@ -188,12 +188,12 @@ nv40_graph_context_init(drm_device_t *dev, struct mem_block *ctx) } static void -nv43_graph_context_init(drm_device_t *dev, struct mem_block *ctx) +nv43_graph_context_init(drm_device_t *dev, nouveau_gpuobj_t *ctx) { drm_nouveau_private_t *dev_priv = dev->dev_private; int i; - INSTANCE_WR(ctx, 0x00000/4, nouveau_chip_instance_get(dev, ctx)); + INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start); INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff); INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff); INSTANCE_WR(ctx, 0x00030/4, 0x00000001); @@ -304,12 +304,12 @@ nv43_graph_context_init(drm_device_t *dev, struct mem_block *ctx) }; static void -nv46_graph_context_init(drm_device_t *dev, struct mem_block *ctx) +nv46_graph_context_init(drm_device_t *dev, nouveau_gpuobj_t *ctx) { drm_nouveau_private_t *dev_priv = dev->dev_private; int i; - INSTANCE_WR(ctx, 0x00000/4, nouveau_chip_instance_get(dev, ctx)); + INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start); INSTANCE_WR(ctx, 0x00040/4, 0x0000ffff); INSTANCE_WR(ctx, 0x00044/4, 0x0000ffff); INSTANCE_WR(ctx, 0x0004c/4, 0x00000001); @@ -455,12 +455,12 @@ nv46_graph_context_init(drm_device_t *dev, struct mem_block *ctx) } static void -nv49_graph_context_init(drm_device_t *dev, struct mem_block *ctx) +nv49_graph_context_init(drm_device_t *dev, nouveau_gpuobj_t *ctx) { drm_nouveau_private_t *dev_priv = dev->dev_private; int i; - INSTANCE_WR(ctx, 0x00000/4, nouveau_chip_instance_get(dev, ctx)); + INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start); INSTANCE_WR(ctx, 0x00004/4, 0x0000c040); INSTANCE_WR(ctx, 0x00008/4, 0x0000c040); INSTANCE_WR(ctx, 0x0000c/4, 0x0000c040); @@ -678,12 +678,12 @@ nv49_graph_context_init(drm_device_t *dev, struct mem_block *ctx) } static void -nv4a_graph_context_init(drm_device_t *dev, struct mem_block *ctx) +nv4a_graph_context_init(drm_device_t *dev, nouveau_gpuobj_t *ctx) { drm_nouveau_private_t *dev_priv = dev->dev_private; int i; - INSTANCE_WR(ctx, 0x00000/4, nouveau_chip_instance_get(dev, ctx)); + INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start); INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff); INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff); INSTANCE_WR(ctx, 0x00030/4, 0x00000001); @@ -795,12 +795,12 @@ nv4a_graph_context_init(drm_device_t *dev, struct mem_block *ctx) } static void -nv4b_graph_context_init(drm_device_t *dev, struct mem_block *ctx) +nv4b_graph_context_init(drm_device_t *dev, nouveau_gpuobj_t *ctx) { drm_nouveau_private_t *dev_priv = dev->dev_private; int i; - INSTANCE_WR(ctx, 0x00000/4, nouveau_chip_instance_get(dev, ctx)); + INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start); INSTANCE_WR(ctx, 0x00004/4, 0x0000c040); INSTANCE_WR(ctx, 0x00008/4, 0x0000c040); INSTANCE_WR(ctx, 0x0000c/4, 0x0000c040); @@ -1010,12 +1010,12 @@ nv4b_graph_context_init(drm_device_t *dev, struct mem_block *ctx) } static void -nv4c_graph_context_init(drm_device_t *dev, struct mem_block *ctx) +nv4c_graph_context_init(drm_device_t *dev, nouveau_gpuobj_t *ctx) { drm_nouveau_private_t *dev_priv = dev->dev_private; int i; - INSTANCE_WR(ctx, 0x00000/4, 
nouveau_chip_instance_get(dev, ctx)); + INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start); INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff); INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff); INSTANCE_WR(ctx, 0x00030/4, 0x00000001); @@ -1117,12 +1117,12 @@ nv4c_graph_context_init(drm_device_t *dev, struct mem_block *ctx) } static void -nv4e_graph_context_init(drm_device_t *dev, struct mem_block *ctx) +nv4e_graph_context_init(drm_device_t *dev, nouveau_gpuobj_t *ctx) { drm_nouveau_private_t *dev_priv = dev->dev_private; int i; - INSTANCE_WR(ctx, 0x00000/4, nouveau_chip_instance_get(dev, ctx)); + INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start); INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff); INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff); INSTANCE_WR(ctx, 0x00030/4, 0x00000001); @@ -1229,9 +1229,9 @@ nv40_graph_create_context(drm_device_t *dev, int channel) drm_nouveau_private_t *dev_priv = (drm_nouveau_private_t *)dev->dev_private; struct nouveau_fifo *chan = &dev_priv->fifos[channel]; - void (*ctx_init)(drm_device_t *, struct mem_block *); + void (*ctx_init)(drm_device_t *, nouveau_gpuobj_t *); unsigned int ctx_size; - int i; + int ret; switch (dev_priv->chipset) { case 0x40: @@ -1272,15 +1272,13 @@ nv40_graph_create_context(drm_device_t *dev, int channel) break; } - /* Alloc and clear RAMIN to store the context */ - chan->ramin_grctx = nouveau_instmem_alloc(dev, ctx_size, 4); - if (!chan->ramin_grctx) - return DRM_ERR(ENOMEM); - for (i=0; iramin_grctx, i/4, 0x00000000); + if ((ret = nouveau_gpuobj_new_ref(dev, channel, -1, 0, ctx_size, 16, + NVOBJ_FLAG_ZERO_ALLOC, + &chan->ramin_grctx))) + return ret; /* Initialise default context values */ - ctx_init(dev, chan->ramin_grctx); + ctx_init(dev, chan->ramin_grctx->gpuobj); return 0; } @@ -1291,10 +1289,8 @@ nv40_graph_destroy_context(drm_device_t *dev, int channel) drm_nouveau_private_t *dev_priv = dev->dev_private; struct nouveau_fifo *chan = &dev_priv->fifos[channel]; - if (chan->ramin_grctx) { - nouveau_instmem_free(dev, chan->ramin_grctx); - chan->ramin_grctx = NULL; - } + if (chan->ramin_grctx) + nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx); } static int @@ -1339,7 +1335,7 @@ nv40_graph_save_context(drm_device_t *dev, int channel) if (!chan->ramin_grctx) return DRM_ERR(EINVAL); - inst = nouveau_chip_instance_get(dev, chan->ramin_grctx); + inst = chan->ramin_grctx->instance >> 4; return nv40_graph_transfer_context(dev, inst, 1); } @@ -1357,7 +1353,7 @@ nv40_graph_load_context(drm_device_t *dev, int channel) if (!chan->ramin_grctx) return DRM_ERR(EINVAL); - inst = nouveau_chip_instance_get(dev, chan->ramin_grctx); + inst = chan->ramin_grctx->instance >> 4; ret = nv40_graph_transfer_context(dev, inst, 0); if (ret) From 3324342e42b78aef8e90e11273776dd2b3b92074 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 4 Jul 2007 15:31:01 +1000 Subject: [PATCH 084/437] nouveau: enable reporting for all PFIFO/PGRAPH irqs --- shared-core/nouveau_irq.c | 26 +++----------------------- 1 file changed, 3 insertions(+), 23 deletions(-) diff --git a/shared-core/nouveau_irq.c b/shared-core/nouveau_irq.c index 8de6e705..72b12e0c 100644 --- a/shared-core/nouveau_irq.c +++ b/shared-core/nouveau_irq.c @@ -87,34 +87,14 @@ void nouveau_irq_postinstall(drm_device_t *dev) DRM_DEBUG("IRQ: postinst\n"); /* Enable PFIFO error reporting */ - NV_WRITE(NV03_PFIFO_INTR_EN_0 , - NV_PFIFO_INTR_CACHE_ERROR | - NV_PFIFO_INTR_RUNOUT | - NV_PFIFO_INTR_RUNOUT_OVERFLOW | - NV_PFIFO_INTR_DMA_PUSHER | - NV_PFIFO_INTR_DMA_PT | - NV_PFIFO_INTR_SEMAPHORE | - NV_PFIFO_INTR_ACQUIRE_TIMEOUT - ); + 
NV_WRITE(NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF); NV_WRITE(NV03_PFIFO_INTR_0, 0xFFFFFFFF); /* Enable PGRAPH interrupts */ if (dev_priv->card_type Date: Thu, 5 Jul 2007 00:12:33 +1000 Subject: [PATCH 085/437] nouveau/nv50: Initial channel/object support Should be OK on G84 for a single channel, multiple channels *almost* work. Untested on G80. --- linux-core/Makefile.kernel | 3 +- linux-core/nv04_instmem.c | 1 + linux-core/nv50_instmem.c | 1 + shared-core/nouveau_drv.h | 46 ++++-- shared-core/nouveau_fifo.c | 99 +++++++------ shared-core/nouveau_irq.c | 15 +- shared-core/nouveau_mem.c | 143 +----------------- shared-core/nouveau_notifier.c | 6 +- shared-core/nouveau_object.c | 70 +++++---- shared-core/nouveau_reg.h | 12 +- shared-core/nouveau_state.c | 41 +++++- shared-core/nv04_fifo.c | 13 +- shared-core/nv04_graph.c | 10 +- shared-core/nv04_instmem.c | 165 +++++++++++++++++++++ shared-core/nv10_fifo.c | 12 +- shared-core/nv10_graph.c | 8 +- shared-core/nv20_graph.c | 8 +- shared-core/nv30_graph.c | 8 +- shared-core/nv40_fifo.c | 12 +- shared-core/nv40_graph.c | 8 +- shared-core/nv50_fifo.c | 259 +++++++++++++++++++++++++++++++- shared-core/nv50_graph.c | 233 ++++++++++++++++++++++++++++- shared-core/nv50_instmem.c | 262 +++++++++++++++++++++++++++++++++ 23 files changed, 1157 insertions(+), 278 deletions(-) create mode 120000 linux-core/nv04_instmem.c create mode 120000 linux-core/nv50_instmem.c create mode 100644 shared-core/nv04_instmem.c create mode 100644 shared-core/nv50_instmem.c diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel index 478c4df0..be2641c8 100644 --- a/linux-core/Makefile.kernel +++ b/linux-core/Makefile.kernel @@ -27,7 +27,8 @@ nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \ nv04_fb.o nv10_fb.o nv40_fb.o \ nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \ nv04_graph.o nv10_graph.o nv20_graph.o nv30_graph.o \ - nv40_graph.o nv50_graph.o + nv40_graph.o nv50_graph.o \ + nv04_instmem.o nv50_instmem.o radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o sis-objs := sis_drv.o sis_mm.o ffb-objs := ffb_drv.o ffb_context.o diff --git a/linux-core/nv04_instmem.c b/linux-core/nv04_instmem.c new file mode 120000 index 00000000..e720fb5b --- /dev/null +++ b/linux-core/nv04_instmem.c @@ -0,0 +1 @@ +../shared-core/nv04_instmem.c \ No newline at end of file diff --git a/linux-core/nv50_instmem.c b/linux-core/nv50_instmem.c new file mode 120000 index 00000000..4e45344a --- /dev/null +++ b/linux-core/nv50_instmem.c @@ -0,0 +1 @@ +../shared-core/nv50_instmem.c \ No newline at end of file diff --git a/shared-core/nouveau_drv.h b/shared-core/nouveau_drv.h index 73793b34..81972db5 100644 --- a/shared-core/nouveau_drv.h +++ b/shared-core/nouveau_drv.h @@ -72,6 +72,7 @@ typedef struct nouveau_gpuobj { int im_channel; struct mem_block *im_pramin; struct mem_block *im_backing; + int im_bound; uint32_t flags; int refcount; @@ -92,7 +93,6 @@ typedef struct nouveau_gpuobj_ref { struct nouveau_fifo { - int used; /* owner of this fifo */ DRMFILE filp; /* mapping of the fifo itself */ @@ -101,8 +101,8 @@ struct nouveau_fifo drm_local_map_t *regs; /* DMA push buffer */ - struct mem_block *cmdbuf_mem; nouveau_gpuobj_ref_t *pushbuf; + struct mem_block *pushbuf_mem; uint32_t pushbuf_base; /* Notifier memory */ @@ -132,6 +132,19 @@ struct nouveau_config { }; typedef struct nouveau_engine_func { + struct { + void *priv; + + int (*init)(drm_device_t *dev); + void (*takedown)(drm_device_t *dev); + + int 
(*populate)(drm_device_t *, nouveau_gpuobj_t *, + uint32_t *size); + void (*clear)(drm_device_t *, nouveau_gpuobj_t *); + int (*bind)(drm_device_t *, nouveau_gpuobj_t *); + int (*unbind)(drm_device_t *, nouveau_gpuobj_t *); + } instmem; + struct { int (*init)(drm_device_t *dev); void (*takedown)(drm_device_t *dev); @@ -158,6 +171,8 @@ typedef struct nouveau_engine_func { } graph; struct { + void *priv; + int (*init)(drm_device_t *); void (*takedown)(drm_device_t *); @@ -180,13 +195,13 @@ typedef struct drm_nouveau_private { drm_local_map_t *ramin; /* NV40 onwards */ int fifo_alloc_count; - struct nouveau_fifo fifos[NV_MAX_FIFO_NUMBER]; + struct nouveau_fifo *fifos[NV_MAX_FIFO_NUMBER]; struct nouveau_engine_func Engine; /* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */ nouveau_gpuobj_t *ramht; - uint32_t ramin_size; + uint32_t ramin_rsvd_vram; uint32_t ramht_offset; uint32_t ramht_size; uint32_t ramht_bits; @@ -246,11 +261,6 @@ extern struct mem_block* nouveau_mem_alloc(struct drm_device *dev, int alignment extern void nouveau_mem_free(struct drm_device* dev, struct mem_block*); extern int nouveau_mem_init(struct drm_device *dev); extern void nouveau_mem_close(struct drm_device *dev); -extern int nouveau_instmem_init(struct drm_device *dev); -extern struct mem_block* nouveau_instmem_alloc(struct drm_device *dev, - uint32_t size, uint32_t align); -extern void nouveau_instmem_free(struct drm_device *dev, - struct mem_block *block); /* nouveau_notifier.c */ extern int nouveau_notifier_init_channel(drm_device_t *, int channel, DRMFILE); @@ -386,6 +396,24 @@ extern void nv50_graph_destroy_context(drm_device_t *, int channel); extern int nv50_graph_load_context(drm_device_t *, int channel); extern int nv50_graph_save_context(drm_device_t *, int channel); +/* nv04_instmem.c */ +extern int nv04_instmem_init(drm_device_t *dev); +extern void nv04_instmem_takedown(drm_device_t *dev); +extern int nv04_instmem_populate(drm_device_t*, nouveau_gpuobj_t*, + uint32_t *size); +extern void nv04_instmem_clear(drm_device_t*, nouveau_gpuobj_t*); +extern int nv04_instmem_bind(drm_device_t*, nouveau_gpuobj_t*); +extern int nv04_instmem_unbind(drm_device_t*, nouveau_gpuobj_t*); + +/* nv50_instmem.c */ +extern int nv50_instmem_init(drm_device_t *dev); +extern void nv50_instmem_takedown(drm_device_t *dev); +extern int nv50_instmem_populate(drm_device_t*, nouveau_gpuobj_t*, + uint32_t *size); +extern void nv50_instmem_clear(drm_device_t*, nouveau_gpuobj_t*); +extern int nv50_instmem_bind(drm_device_t*, nouveau_gpuobj_t*); +extern int nv50_instmem_unbind(drm_device_t*, nouveau_gpuobj_t*); + /* nv04_mc.c */ extern int nv04_mc_init(drm_device_t *dev); extern void nv04_mc_takedown(drm_device_t *dev); diff --git a/shared-core/nouveau_fifo.c b/shared-core/nouveau_fifo.c index 9f916307..c140a634 100644 --- a/shared-core/nouveau_fifo.c +++ b/shared-core/nouveau_fifo.c @@ -39,6 +39,8 @@ int nouveau_fifo_number(drm_device_t* dev) case NV_04: case NV_05: return 16; + case NV_50: + return 128; default: return 32; } @@ -186,7 +188,7 @@ static int nouveau_fifo_cmdbuf_alloc(struct drm_device *dev, int channel) { drm_nouveau_private_t *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + struct nouveau_fifo *chan = dev_priv->fifos[channel]; struct nouveau_config *config = &dev_priv->config; struct mem_block *cb; int cb_min_size = max(NV03_FIFO_SIZE,PAGE_SIZE); @@ -242,8 +244,8 @@ nouveau_fifo_cmdbuf_alloc(struct drm_device *dev, int channel) return ret; } - 
dev_priv->fifos[channel].pushbuf_base = 0;
-	dev_priv->fifos[channel].cmdbuf_mem = cb;
+	dev_priv->fifos[channel]->pushbuf_base = 0;
+	dev_priv->fifos[channel]->pushbuf_mem = cb;
 	return 0;
 }
 
@@ -265,22 +267,27 @@ int nouveau_fifo_alloc(drm_device_t* dev, int *chan_ret, DRMFILE filp,
 	 * (woo, full userspace command submission !)
 	 * When there are no more contexts, you lost
 	 */
-	for(channel=0; channel<nouveau_fifo_number(dev); channel++)
-		if (dev_priv->fifos[channel].used==0)
+	for(channel=0; channel<nouveau_fifo_number(dev); channel++) {
+		if ((dev_priv->card_type == NV_50) && (channel == 0))
+			continue;
+		if (dev_priv->fifos[channel] == NULL)
 			break;
+	}
 	/* no more fifos. you lost. */
 	if (channel==nouveau_fifo_number(dev))
 		return DRM_ERR(EINVAL);
 	(*chan_ret) = channel;
-	chan = &dev_priv->fifos[channel];
-	memset(chan, sizeof(*chan), 0);
+
+	dev_priv->fifos[channel] = drm_calloc(1, sizeof(struct nouveau_fifo),
+					      DRM_MEM_DRIVER);
+	if (!dev_priv->fifos[channel])
+		return DRM_ERR(ENOMEM);
+	dev_priv->fifo_alloc_count++;
+	chan = dev_priv->fifos[channel];
+	chan->filp = filp;
 
 	DRM_INFO("Allocating FIFO number %d\n", channel);
 
-	/* that fifo is used */
-	chan->used = 1;
-	chan->filp = filp;
-
 	/* Setup channel's default objects */
 	ret = nouveau_gpuobj_channel_init(dev, channel, vram_handle, tt_handle);
 	if (ret) {
@@ -324,17 +331,19 @@ int nouveau_fifo_alloc(drm_device_t* dev, int *chan_ret, DRMFILE filp,
 		return ret;
 	}
 
-	/* enable the fifo dma operation */
-	NV_WRITE(NV04_PFIFO_MODE,NV_READ(NV04_PFIFO_MODE)|(1<<channel));
-
-	NV_WRITE(NV03_FIFO_REGS_DMAPUT(channel), chan->pushbuf_base);
-	NV_WRITE(NV03_FIFO_REGS_DMAGET(channel), chan->pushbuf_base);
+	if (dev_priv->card_type < NV_50) {
+		NV_WRITE(NV03_FIFO_REGS_DMAPUT(channel), chan->pushbuf_base);
+		NV_WRITE(NV03_FIFO_REGS_DMAGET(channel), chan->pushbuf_base);
+	} else {
+		NV_WRITE(NV50_FIFO_REGS_DMAPUT(channel), chan->pushbuf_base);
+		NV_WRITE(NV50_FIFO_REGS_DMAGET(channel), chan->pushbuf_base);
+	}
 
 	/* If this is the first channel, setup PFIFO ourselves.  For any
 	 * other case, the GPU will handle this when it switches contexts.
 	 */
-	if (dev_priv->fifo_alloc_count == 0) {
+	if (dev_priv->fifo_alloc_count == 1) {
 		ret = engine->fifo.load_context(dev, channel);
 		if (ret) {
 			nouveau_fifo_free(dev, channel);
@@ -352,7 +361,7 @@ int nouveau_fifo_alloc(drm_device_t* dev, int *chan_ret, DRMFILE filp,
 		 * set. Proper fix would be to find which object+method is
 		 * responsible for modifying this state.
*/ - if (dev_priv->chipset >= 0x10) { + if (dev_priv->chipset >= 0x10 && dev_priv->chipset < 0x50) { uint32_t tmp; tmp = NV_READ(NV10_PGRAPH_SURFACE) & 0x0007ff00; NV_WRITE(NV10_PGRAPH_SURFACE, tmp); @@ -361,15 +370,14 @@ int nouveau_fifo_alloc(drm_device_t* dev, int *chan_ret, DRMFILE filp, } } - NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001); + NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, + NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) | 1); NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000001); NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000001); NV_WRITE(NV04_PFIFO_CACHE1_PULL1, 0x00000001); /* reenable the fifo caches */ - NV_WRITE(NV03_PFIFO_CACHES, 0x00000001); - - dev_priv->fifo_alloc_count++; + NV_WRITE(NV03_PFIFO_CACHES, 1); DRM_INFO("%s: initialised FIFO %d\n", __func__, channel); return 0; @@ -380,17 +388,20 @@ void nouveau_fifo_free(drm_device_t* dev, int channel) { drm_nouveau_private_t *dev_priv = dev->dev_private; nouveau_engine_func_t *engine = &dev_priv->Engine; - struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + struct nouveau_fifo *chan = dev_priv->fifos[channel]; + + if (!chan) { + DRM_ERROR("Freeing non-existant channel %d\n", channel); + return; + } - chan->used = 0; DRM_INFO("%s: freeing fifo %d\n", __func__, channel); /* disable the fifo caches */ NV_WRITE(NV03_PFIFO_CACHES, 0x00000000); - NV_WRITE(NV04_PFIFO_MODE, NV_READ(NV04_PFIFO_MODE)&~(1<fifo.destroy_context(dev, channel); /* Cleanup PGRAPH state */ @@ -399,13 +410,11 @@ void nouveau_fifo_free(drm_device_t* dev, int channel) /* reenable the fifo caches */ NV_WRITE(NV03_PFIFO_CACHES, 0x00000001); - /* Deallocate command buffer */ - if (chan->pushbuf) - nouveau_gpuobj_ref_del(dev, &chan->pushbuf); - - if (chan->cmdbuf_mem) { - nouveau_mem_free(dev, chan->cmdbuf_mem); - chan->cmdbuf_mem = NULL; + /* Deallocate push buffer */ + nouveau_gpuobj_ref_del(dev, &chan->pushbuf); + if (chan->pushbuf_mem) { + nouveau_mem_free(dev, chan->pushbuf_mem); + chan->pushbuf_mem = NULL; } nouveau_notifier_takedown_channel(dev, channel); @@ -413,7 +422,9 @@ void nouveau_fifo_free(drm_device_t* dev, int channel) /* Destroy objects belonging to the channel */ nouveau_gpuobj_channel_takedown(dev, channel); + dev_priv->fifos[channel] = NULL; dev_priv->fifo_alloc_count--; + drm_free(chan, sizeof(*chan), DRM_MEM_DRIVER); } /* cleanups all the fifos from filp */ @@ -424,7 +435,7 @@ void nouveau_fifo_cleanup(drm_device_t* dev, DRMFILE filp) DRM_DEBUG("clearing FIFO enables from filp\n"); for(i=0;ififos[i].used && dev_priv->fifos[i].filp==filp) + if (dev_priv->fifos[i] && dev_priv->fifos[i]->filp==filp) nouveau_fifo_free(dev,i); } @@ -435,9 +446,9 @@ nouveau_fifo_owner(drm_device_t *dev, DRMFILE filp, int channel) if (channel >= nouveau_fifo_number(dev)) return 0; - if (dev_priv->fifos[channel].used == 0) + if (dev_priv->fifos[channel] == NULL) return 0; - return (dev_priv->fifos[channel].filp == filp); + return (dev_priv->fifos[channel]->filp == filp); } /*********************************** @@ -460,22 +471,28 @@ static int nouveau_ioctl_fifo_alloc(DRM_IOCTL_ARGS) init.tt_ctxdma_handle); if (res) return res; - chan = &dev_priv->fifos[init.channel]; + chan = dev_priv->fifos[init.channel]; init.put_base = chan->pushbuf_base; /* make the fifo available to user space */ /* first, the fifo control regs */ - init.ctrl = dev_priv->mmio->offset + NV03_FIFO_REGS(init.channel); - init.ctrl_size = NV03_FIFO_REGS_SIZE; + init.ctrl = dev_priv->mmio->offset; + if (dev_priv->card_type < NV_50) { + init.ctrl += NV03_FIFO_REGS(init.channel); + init.ctrl_size = 
NV03_FIFO_REGS_SIZE; + } else { + init.ctrl += NV50_FIFO_REGS(init.channel); + init.ctrl_size = NV50_FIFO_REGS_SIZE; + } res = drm_addmap(dev, init.ctrl, init.ctrl_size, _DRM_REGISTERS, 0, &chan->regs); if (res != 0) return res; /* pass back FIFO map info to the caller */ - init.cmdbuf = chan->cmdbuf_mem->start; - init.cmdbuf_size = chan->cmdbuf_mem->size; + init.cmdbuf = chan->pushbuf_mem->start; + init.cmdbuf_size = chan->pushbuf_mem->size; /* and the notifier block */ init.notifier = chan->notifier_block->start; diff --git a/shared-core/nouveau_irq.c b/shared-core/nouveau_irq.c index 72b12e0c..b4102dd8 100644 --- a/shared-core/nouveau_irq.c +++ b/shared-core/nouveau_irq.c @@ -251,22 +251,25 @@ nouveau_graph_dump_trap_info(drm_device_t *dev) { drm_nouveau_private_t *dev_priv = dev->dev_private; uint32_t address; - uint32_t channel; + uint32_t channel, class; uint32_t method, subc, data; address = NV_READ(0x400704); - data = NV_READ(0x400708); channel = (address >> 20) & 0x1F; subc = (address >> 16) & 0x7; method = address & 0x1FFC; + data = NV_READ(0x400708); + if (dev_priv->card_type < NV_50) { + class = NV_READ(0x400160 + subc*4) & 0xFFFF; + } else { + class = NV_READ(0x400814); + } DRM_ERROR("NV: nSource: 0x%08x, nStatus: 0x%08x\n", NV_READ(0x400108), NV_READ(0x400104)); DRM_ERROR("NV: Channel %d/%d (class 0x%04x) -" "Method 0x%04x, Data 0x%08x\n", - channel, subc, - NV_READ(0x400160+subc*4) & 0xFFFF, - method, data + channel, subc, class, method, data ); } @@ -294,7 +297,7 @@ static void nouveau_pgraph_irq_handler(drm_device_t *dev) instance = NV_READ(0x00400158); notify = NV_READ(0x00400150) >> 16; DRM_DEBUG("instance:0x%08x\tnotify:0x%08x\n", - nsource, nstatus); + instance, notify); } status &= ~NV_PGRAPH_INTR_NOTIFY; diff --git a/shared-core/nouveau_mem.c b/shared-core/nouveau_mem.c index 49041862..c75a9356 100644 --- a/shared-core/nouveau_mem.c +++ b/shared-core/nouveau_mem.c @@ -353,7 +353,7 @@ no_agp: /* On at least NV40, RAMIN is actually at the end of vram. * We don't want to allocate this... */ if (dev_priv->card_type >= NV_40) - fb_size -= dev_priv->ramin_size; + fb_size -= dev_priv->ramin_rsvd_vram; dev_priv->fb_available_size = fb_size; DRM_DEBUG("Available VRAM: %dKiB\n", fb_size>>10); @@ -463,147 +463,6 @@ void nouveau_mem_free(struct drm_device* dev, struct mem_block* block) nouveau_mem_free_block(block); } -static void -nouveau_instmem_determine_amount(struct drm_device *dev) -{ - drm_nouveau_private_t *dev_priv = dev->dev_private; - int i; - - /* Figure out how much instance memory we need */ - switch (dev_priv->card_type) { - case NV_40: - /* We'll want more instance memory than this on some NV4x cards. - * There's a 16MB aperture to play with that maps onto the end - * of vram. For now, only reserve a small piece until we know - * more about what each chipset requires. 
- */ - dev_priv->ramin_size = (1*1024* 1024); - break; - default: - /*XXX: what *are* the limits on ramin_size = (512*1024); - break; - } - DRM_DEBUG("RAMIN size: %dKiB\n", dev_priv->ramin_size>>10); - - /* Clear all of it, except the BIOS image that's in the first 64KiB */ - for (i=(64*1024); iramin_size; i+=4) - NV_WI32(i, 0x00000000); -} - -static void -nouveau_instmem_configure_fixed_tables(struct drm_device *dev) -{ - drm_nouveau_private_t *dev_priv = dev->dev_private; - - /* FIFO hash table (RAMHT) - * use 4k hash table at RAMIN+0x10000 - * TODO: extend the hash table - */ - dev_priv->ramht_offset = 0x10000; - dev_priv->ramht_bits = 9; - dev_priv->ramht_size = (1 << dev_priv->ramht_bits); - DRM_DEBUG("RAMHT offset=0x%x, size=%d\n", dev_priv->ramht_offset, - dev_priv->ramht_size); - - /* FIFO runout table (RAMRO) - 512k at 0x11200 */ - dev_priv->ramro_offset = 0x11200; - dev_priv->ramro_size = 512; - DRM_DEBUG("RAMRO offset=0x%x, size=%d\n", dev_priv->ramro_offset, - dev_priv->ramro_size); - - /* FIFO context table (RAMFC) - * NV40 : Not sure exactly how to position RAMFC on some cards, - * 0x30002 seems to position it at RAMIN+0x20000 on these - * cards. RAMFC is 4kb (32 fifos, 128byte entries). - * Others: Position RAMFC at RAMIN+0x11400 - */ - switch(dev_priv->card_type) - { - case NV_50: - case NV_40: - case NV_44: - dev_priv->ramfc_offset = 0x20000; - dev_priv->ramfc_size = nouveau_fifo_number(dev) * - nouveau_fifo_ctx_size(dev); - break; - case NV_30: - case NV_20: - case NV_17: - case NV_10: - case NV_04: - case NV_03: - default: - dev_priv->ramfc_offset = 0x11400; - dev_priv->ramfc_size = nouveau_fifo_number(dev) * - nouveau_fifo_ctx_size(dev); - break; - } - DRM_DEBUG("RAMFC offset=0x%x, size=%d\n", dev_priv->ramfc_offset, - dev_priv->ramfc_size); -} - -int nouveau_instmem_init(struct drm_device *dev) -{ - drm_nouveau_private_t *dev_priv = dev->dev_private; - uint32_t offset; - int ret = 0; - - nouveau_instmem_determine_amount(dev); - nouveau_instmem_configure_fixed_tables(dev); - - if ((ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramht_offset, - dev_priv->ramht_size, - NVOBJ_FLAG_ZERO_ALLOC | - NVOBJ_FLAG_ALLOW_NO_REFS, - &dev_priv->ramht, NULL))) - return ret; - - /* Create a heap to manage RAMIN allocations, we don't allocate - * the space that was reserved for RAMHT/FC/RO. 
- */ - offset = dev_priv->ramfc_offset + dev_priv->ramfc_size; - ret = nouveau_mem_init_heap(&dev_priv->ramin_heap, - offset, dev_priv->ramin_size - offset); - if (ret) { - dev_priv->ramin_heap = NULL; - DRM_ERROR("Failed to init RAMIN heap\n"); - } - - return ret; -} - -struct mem_block *nouveau_instmem_alloc(struct drm_device *dev, - uint32_t size, uint32_t align) -{ - drm_nouveau_private_t *dev_priv = dev->dev_private; - struct mem_block *block; - - if (!dev_priv->ramin_heap) { - DRM_ERROR("instmem alloc called without init\n"); - return NULL; - } - - block = nouveau_mem_alloc_block(dev_priv->ramin_heap, size, align, - (DRMFILE)-2); - if (block) { - block->flags = NOUVEAU_MEM_INSTANCE; - DRM_DEBUG("instance(size=%d, align=%d) alloc'd at 0x%08x\n", - size, (1<start); - } - - return block; -} - -void nouveau_instmem_free(struct drm_device *dev, struct mem_block *block) -{ - if (dev && block) { - nouveau_mem_free_block(block); - } -} - /* * Ioctls */ diff --git a/shared-core/nouveau_notifier.c b/shared-core/nouveau_notifier.c index 4d5e26ab..9e792e57 100644 --- a/shared-core/nouveau_notifier.c +++ b/shared-core/nouveau_notifier.c @@ -33,7 +33,7 @@ int nouveau_notifier_init_channel(drm_device_t *dev, int channel, DRMFILE filp) { drm_nouveau_private_t *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + struct nouveau_fifo *chan = dev_priv->fifos[channel]; int flags, ret; /*TODO: PCI notifier blocks */ @@ -58,7 +58,7 @@ void nouveau_notifier_takedown_channel(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + struct nouveau_fifo *chan = dev_priv->fifos[channel]; if (chan->notifier_block) { nouveau_mem_free(dev, chan->notifier_block); @@ -73,7 +73,7 @@ nouveau_notifier_alloc(drm_device_t *dev, int channel, uint32_t handle, int count, uint32_t *b_offset) { drm_nouveau_private_t *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + struct nouveau_fifo *chan = dev_priv->fifos[channel]; nouveau_gpuobj_t *nobj = NULL; struct mem_block *mem; uint32_t offset; diff --git a/shared-core/nouveau_object.c b/shared-core/nouveau_object.c index 79875ca1..a394ae6e 100644 --- a/shared-core/nouveau_object.c +++ b/shared-core/nouveau_object.c @@ -76,7 +76,8 @@ nouveau_ramht_hash_handle(drm_device_t *dev, int channel, uint32_t handle) hash ^= (handle & ((1 << dev_priv->ramht_bits) - 1)); handle >>= dev_priv->ramht_bits; } - hash ^= channel << (dev_priv->ramht_bits - 4); + if (dev_priv->card_type < NV_50) + hash ^= channel << (dev_priv->ramht_bits - 4); hash <<= 3; DRM_DEBUG("ch%d handle=0x%08x hash=0x%08x\n", channel, handle, hash); @@ -99,7 +100,7 @@ static int nouveau_ramht_insert(drm_device_t* dev, nouveau_gpuobj_ref_t *ref) { drm_nouveau_private_t *dev_priv=dev->dev_private; - struct nouveau_fifo *chan = &dev_priv->fifos[ref->channel]; + struct nouveau_fifo *chan = dev_priv->fifos[ref->channel]; nouveau_gpuobj_t *ramht = chan->ramht ? chan->ramht->gpuobj : NULL; nouveau_gpuobj_t *gpuobj = ref->gpuobj; uint32_t ctx, co, ho; @@ -148,7 +149,7 @@ static void nouveau_ramht_remove(drm_device_t* dev, nouveau_gpuobj_ref_t *ref) { drm_nouveau_private_t *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = &dev_priv->fifos[ref->channel]; + struct nouveau_fifo *chan = dev_priv->fifos[ref->channel]; nouveau_gpuobj_t *ramht = chan->ramht ? 
chan->ramht->gpuobj : NULL; uint32_t co, ho; @@ -183,9 +184,11 @@ nouveau_gpuobj_new(drm_device_t *dev, int channel, int size, int align, uint32_t flags, nouveau_gpuobj_t **gpuobj_ret) { drm_nouveau_private_t *dev_priv = dev->dev_private; + nouveau_engine_func_t *engine = &dev_priv->Engine; struct nouveau_fifo *chan = NULL; nouveau_gpuobj_t *gpuobj; struct mem_block *pramin = NULL; + int ret; DRM_DEBUG("ch%d size=%d align=%d flags=0x%08x\n", channel, size, align, flags); @@ -196,7 +199,7 @@ nouveau_gpuobj_new(drm_device_t *dev, int channel, int size, int align, if (channel >= 0) { if (channel > nouveau_fifo_number(dev)) return DRM_ERR(EINVAL); - chan = &dev_priv->fifos[channel]; + chan = dev_priv->fifos[channel]; } gpuobj = drm_calloc(1, sizeof(*gpuobj), DRM_MEM_DRIVER); @@ -230,6 +233,11 @@ nouveau_gpuobj_new(drm_device_t *dev, int channel, int size, int align, return DRM_ERR(EINVAL); } + if (!chan && (ret = engine->instmem.populate(dev, gpuobj, &size))) { + nouveau_gpuobj_del(dev, &gpuobj); + return ret; + } + /* Allocate a chunk of the PRAMIN aperture */ gpuobj->im_pramin = nouveau_mem_alloc_block(pramin, size, drm_order(align), @@ -240,14 +248,9 @@ nouveau_gpuobj_new(drm_device_t *dev, int channel, int size, int align, } gpuobj->im_pramin->flags = NOUVEAU_MEM_INSTANCE; - /* On NV50 the PRAMIN aperture is paged. When allocating from the - * global instmem heap, alloc and bind VRAM pages into the PRAMIN - * aperture. - */ - if (!chan && dev_priv->card_type >= NV_50) { - DRM_ERROR("back aperture with vram pages\n"); + if (!chan && (ret = engine->instmem.bind(dev, gpuobj))) { nouveau_gpuobj_del(dev, &gpuobj); - return DRM_ERR(EINVAL); + return ret; } if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { @@ -285,6 +288,7 @@ void nouveau_gpuobj_takedown(drm_device_t *dev) int nouveau_gpuobj_del(drm_device_t *dev, nouveau_gpuobj_t **pgpuobj) { drm_nouveau_private_t *dev_priv = dev->dev_private; + nouveau_engine_func_t *engine = &dev_priv->Engine; nouveau_gpuobj_t *gpuobj; DRM_DEBUG("gpuobj %p\n", pgpuobj ? 
*pgpuobj : NULL); @@ -298,6 +302,8 @@ int nouveau_gpuobj_del(drm_device_t *dev, nouveau_gpuobj_t **pgpuobj) return DRM_ERR(EINVAL); } + engine->instmem.clear(dev, gpuobj); + if (gpuobj->im_pramin) { if (gpuobj->flags & NVOBJ_FLAG_FAKE) drm_free(gpuobj->im_pramin, sizeof(*gpuobj->im_pramin), @@ -306,9 +312,6 @@ int nouveau_gpuobj_del(drm_device_t *dev, nouveau_gpuobj_t **pgpuobj) nouveau_mem_free_block(gpuobj->im_pramin); } - if (gpuobj->im_backing) - nouveau_mem_free(dev, gpuobj->im_backing); - if (gpuobj->next) gpuobj->next->prev = gpuobj->prev; if (gpuobj->prev) @@ -342,7 +345,7 @@ nouveau_gpuobj_instance_get(drm_device_t *dev, int channel, /* NV50 channel-local instance */ if (channel > 0) { - cpramin = dev_priv->fifos[channel].ramin->gpuobj; + cpramin = dev_priv->fifos[channel]->ramin->gpuobj; *inst = gpuobj->im_pramin->start - cpramin->im_pramin->start; return 0; } @@ -358,7 +361,7 @@ nouveau_gpuobj_instance_get(drm_device_t *dev, int channel, return 0; } else { /* ...from local heap */ - cpramin = dev_priv->fifos[gpuobj->im_channel].ramin->gpuobj; + cpramin = dev_priv->fifos[gpuobj->im_channel]->ramin->gpuobj; *inst = (cpramin->im_backing->start - dev_priv->fb_phys) + (gpuobj->im_pramin->start - cpramin->im_pramin->start); return 0; @@ -385,7 +388,7 @@ nouveau_gpuobj_ref_add(drm_device_t *dev, int channel, uint32_t handle, if (channel >= 0) { if (channel > nouveau_fifo_number(dev)) return DRM_ERR(EINVAL); - chan = &dev_priv->fifos[channel]; + chan = dev_priv->fifos[channel]; } else if (!ref_ret) return DRM_ERR(EINVAL); @@ -591,9 +594,10 @@ nouveau_gpuobj_dma_new(drm_device_t *dev, int channel, int class, INSTANCE_WR(*gpuobj, 2, frame | pte_flags); INSTANCE_WR(*gpuobj, 3, frame | pte_flags); } else { - nouveau_gpuobj_del(dev, gpuobj); - DRM_ERROR("stub\n"); - return DRM_ERR(EINVAL); + INSTANCE_WR(*gpuobj, 0, 0x00190000 | class); + INSTANCE_WR(*gpuobj, 1, offset + size - 1); + INSTANCE_WR(*gpuobj, 2, offset); + INSTANCE_WR(*gpuobj, 5, 0x00010000); } (*gpuobj)->engine = NVOBJ_ENGINE_SW; @@ -672,11 +676,9 @@ nouveau_gpuobj_gr_new(drm_device_t *dev, int channel, int class, } if (dev_priv->card_type >= NV_50) { - nouveau_gpuobj_del(dev, gpuobj); - DRM_ERROR("stub!\n"); - return DRM_ERR(EINVAL); - } - + INSTANCE_WR(*gpuobj, 0, class); + INSTANCE_WR(*gpuobj, 5, 0x00010000); + } else { switch (class) { case NV_CLASS_NULL: INSTANCE_WR(*gpuobj, 0, 0x00001030); @@ -696,6 +698,7 @@ nouveau_gpuobj_gr_new(drm_device_t *dev, int channel, int class, #endif } } + } (*gpuobj)->engine = NVOBJ_ENGINE_GR; (*gpuobj)->class = class; @@ -706,7 +709,7 @@ static int nouveau_gpuobj_channel_init_pramin(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + struct nouveau_fifo *chan = dev_priv->fifos[channel]; nouveau_gpuobj_t *pramin = NULL; int size, base, ret; @@ -719,7 +722,16 @@ nouveau_gpuobj_channel_init_pramin(drm_device_t *dev, int channel) /* PGRAPH context */ if (dev_priv->card_type == NV_50) { - /* RAMHT, RAMFC, PD, funny header thingo */ + /* Various fixed table thingos */ + size += 0x1400; /* mostly unknown stuff */ + size += 0x4000; /* vm pd */ + base = 0x6000; + /* RAMHT, not sure about setting size yet, 32KiB to be safe */ + size += 0x8000; + /* RAMFC */ + size += 0x1000; + /* PGRAPH context */ + size += 0x60000; } DRM_DEBUG("ch%d PRAMIN size: 0x%08x bytes, base alloc=0x%08x\n", @@ -748,7 +760,7 @@ nouveau_gpuobj_channel_init(drm_device_t *dev, int channel, uint32_t vram_h, uint32_t tt_h) { 
drm_nouveau_private_t *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + struct nouveau_fifo *chan = dev_priv->fifos[channel]; nouveau_gpuobj_t *vram = NULL, *tt = NULL; int ret; @@ -817,7 +829,7 @@ void nouveau_gpuobj_channel_takedown(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + struct nouveau_fifo *chan = dev_priv->fifos[channel]; nouveau_gpuobj_ref_t *ref; DRM_DEBUG("ch%d\n", channel); diff --git a/shared-core/nouveau_reg.h b/shared-core/nouveau_reg.h index 4c013c53..c2ebc714 100644 --- a/shared-core/nouveau_reg.h +++ b/shared-core/nouveau_reg.h @@ -47,11 +47,15 @@ #define NV_CLASS_DMA_IN_MEMORY 0x0000003D #define NV03_FIFO_SIZE 0x8000UL -#define NV_MAX_FIFO_NUMBER 32 +#define NV_MAX_FIFO_NUMBER 128 #define NV03_FIFO_REGS_SIZE 0x10000 #define NV03_FIFO_REGS(i) (0x00800000+i*NV03_FIFO_REGS_SIZE) # define NV03_FIFO_REGS_DMAPUT(i) (NV03_FIFO_REGS(i)+0x40) # define NV03_FIFO_REGS_DMAGET(i) (NV03_FIFO_REGS(i)+0x44) +#define NV50_FIFO_REGS_SIZE 0x2000 +#define NV50_FIFO_REGS(i) (0x00c00000+i*NV50_FIFO_REGS_SIZE) +# define NV50_FIFO_REGS_DMAPUT(i) (NV50_FIFO_REGS(i)+0x40) +# define NV50_FIFO_REGS_DMAGET(i) (NV50_FIFO_REGS(i)+0x44) #define NV03_PMC_BOOT_0 0x00000000 #define NV03_PMC_INTR_0 0x00000100 @@ -332,6 +336,12 @@ #define NV04_PFIFO_MODE 0x00002504 #define NV04_PFIFO_DMA 0x00002508 #define NV04_PFIFO_SIZE 0x0000250c +#define NV50_PFIFO_CTX_TABLE(c) (0x2600+(c)*4) +#define NV50_PFIFO_CTX_TABLE__SIZE 128 +#define NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED (1<<31) +#define NV50_PFIFO_CTX_TABLE_UNK30_BAD (1<<30) +#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80 0x0FFFFFFF +#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84 0x00FFFFFF #define NV03_PFIFO_CACHE0_PUSH0 0x00003000 #define NV03_PFIFO_CACHE0_PULL0 0x00003040 #define NV04_PFIFO_CACHE0_PULL0 0x00003050 diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c index 13bc930a..bcb974bf 100644 --- a/shared-core/nouveau_state.c +++ b/shared-core/nouveau_state.c @@ -95,6 +95,12 @@ static int nouveau_init_engine_ptrs(drm_device_t *dev) switch (dev_priv->chipset & 0xf0) { case 0x00: + engine->instmem.init = nv04_instmem_init; + engine->instmem.takedown= nv04_instmem_takedown; + engine->instmem.populate = nv04_instmem_populate; + engine->instmem.clear = nv04_instmem_clear; + engine->instmem.bind = nv04_instmem_bind; + engine->instmem.unbind = nv04_instmem_unbind; engine->mc.init = nv04_mc_init; engine->mc.takedown = nv04_mc_takedown; engine->timer.init = nv04_timer_init; @@ -115,6 +121,12 @@ static int nouveau_init_engine_ptrs(drm_device_t *dev) engine->fifo.save_context = nv04_fifo_save_context; break; case 0x10: + engine->instmem.init = nv04_instmem_init; + engine->instmem.takedown= nv04_instmem_takedown; + engine->instmem.populate = nv04_instmem_populate; + engine->instmem.clear = nv04_instmem_clear; + engine->instmem.bind = nv04_instmem_bind; + engine->instmem.unbind = nv04_instmem_unbind; engine->mc.init = nv04_mc_init; engine->mc.takedown = nv04_mc_takedown; engine->timer.init = nv04_timer_init; @@ -135,6 +147,12 @@ static int nouveau_init_engine_ptrs(drm_device_t *dev) engine->fifo.save_context = nv10_fifo_save_context; break; case 0x20: + engine->instmem.init = nv04_instmem_init; + engine->instmem.takedown= nv04_instmem_takedown; + engine->instmem.populate = nv04_instmem_populate; + engine->instmem.clear = nv04_instmem_clear; + engine->instmem.bind = nv04_instmem_bind; + engine->instmem.unbind = 
nv04_instmem_unbind; engine->mc.init = nv04_mc_init; engine->mc.takedown = nv04_mc_takedown; engine->timer.init = nv04_timer_init; @@ -155,6 +173,12 @@ static int nouveau_init_engine_ptrs(drm_device_t *dev) engine->fifo.save_context = nv10_fifo_save_context; break; case 0x30: + engine->instmem.init = nv04_instmem_init; + engine->instmem.takedown= nv04_instmem_takedown; + engine->instmem.populate = nv04_instmem_populate; + engine->instmem.clear = nv04_instmem_clear; + engine->instmem.bind = nv04_instmem_bind; + engine->instmem.unbind = nv04_instmem_unbind; engine->mc.init = nv04_mc_init; engine->mc.takedown = nv04_mc_takedown; engine->timer.init = nv04_timer_init; @@ -175,6 +199,12 @@ static int nouveau_init_engine_ptrs(drm_device_t *dev) engine->fifo.save_context = nv10_fifo_save_context; break; case 0x40: + engine->instmem.init = nv04_instmem_init; + engine->instmem.takedown= nv04_instmem_takedown; + engine->instmem.populate = nv04_instmem_populate; + engine->instmem.clear = nv04_instmem_clear; + engine->instmem.bind = nv04_instmem_bind; + engine->instmem.unbind = nv04_instmem_unbind; engine->mc.init = nv40_mc_init; engine->mc.takedown = nv40_mc_takedown; engine->timer.init = nv04_timer_init; @@ -196,6 +226,12 @@ static int nouveau_init_engine_ptrs(drm_device_t *dev) break; case 0x50: case 0x80: /* gotta love NVIDIA's consistency.. */ + engine->instmem.init = nv50_instmem_init; + engine->instmem.takedown= nv50_instmem_takedown; + engine->instmem.populate = nv50_instmem_populate; + engine->instmem.clear = nv50_instmem_clear; + engine->instmem.bind = nv50_instmem_bind; + engine->instmem.unbind = nv50_instmem_unbind; engine->mc.init = nv50_mc_init; engine->mc.takedown = nv50_mc_takedown; engine->timer.init = nouveau_stub_init; @@ -249,7 +285,7 @@ static int nouveau_card_init(drm_device_t *dev) * know exactly how much VRAM we're able to use for "normal" * purposes. 
*/ - ret = nouveau_instmem_init(dev); + ret = engine->instmem.init(dev); if (ret) return ret; /* Setup the memory manager */ @@ -295,6 +331,7 @@ static void nouveau_card_takedown(drm_device_t *dev) engine->mc.takedown(dev); nouveau_gpuobj_takedown(dev); nouveau_mem_close(dev); + engine->instmem.takedown(dev); } /* here a client dies, release the stuff that was allocated for its filp */ @@ -456,6 +493,8 @@ void nouveau_wait_for_idle(struct drm_device *dev) case NV_03: while(NV_READ(NV03_PGRAPH_STATUS)); break; + case NV_50: + break; default: while(NV_READ(NV04_PGRAPH_STATUS)); break; diff --git a/shared-core/nv04_fifo.c b/shared-core/nv04_fifo.c index b84f74c1..e2e934d7 100644 --- a/shared-core/nv04_fifo.c +++ b/shared-core/nv04_fifo.c @@ -39,7 +39,7 @@ int nv04_fifo_create_context(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + struct nouveau_fifo *chan = dev_priv->fifos[channel]; int ret; if ((ret = nouveau_gpuobj_new_fake(dev, NV04_RAMFC(channel), @@ -60,6 +60,9 @@ nv04_fifo_create_context(drm_device_t *dev, int channel) NV_PFIFO_CACHE1_BIG_ENDIAN | #endif 0)); + + /* enable the fifo dma operation */ + NV_WRITE(NV04_PFIFO_MODE,NV_READ(NV04_PFIFO_MODE)|(1<dev_private; - struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + struct nouveau_fifo *chan = dev_priv->fifos[channel]; + + NV_WRITE(NV04_PFIFO_MODE, NV_READ(NV04_PFIFO_MODE)&~(1<ramfc) nouveau_gpuobj_ref_del(dev, &chan->ramfc); @@ -77,7 +82,7 @@ int nv04_fifo_load_context(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + struct nouveau_fifo *chan = dev_priv->fifos[channel]; uint32_t tmp; NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, (1<<8) | channel); @@ -105,7 +110,7 @@ int nv04_fifo_save_context(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + struct nouveau_fifo *chan = dev_priv->fifos[channel]; uint32_t tmp; RAMFC_WR(DMA_PUT, NV04_PFIFO_CACHE1_DMA_PUT); diff --git a/shared-core/nv04_graph.c b/shared-core/nv04_graph.c index 1aaae33c..df23d279 100644 --- a/shared-core/nv04_graph.c +++ b/shared-core/nv04_graph.c @@ -309,7 +309,7 @@ void nouveau_nv04_context_switch(drm_device_t *dev) for (i = 0; ififos[channel_old].pgraph_ctx[index] = NV_READ(nv04_graph_ctx_regs[i].reg+j*4); + dev_priv->fifos[channel_old]->pgraph_ctx[index] = NV_READ(nv04_graph_ctx_regs[i].reg+j*4); index++; } @@ -321,7 +321,7 @@ void nouveau_nv04_context_switch(drm_device_t *dev) for (i = 0; ififos[channel].pgraph_ctx[index]); + NV_WRITE(nv04_graph_ctx_regs[i].reg+j*4, dev_priv->fifos[channel]->pgraph_ctx[index]); index++; } @@ -340,10 +340,10 @@ int nv04_graph_create_context(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = dev->dev_private; DRM_DEBUG("nv04_graph_context_create %d\n", channel); - memset(dev_priv->fifos[channel].pgraph_ctx, 0, sizeof(dev_priv->fifos[channel].pgraph_ctx)); + memset(dev_priv->fifos[channel]->pgraph_ctx, 0, sizeof(dev_priv->fifos[channel]->pgraph_ctx)); //dev_priv->fifos[channel].pgraph_ctx_user = channel << 24; - dev_priv->fifos[channel].pgraph_ctx[0] = 0x0001ffff; + dev_priv->fifos[channel]->pgraph_ctx[0] = 0x0001ffff; /* is it really needed ??? 
*/ //dev_priv->fifos[channel].pgraph_ctx[1] = NV_READ(NV_PGRAPH_DEBUG_4); //dev_priv->fifos[channel].pgraph_ctx[2] = NV_READ(0x004006b0); @@ -379,7 +379,7 @@ int nv04_graph_init(drm_device_t *dev) { // check the context is big enough for ( i = 0 ; isizeof(dev_priv->fifos[0].pgraph_ctx) ) + if ( sum*4>sizeof(dev_priv->fifos[0]->pgraph_ctx) ) DRM_ERROR("pgraph_ctx too small\n"); NV_WRITE(NV03_PGRAPH_INTR_EN, 0x00000000); diff --git a/shared-core/nv04_instmem.c b/shared-core/nv04_instmem.c new file mode 100644 index 00000000..ac7d4347 --- /dev/null +++ b/shared-core/nv04_instmem.c @@ -0,0 +1,165 @@ +#include "drmP.h" +#include "drm.h" +#include "nouveau_drv.h" + +static void +nv04_instmem_determine_amount(struct drm_device *dev) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + int i; + + /* Figure out how much instance memory we need */ + switch (dev_priv->card_type) { + case NV_40: + /* We'll want more instance memory than this on some NV4x cards. + * There's a 16MB aperture to play with that maps onto the end + * of vram. For now, only reserve a small piece until we know + * more about what each chipset requires. + */ + dev_priv->ramin_rsvd_vram = (1*1024* 1024); + break; + default: + /*XXX: what *are* the limits on ramin_rsvd_vram = (512*1024); + break; + } + DRM_DEBUG("RAMIN size: %dKiB\n", dev_priv->ramin_rsvd_vram>>10); + + /* Clear all of it, except the BIOS image that's in the first 64KiB */ + for (i=(64*1024); iramin_rsvd_vram; i+=4) + NV_WI32(i, 0x00000000); +} + +static void +nv04_instmem_configure_fixed_tables(struct drm_device *dev) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + + /* FIFO hash table (RAMHT) + * use 4k hash table at RAMIN+0x10000 + * TODO: extend the hash table + */ + dev_priv->ramht_offset = 0x10000; + dev_priv->ramht_bits = 9; + dev_priv->ramht_size = (1 << dev_priv->ramht_bits); + DRM_DEBUG("RAMHT offset=0x%x, size=%d\n", dev_priv->ramht_offset, + dev_priv->ramht_size); + + /* FIFO runout table (RAMRO) - 512k at 0x11200 */ + dev_priv->ramro_offset = 0x11200; + dev_priv->ramro_size = 512; + DRM_DEBUG("RAMRO offset=0x%x, size=%d\n", dev_priv->ramro_offset, + dev_priv->ramro_size); + + /* FIFO context table (RAMFC) + * NV40 : Not sure exactly how to position RAMFC on some cards, + * 0x30002 seems to position it at RAMIN+0x20000 on these + * cards. RAMFC is 4kb (32 fifos, 128byte entries). + * Others: Position RAMFC at RAMIN+0x11400 + */ + switch(dev_priv->card_type) + { + case NV_40: + case NV_44: + dev_priv->ramfc_offset = 0x20000; + dev_priv->ramfc_size = nouveau_fifo_number(dev) * + nouveau_fifo_ctx_size(dev); + break; + case NV_30: + case NV_20: + case NV_17: + case NV_10: + case NV_04: + case NV_03: + default: + dev_priv->ramfc_offset = 0x11400; + dev_priv->ramfc_size = nouveau_fifo_number(dev) * + nouveau_fifo_ctx_size(dev); + break; + } + DRM_DEBUG("RAMFC offset=0x%x, size=%d\n", dev_priv->ramfc_offset, + dev_priv->ramfc_size); +} + +int nv04_instmem_init(struct drm_device *dev) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + uint32_t offset; + int ret = 0; + + nv04_instmem_determine_amount(dev); + nv04_instmem_configure_fixed_tables(dev); + + if ((ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramht_offset, + dev_priv->ramht_size, + NVOBJ_FLAG_ZERO_ALLOC | + NVOBJ_FLAG_ALLOW_NO_REFS, + &dev_priv->ramht, NULL))) + return ret; + + /* Create a heap to manage RAMIN allocations, we don't allocate + * the space that was reserved for RAMHT/FC/RO. 
+ */ + offset = dev_priv->ramfc_offset + dev_priv->ramfc_size; + ret = nouveau_mem_init_heap(&dev_priv->ramin_heap, + offset, dev_priv->ramin_rsvd_vram - offset); + if (ret) { + dev_priv->ramin_heap = NULL; + DRM_ERROR("Failed to init RAMIN heap\n"); + } + + return ret; +} + +void +nv04_instmem_takedown(drm_device_t *dev) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + + nouveau_gpuobj_del(dev, &dev_priv->ramht); +} + +int +nv04_instmem_populate(drm_device_t *dev, nouveau_gpuobj_t *gpuobj, uint32_t *sz) +{ + if (gpuobj->im_backing) + return DRM_ERR(EINVAL); + + return 0; +} + +void +nv04_instmem_clear(drm_device_t *dev, nouveau_gpuobj_t *gpuobj) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + + if (gpuobj && gpuobj->im_backing) { + if (gpuobj->im_bound) + dev_priv->Engine.instmem.unbind(dev, gpuobj); + nouveau_mem_free(dev, gpuobj->im_backing); + gpuobj->im_backing = NULL; + } +} + +int +nv04_instmem_bind(drm_device_t *dev, nouveau_gpuobj_t *gpuobj) +{ + if (!gpuobj->im_pramin || gpuobj->im_bound) + return DRM_ERR(EINVAL); + + gpuobj->im_bound = 1; + return 0; +} + +int +nv04_instmem_unbind(drm_device_t *dev, nouveau_gpuobj_t *gpuobj) +{ + if (gpuobj->im_bound == 0) + return DRM_ERR(EINVAL); + + gpuobj->im_bound = 0; + return 0; +} + diff --git a/shared-core/nv10_fifo.c b/shared-core/nv10_fifo.c index 07ec4635..2d8d5a0d 100644 --- a/shared-core/nv10_fifo.c +++ b/shared-core/nv10_fifo.c @@ -40,7 +40,7 @@ int nv10_fifo_create_context(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + struct nouveau_fifo *chan = dev_priv->fifos[channel]; int ret; if ((ret = nouveau_gpuobj_new_fake(dev, NV10_RAMFC(channel), @@ -64,6 +64,8 @@ nv10_fifo_create_context(drm_device_t *dev, int channel) #endif 0); + /* enable the fifo dma operation */ + NV_WRITE(NV04_PFIFO_MODE,NV_READ(NV04_PFIFO_MODE)|(1<dev_private; - struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + struct nouveau_fifo *chan = dev_priv->fifos[channel]; + + NV_WRITE(NV04_PFIFO_MODE, NV_READ(NV04_PFIFO_MODE)&~(1<ramfc) nouveau_gpuobj_ref_del(dev, &chan->ramfc); @@ -81,7 +85,7 @@ int nv10_fifo_load_context(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + struct nouveau_fifo *chan = dev_priv->fifos[channel]; uint32_t tmp; NV_WRITE(NV03_PFIFO_CACHE1_PUSH1 , 0x00000100 | channel); @@ -123,7 +127,7 @@ int nv10_fifo_save_context(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + struct nouveau_fifo *chan = dev_priv->fifos[channel]; uint32_t tmp; RAMFC_WR(DMA_PUT , NV_READ(NV04_PFIFO_CACHE1_DMA_PUT)); diff --git a/shared-core/nv10_graph.c b/shared-core/nv10_graph.c index d1fe0a54..c544afac 100644 --- a/shared-core/nv10_graph.c +++ b/shared-core/nv10_graph.c @@ -547,7 +547,7 @@ static int nv10_graph_ctx_regs_find_offset(drm_device_t *dev, int reg) static void restore_ctx_regs(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = dev->dev_private; - struct nouveau_fifo *fifo = &dev_priv->fifos[channel]; + struct nouveau_fifo *fifo = dev_priv->fifos[channel]; int i, j; for (i = 0; i < sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0]); i++) NV_WRITE(nv10_graph_ctx_regs[i], fifo->pgraph_ctx[i]); @@ -577,10 +577,10 @@ void nouveau_nv10_context_switch(drm_device_t *dev) // save PGRAPH context for (i = 0; i < 
sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0]); i++) - dev_priv->fifos[channel_old].pgraph_ctx[i] = NV_READ(nv10_graph_ctx_regs[i]); + dev_priv->fifos[channel_old]->pgraph_ctx[i] = NV_READ(nv10_graph_ctx_regs[i]); if (dev_priv->chipset>=0x17) { for (j = 0; j < sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0]); i++,j++) - dev_priv->fifos[channel_old].pgraph_ctx[i] = NV_READ(nv17_graph_ctx_regs[j]); + dev_priv->fifos[channel_old]->pgraph_ctx[i] = NV_READ(nv17_graph_ctx_regs[j]); } nouveau_wait_for_idle(dev); @@ -613,7 +613,7 @@ void nouveau_nv10_context_switch(drm_device_t *dev) } while (0) int nv10_graph_create_context(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = dev->dev_private; - struct nouveau_fifo *fifo = &dev_priv->fifos[channel]; + struct nouveau_fifo *fifo = dev_priv->fifos[channel]; uint32_t tmp, vramsz; DRM_DEBUG("nv10_graph_context_create %d\n", channel); diff --git a/shared-core/nv20_graph.c b/shared-core/nv20_graph.c index 13271051..06d7e440 100644 --- a/shared-core/nv20_graph.c +++ b/shared-core/nv20_graph.c @@ -32,7 +32,7 @@ int nv20_graph_create_context(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = (drm_nouveau_private_t *)dev->dev_private; - struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + struct nouveau_fifo *chan = dev_priv->fifos[channel]; unsigned int ctx_size = NV20_GRCTX_SIZE; int ret; @@ -51,7 +51,7 @@ int nv20_graph_create_context(drm_device_t *dev, int channel) { void nv20_graph_destroy_context(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + struct nouveau_fifo *chan = dev_priv->fifos[channel]; if (chan->ramin_grctx) nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx); @@ -76,7 +76,7 @@ static void nv20_graph_rdi(drm_device_t *dev) { int nv20_graph_save_context(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = (drm_nouveau_private_t *)dev->dev_private; - struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + struct nouveau_fifo *chan = dev_priv->fifos[channel]; uint32_t instance; instance = INSTANCE_RD(dev_priv->ctx_table->gpuobj, channel); @@ -97,7 +97,7 @@ int nv20_graph_save_context(drm_device_t *dev, int channel) { int nv20_graph_load_context(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = (drm_nouveau_private_t *)dev->dev_private; - struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + struct nouveau_fifo *chan = dev_priv->fifos[channel]; uint32_t instance; instance = INSTANCE_RD(dev_priv->ctx_table->gpuobj, channel); diff --git a/shared-core/nv30_graph.c b/shared-core/nv30_graph.c index 65f4f868..a83ad714 100644 --- a/shared-core/nv30_graph.c +++ b/shared-core/nv30_graph.c @@ -104,7 +104,7 @@ int nv30_graph_create_context(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = (drm_nouveau_private_t *)dev->dev_private; - struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + struct nouveau_fifo *chan = dev_priv->fifos[channel]; void (*ctx_init)(drm_device_t *, nouveau_gpuobj_t *); unsigned int ctx_size; int ret; @@ -135,7 +135,7 @@ void nv30_graph_destroy_context(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = (drm_nouveau_private_t *)dev->dev_private; - struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + struct nouveau_fifo *chan = dev_priv->fifos[channel]; if (chan->ramin_grctx) nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx); @@ -164,7 +164,7 @@ nouveau_graph_wait_idle(drm_device_t *dev) int 
nv30_graph_load_context(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + struct nouveau_fifo *chan = dev_priv->fifos[channel]; uint32_t inst; if (!chan->ramin_grctx) @@ -181,7 +181,7 @@ int nv30_graph_load_context(drm_device_t *dev, int channel) int nv30_graph_save_context(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + struct nouveau_fifo *chan = dev_priv->fifos[channel]; uint32_t inst; if (!chan->ramin_grctx) diff --git a/shared-core/nv40_fifo.c b/shared-core/nv40_fifo.c index eed3e45b..818a9024 100644 --- a/shared-core/nv40_fifo.c +++ b/shared-core/nv40_fifo.c @@ -40,7 +40,7 @@ int nv40_fifo_create_context(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + struct nouveau_fifo *chan = dev_priv->fifos[channel]; int ret; if ((ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(channel), @@ -67,6 +67,8 @@ nv40_fifo_create_context(drm_device_t *dev, int channel) RAMFC_WR(GRCTX_INSTANCE, chan->ramin_grctx->instance >> 4); RAMFC_WR(DMA_TIMESLICE , 0x0001FFFF); + /* enable the fifo dma operation */ + NV_WRITE(NV04_PFIFO_MODE,NV_READ(NV04_PFIFO_MODE)|(1<dev_private; - struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + struct nouveau_fifo *chan = dev_priv->fifos[channel]; + + NV_WRITE(NV04_PFIFO_MODE, NV_READ(NV04_PFIFO_MODE)&~(1<ramfc) nouveau_gpuobj_ref_del(dev, &chan->ramfc); @@ -84,7 +88,7 @@ int nv40_fifo_load_context(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + struct nouveau_fifo *chan = dev_priv->fifos[channel]; uint32_t tmp, tmp2; NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET , RAMFC_RD(DMA_GET)); @@ -143,7 +147,7 @@ int nv40_fifo_save_context(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + struct nouveau_fifo *chan = dev_priv->fifos[channel]; uint32_t tmp; RAMFC_WR(DMA_PUT , NV_READ(NV04_PFIFO_CACHE1_DMA_PUT)); diff --git a/shared-core/nv40_graph.c b/shared-core/nv40_graph.c index 3f33cee6..94d76505 100644 --- a/shared-core/nv40_graph.c +++ b/shared-core/nv40_graph.c @@ -1228,7 +1228,7 @@ nv40_graph_create_context(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = (drm_nouveau_private_t *)dev->dev_private; - struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + struct nouveau_fifo *chan = dev_priv->fifos[channel]; void (*ctx_init)(drm_device_t *, nouveau_gpuobj_t *); unsigned int ctx_size; int ret; @@ -1287,7 +1287,7 @@ void nv40_graph_destroy_context(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + struct nouveau_fifo *chan = dev_priv->fifos[channel]; if (chan->ramin_grctx) nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx); @@ -1330,7 +1330,7 @@ int nv40_graph_save_context(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + struct nouveau_fifo *chan = dev_priv->fifos[channel]; uint32_t inst; if (!chan->ramin_grctx) @@ -1347,7 +1347,7 @@ int nv40_graph_load_context(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + struct 
nouveau_fifo *chan = dev_priv->fifos[channel]; uint32_t inst; int ret; diff --git a/shared-core/nv50_fifo.c b/shared-core/nv50_fifo.c index e5d37949..d4c3ca87 100644 --- a/shared-core/nv50_fifo.c +++ b/shared-core/nv50_fifo.c @@ -28,55 +28,306 @@ #include "drm.h" #include "nouveau_drv.h" +typedef struct { + nouveau_gpuobj_ref_t *thingo; + nouveau_gpuobj_ref_t *dummyctx; +} nv50_fifo_priv; + +#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50) + +static void +nv50_fifo_init_thingo(drm_device_t *dev) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + nv50_fifo_priv *priv = dev_priv->Engine.fifo.priv; + nouveau_gpuobj_ref_t *thingo = priv->thingo; + int i, fi=2; + + DRM_DEBUG("\n"); + + INSTANCE_WR(thingo->gpuobj, 0, 0x7e); + INSTANCE_WR(thingo->gpuobj, 1, 0x7e); + for (i = 0; i fifos[i]) { + INSTANCE_WR(thingo->gpuobj, fi, i); + fi++; + } + } + + NV_WRITE(0x32f4, thingo->instance >> 12); + NV_WRITE(0x32ec, fi); + NV_WRITE(0x2500, 0x101); +} + +static int +nv50_fifo_channel_enable(drm_device_t *dev, int channel) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_fifo *chan = dev_priv->fifos[channel]; + + DRM_DEBUG("ch%d\n", channel); + + if (IS_G80) { + if (!chan->ramin) + return DRM_ERR(EINVAL); + + NV_WRITE(NV50_PFIFO_CTX_TABLE(channel), + (chan->ramin->instance >> 12) | + NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED); + } else { + if (!chan->ramfc) + return DRM_ERR(EINVAL); + + NV_WRITE(NV50_PFIFO_CTX_TABLE(channel), + (chan->ramfc->instance >> 8) | + NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED); + } + + nv50_fifo_init_thingo(dev); + return 0; +} + +static void +nv50_fifo_channel_disable(drm_device_t *dev, int channel, int nt) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + + DRM_DEBUG("ch%d, nt=%d\n", channel, nt); + + if (IS_G80) { + NV_WRITE(NV50_PFIFO_CTX_TABLE(channel), + NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80); + } else { + NV_WRITE(NV50_PFIFO_CTX_TABLE(channel), + NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84); + } + + if (!nt) nv50_fifo_init_thingo(dev); +} + static void nv50_fifo_init_reset(drm_device_t *dev) { drm_nouveau_private_t *dev_priv = dev->dev_private; uint32_t pmc_e; + DRM_DEBUG("\n"); + pmc_e = NV_READ(NV03_PMC_ENABLE); NV_WRITE(NV03_PMC_ENABLE, pmc_e & ~NV_PMC_ENABLE_PFIFO); pmc_e = NV_READ(NV03_PMC_ENABLE); NV_WRITE(NV03_PMC_ENABLE, pmc_e | NV_PMC_ENABLE_PFIFO); } +static void +nv50_fifo_init_context_table(drm_device_t *dev) +{ + int i; + + DRM_DEBUG("\n"); + + for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++) + nv50_fifo_channel_disable(dev, i, 1); + nv50_fifo_init_thingo(dev); +} + +static void +nv50_fifo_init_regs__nv(drm_device_t *dev) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + + DRM_DEBUG("\n"); + + NV_WRITE(0x250c, 0x6f3cfc34); +} + +static int +nv50_fifo_init_regs(drm_device_t *dev) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + nv50_fifo_priv *priv = dev_priv->Engine.fifo.priv; + int ret; + + DRM_DEBUG("\n"); + + if ((ret = nouveau_gpuobj_new_ref(dev, -1, -1, 0, 0x1000, + 0x1000, + NVOBJ_FLAG_ZERO_ALLOC | + NVOBJ_FLAG_ZERO_FREE, + &priv->dummyctx))) + return ret; + + NV_WRITE(0x2500, 0); + NV_WRITE(0x3250, 0); + NV_WRITE(0x3220, 0); + NV_WRITE(0x3204, 0); + NV_WRITE(0x3210, 0); + NV_WRITE(0x3270, 0); + + if (IS_G80) { + NV_WRITE(0x2600, (priv->dummyctx->instance>>8) | (1<<31)); + NV_WRITE(0x27fc, (priv->dummyctx->instance>>8) | (1<<31)); + } else { + NV_WRITE(0x2600, (priv->dummyctx->instance>>12) | (1<<31)); + NV_WRITE(0x27fc, (priv->dummyctx->instance>>12) | (1<<31)); + } + + return 0; +} + int 
nv50_fifo_init(drm_device_t *dev) { + drm_nouveau_private_t *dev_priv = dev->dev_private; + nv50_fifo_priv *priv; + int ret; + + DRM_DEBUG("\n"); + + priv = drm_calloc(1, sizeof(*priv), DRM_MEM_DRIVER); + if (!priv) + return DRM_ERR(ENOMEM); + dev_priv->Engine.fifo.priv = priv; + nv50_fifo_init_reset(dev); - DRM_ERROR("stub!\n"); + if ((ret = nouveau_gpuobj_new_ref(dev, -1, -1, 0, (128+2)*4, 0x1000, + NVOBJ_FLAG_ZERO_ALLOC, + &priv->thingo))) { + DRM_ERROR("error creating thingo: %d\n", ret); + return ret; + } + nv50_fifo_init_context_table(dev); + + nv50_fifo_init_regs__nv(dev); + if ((ret = nv50_fifo_init_regs(dev))) + return ret; + return 0; } void nv50_fifo_takedown(drm_device_t *dev) { - DRM_ERROR("stub!\n"); + drm_nouveau_private_t *dev_priv = dev->dev_private; + nv50_fifo_priv *priv = dev_priv->Engine.fifo.priv; + + DRM_DEBUG("\n"); + + if (!priv) + return; + + nouveau_gpuobj_ref_del(dev, &priv->thingo); + nouveau_gpuobj_ref_del(dev, &priv->dummyctx); + + dev_priv->Engine.fifo.priv = NULL; + drm_free(priv, sizeof(*priv), DRM_MEM_DRIVER); } int nv50_fifo_create_context(drm_device_t *dev, int channel) { - DRM_ERROR("stub!\n"); + drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_fifo *chan = dev_priv->fifos[channel]; + nouveau_gpuobj_t *ramfc = NULL; + int ret; + + DRM_DEBUG("ch%d\n", channel); + + if (IS_G80) { + uint32_t ramfc_offset; + ramfc_offset = chan->ramin->gpuobj->im_pramin->start + 0x1000; + if ((ret = nouveau_gpuobj_new_fake(dev, ramfc_offset, 0x100, + NVOBJ_FLAG_ZERO_ALLOC | + NVOBJ_FLAG_ZERO_FREE, + &ramfc, &chan->ramfc))) + return ret; + } else { + if ((ret = nouveau_gpuobj_new_ref(dev, channel, -1, 0, 0x100, + 256, + NVOBJ_FLAG_ZERO_ALLOC | + NVOBJ_FLAG_ZERO_FREE, + &chan->ramfc))) + return ret; + ramfc = chan->ramfc->gpuobj; + } + + INSTANCE_WR(ramfc, 0x48/4, chan->pushbuf->instance >> 4); + INSTANCE_WR(ramfc, 0x80/4, (0xc << 24) | (chan->ramht->instance >> 4)); + INSTANCE_WR(ramfc, 0x3c/4, 0x000f0078); /* fetch? 
*/ + INSTANCE_WR(ramfc, 0x44/4, 0x2101ffff); + INSTANCE_WR(ramfc, 0x60/4, 0x7fffffff); + INSTANCE_WR(ramfc, 0x10/4, 0x00000000); + INSTANCE_WR(ramfc, 0x08/4, 0x00000000); + INSTANCE_WR(ramfc, 0x40/4, 0x00000000); + INSTANCE_WR(ramfc, 0x50/4, 0x2039b2e0); + INSTANCE_WR(ramfc, 0x54/4, 0x000f0000); + INSTANCE_WR(ramfc, 0x7c/4, 0x30000001); + INSTANCE_WR(ramfc, 0x78/4, 0x00000000); + INSTANCE_WR(ramfc, 0x4c/4, 0x00007fff); + + if (!IS_G80) { + INSTANCE_WR(chan->ramin->gpuobj, 0, channel); + INSTANCE_WR(chan->ramin->gpuobj, 1, chan->ramfc->instance); + + INSTANCE_WR(ramfc, 0x88/4, 0x3d520); /* some vram addy >> 10 */ + INSTANCE_WR(ramfc, 0x98/4, chan->ramin->instance >> 12); + } + + if ((ret = nv50_fifo_channel_enable(dev, channel))) { + DRM_ERROR("error enabling ch%d: %d\n", channel, ret); + nouveau_gpuobj_ref_del(dev, &chan->ramfc); + return ret; + } + return 0; } void nv50_fifo_destroy_context(drm_device_t *dev, int channel) { + drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_fifo *chan = dev_priv->fifos[channel]; + + DRM_DEBUG("ch%d\n", channel); + + nv50_fifo_channel_disable(dev, channel, 0); + nouveau_gpuobj_ref_del(dev, &chan->ramfc); } int nv50_fifo_load_context(drm_device_t *dev, int channel) { - DRM_ERROR("stub!\n"); + drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_fifo *chan = dev_priv->fifos[channel]; + nouveau_gpuobj_t *ramfc = chan->ramfc->gpuobj; + + DRM_DEBUG("ch%d\n", channel); + + /*XXX: incomplete, only touches the regs that NV does */ + + NV_WRITE(0x3244, 0); + NV_WRITE(0x3240, 0); + + NV_WRITE(0x3224, INSTANCE_RD(ramfc, 0x3c/4)); + NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE, INSTANCE_RD(ramfc, 0x48/4)); + NV_WRITE(0x3234, INSTANCE_RD(ramfc, 0x4c/4)); + NV_WRITE(0x3254, 1); + NV_WRITE(NV03_PFIFO_RAMHT, INSTANCE_RD(ramfc, 0x80/4)); + + if (!IS_G80) { + NV_WRITE(0x340c, INSTANCE_RD(ramfc, 0x88/4)); + NV_WRITE(0x3410, INSTANCE_RD(ramfc, 0x98/4)); + } + + NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, channel | (1<<16)); return 0; } int nv50_fifo_save_context(drm_device_t *dev, int channel) { + DRM_DEBUG("ch%d\n", channel); DRM_ERROR("stub!\n"); return 0; } diff --git a/shared-core/nv50_graph.c b/shared-core/nv50_graph.c index 8c3e2b9b..271ed733 100644 --- a/shared-core/nv50_graph.c +++ b/shared-core/nv50_graph.c @@ -28,57 +28,274 @@ #include "drm.h" #include "nouveau_drv.h" +#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50) + static void nv50_graph_init_reset(drm_device_t *dev) { drm_nouveau_private_t *dev_priv = dev->dev_private; uint32_t pmc_e; + DRM_DEBUG("\n"); + pmc_e = NV_READ(NV03_PMC_ENABLE); NV_WRITE(NV03_PMC_ENABLE, pmc_e & ~NV_PMC_ENABLE_PGRAPH); pmc_e = NV_READ(NV03_PMC_ENABLE); NV_WRITE(NV03_PMC_ENABLE, pmc_e | NV_PMC_ENABLE_PGRAPH); } +static void +nv50_graph_init_regs__nv(drm_device_t *dev) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + + DRM_DEBUG("\n"); + + NV_WRITE(0x400804, 0xc0000000); + NV_WRITE(0x406800, 0xc0000000); + NV_WRITE(0x400c04, 0xc0000000); + NV_WRITE(0x401804, 0xc0000000); + NV_WRITE(0x405018, 0xc0000000); + NV_WRITE(0x402000, 0xc0000000); + + NV_WRITE(0x400108, 0xffffffff); + NV_WRITE(0x400100, 0xffffffff); + + NV_WRITE(0x400824, 0x00004000); + NV_WRITE(0x400500, 0x00010001); +} + +static void +nv50_graph_init_regs(drm_device_t *dev) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + + DRM_DEBUG("\n"); + + NV_WRITE(NV04_PGRAPH_DEBUG_3, (1<<2) /* HW_CONTEXT_SWITCH_ENABLED */); +} + +static uint32_t nv84_ctx_voodoo[] = { + 0x0070008e, 0x0070009c, 0x00200020, 0x00600008, 0x0050004c, 0x00400e89, + 
0x00200000, 0x00600007, 0x00300000, 0x00c000ff, 0x00200000, 0x008000ff, + 0x00700009, 0x0041634d, 0x00402944, 0x00402905, 0x0040290d, 0x00413e06, + 0x00600005, 0x004015c5, 0x00600011, 0x0040270b, 0x004021c5, 0x00700000, + 0x00700081, 0x00600004, 0x0050004a, 0x00216f40, 0x00600007, 0x00c02801, + 0x0020002e, 0x00800001, 0x005000cb, 0x0090ffff, 0x0091ffff, 0x00200020, + 0x00600008, 0x0050004c, 0x00600009, 0x00413e45, 0x0041594d, 0x0070009d, + 0x00402dcf, 0x0070009f, 0x0050009f, 0x00402ac0, 0x00200200, 0x00600008, + 0x00402a4f, 0x00402ac0, 0x004030cc, 0x00700081, 0x00200000, 0x00600006, + 0x00700000, 0x00111bfc, 0x00700083, 0x00300000, 0x00216f40, 0x00600007, + 0x00c00b01, 0x0020001e, 0x00800001, 0x005000cb, 0x00c000ff, 0x00700080, + 0x00700083, 0x00200047, 0x00600006, 0x0011020a, 0x00200480, 0x00600007, + 0x00300000, 0x00c000ff, 0x00c800ff, 0x00414907, 0x00202916, 0x008000ff, + 0x0040508c, 0x005000cb, 0x00a0023f, 0x00200040, 0x00600006, 0x0070000f, + 0x00170202, 0x0011020a, 0x00200032, 0x0010020d, 0x001c0242, 0x00120302, + 0x00140402, 0x00180500, 0x00130509, 0x00150550, 0x00110605, 0x0020000f, + 0x00100607, 0x00110700, 0x00110900, 0x00120902, 0x00110a00, 0x00160b02, + 0x00120b28, 0x00140b2b, 0x00110c01, 0x00111400, 0x00111405, 0x00111407, + 0x00111409, 0x0011140b, 0x002000cb, 0x00101500, 0x0040790f, 0x0040794b, + 0x00214d40, 0x00600007, 0x0020043e, 0x008800ff, 0x0070008f, 0x0040798c, + 0x005000cb, 0x00000000, 0x0020002b, 0x00101a05, 0x00131c00, 0x00121c04, + 0x00141c20, 0x00111c25, 0x00131c40, 0x00121c44, 0x00141c60, 0x00111c65, + 0x00131c80, 0x00121c84, 0x00141ca0, 0x00111ca5, 0x00131cc0, 0x00121cc4, + 0x00141ce0, 0x00111ce5, 0x00131f00, 0x00191f40, 0x0040a1e0, 0x002001ed, + 0x00600006, 0x00200044, 0x00102080, 0x001120c6, 0x001520c9, 0x001920d0, + 0x00122100, 0x00122103, 0x00162200, 0x00122207, 0x00112280, 0x00112300, + 0x00112302, 0x00122380, 0x0011238b, 0x00112394, 0x0011239c, 0x0040bee1, + 0x00200254, 0x00600006, 0x00200044, 0x00102480, 0x0040af0f, 0x0040af4b, + 0x00214d40, 0x00600007, 0x0020043e, 0x008800ff, 0x0070008f, 0x0040af8c, + 0x005000cb, 0x00000000, 0x001124c6, 0x001524c9, 0x001924d0, 0x00122500, + 0x00122503, 0x00162600, 0x00122607, 0x00112680, 0x00112700, 0x00112702, + 0x00122780, 0x0011278b, 0x00112794, 0x0011279c, 0x0040d1e2, 0x002002bb, + 0x00600006, 0x00200044, 0x00102880, 0x001128c6, 0x001528c9, 0x001928d0, + 0x00122900, 0x00122903, 0x00162a00, 0x00122a07, 0x00112a80, 0x00112b00, + 0x00112b02, 0x00122b80, 0x00112b8b, 0x00112b94, 0x00112b9c, 0x0040eee3, + 0x00200322, 0x00600006, 0x00200044, 0x00102c80, 0x0040df0f, 0x0040df4b, + 0x00214d40, 0x00600007, 0x0020043e, 0x008800ff, 0x0070008f, 0x0040df8c, + 0x005000cb, 0x00000000, 0x00112cc6, 0x00152cc9, 0x00192cd0, 0x00122d00, + 0x00122d03, 0x00162e00, 0x00122e07, 0x00112e80, 0x00112f00, 0x00112f02, + 0x00122f80, 0x00112f8b, 0x00112f94, 0x00112f9c, 0x004101e4, 0x00200389, + 0x00600006, 0x00200044, 0x00103080, 0x001130c6, 0x001530c9, 0x001930d0, + 0x00123100, 0x00123103, 0x00163200, 0x00123207, 0x00113280, 0x00113300, + 0x00113302, 0x00123380, 0x0011338b, 0x00113394, 0x0011339c, 0x00411ee5, + 0x002003f0, 0x00600006, 0x00200044, 0x00103480, 0x00410f0f, 0x00410f4b, + 0x00214d40, 0x00600007, 0x0020043e, 0x008800ff, 0x0070008f, 0x00410f8c, + 0x005000cb, 0x00000000, 0x001134c6, 0x001534c9, 0x001934d0, 0x00123500, + 0x00123503, 0x00163600, 0x00123607, 0x00113680, 0x00113700, 0x00113702, + 0x00123780, 0x0011378b, 0x00113794, 0x0011379c, 0x00000000, 0x0041250f, + 0x005000cb, 0x00214d40, 0x00600007, 0x0020043e, 0x008800ff, 0x005000cb, + 
0x00412887, 0x0060000a, 0x00000000, 0x00413700, 0x007000a0, 0x00700080, + 0x00200480, 0x00600007, 0x00200004, 0x00c000ff, 0x008000ff, 0x005000cb, + 0x00700000, 0x00200000, 0x00600006, 0x00111bfe, 0x0041594d, 0x00700000, + 0x00200000, 0x00600006, 0x00111bfe, 0x00700080, 0x0070001d, 0x0040114d, + 0x00700081, 0x00600004, 0x0050004a, 0x00414388, 0x0060000b, 0x00200000, + 0x00600006, 0x00700000, 0x0041590b, 0x00111bfd, 0x0040424d, 0x00202916, + 0x008000fd, 0x005000cb, 0x00c00002, 0x00200480, 0x00600007, 0x00200160, + 0x00800002, 0x005000cb, 0x00c01802, 0x002027b6, 0x00800002, 0x005000cb, + 0x00404e4d, 0x0060000b, 0x0041574d, 0x00700001, 0x005000cf, 0x00700003, + 0x00415e06, 0x00415f05, 0x0060000d, 0x00700005, 0x0070000d, 0x00700006, + 0x0070000b, 0x0070000e, 0x0070001c, 0x0060000c, ~0 +}; + +static void +nv50_graph_init_ctxctl(drm_device_t *dev) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + uint32_t *voodoo; + + DRM_DEBUG("\n"); + + switch (dev_priv->chipset) { + case 0x84: + voodoo = nv84_ctx_voodoo; + break; + default: + DRM_ERROR("no voodoo for chipset NV%02x\n", dev_priv->chipset); + break; + } + + if (voodoo) { + NV_WRITE(NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0); + while (*voodoo != ~0) { + NV_WRITE(NV40_PGRAPH_CTXCTL_UCODE_DATA, *voodoo); + voodoo++; + } + } + + NV_WRITE(0x400320, 4); + NV_WRITE(NV40_PGRAPH_CTXCTL_CUR, 0); + NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, 0); +} + int nv50_graph_init(drm_device_t *dev) { - nv50_graph_init_reset(dev); + DRM_DEBUG("\n"); + + nv50_graph_init_reset(dev); + nv50_graph_init_regs__nv(dev); + nv50_graph_init_regs(dev); + nv50_graph_init_ctxctl(dev); - DRM_ERROR("stub!\n"); return 0; } void nv50_graph_takedown(drm_device_t *dev) { - DRM_ERROR("stub!\n"); + DRM_DEBUG("\n"); } int nv50_graph_create_context(drm_device_t *dev, int channel) { - DRM_ERROR("stub!\n"); + drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_fifo *chan = dev_priv->fifos[channel]; + nouveau_gpuobj_t *ramin = chan->ramin->gpuobj; + int grctx_size = 0x60000, hdr; + int ret; + + DRM_DEBUG("ch%d\n", channel); + + if ((ret = nouveau_gpuobj_new_ref(dev, channel, -1, 0, + grctx_size, 0x1000, + NVOBJ_FLAG_ZERO_ALLOC | + NVOBJ_FLAG_ZERO_FREE, + &chan->ramin_grctx))) + return ret; + + hdr = IS_G80 ? 0x200 : 0x20; + INSTANCE_WR(ramin, (hdr + 0x00)/4, 0x00190002); + INSTANCE_WR(ramin, (hdr + 0x04)/4, chan->ramin_grctx->instance + + grctx_size - 1); + INSTANCE_WR(ramin, (hdr + 0x08)/4, chan->ramin_grctx->instance); + INSTANCE_WR(ramin, (hdr + 0x0c)/4, 0); + INSTANCE_WR(ramin, (hdr + 0x10)/4, 0); + INSTANCE_WR(ramin, (hdr + 0x14)/4, 0x00010000); + return 0; } void nv50_graph_destroy_context(drm_device_t *dev, int channel) { - DRM_ERROR("stub!\n"); + drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_fifo *chan = dev_priv->fifos[channel]; + int i, hdr; + + DRM_DEBUG("ch%d\n", channel); + + hdr = IS_G80 ? 0x200 : 0x20; + for (i=hdr; iramin->gpuobj, i/4, 0); + + nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx); +} + +static int +nv50_graph_transfer_context(drm_device_t *dev, uint32_t inst, int save) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + uint32_t old_cp, tv = 20000; + int i; + + DRM_DEBUG("inst=0x%08x, save=%d\n", inst, save); + + old_cp = NV_READ(NV20_PGRAPH_CHANNEL_CTX_POINTER); + NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst | (1<<31)); + NV_WRITE(0x400824, NV_READ(0x400824) | + (save ? 
NV40_PGRAPH_CTXCTL_0310_XFER_SAVE : + NV40_PGRAPH_CTXCTL_0310_XFER_LOAD)); + NV_WRITE(NV40_PGRAPH_CTXCTL_0304, NV40_PGRAPH_CTXCTL_0304_XFER_CTX); + + for (i = 0; i < tv; i++) { + if (NV_READ(NV40_PGRAPH_CTXCTL_030C) == 0) + break; + } + NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, old_cp); + + if (i == tv) { + DRM_ERROR("failed: inst=0x%08x save=%d\n", inst, save); + DRM_ERROR("0x40030C = 0x%08x\n", + NV_READ(NV40_PGRAPH_CTXCTL_030C)); + return DRM_ERR(EBUSY); + } + + return 0; } int nv50_graph_load_context(drm_device_t *dev, int channel) { - DRM_ERROR("stub!\n"); + drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_fifo *chan = dev_priv->fifos[channel]; + uint32_t inst = ((chan->ramin->instance >> 12) | (1<<31)); + int ret; + + DRM_DEBUG("ch%d\n", channel); + +#if 0 + if ((ret = nv50_graph_transfer_context(dev, inst, 0))) + return ret; +#endif + + NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst); + NV_WRITE(0x400320, 4); + NV_WRITE(NV40_PGRAPH_CTXCTL_CUR, inst); + return 0; } int nv50_graph_save_context(drm_device_t *dev, int channel) { - DRM_ERROR("stub!\n"); - return 0; + drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_fifo *chan = dev_priv->fifos[channel]; + uint32_t inst = ((chan->ramin->instance >> 12) | (1<<31)); + + DRM_DEBUG("ch%d\n", channel); + + return nv50_graph_transfer_context(dev, inst, 1); } diff --git a/shared-core/nv50_instmem.c b/shared-core/nv50_instmem.c new file mode 100644 index 00000000..4aca9e7d --- /dev/null +++ b/shared-core/nv50_instmem.c @@ -0,0 +1,262 @@ +/* + * Copyright (C) 2007 Ben Skeggs. + * + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include "drmP.h" +#include "drm.h" +#include "nouveau_drv.h" + +typedef struct { + uint32_t save1700[5]; /* 0x1700->0x1710 */ +} nv50_instmem_priv; + +#define NV50_INSTMEM_PAGE_SHIFT 12 +#define NV50_INSTMEM_PAGE_SIZE (1 << NV50_INSTMEM_PAGE_SHIFT) +#define NV50_INSTMEM_RSVD_SIZE (64 * 1024) +#define NV50_INSTMEM_PT_SIZE(a) (((a) >> 12) << 3) + +int +nv50_instmem_init(drm_device_t *dev) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + nv50_instmem_priv *priv; + uint32_t rv, pt, pts, cb, cb0, cb1, unk, as; + uint32_t i, v; + int ret; + + priv = drm_calloc(1, sizeof(*priv), DRM_MEM_DRIVER); + if (!priv) + return DRM_ERR(ENOMEM); + dev_priv->Engine.instmem.priv = priv; + + /* Save current state */ + for (i = 0x1700; i <= 0x1710; i+=4) + priv->save1700[(i-0x1700)/4] = NV_READ(i); + + as = dev_priv->ramin->size; + rv = nouveau_mem_fb_amount(dev) - (1*1024*1024); + pt = rv + 0xd0000; + pts = NV50_INSTMEM_PT_SIZE(as); + cb = rv + 0xc8000; + if ((dev_priv->chipset & 0xf0) != 0x50) { + unk = cb + 0x4200; + cb0 = cb + 0x4240; + cb1 = cb + 0x278; + } else { + unk = cb + 0x5400; + cb0 = cb + 0x5440; + cb1 = cb + 0x1438; + } + + DRM_DEBUG("PRAMIN config:\n"); + DRM_DEBUG(" Rsvd VRAM base: 0x%08x\n", rv); + DRM_DEBUG(" Aperture size: %i MiB\n", as >> 20); + DRM_DEBUG(" PT base: 0x%08x\n", pt); + DRM_DEBUG(" PT size: %d KiB\n", pts >> 10); + DRM_DEBUG(" BIOS image: 0x%08x\n", (NV_READ(0x619f04)&~0xff)<<8); + DRM_DEBUG(" Config base: 0x%08x\n", cb); + DRM_DEBUG(" ctxdma Config0: 0x%08x\n", cb0); + DRM_DEBUG(" Config1: 0x%08x\n", cb1); + + /* Map first MiB of reserved vram into BAR0 PRAMIN aperture */ + NV_WRITE(0x1700, (rv>>16)); + /* Poke some regs.. */ + NV_WRITE(0x1704, (cb>>12)); + NV_WRITE(0x1710, (((unk-cb)>>4))|(1<<31)); + NV_WRITE(0x1704, (cb>>12)|(1<<30)); + + /* CB0, some DMA object, NFI what it points at... Needed however, + * or the PRAMIN aperture doesn't operate as expected. + */ + NV_WRITE(NV_RAMIN + (cb0 - rv) + 0x00, 0x7fc00000); + NV_WRITE(NV_RAMIN + (cb0 - rv) + 0x04, 0xe1ffffff); + NV_WRITE(NV_RAMIN + (cb0 - rv) + 0x08, 0xe0000000); + NV_WRITE(NV_RAMIN + (cb0 - rv) + 0x0c, 0x01000001); + NV_WRITE(NV_RAMIN + (cb0 - rv) + 0x10, 0x00000000); + NV_WRITE(NV_RAMIN + (cb0 - rv) + 0x14, 0x00000000); + + /* CB1, points at PRAMIN PT */ + NV_WRITE(NV_RAMIN + (cb1 - rv) + 0, pt | 0x63); + NV_WRITE(NV_RAMIN + (cb1 - rv) + 4, 0x00000000); + + /* Zero PRAMIN page table */ + v = NV_RAMIN + (pt - rv); + for (i = v; i < v + pts; i += 8) { + NV_WRITE(i + 0x00, 0x00000009); + NV_WRITE(i + 0x04, 0x00000000); + } + + /* Map page table into PRAMIN aperture */ + for (i = pt; i < pt + pts; i += 0x1000) { + uint32_t pte = NV_RAMIN + (pt-rv) + (((i-pt) >> 12) << 3); + DRM_DEBUG("PRAMIN PTE = 0x%08x @ 0x%08x\n", i, pte); + NV_WRITE(pte + 0x00, i | 1); + NV_WRITE(pte + 0x04, 0x00000000); + } + + /* Points at CB0 */ + NV_WRITE(0x170c, (((cb0 - cb)>>4)|(1<<31))); + + /* Confirm it all worked, should be able to read back the page table's + * PTEs from the PRAMIN BAR + */ + NV_WRITE(0x1700, pt >> 16); + if (NV_READ(0x700000) != NV_RI32(0)) { + DRM_ERROR("Failed to init PRAMIN page table\n"); + return DRM_ERR(EINVAL); + } + + /* Create a heap to manage PRAMIN aperture allocations */ + ret = nouveau_mem_init_heap(&dev_priv->ramin_heap, pts, as-pts); + if (ret) { + DRM_ERROR("Failed to init PRAMIN heap\n"); + return DRM_ERR(ENOMEM); + } + DRM_DEBUG("NV50: PRAMIN setup ok\n"); + + /* Don't alloc the last MiB of VRAM, probably too much, but be safe + * at least for now. 
+ */ + dev_priv->ramin_rsvd_vram = 1*1024*1024; + + /*XXX: probably incorrect, but needed to make hash func "work" */ + dev_priv->ramht_offset = 0x10000; + dev_priv->ramht_bits = 9; + dev_priv->ramht_size = (1 << dev_priv->ramht_bits); + return 0; +} + +void +nv50_instmem_takedown(drm_device_t *dev) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + nv50_instmem_priv *priv = dev_priv->Engine.instmem.priv; + int i; + + if (!priv) + return; + + /* Restore state from before init */ + for (i = 0x1700; i <= 0x1710; i+=4) + NV_WRITE(i, priv->save1700[(i-0x1700)/4]); + + dev_priv->Engine.instmem.priv = NULL; + drm_free(priv, sizeof(*priv), DRM_MEM_DRIVER); +} + +int +nv50_instmem_populate(drm_device_t *dev, nouveau_gpuobj_t *gpuobj, uint32_t *sz) +{ + if (gpuobj->im_backing) + return DRM_ERR(EINVAL); + + *sz = (*sz + (NV50_INSTMEM_PAGE_SIZE-1)) & ~(NV50_INSTMEM_PAGE_SIZE-1); + if (*sz == 0) + return DRM_ERR(EINVAL); + + gpuobj->im_backing = nouveau_mem_alloc(dev, NV50_INSTMEM_PAGE_SIZE, + *sz, NOUVEAU_MEM_FB, + (DRMFILE)-2); + if (!gpuobj->im_backing) { + DRM_ERROR("Couldn't allocate vram to back PRAMIN pages\n"); + return DRM_ERR(ENOMEM); + } + + return 0; +} + +void +nv50_instmem_clear(drm_device_t *dev, nouveau_gpuobj_t *gpuobj) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + + if (gpuobj && gpuobj->im_backing) { + if (gpuobj->im_bound) + dev_priv->Engine.instmem.unbind(dev, gpuobj); + nouveau_mem_free(dev, gpuobj->im_backing); + gpuobj->im_backing = NULL; + } +} + +int +nv50_instmem_bind(drm_device_t *dev, nouveau_gpuobj_t *gpuobj) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + uint32_t pte, pte_end, vram; + + if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound) + return DRM_ERR(EINVAL); + + DRM_DEBUG("st=0x%0llx sz=0x%0llx\n", + gpuobj->im_pramin->start, gpuobj->im_pramin->size); + + pte = (gpuobj->im_pramin->start >> 12) << 3; + pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte; + vram = gpuobj->im_backing->start - dev_priv->fb_phys; + + if (pte == pte_end) { + DRM_ERROR("WARNING: badness in bind() pte calc\n"); + pte_end++; + } + + DRM_DEBUG("pramin=0x%llx, pte=%d, pte_end=%d\n", + gpuobj->im_pramin->start, pte, pte_end); + DRM_DEBUG("first vram page: 0x%llx\n", + gpuobj->im_backing->start); + + while (pte < pte_end) { + NV_WI32(pte + 0, vram | 1); + NV_WI32(pte + 4, 0x00000000); + + pte += 8; + vram += NV50_INSTMEM_PAGE_SIZE; + } + + gpuobj->im_bound = 1; + return 0; +} + +int +nv50_instmem_unbind(drm_device_t *dev, nouveau_gpuobj_t *gpuobj) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + uint32_t pte, pte_end; + + if (gpuobj->im_bound == 0) + return DRM_ERR(EINVAL); + + pte = (gpuobj->im_pramin->start >> 12) << 3; + pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte; + while (pte < pte_end) { + NV_WI32(pte + 0, 0x00000000); + NV_WI32(pte + 4, 0x00000000); + pte += 8; + } + + gpuobj->im_bound = 0; + return 0; +} + From 3c58195ccd346cc61f98b9f89cf074edf6886723 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 9 Jul 2007 15:37:37 +1000 Subject: [PATCH 086/437] nouveau: Avoid oops Turns out lastclose() gets called even if firstopen() has never been... 
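The guard is simple but easy to get wrong, so here is a minimal, self-contained sketch of the pattern this patch introduces (the struct and names below are illustrative placeholders, not the real drm_nouveau_private layout): a tri-state init flag is set pessimistically before bringing the card up, flipped to DONE on success, and checked on takedown so that lastclose() on a card that was never initialised becomes a no-op.

enum demo_init_state { DEMO_INIT_DOWN, DEMO_INIT_DONE, DEMO_INIT_FAILED };

struct demo_private {
	enum demo_init_state init_state;	/* set to DEMO_INIT_DOWN at load time */
};

static int demo_card_init(struct demo_private *priv)
{
	priv->init_state = DEMO_INIT_FAILED;	/* pessimistic until everything is up */
	/* ... bring up instmem, mc, timer, fb, graph, fifo ... */
	priv->init_state = DEMO_INIT_DONE;
	return 0;
}

static void demo_card_takedown(struct demo_private *priv)
{
	/* lastclose() may run even though firstopen() never did */
	if (priv->init_state == DEMO_INIT_DOWN)
		return;
	/* ... tear the engines down in reverse order ... */
	priv->init_state = DEMO_INIT_DOWN;
}

Note that takedown still runs when init failed partway (FAILED rather than DOWN), so partially constructed state is cleaned up; only the never-initialised case is skipped.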
--- shared-core/nouveau_drv.h | 6 ++++++ shared-core/nouveau_state.c | 23 +++++++++++++++-------- 2 files changed, 21 insertions(+), 8 deletions(-) diff --git a/shared-core/nouveau_drv.h b/shared-core/nouveau_drv.h index 81972db5..12b78a7e 100644 --- a/shared-core/nouveau_drv.h +++ b/shared-core/nouveau_drv.h @@ -184,6 +184,12 @@ typedef struct nouveau_engine_func { } nouveau_engine_func_t; typedef struct drm_nouveau_private { + enum { + NOUVEAU_CARD_INIT_DOWN, + NOUVEAU_CARD_INIT_DONE, + NOUVEAU_CARD_INIT_FAILED + } init_state; + /* the card type, takes NV_* as values */ int card_type; /* exact chipset, derived from NV_PMC_BOOT_0 */ diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c index bcb974bf..c51d7d5d 100644 --- a/shared-core/nouveau_state.c +++ b/shared-core/nouveau_state.c @@ -280,6 +280,7 @@ static int nouveau_card_init(drm_device_t *dev) ret = nouveau_init_engine_ptrs(dev); if (ret) return ret; engine = &dev_priv->Engine; + dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED; /* Initialise instance memory, must happen before mem_init so we * know exactly how much VRAM we're able to use for "normal" @@ -316,6 +317,7 @@ static int nouveau_card_init(drm_device_t *dev) /* what about PVIDEO/PCRTC/PRAMDAC etc? */ + dev_priv->init_state = NOUVEAU_CARD_INIT_DONE; return 0; } @@ -324,14 +326,18 @@ static void nouveau_card_takedown(drm_device_t *dev) drm_nouveau_private_t *dev_priv = dev->dev_private; nouveau_engine_func_t *engine = &dev_priv->Engine; - engine->fifo.takedown(dev); - engine->graph.takedown(dev); - engine->fb.takedown(dev); - engine->timer.takedown(dev); - engine->mc.takedown(dev); - nouveau_gpuobj_takedown(dev); - nouveau_mem_close(dev); - engine->instmem.takedown(dev); + if (dev_priv->init_state != NOUVEAU_CARD_INIT_DOWN) { + engine->fifo.takedown(dev); + engine->graph.takedown(dev); + engine->fb.takedown(dev); + engine->timer.takedown(dev); + engine->mc.takedown(dev); + nouveau_gpuobj_takedown(dev); + nouveau_mem_close(dev); + engine->instmem.takedown(dev); + + dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN; + } } /* here a client dies, release the stuff that was allocated for its filp */ @@ -371,6 +377,7 @@ int nouveau_load(struct drm_device *dev, unsigned long flags) dev_priv->card_type=flags&NOUVEAU_FAMILY; dev_priv->flags=flags&NOUVEAU_FLAGS; + dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN; dev->dev_private = (void *)dev_priv; From 31e33813e8c1b085683e68524e680882368e59a9 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 9 Jul 2007 20:02:14 +1000 Subject: [PATCH 087/437] nouveau: Don't be so strict on dev_private; nouveau_gpuobj_t *cpramin; - if ((channel > 0) && gpuobj->im_channel != channel) { - DRM_ERROR("Channel mismatch: obj %d, ref %d\n", - gpuobj->im_channel, channel); - return DRM_ERR(EINVAL); - } - /* card_type < NV_50) { *inst = gpuobj->im_pramin->start; return 0; } + if ((channel > 0) && gpuobj->im_channel != channel) { + DRM_ERROR("Channel mismatch: obj %d, ref %d\n", + gpuobj->im_channel, channel); + return DRM_ERR(EINVAL); + } + /* NV50 channel-local instance */ if (channel > 0) { cpramin = dev_priv->fifos[channel]->ramin->gpuobj; From 023f7d9c0064f912415c92a85c3a9d722191909f Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 9 Jul 2007 23:58:00 +1000 Subject: [PATCH 088/437] nouveau: Allocate mappable VRAM for notifiers.. 
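The fix is to request a CPU-mappable block up front instead of calling drm_addmap() on the notifier block after the fact. A hedged sketch of the flag selection follows; the wrapper function is purely illustrative, but the flags and the nouveau_mem_alloc() call mirror the allocator as it is used elsewhere in this series.

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"

/* Illustrative helper only: pick the notifier placement and ask for a mapped
 * allocation at alloc time, rather than mapping it afterwards with drm_addmap().
 */
static struct mem_block *
demo_alloc_notifier(drm_device_t *dev, int have_agp, DRMFILE filp)
{
	int flags;

	if (have_agp)
		flags = NOUVEAU_MEM_AGP | NOUVEAU_MEM_FB_ACCEPTABLE;
	else
		flags = NOUVEAU_MEM_FB;
	flags |= NOUVEAU_MEM_MAPPED;	/* the actual change in this patch */

	return nouveau_mem_alloc(dev, 0, PAGE_SIZE, flags, filp);
}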
--- shared-core/nouveau_fifo.c | 4 ---- shared-core/nouveau_notifier.c | 1 + 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/shared-core/nouveau_fifo.c b/shared-core/nouveau_fifo.c index c140a634..4095a57f 100644 --- a/shared-core/nouveau_fifo.c +++ b/shared-core/nouveau_fifo.c @@ -497,10 +497,6 @@ static int nouveau_ioctl_fifo_alloc(DRM_IOCTL_ARGS) /* and the notifier block */ init.notifier = chan->notifier_block->start; init.notifier_size = chan->notifier_block->size; - res = drm_addmap(dev, init.notifier, init.notifier_size, _DRM_REGISTERS, - 0, &chan->notifier_map); - if (res != 0) - return res; DRM_COPY_TO_USER_IOCTL((drm_nouveau_fifo_alloc_t __user *)data, init, sizeof(init)); diff --git a/shared-core/nouveau_notifier.c b/shared-core/nouveau_notifier.c index 9e792e57..30216293 100644 --- a/shared-core/nouveau_notifier.c +++ b/shared-core/nouveau_notifier.c @@ -41,6 +41,7 @@ nouveau_notifier_init_channel(drm_device_t *dev, int channel, DRMFILE filp) flags = NOUVEAU_MEM_AGP | NOUVEAU_MEM_FB_ACCEPTABLE; else flags = NOUVEAU_MEM_FB; + flags |= NOUVEAU_MEM_MAPPED; chan->notifier_block = nouveau_mem_alloc(dev, 0, PAGE_SIZE, flags,filp); if (!chan->notifier_block) From 2f2d8b9688743ac6367bf13c3c023310a257ceb7 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 9 Jul 2007 15:59:09 -0700 Subject: [PATCH 089/437] Merge xgi_mem_req and xgi_mem_alloc into a single type. These two structures were used as the request and reply for certain ioctls. Having a different type for an ioctl's input and output is just wierd. In addition, each structure contained fields (e.g., pid) that had no business being there. This change requires updates to user-space. --- linux-core/xgi_cmdlist.c | 10 +++++----- linux-core/xgi_drv.c | 7 ++----- linux-core/xgi_drv.h | 37 +++++++++++++++++++------------------ linux-core/xgi_fb.c | 10 +++++----- linux-core/xgi_pcie.c | 25 ++++++++----------------- 5 files changed, 39 insertions(+), 50 deletions(-) diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index 04ee6e82..f7730d89 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -42,12 +42,12 @@ static void xgi_cmdlist_reset(void); int xgi_cmdlist_initialize(struct xgi_info * info, size_t size) { - //struct xgi_mem_req mem_req; - struct xgi_mem_alloc mem_alloc; + struct xgi_mem_alloc mem_alloc = { + .size = size, + .owner = PCIE_2D, + }; - //mem_req.size = size; - - xgi_pcie_alloc(info, size, PCIE_2D, &mem_alloc); + xgi_pcie_alloc(info, &mem_alloc, 0); if ((mem_alloc.size == 0) && (mem_alloc.hw_addr == 0)) { return -1; diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index 081db19e..3608c747 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -894,7 +894,7 @@ int xgi_kern_ioctl(struct inode *inode, struct file *filp, break; case XGI_ESC_FB_ALLOC: XGI_INFO("Jong-xgi_ioctl_fb_alloc \n"); - xgi_fb_alloc(info, (struct xgi_mem_req *)arg_copy, alloc); + xgi_fb_alloc(info, alloc, 0); break; case XGI_ESC_FB_FREE: XGI_INFO("Jong-xgi_ioctl_fb_free \n"); @@ -906,8 +906,7 @@ int xgi_kern_ioctl(struct inode *inode, struct file *filp, break; case XGI_ESC_PCIE_ALLOC: XGI_INFO("Jong-xgi_ioctl_pcie_alloc \n"); - xgi_pcie_alloc(info, ((struct xgi_mem_req *) arg_copy)->size, - ((struct xgi_mem_req *) arg_copy)->owner, alloc); + xgi_pcie_alloc(info, alloc, 0); break; case XGI_ESC_PCIE_FREE: XGI_INFO("Jong-xgi_ioctl_pcie_free: bus_addr = 0x%lx \n", @@ -945,8 +944,6 @@ int xgi_kern_ioctl(struct inode *inode, struct file *filp, case XGI_ESC_DEBUG_INFO: 
XGI_INFO("Jong-xgi_ioctl_restore_registers \n"); xgi_restore_registers(info); - //xgi_write_pcie_mem(info, (struct xgi_mem_req *) arg_copy); - //xgi_read_pcie_mem(info, (struct xgi_mem_req *) arg_copy); break; case XGI_ESC_SUBMIT_CMDLIST: XGI_INFO("Jong-xgi_ioctl_submit_cmdlist \n"); diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 248377aa..361a1e96 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -177,19 +177,23 @@ enum PcieOwner { }; struct xgi_mem_req { - enum xgi_mem_location location; - unsigned long size; - unsigned long is_front; - enum PcieOwner owner; - unsigned long pid; }; struct xgi_mem_alloc { - enum xgi_mem_location location; - unsigned long size; + unsigned int location; + unsigned int size; + unsigned int is_front; + unsigned int owner; + + /** + * Address of the memory from the graphics hardware's point of view. + */ + u32 hw_addr; + + /** + * Physical address of the memory from the processor's point of view. + */ unsigned long bus_addr; - unsigned long hw_addr; - unsigned long pid; }; struct xgi_chip_info { @@ -274,11 +278,11 @@ struct xgi_mem_pid { #define XGI_IOCTL_POST_VBIOS _IO(XGI_IOCTL_MAGIC, XGI_ESC_POST_VBIOS) #define XGI_IOCTL_FB_INIT _IO(XGI_IOCTL_MAGIC, XGI_ESC_FB_INIT) -#define XGI_IOCTL_FB_ALLOC _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_FB_ALLOC, struct xgi_mem_req) +#define XGI_IOCTL_FB_ALLOC _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_FB_ALLOC, struct xgi_mem_alloc) #define XGI_IOCTL_FB_FREE _IOW(XGI_IOCTL_MAGIC, XGI_ESC_FB_FREE, unsigned long) #define XGI_IOCTL_PCIE_INIT _IO(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_INIT) -#define XGI_IOCTL_PCIE_ALLOC _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_ALLOC, struct xgi_mem_req) +#define XGI_IOCTL_PCIE_ALLOC _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_ALLOC, struct xgi_mem_alloc) #define XGI_IOCTL_PCIE_FREE _IOW(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_FREE, unsigned long) #define XGI_IOCTL_PUT_SCREEN_INFO _IOW(XGI_IOCTL_MAGIC, XGI_ESC_PUT_SCREEN_INFO, struct xgi_screen_info) @@ -332,25 +336,22 @@ struct xgi_mem_pid { extern int xgi_fb_heap_init(struct xgi_info * info); extern void xgi_fb_heap_cleanup(struct xgi_info * info); -extern void xgi_fb_alloc(struct xgi_info * info, struct xgi_mem_req * req, - struct xgi_mem_alloc * alloc); +extern void xgi_fb_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, + pid_t pid); extern void xgi_fb_free(struct xgi_info * info, unsigned long offset); extern void xgi_mem_collect(struct xgi_info * info, unsigned int *pcnt); extern int xgi_pcie_heap_init(struct xgi_info * info); extern void xgi_pcie_heap_cleanup(struct xgi_info * info); -extern void xgi_pcie_alloc(struct xgi_info * info, unsigned long size, - enum PcieOwner owner, struct xgi_mem_alloc * alloc); +extern void xgi_pcie_alloc(struct xgi_info * info, + struct xgi_mem_alloc * alloc, pid_t pid); extern void xgi_pcie_free(struct xgi_info * info, unsigned long offset); extern void xgi_pcie_heap_check(void); extern struct xgi_pcie_block *xgi_find_pcie_block(struct xgi_info * info, unsigned long address); extern void *xgi_find_pcie_virt(struct xgi_info * info, unsigned long address); -extern void xgi_read_pcie_mem(struct xgi_info * info, struct xgi_mem_req * req); -extern void xgi_write_pcie_mem(struct xgi_info * info, struct xgi_mem_req * req); - extern void xgi_test_rwinkernel(struct xgi_info * info, unsigned long address); #endif diff --git a/linux-core/xgi_fb.c b/linux-core/xgi_fb.c index d7e9285d..ac73b41a 100644 --- a/linux-core/xgi_fb.c +++ b/linux-core/xgi_fb.c @@ -41,13 +41,13 @@ static struct xgi_mem_block *xgi_mem_new_node(void); static 
struct xgi_mem_block *xgi_mem_alloc(struct xgi_info * info, unsigned long size); static struct xgi_mem_block *xgi_mem_free(struct xgi_info * info, unsigned long offset); -void xgi_fb_alloc(struct xgi_info * info, - struct xgi_mem_req * req, struct xgi_mem_alloc * alloc) +void xgi_fb_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, + pid_t pid) { struct xgi_mem_block *block; struct xgi_mem_pid *mempid_block; - if (req->is_front) { + if (alloc->is_front) { alloc->location = XGI_MEMLOC_LOCAL; alloc->bus_addr = info->fb.base; alloc->hw_addr = 0; @@ -55,7 +55,7 @@ void xgi_fb_alloc(struct xgi_info * info, ("Video RAM allocation on front buffer successfully! \n"); } else { xgi_down(info->fb_sem); - block = xgi_mem_alloc(info, req->size); + block = xgi_mem_alloc(info, alloc->size); xgi_up(info->fb_sem); if (block == NULL) { @@ -77,7 +77,7 @@ void xgi_fb_alloc(struct xgi_info * info, kmalloc(sizeof(struct xgi_mem_pid), GFP_KERNEL); mempid_block->location = XGI_MEMLOC_LOCAL; mempid_block->bus_addr = alloc->bus_addr; - mempid_block->pid = alloc->pid; + mempid_block->pid = pid; if (!mempid_block) XGI_ERROR("mempid_block alloc failed\n"); diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index 82111249..0f82e4ec 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -764,14 +764,13 @@ static struct xgi_pcie_block *xgi_pcie_mem_free(struct xgi_info * info, return (used_block); } -void xgi_pcie_alloc(struct xgi_info * info, unsigned long size, - enum PcieOwner owner, struct xgi_mem_alloc * alloc) +void xgi_pcie_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, + pid_t pid) { struct xgi_pcie_block *block; - struct xgi_mem_pid *mempid_block; xgi_down(info->pcie_sem); - block = xgi_pcie_mem_alloc(info, size, owner); + block = xgi_pcie_mem_alloc(info, alloc->size, alloc->owner); xgi_up(info->pcie_sem); if (block == NULL) { @@ -794,17 +793,18 @@ void xgi_pcie_alloc(struct xgi_info * info, unsigned long size, PCIE_3D request means a opengl process created. PCIE_3D_TEXTURE request means texture cannot alloc from fb. */ - if (owner == PCIE_3D || owner == PCIE_3D_TEXTURE) { - mempid_block = + if ((alloc->owner == PCIE_3D) + || (alloc->owner == PCIE_3D_TEXTURE)) { + struct xgi_mem_pid *mempid_block = kmalloc(sizeof(struct xgi_mem_pid), GFP_KERNEL); if (!mempid_block) XGI_ERROR("mempid_block alloc failed\n"); mempid_block->location = XGI_MEMLOC_NON_LOCAL; - if (owner == PCIE_3D) + if (alloc->owner == PCIE_3D) mempid_block->bus_addr = 0xFFFFFFFF; /*xgi_pcie_vertex_block has the address */ else mempid_block->bus_addr = alloc->bus_addr; - mempid_block->pid = alloc->pid; + mempid_block->pid = pid; XGI_INFO ("Memory ProcessID add one pcie block pid:%ld successfully! \n", @@ -944,15 +944,6 @@ void *xgi_find_pcie_virt(struct xgi_info * info, unsigned long address) return NULL; } -void xgi_read_pcie_mem(struct xgi_info * info, struct xgi_mem_req * req) -{ - -} - -void xgi_write_pcie_mem(struct xgi_info * info, struct xgi_mem_req * req) -{ -} - /* address -- GE hw address */ From a3f56dc3d0620633c7719a01e6e578661d65edfc Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 9 Jul 2007 16:07:27 -0700 Subject: [PATCH 090/437] Adjust the types of the fields of xgi_aperture. 
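The aperture fields describe a physical base address, a byte size and an ioremap()'d kernel pointer, so the patch below types them as unsigned long, unsigned int and void * respectively; keeping vbase a plain void * also removes the casts around ioremap_nocache() and iounmap() at the call sites. A minimal sketch of the map/unmap pair after the change (mirroring the call sites touched by this diff; the error return is shown only for illustration):

    info->mmio.vbase = ioremap_nocache(info->mmio.base, info->mmio.size);
    if (!info->mmio.vbase) {
            release_mem_region(info->mmio.base, info->mmio.size);  /* as in the probe path */
            return;                 /* hypothetical error path for illustration */
    }
    /* ... later, on teardown ... */
    iounmap(info->mmio.vbase);      /* no (void *) cast needed any more */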
--- linux-core/xgi_drv.c | 10 ++++------ linux-core/xgi_drv.h | 6 +++--- 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index 3608c747..2f0218e8 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -252,8 +252,7 @@ int xgi_kern_probe(struct pci_dev *dev, const struct pci_device_id *id_table) XGI_INFO("info->mmio.base: 0x%lx \n", info->mmio.base); XGI_INFO("info->mmio.size: 0x%lx \n", info->mmio.size); - info->mmio.vbase = (unsigned char *)ioremap_nocache(info->mmio.base, - info->mmio.size); + info->mmio.vbase = ioremap_nocache(info->mmio.base, info->mmio.size); if (!info->mmio.vbase) { release_mem_region(info->mmio.base, info->mmio.size); XGI_ERROR("info->mmio.vbase failed\n"); @@ -282,8 +281,7 @@ int xgi_kern_probe(struct pci_dev *dev, const struct pci_device_id *id_table) goto error_disable_dev; } - info->fb.vbase = (unsigned char *)ioremap_nocache(info->fb.base, - info->fb.size); + info->fb.vbase = ioremap_nocache(info->fb.base, info->fb.size); if (!info->fb.vbase) { @@ -1484,11 +1482,11 @@ void __exit xgi_exit_module(void) xgi_cmdlist_cleanup(&xgi_devices[i]); if (xgi_devices[i].fb.vbase != NULL) { - iounmap((void *)xgi_devices[i].fb.vbase); + iounmap(xgi_devices[i].fb.vbase); xgi_devices[i].fb.vbase = NULL; } if (xgi_devices[i].mmio.vbase != NULL) { - iounmap((void *)xgi_devices[i].mmio.vbase); + iounmap(xgi_devices[i].mmio.vbase); xgi_devices[i].mmio.vbase = NULL; } //release_mem_region(xgi_devices[i].fb.base, xgi_devices[i].fb.size); diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 361a1e96..6bd04cd9 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -94,9 +94,9 @@ #define XGI_CONTROL_DEVICE_NUMBER 100 struct xgi_aperture { - U32 base; // pcie base is different from fb base - U32 size; - u8 *vbase; + unsigned long base; + unsigned int size; + void *vbase; }; struct xgi_screen_info { From 7268b65d5ce804713c12b8fadc42f9a086cdfe14 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 9 Jul 2007 16:22:48 -0700 Subject: [PATCH 091/437] Correct types that are shared with user mode. 
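The U32 typedef being replaced here comes from xgi_types.h (removed a few patches later in this series), where it is defined as unsigned long on Linux, i.e. 8 bytes on a 64-bit kernel, so structures passed through ioctls did not have a fixed layout. Values with a hardware-defined width become u32, counts and flags become unsigned int. A small user-space illustration of the size mismatch, assuming an LP64 build:

    #include <stdio.h>

    typedef unsigned long U32;      /* old definition from xgi_types.h */

    int main(void)
    {
            /* prints "8 4" on an LP64 system: U32 was never actually 32 bits there */
            printf("%zu %zu\n", sizeof(U32), sizeof(unsigned int));
            return 0;
    }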
--- linux-core/xgi_cmdlist.c | 6 ++-- linux-core/xgi_drv.h | 63 ++++++++++++++++++---------------------- linux-core/xgi_misc.c | 2 +- 3 files changed, 33 insertions(+), 38 deletions(-) diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index f7730d89..ee53d30c 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -37,7 +37,8 @@ struct xgi_cmdring_info s_cmdring; static void addFlush2D(struct xgi_info * info); static unsigned int getCurBatchBeginPort(struct xgi_cmd_info * pCmdInfo); -static void triggerHWCommandList(struct xgi_info * info, U32 triggerCounter); +static void triggerHWCommandList(struct xgi_info * info, + unsigned int triggerCounter); static void xgi_cmdlist_reset(void); int xgi_cmdlist_initialize(struct xgi_info * info, size_t size) @@ -276,7 +277,8 @@ void xgi_cmdlist_cleanup(struct xgi_info * info) } } -static void triggerHWCommandList(struct xgi_info * info, U32 triggerCounter) +static void triggerHWCommandList(struct xgi_info * info, + unsigned int triggerCounter) { static unsigned int s_triggerID = 1; diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 6bd04cd9..f1cfa44e 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -100,16 +100,16 @@ struct xgi_aperture { }; struct xgi_screen_info { - U32 scrn_start; - U32 scrn_xres; - U32 scrn_yres; - U32 scrn_bpp; - U32 scrn_pitch; + unsigned int scrn_start; + unsigned int scrn_xres; + unsigned int scrn_yres; + unsigned int scrn_bpp; + unsigned int scrn_pitch; }; struct xgi_sarea_info { - U32 bus_addr; - U32 size; + unsigned long bus_addr; + unsigned int size; }; struct xgi_info { @@ -153,8 +153,8 @@ struct xgi_info { }; struct xgi_ioctl_post_vbios { - U32 bus; - U32 slot; + unsigned int bus; + unsigned int slot; }; enum xgi_mem_location { @@ -176,9 +176,6 @@ enum PcieOwner { PCIE_INVALID = 0x7fffffff }; -struct xgi_mem_req { -}; - struct xgi_mem_alloc { unsigned int location; unsigned int size; @@ -197,45 +194,41 @@ struct xgi_mem_alloc { }; struct xgi_chip_info { - U32 device_id; - char device_name[32]; - U32 vendor_id; - U32 curr_display_mode; //Singe, DualView(Contained), MHS - U32 fb_size; - U32 sarea_bus_addr; - U32 sarea_size; -}; + u16 device_id; + u16 vendor_id; -struct xgi_opengl_cmd { - U32 cmd; + char device_name[32]; + unsigned int curr_display_mode; //Singe, DualView(Contained), MHS + unsigned int fb_size; + unsigned long sarea_bus_addr; + unsigned int sarea_size; }; struct xgi_mmio_info { - struct xgi_opengl_cmd cmd_head; - void *mmioBase; - int size; + unsigned long mmio_base; + unsigned int size; }; -typedef enum { +enum xgi_batch_type { BTYPE_2D = 0, BTYPE_3D = 1, BTYPE_FLIP = 2, BTYPE_CTRL = 3, BTYPE_NONE = 0x7fffffff -} BATCH_TYPE; +}; struct xgi_cmd_info { - BATCH_TYPE _firstBeginType; - U32 _firstBeginAddr; - U32 _firstSize; - U32 _curDebugID; - U32 _lastBeginAddr; - U32 _beginCount; + unsigned int _firstBeginType; + u32 _firstBeginAddr; + u32 _firstSize; + u32 _curDebugID; + u32 _lastBeginAddr; + unsigned int _beginCount; }; struct xgi_state_info { - U32 _fromState; - U32 _toState; + unsigned int _fromState; + unsigned int _toState; }; struct xgi_mem_pid { diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index 9712241f..9c9fd38f 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -48,7 +48,7 @@ void xgi_get_device_info(struct xgi_info * info, struct xgi_chip_info * req) void xgi_get_mmio_info(struct xgi_info * info, struct xgi_mmio_info * req) { - req->mmioBase = (void *)info->mmio.base; + req->mmio_base = info->mmio.base; 
req->size = info->mmio.size; } From 1f4e24b429789710f5d69fc78335f20c023569bb Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 9 Jul 2007 16:33:14 -0700 Subject: [PATCH 092/437] Move types shared with user mode to xgi_drm.h. --- linux-core/xgi_drm.h | 1 + linux-core/xgi_drv.h | 131 +----------------------------- shared-core/Makefile.am | 3 +- shared-core/xgi_drm.h | 176 ++++++++++++++++++++++++++++++++++++++++ 4 files changed, 181 insertions(+), 130 deletions(-) create mode 120000 linux-core/xgi_drm.h create mode 100644 shared-core/xgi_drm.h diff --git a/linux-core/xgi_drm.h b/linux-core/xgi_drm.h new file mode 120000 index 00000000..677586d7 --- /dev/null +++ b/linux-core/xgi_drm.h @@ -0,0 +1 @@ +../shared-core/xgi_drm.h \ No newline at end of file diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index f1cfa44e..803ed9c1 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -29,6 +29,8 @@ #ifndef _XGI_DRV_H_ #define _XGI_DRV_H_ +#include "xgi_drm.h" + #define XGI_MAJOR_VERSION 0 #define XGI_MINOR_VERSION 7 #define XGI_PATCHLEVEL 5 @@ -99,19 +101,6 @@ struct xgi_aperture { void *vbase; }; -struct xgi_screen_info { - unsigned int scrn_start; - unsigned int scrn_xres; - unsigned int scrn_yres; - unsigned int scrn_bpp; - unsigned int scrn_pitch; -}; - -struct xgi_sarea_info { - unsigned long bus_addr; - unsigned int size; -}; - struct xgi_info { struct pci_dev *dev; int flags; @@ -157,12 +146,6 @@ struct xgi_ioctl_post_vbios { unsigned int slot; }; -enum xgi_mem_location { - XGI_MEMLOC_NON_LOCAL = 0, - XGI_MEMLOC_LOCAL = 1, - XGI_MEMLOC_INVALID = 0x7fffffff -}; - enum PcieOwner { PCIE_2D = 0, /* @@ -176,61 +159,6 @@ enum PcieOwner { PCIE_INVALID = 0x7fffffff }; -struct xgi_mem_alloc { - unsigned int location; - unsigned int size; - unsigned int is_front; - unsigned int owner; - - /** - * Address of the memory from the graphics hardware's point of view. - */ - u32 hw_addr; - - /** - * Physical address of the memory from the processor's point of view. 
- */ - unsigned long bus_addr; -}; - -struct xgi_chip_info { - u16 device_id; - u16 vendor_id; - - char device_name[32]; - unsigned int curr_display_mode; //Singe, DualView(Contained), MHS - unsigned int fb_size; - unsigned long sarea_bus_addr; - unsigned int sarea_size; -}; - -struct xgi_mmio_info { - unsigned long mmio_base; - unsigned int size; -}; - -enum xgi_batch_type { - BTYPE_2D = 0, - BTYPE_3D = 1, - BTYPE_FLIP = 2, - BTYPE_CTRL = 3, - BTYPE_NONE = 0x7fffffff -}; - -struct xgi_cmd_info { - unsigned int _firstBeginType; - u32 _firstBeginAddr; - u32 _firstSize; - u32 _curDebugID; - u32 _lastBeginAddr; - unsigned int _beginCount; -}; - -struct xgi_state_info { - unsigned int _fromState; - unsigned int _toState; -}; - struct xgi_mem_pid { struct list_head list; enum xgi_mem_location location; @@ -238,61 +166,6 @@ struct xgi_mem_pid { unsigned long pid; }; -/* - * Ioctl definitions - */ - -#define XGI_IOCTL_MAGIC 'x' /* use 'x' as magic number */ - -#define XGI_IOCTL_BASE 0 -#define XGI_ESC_DEVICE_INFO (XGI_IOCTL_BASE + 0) -#define XGI_ESC_POST_VBIOS (XGI_IOCTL_BASE + 1) - -#define XGI_ESC_FB_INIT (XGI_IOCTL_BASE + 2) -#define XGI_ESC_FB_ALLOC (XGI_IOCTL_BASE + 3) -#define XGI_ESC_FB_FREE (XGI_IOCTL_BASE + 4) -#define XGI_ESC_PCIE_INIT (XGI_IOCTL_BASE + 5) -#define XGI_ESC_PCIE_ALLOC (XGI_IOCTL_BASE + 6) -#define XGI_ESC_PCIE_FREE (XGI_IOCTL_BASE + 7) -#define XGI_ESC_SUBMIT_CMDLIST (XGI_IOCTL_BASE + 8) -#define XGI_ESC_PUT_SCREEN_INFO (XGI_IOCTL_BASE + 9) -#define XGI_ESC_GET_SCREEN_INFO (XGI_IOCTL_BASE + 10) -#define XGI_ESC_GE_RESET (XGI_IOCTL_BASE + 11) -#define XGI_ESC_SAREA_INFO (XGI_IOCTL_BASE + 12) -#define XGI_ESC_DUMP_REGISTER (XGI_IOCTL_BASE + 13) -#define XGI_ESC_DEBUG_INFO (XGI_IOCTL_BASE + 14) -#define XGI_ESC_TEST_RWINKERNEL (XGI_IOCTL_BASE + 16) -#define XGI_ESC_STATE_CHANGE (XGI_IOCTL_BASE + 17) -#define XGI_ESC_MMIO_INFO (XGI_IOCTL_BASE + 18) -#define XGI_ESC_PCIE_CHECK (XGI_IOCTL_BASE + 19) -#define XGI_ESC_MEM_COLLECT (XGI_IOCTL_BASE + 20) - -#define XGI_IOCTL_DEVICE_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_DEVICE_INFO, struct xgi_chip_info) -#define XGI_IOCTL_POST_VBIOS _IO(XGI_IOCTL_MAGIC, XGI_ESC_POST_VBIOS) - -#define XGI_IOCTL_FB_INIT _IO(XGI_IOCTL_MAGIC, XGI_ESC_FB_INIT) -#define XGI_IOCTL_FB_ALLOC _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_FB_ALLOC, struct xgi_mem_alloc) -#define XGI_IOCTL_FB_FREE _IOW(XGI_IOCTL_MAGIC, XGI_ESC_FB_FREE, unsigned long) - -#define XGI_IOCTL_PCIE_INIT _IO(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_INIT) -#define XGI_IOCTL_PCIE_ALLOC _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_ALLOC, struct xgi_mem_alloc) -#define XGI_IOCTL_PCIE_FREE _IOW(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_FREE, unsigned long) - -#define XGI_IOCTL_PUT_SCREEN_INFO _IOW(XGI_IOCTL_MAGIC, XGI_ESC_PUT_SCREEN_INFO, struct xgi_screen_info) -#define XGI_IOCTL_GET_SCREEN_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_GET_SCREEN_INFO, struct xgi_screen_info) - -#define XGI_IOCTL_GE_RESET _IO(XGI_IOCTL_MAGIC, XGI_ESC_GE_RESET) -#define XGI_IOCTL_SAREA_INFO _IOW(XGI_IOCTL_MAGIC, XGI_ESC_SAREA_INFO, struct xgi_sarea_info) -#define XGI_IOCTL_DUMP_REGISTER _IO(XGI_IOCTL_MAGIC, XGI_ESC_DUMP_REGISTER) -#define XGI_IOCTL_DEBUG_INFO _IO(XGI_IOCTL_MAGIC, XGI_ESC_DEBUG_INFO) -#define XGI_IOCTL_MMIO_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_MMIO_INFO, struct xgi_mmio_info) - -#define XGI_IOCTL_SUBMIT_CMDLIST _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_SUBMIT_CMDLIST, struct xgi_cmd_info) -#define XGI_IOCTL_TEST_RWINKERNEL _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_TEST_RWINKERNEL, unsigned long) -#define XGI_IOCTL_STATE_CHANGE _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_STATE_CHANGE, 
struct xgi_state_info) - -#define XGI_IOCTL_PCIE_CHECK _IO(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_CHECK) -#define XGI_IOCTL_MAXNR 30 /* * flags diff --git a/shared-core/Makefile.am b/shared-core/Makefile.am index f0ebf2a3..7193e527 100644 --- a/shared-core/Makefile.am +++ b/shared-core/Makefile.am @@ -36,4 +36,5 @@ klibdrminclude_HEADERS = \ sis_drm.h \ via_drm.h \ r300_reg.h \ - via_3d_reg.h + via_3d_reg.h \ + xgi_drm.h diff --git a/shared-core/xgi_drm.h b/shared-core/xgi_drm.h new file mode 100644 index 00000000..3a5dbc65 --- /dev/null +++ b/shared-core/xgi_drm.h @@ -0,0 +1,176 @@ +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + ***************************************************************************/ + +#ifndef _XGI_DRM_H_ +#define _XGI_DRM_H_ + +#include +#include + +struct xgi_chip_info { + __u16 device_id; + __u16 vendor_id; + + char device_name[32]; + unsigned int curr_display_mode; //Singe, DualView(Contained), MHS + unsigned int fb_size; + unsigned long sarea_bus_addr; + unsigned int sarea_size; +}; + +enum xgi_mem_location { + XGI_MEMLOC_NON_LOCAL = 0, + XGI_MEMLOC_LOCAL = 1, + XGI_MEMLOC_INVALID = 0x7fffffff +}; + +struct xgi_mem_alloc { + unsigned int location; + unsigned int size; + unsigned int is_front; + unsigned int owner; + + /** + * Address of the memory from the graphics hardware's point of view. + */ + __u32 hw_addr; + + /** + * Physical address of the memory from the processor's point of view. 
+ */ + unsigned long bus_addr; +}; + +struct xgi_screen_info { + unsigned int scrn_start; + unsigned int scrn_xres; + unsigned int scrn_yres; + unsigned int scrn_bpp; + unsigned int scrn_pitch; +}; + +struct xgi_sarea_info { + unsigned long bus_addr; + unsigned int size; +}; + +enum xgi_batch_type { + BTYPE_2D = 0, + BTYPE_3D = 1, + BTYPE_FLIP = 2, + BTYPE_CTRL = 3, + BTYPE_NONE = 0x7fffffff +}; + +struct xgi_cmd_info { + unsigned int _firstBeginType; + __u32 _firstBeginAddr; + __u32 _firstSize; + __u32 _curDebugID; + __u32 _lastBeginAddr; + unsigned int _beginCount; + +}; + +struct xgi_state_info { + unsigned int _fromState; + unsigned int _toState; +}; + +struct xgi_mmio_info { + unsigned long mmio_base; + unsigned int size; +}; + + +/* + * Ioctl definitions + */ + +#define XGI_IOCTL_MAGIC 'x' /* use 'x' as magic number */ + +#define XGI_IOCTL_BASE 0 +#define XGI_ESC_DEVICE_INFO (XGI_IOCTL_BASE + 0) +#define XGI_ESC_POST_VBIOS (XGI_IOCTL_BASE + 1) + +#define XGI_ESC_FB_INIT (XGI_IOCTL_BASE + 2) +#define XGI_ESC_FB_ALLOC (XGI_IOCTL_BASE + 3) +#define XGI_ESC_FB_FREE (XGI_IOCTL_BASE + 4) +#define XGI_ESC_PCIE_INIT (XGI_IOCTL_BASE + 5) +#define XGI_ESC_PCIE_ALLOC (XGI_IOCTL_BASE + 6) +#define XGI_ESC_PCIE_FREE (XGI_IOCTL_BASE + 7) +#define XGI_ESC_SUBMIT_CMDLIST (XGI_IOCTL_BASE + 8) +#define XGI_ESC_PUT_SCREEN_INFO (XGI_IOCTL_BASE + 9) +#define XGI_ESC_GET_SCREEN_INFO (XGI_IOCTL_BASE + 10) +#define XGI_ESC_GE_RESET (XGI_IOCTL_BASE + 11) +#define XGI_ESC_SAREA_INFO (XGI_IOCTL_BASE + 12) +#define XGI_ESC_DUMP_REGISTER (XGI_IOCTL_BASE + 13) +#define XGI_ESC_DEBUG_INFO (XGI_IOCTL_BASE + 14) +#define XGI_ESC_TEST_RWINKERNEL (XGI_IOCTL_BASE + 16) +#define XGI_ESC_STATE_CHANGE (XGI_IOCTL_BASE + 17) +#define XGI_ESC_MMIO_INFO (XGI_IOCTL_BASE + 18) +#define XGI_ESC_PCIE_CHECK (XGI_IOCTL_BASE + 19) +#define XGI_ESC_MEM_COLLECT (XGI_IOCTL_BASE + 20) + +#define XGI_IOCTL_DEVICE_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_DEVICE_INFO, struct xgi_chip_info) +#define XGI_IOCTL_POST_VBIOS _IO(XGI_IOCTL_MAGIC, XGI_ESC_POST_VBIOS) + +#define XGI_IOCTL_FB_INIT _IO(XGI_IOCTL_MAGIC, XGI_ESC_FB_INIT) +#define XGI_IOCTL_FB_ALLOC _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_FB_ALLOC, struct xgi_mem_req) +#define XGI_IOCTL_FB_FREE _IOW(XGI_IOCTL_MAGIC, XGI_ESC_FB_FREE, unsigned long) + +#define XGI_IOCTL_PCIE_INIT _IO(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_INIT) +#define XGI_IOCTL_PCIE_ALLOC _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_ALLOC, struct xgi_mem_req) +#define XGI_IOCTL_PCIE_FREE _IOW(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_FREE, unsigned long) + +#define XGI_IOCTL_PUT_SCREEN_INFO _IOW(XGI_IOCTL_MAGIC, XGI_ESC_PUT_SCREEN_INFO, struct xgi_screen_info) +#define XGI_IOCTL_GET_SCREEN_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_GET_SCREEN_INFO, struct xgi_screen_info) + +#define XGI_IOCTL_GE_RESET _IO(XGI_IOCTL_MAGIC, XGI_ESC_GE_RESET) +#define XGI_IOCTL_SAREA_INFO _IOW(XGI_IOCTL_MAGIC, XGI_ESC_SAREA_INFO, struct xgi_sarea_info) +#define XGI_IOCTL_DUMP_REGISTER _IO(XGI_IOCTL_MAGIC, XGI_ESC_DUMP_REGISTER) +#define XGI_IOCTL_DEBUG_INFO _IO(XGI_IOCTL_MAGIC, XGI_ESC_DEBUG_INFO) +#define XGI_IOCTL_MMIO_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_MMIO_INFO, struct xgi_mmio_info) + +#define XGI_IOCTL_SUBMIT_CMDLIST _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_SUBMIT_CMDLIST, struct xgi_cmd_info) +#define XGI_IOCTL_TEST_RWINKERNEL _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_TEST_RWINKERNEL, unsigned long) +#define XGI_IOCTL_STATE_CHANGE _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_STATE_CHANGE, struct xgi_state_info) + +#define XGI_IOCTL_PCIE_CHECK _IO(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_CHECK) +#define XGI_IOCTL_MAXNR 30 + +/* + 
* flags + */ +#define XGI_FLAG_OPEN 0x0001 +#define XGI_FLAG_NEEDS_POSTING 0x0002 +#define XGI_FLAG_WAS_POSTED 0x0004 +#define XGI_FLAG_CONTROL 0x0010 +#define XGI_FLAG_MAP_REGS_EARLY 0x0200 + + +#endif /* _XGI_DRM_H_ */ From 5c481d0a4284ec7311a47fbeab1680d007769668 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 9 Jul 2007 16:43:48 -0700 Subject: [PATCH 093/437] Eliminiate fields in xgi_info that are duplicates of fields in pci_dev. --- linux-core/xgi_drv.c | 51 ++++++++++++------------------------------- linux-core/xgi_drv.h | 10 ++------- linux-core/xgi_misc.c | 4 ++-- 3 files changed, 18 insertions(+), 47 deletions(-) diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index 2f0218e8..c4cc8900 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -233,10 +233,6 @@ int xgi_kern_probe(struct pci_dev *dev, const struct pci_device_id *id_table) info = &xgi_devices[xgi_num_devices]; info->dev = dev; - info->vendor_id = dev->vendor; - info->device_id = dev->device; - info->bus = dev->bus->number; - info->slot = PCI_SLOT((dev)->devfn); xgi_lock_init(info); @@ -294,14 +290,13 @@ int xgi_kern_probe(struct pci_dev *dev, const struct pci_device_id *id_table) info->fb.vbase = NULL; XGI_INFO("info->fb.vbase: 0x%p \n", info->fb.vbase); - info->irq = dev->irq; /* check common error condition */ - if (info->irq == 0) { + if (info->dev->irq == 0) { XGI_ERROR("Can't find an IRQ for your XGI card! \n"); goto error_zero_dev; } - XGI_INFO("info->irq: %lx \n", info->irq); + XGI_INFO("info->irq: %lx \n", info->dev->irq); //xgi_enable_dvi_interrupt(info); @@ -568,21 +563,21 @@ int xgi_kern_open(struct inode *inode, struct file *filp) if (!(info->flags & XGI_FLAG_OPEN)) { XGI_INFO("info->flags & XGI_FLAG_OPEN \n"); - if (info->device_id == 0) { + if (info->dev->device == 0) { XGI_INFO("open of nonexistent device %d\n", dev_num); result = -ENXIO; goto failed; } /* initialize struct irqaction */ - status = request_irq(info->irq, xgi_kern_isr, + status = request_irq(info->dev->irq, xgi_kern_isr, SA_INTERRUPT | SA_SHIRQ, "xgi", (void *)info); if (status != 0) { - if (info->irq && (status == -EBUSY)) { + if (info->dev->irq && (status == -EBUSY)) { XGI_ERROR ("Tried to get irq %d, but another driver", - (unsigned int)info->irq); + (unsigned int)info->dev->irq); XGI_ERROR("has it and is not sharing it.\n"); } XGI_ERROR("isr request failed 0x%x\n", status); @@ -651,7 +646,7 @@ int xgi_kern_release(struct inode *inode, struct file *filp) * Free the IRQ, which may block until all pending interrupt processing * has completed. 
*/ - free_irq(info->irq, (void *)info); + free_irq(info->dev->irq, (void *)info); xgi_cmdlist_cleanup(info); @@ -1064,21 +1059,6 @@ static u8 xgi_find_pcie_capability(struct pci_dev *dev) return 0; } -static struct pci_dev *xgi_get_pci_device(struct xgi_info * info) -{ - struct pci_dev *dev; - - dev = XGI_PCI_GET_DEVICE(info->vendor_id, info->device_id, NULL); - while (dev) { - if (XGI_PCI_SLOT_NUMBER(dev) == info->slot - && XGI_PCI_BUS_NUMBER(dev) == info->bus) - return dev; - dev = XGI_PCI_GET_DEVICE(info->vendor_id, info->device_id, dev); - } - - return NULL; -} - int xgi_kern_read_card_info(char *page, char **start, off_t off, int count, int *eof, void *data) { @@ -1089,7 +1069,7 @@ int xgi_kern_read_card_info(char *page, char **start, off_t off, struct xgi_info *info; info = (struct xgi_info *) data; - dev = xgi_get_pci_device(info); + dev = info->dev; if (!dev) return 0; @@ -1162,13 +1142,10 @@ static void xgi_proc_create(void) xgi_max_devices = xgi_devices + XGI_MAX_DEVICES; for (info = xgi_devices; info < xgi_max_devices; info++) { - if (info->device_id == 0) - break; - /* world readable file */ flags = S_IFREG | S_IRUGO; - dev = xgi_get_pci_device(info); + dev = info->dev; if (!dev) break; @@ -1314,19 +1291,19 @@ static void xgi_dev_init(struct xgi_info * info) for (dev = xgidev_list; dev->vendor; dev++) { if ((dev->vendor == pdev->vendor) && (dev->device == pdev->device)) { + u8 rev_id; + XGI_INFO("dev->vendor = pdev->vendor= %x \n", dev->vendor); XGI_INFO("dev->device = pdev->device= %x \n", dev->device); - xgi_devices[found].device_id = pdev->device; + xgi_devices[found].dev = pdev; pci_read_config_byte(pdev, PCI_REVISION_ID, - &xgi_devices[found]. - revision_id); + rev_id); - XGI_INFO("PCI_REVISION_ID= %x \n", - xgi_devices[found].revision_id); + XGI_INFO("PCI_REVISION_ID= %x \n", rev_id); pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 803ed9c1..efbbd647 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -105,11 +105,6 @@ struct xgi_info { struct pci_dev *dev; int flags; int device_number; - int bus; /* PCI config info */ - int slot; - int vendor_id; - U32 device_id; - u8 revision_id; /* physical characteristics */ struct xgi_aperture mmio; @@ -125,9 +120,8 @@ struct xgi_info { bool isLUTInLFB; unsigned int sdfbPageSize; - U32 pcie_config; - U32 pcie_status; - U32 irq; + u32 pcie_config; + u32 pcie_status; atomic_t use_count; diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index 9c9fd38f..d3385bef 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -34,12 +34,12 @@ void xgi_get_device_info(struct xgi_info * info, struct xgi_chip_info * req) { - req->device_id = info->device_id; + req->device_id = info->dev->device; req->device_name[0] = 'x'; req->device_name[1] = 'g'; req->device_name[2] = '4'; req->device_name[3] = '7'; - req->vendor_id = info->vendor_id; + req->vendor_id = info->dev->vendor; req->curr_display_mode = 0; req->fb_size = info->fb.size; req->sarea_bus_addr = info->sarea_info.bus_addr; From a9c49be6f8a0aa199a9dc0ffd0a9aa2b85cd796d Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 9 Jul 2007 18:52:43 -0700 Subject: [PATCH 094/437] Fix ioctl types. I had moved code from xgi_drv.h to xgi_drm.h before changing the ioctl types for XGI_IOCTL_(FB|PCIE)_ALLOC. 
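The two ALLOC ioctl numbers must name struct xgi_mem_alloc: struct xgi_mem_req was emptied and then deleted by the earlier cleanups, and _IOWR() folds the sizeof() of its argument type into the command number, so the stale type makes the header fail to build and would otherwise change the ioctl value seen by user space. Roughly how the size is encoded (simplified from the kernel's <asm-generic/ioctl.h>; treat this as a sketch, not the exact definition):

    #define _IOWR(type, nr, argtype) \
            _IOC(_IOC_READ | _IOC_WRITE, (type), (nr), sizeof(argtype))
    /* so XGI_IOCTL_FB_ALLOC changes value whenever its argument struct changes size */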
--- shared-core/xgi_drm.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/shared-core/xgi_drm.h b/shared-core/xgi_drm.h index 3a5dbc65..0abf390a 100644 --- a/shared-core/xgi_drm.h +++ b/shared-core/xgi_drm.h @@ -140,11 +140,11 @@ struct xgi_mmio_info { #define XGI_IOCTL_POST_VBIOS _IO(XGI_IOCTL_MAGIC, XGI_ESC_POST_VBIOS) #define XGI_IOCTL_FB_INIT _IO(XGI_IOCTL_MAGIC, XGI_ESC_FB_INIT) -#define XGI_IOCTL_FB_ALLOC _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_FB_ALLOC, struct xgi_mem_req) +#define XGI_IOCTL_FB_ALLOC _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_FB_ALLOC, struct xgi_mem_alloc) #define XGI_IOCTL_FB_FREE _IOW(XGI_IOCTL_MAGIC, XGI_ESC_FB_FREE, unsigned long) #define XGI_IOCTL_PCIE_INIT _IO(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_INIT) -#define XGI_IOCTL_PCIE_ALLOC _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_ALLOC, struct xgi_mem_req) +#define XGI_IOCTL_PCIE_ALLOC _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_ALLOC, struct xgi_mem_alloc) #define XGI_IOCTL_PCIE_FREE _IOW(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_FREE, unsigned long) #define XGI_IOCTL_PUT_SCREEN_INFO _IOW(XGI_IOCTL_MAGIC, XGI_ESC_PUT_SCREEN_INFO, struct xgi_screen_info) From 76ca1e858fb8e1a65ea49c0c62350d7ca91044a2 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 9 Jul 2007 18:54:25 -0700 Subject: [PATCH 095/437] Convert occurances of U32 to other types. Most occurances of U32 were converted to u32. These are cases where the data represents something that will be written to the hardware. Other cases were converted to 'unsigned int'. U32 was the last type in xgi_types.h, so that file is removed. --- linux-core/xgi_cmdlist.c | 20 ++++++++--------- linux-core/xgi_cmdlist.h | 10 ++++----- linux-core/xgi_drv.c | 1 - linux-core/xgi_drv.h | 2 +- linux-core/xgi_fb.c | 1 - linux-core/xgi_misc.c | 1 - linux-core/xgi_pcie.c | 3 +-- linux-core/xgi_types.h | 46 ---------------------------------------- 8 files changed, 16 insertions(+), 68 deletions(-) delete mode 100644 linux-core/xgi_types.h diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index ee53d30c..7be0ac48 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -26,7 +26,6 @@ * DEALINGS IN THE SOFTWARE. ***************************************************************************/ -#include "xgi_types.h" #include "xgi_linux.h" #include "xgi_drv.h" #include "xgi_regs.h" @@ -183,7 +182,7 @@ void xgi_submit_cmdlist(struct xgi_info * info, struct xgi_cmd_info * pCmdInfo) /* Jong 06/13/2006; remove marked for system hang test */ /* xgi_waitfor_pci_idle(info); */ } else { - U32 *lastBatchVirtAddr; + u32 *lastBatchVirtAddr; XGI_INFO ("Jong-xgi_submit_cmdlist-s_cmdring._lastBatchStartAddr != 0 \n"); @@ -195,9 +194,9 @@ void xgi_submit_cmdlist(struct xgi_info * info, struct xgi_cmd_info * pCmdInfo) addFlush2D(info); } - lastBatchVirtAddr = - (U32 *) xgi_find_pcie_virt(info, - s_cmdring._lastBatchStartAddr); + lastBatchVirtAddr = + xgi_find_pcie_virt(info, + s_cmdring._lastBatchStartAddr); /* lastBatchVirtAddr should *never* be NULL. However, there * are currently some bugs that cause this to happen. 
The @@ -310,10 +309,9 @@ static unsigned int getCurBatchBeginPort(struct xgi_cmd_info * pCmdInfo) static void addFlush2D(struct xgi_info * info) { - U32 *flushBatchVirtAddr; - U32 flushBatchHWAddr; - - U32 *lastBatchVirtAddr; + u32 *flushBatchVirtAddr; + u32 flushBatchHWAddr; + u32 *lastBatchVirtAddr; /* check buf is large enough to contain a new flush batch */ if ((s_cmdring._cmdRingOffset + 0x20) >= s_cmdring._cmdRingSize) { @@ -321,7 +319,7 @@ static void addFlush2D(struct xgi_info * info) } flushBatchHWAddr = s_cmdring._cmdRingBuffer + s_cmdring._cmdRingOffset; - flushBatchVirtAddr = (U32 *) xgi_find_pcie_virt(info, flushBatchHWAddr); + flushBatchVirtAddr = xgi_find_pcie_virt(info, flushBatchHWAddr); /* not using memcpy for I assume the address is discrete */ *(flushBatchVirtAddr + 0) = 0x10000000; @@ -335,7 +333,7 @@ static void addFlush2D(struct xgi_info * info) // ASSERT(s_cmdring._lastBatchStartAddr != NULL); lastBatchVirtAddr = - (U32 *) xgi_find_pcie_virt(info, s_cmdring._lastBatchStartAddr); + xgi_find_pcie_virt(info, s_cmdring._lastBatchStartAddr); lastBatchVirtAddr[1] = BEGIN_LINK_ENABLE_MASK + 0x08; lastBatchVirtAddr[2] = flushBatchHWAddr >> 4; diff --git a/linux-core/xgi_cmdlist.h b/linux-core/xgi_cmdlist.h index c6221511..d2b95c0e 100644 --- a/linux-core/xgi_cmdlist.h +++ b/linux-core/xgi_cmdlist.h @@ -58,11 +58,11 @@ typedef enum { } CMD_SIZE; struct xgi_cmdring_info { - U32 _cmdRingSize; - U32 _cmdRingBuffer; - U32 _cmdRingBusAddr; - U32 _lastBatchStartAddr; - U32 _cmdRingOffset; + unsigned int _cmdRingSize; + u32 _cmdRingBuffer; + unsigned long _cmdRingBusAddr; + u32 _lastBatchStartAddr; + u32 _cmdRingOffset; }; extern int xgi_cmdlist_initialize(struct xgi_info * info, size_t size); diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index c4cc8900..b3425c75 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -25,7 +25,6 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. ***************************************************************************/ -#include "xgi_types.h" #include "xgi_linux.h" #include "xgi_drv.h" #include "xgi_regs.h" diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index efbbd647..983ed0a9 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -114,7 +114,7 @@ struct xgi_info { struct xgi_sarea_info sarea_info; /* look up table parameters */ - U32 *lut_base; + u32 *lut_base; unsigned int lutPageSize; unsigned int lutPageOrder; bool isLUTInLFB; diff --git a/linux-core/xgi_fb.c b/linux-core/xgi_fb.c index ac73b41a..7d390d4b 100644 --- a/linux-core/xgi_fb.c +++ b/linux-core/xgi_fb.c @@ -26,7 +26,6 @@ * DEALINGS IN THE SOFTWARE. ***************************************************************************/ -#include "xgi_types.h" #include "xgi_linux.h" #include "xgi_drv.h" #include "xgi_fb.h" diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index d3385bef..2d310a2f 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -26,7 +26,6 @@ * DEALINGS IN THE SOFTWARE. ***************************************************************************/ -#include "xgi_types.h" #include "xgi_linux.h" #include "xgi_drv.h" #include "xgi_regs.h" diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index 0f82e4ec..70459b2c 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -26,7 +26,6 @@ * DEALINGS IN THE SOFTWARE. 
***************************************************************************/ -#include "xgi_types.h" #include "xgi_linux.h" #include "xgi_drv.h" #include "xgi_regs.h" @@ -420,7 +419,7 @@ static struct xgi_pcie_block *xgi_pcie_mem_alloc(struct xgi_info * info, struct page *page; unsigned long page_order = 0, count = 0, index = 0; unsigned long page_addr = 0; - unsigned long *lut_addr = NULL; + u32 *lut_addr = NULL; unsigned long lut_id = 0; unsigned long size = (originalSize + PAGE_SIZE - 1) & PAGE_MASK; int i, j, page_count = 0; diff --git a/linux-core/xgi_types.h b/linux-core/xgi_types.h deleted file mode 100644 index f9a3360c..00000000 --- a/linux-core/xgi_types.h +++ /dev/null @@ -1,46 +0,0 @@ - -/**************************************************************************** - * Copyright (C) 2003-2006 by XGI Technology, Taiwan. - * * - * All Rights Reserved. * - * * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation on the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - ***************************************************************************/ - -#ifndef _XGI_TYPES_H_ -#define _XGI_TYPES_H_ - -/**************************************************************************** - * Typedefs * - ***************************************************************************/ - -/* - * mainly for 64-bit linux, where long is 64 bits - * and win9x, where int is 16 bit. 
- */ -#if defined(vxworks) -typedef unsigned int U32; /* 0 to 4294967295 */ -#else -typedef unsigned long U32; /* 0 to 4294967295 */ -#endif - -#endif From 04e4922c0c407a9f0cfe268f62130891e98fc682 Mon Sep 17 00:00:00 2001 From: Arthur Huillet Date: Wed, 11 Jul 2007 02:33:12 +0200 Subject: [PATCH 096/437] Made drm_sg_alloc accessible from inside the DRM - drm_sg_alloc_ioctl is the ioctl wrapper --- bsd-core/drmP.h | 3 ++- bsd-core/drm_drv.c | 2 +- bsd-core/drm_scatter.c | 39 ++++++++++++++++++++------------ linux-core/drmP.h | 3 ++- linux-core/drm_drv.c | 2 +- linux-core/drm_scatter.c | 49 +++++++++++++++++++++++++--------------- 6 files changed, 61 insertions(+), 37 deletions(-) diff --git a/bsd-core/drmP.h b/bsd-core/drmP.h index 9ba3d502..6e05b58f 100644 --- a/bsd-core/drmP.h +++ b/bsd-core/drmP.h @@ -915,6 +915,7 @@ int drm_agp_unbind(drm_device_t *dev, drm_agp_binding_t *request); /* Scatter Gather Support (drm_scatter.c) */ void drm_sg_cleanup(drm_sg_mem_t *entry); +int drm_sg_alloc(drm_device_t * dev, drm_scatter_gather_t * request); #ifdef __FreeBSD__ /* sysctl support (drm_sysctl.h) */ @@ -989,7 +990,7 @@ int drm_agp_unbind_ioctl(DRM_IOCTL_ARGS); int drm_agp_bind_ioctl(DRM_IOCTL_ARGS); /* Scatter Gather Support (drm_scatter.c) */ -int drm_sg_alloc(DRM_IOCTL_ARGS); +int drm_sg_alloc_ioctl(DRM_IOCTL_ARGS); int drm_sg_free(DRM_IOCTL_ARGS); /* consistent PCI memory functions (drm_pci.c) */ diff --git a/bsd-core/drm_drv.c b/bsd-core/drm_drv.c index baaeb43c..069774c1 100644 --- a/bsd-core/drm_drv.c +++ b/bsd-core/drm_drv.c @@ -117,7 +117,7 @@ static drm_ioctl_desc_t drm_ioctls[256] = { [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY }, [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY }, - [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC)] = { drm_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY }, + [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC)] = { drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY }, [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = { drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY }, [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = { drm_wait_vblank, 0 }, diff --git a/bsd-core/drm_scatter.c b/bsd-core/drm_scatter.c index 9dc280a4..46222f18 100644 --- a/bsd-core/drm_scatter.c +++ b/bsd-core/drm_scatter.c @@ -40,28 +40,20 @@ void drm_sg_cleanup(drm_sg_mem_t *entry) free(entry, M_DRM); } -int drm_sg_alloc(DRM_IOCTL_ARGS) +int drm_sg_alloc(drm_device_t * dev, drm_scatter_gather_t * request) { - DRM_DEVICE; - drm_scatter_gather_t request; drm_sg_mem_t *entry; unsigned long pages; - int i; - - DRM_DEBUG( "%s\n", __FUNCTION__ ); if ( dev->sg ) return EINVAL; - DRM_COPY_FROM_USER_IOCTL(request, (drm_scatter_gather_t *)data, - sizeof(request) ); - entry = malloc(sizeof(*entry), M_DRM, M_WAITOK | M_ZERO); if ( !entry ) return ENOMEM; pages = round_page(request.size) / PAGE_SIZE; - DRM_DEBUG( "sg size=%ld pages=%ld\n", request.size, pages ); + DRM_DEBUG( "sg size=%ld pages=%ld\n", request->size, pages ); entry->pages = pages; @@ -86,11 +78,7 @@ int drm_sg_alloc(DRM_IOCTL_ARGS) DRM_DEBUG( "sg alloc handle = %08lx\n", entry->handle ); entry->virtual = (void *)entry->handle; - request.handle = entry->handle; - - DRM_COPY_TO_USER_IOCTL( (drm_scatter_gather_t *)data, - request, - sizeof(request) ); + request->handle = entry->handle; DRM_LOCK(); if (dev->sg) { @@ -101,6 +89,27 @@ int drm_sg_alloc(DRM_IOCTL_ARGS) dev->sg = entry; DRM_UNLOCK(); +} + +int drm_sg_alloc_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + drm_scatter_gather_t request; + int ret; 
+ + DRM_DEBUG( "%s\n", __FUNCTION__ ); + + + DRM_COPY_FROM_USER_IOCTL(request, (drm_scatter_gather_t *)data, + sizeof(request) ); + + ret = drm_sg_alloc(dev, &request); + if ( ret ) return ret; + + DRM_COPY_TO_USER_IOCTL( (drm_scatter_gather_t *)data, + request, + sizeof(request) ); + return 0; } diff --git a/linux-core/drmP.h b/linux-core/drmP.h index c992c8d9..c274f1fa 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -1129,8 +1129,9 @@ extern int drm_proc_cleanup(int minor, /* Scatter Gather Support (drm_scatter.h) */ extern void drm_sg_cleanup(drm_sg_mem_t * entry); -extern int drm_sg_alloc(struct inode *inode, struct file *filp, +extern int drm_sg_alloc_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); +extern int drm_sg_alloc(drm_device_t *dev, drm_scatter_gather_t * request); extern int drm_sg_free(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index 6bbe7fca..0d446a12 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -113,7 +113,7 @@ static drm_ioctl_desc_t drm_ioctls[] = { [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = {drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, #endif - [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC)] = {drm_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, + [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC)] = {drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = {drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = {drm_wait_vblank, 0}, diff --git a/linux-core/drm_scatter.c b/linux-core/drm_scatter.c index e5c9f877..c0d6db24 100644 --- a/linux-core/drm_scatter.c +++ b/linux-core/drm_scatter.c @@ -55,6 +55,7 @@ void drm_sg_cleanup(drm_sg_mem_t * entry) entry->pages * sizeof(*entry->pagelist), DRM_MEM_PAGES); drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS); } +EXPORT_SYMBOL(drm_sg_cleanup); #ifdef _LP64 # define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1))) @@ -62,13 +63,8 @@ void drm_sg_cleanup(drm_sg_mem_t * entry) # define ScatterHandle(x) (unsigned int)(x) #endif -int drm_sg_alloc(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_sg_alloc(drm_device_t * dev, drm_scatter_gather_t * request) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_scatter_gather_t __user *argp = (void __user *)arg; - drm_scatter_gather_t request; drm_sg_mem_t *entry; unsigned long pages, i, j; @@ -80,17 +76,13 @@ int drm_sg_alloc(struct inode *inode, struct file *filp, if (dev->sg) return -EINVAL; - if (copy_from_user(&request, argp, sizeof(request))) - return -EFAULT; - entry = drm_alloc(sizeof(*entry), DRM_MEM_SGLISTS); if (!entry) return -ENOMEM; memset(entry, 0, sizeof(*entry)); - - pages = (request.size + PAGE_SIZE - 1) / PAGE_SIZE; - DRM_DEBUG("sg size=%ld pages=%ld\n", request.size, pages); + pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE; + DRM_DEBUG("sg size=%ld pages=%ld\n", request->size, pages); entry->pages = pages; entry->pagelist = drm_alloc(pages * sizeof(*entry->pagelist), @@ -142,12 +134,7 @@ int drm_sg_alloc(struct inode *inode, struct file *filp, SetPageReserved(entry->pagelist[j]); } - request.handle = entry->handle; - - if (copy_to_user(argp, &request, sizeof(request))) { - drm_sg_cleanup(entry); - return -EFAULT; - } + request->handle = entry->handle; dev->sg = entry; @@ -196,6 +183,32 @@ int drm_sg_alloc(struct inode *inode, struct file *filp, failed: 
drm_sg_cleanup(entry); return -ENOMEM; + +} +EXPORT_SYMBOL(drm_sg_alloc); + +int drm_sg_alloc_ioctl(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_scatter_gather_t __user *argp = (void __user *)arg; + drm_scatter_gather_t request; + int ret; + + if (copy_from_user(&request, argp, sizeof(request))) + return -EFAULT; + + ret = drm_sg_alloc(priv->head->dev, &request); + if ( ret ) return ret; + + if (copy_to_user(argp, &request, sizeof(request))) { + drm_sg_cleanup(priv->head->dev->sg); + return -EFAULT; + } + + + return 0; + } int drm_sg_free(struct inode *inode, struct file *filp, From 694e1c5c3f768436651ddf95e11ab5a89ccc8ffa Mon Sep 17 00:00:00 2001 From: Arthur Huillet Date: Wed, 11 Jul 2007 02:35:10 +0200 Subject: [PATCH 097/437] Added support for PCIGART for PCI(E) cards. Bumped DRM interface patchlevel. --- shared-core/nouveau_drm.h | 16 ++-- shared-core/nouveau_drv.h | 3 +- shared-core/nouveau_fifo.c | 18 +++- shared-core/nouveau_mem.c | 91 +++++++++++++------ shared-core/nouveau_object.c | 171 ++++++++++++++++++++++++----------- shared-core/nouveau_reg.h | 2 + shared-core/nouveau_state.c | 12 +++ 7 files changed, 221 insertions(+), 92 deletions(-) diff --git a/shared-core/nouveau_drm.h b/shared-core/nouveau_drm.h index 0758991a..7abe82e0 100644 --- a/shared-core/nouveau_drm.h +++ b/shared-core/nouveau_drm.h @@ -25,7 +25,7 @@ #ifndef __NOUVEAU_DRM_H__ #define __NOUVEAU_DRM_H__ -#define NOUVEAU_DRM_HEADER_PATCHLEVEL 7 +#define NOUVEAU_DRM_HEADER_PATCHLEVEL 8 typedef struct drm_nouveau_fifo_alloc { uint32_t fb_ctxdma_handle; @@ -68,11 +68,14 @@ drm_nouveau_notifier_alloc_t; #define NOUVEAU_MEM_AGP 0x00000002 #define NOUVEAU_MEM_FB_ACCEPTABLE 0x00000004 #define NOUVEAU_MEM_AGP_ACCEPTABLE 0x00000008 -#define NOUVEAU_MEM_PINNED 0x00000010 -#define NOUVEAU_MEM_USER_BACKED 0x00000020 -#define NOUVEAU_MEM_MAPPED 0x00000040 -#define NOUVEAU_MEM_INSTANCE 0x00000080 /* internal */ -#define NOUVEAU_MEM_NOTIFIER 0x00000100 /* internal */ +#define NOUVEAU_MEM_PCI 0x00000010 +#define NOUVEAU_MEM_PCI_ACCEPTABLE 0x00000020 +#define NOUVEAU_MEM_PINNED 0x00000040 +#define NOUVEAU_MEM_USER_BACKED 0x00000080 +#define NOUVEAU_MEM_MAPPED 0x00000100 +#define NOUVEAU_MEM_INSTANCE 0x00000200 /* internal */ +#define NOUVEAU_MEM_NOTIFIER 0x00000400 /* internal */ + typedef struct drm_nouveau_mem_alloc { int flags; int alignment; @@ -95,6 +98,7 @@ drm_nouveau_mem_free_t; #define NOUVEAU_GETPARAM_AGP_PHYSICAL 7 #define NOUVEAU_GETPARAM_FB_SIZE 8 #define NOUVEAU_GETPARAM_AGP_SIZE 9 +#define NOUVEAU_GETPARAM_PCI_PHYSICAL 10 typedef struct drm_nouveau_getparam { uint64_t param; uint64_t value; diff --git a/shared-core/nouveau_drv.h b/shared-core/nouveau_drv.h index 12b78a7e..ea03fe37 100644 --- a/shared-core/nouveau_drv.h +++ b/shared-core/nouveau_drv.h @@ -34,7 +34,7 @@ #define DRIVER_MAJOR 0 #define DRIVER_MINOR 0 -#define DRIVER_PATCHLEVEL 7 +#define DRIVER_PATCHLEVEL 8 #define NOUVEAU_FAMILY 0x0000FFFF #define NOUVEAU_FLAGS 0xFFFF0000 @@ -229,6 +229,7 @@ typedef struct drm_nouveau_private { struct mem_block *fb_heap; struct mem_block *fb_nomap_heap; struct mem_block *ramin_heap; + struct mem_block *pci_heap; /* context table pointed to be NV_PGRAPH_CHANNEL_CTX_TABLE (0x400780) */ uint32_t ctx_table_size; diff --git a/shared-core/nouveau_fifo.c b/shared-core/nouveau_fifo.c index 4095a57f..bc3a9948 100644 --- a/shared-core/nouveau_fifo.c +++ b/shared-core/nouveau_fifo.c @@ -210,11 +210,19 @@ nouveau_fifo_cmdbuf_alloc(struct drm_device 
*dev, int channel) } if (cb->flags & NOUVEAU_MEM_AGP) { - ret = nouveau_gpuobj_dma_new - (dev, channel, NV_CLASS_DMA_IN_MEMORY, - cb->start - dev_priv->agp_phys, - cb->size, NV_DMA_ACCESS_RO, NV_DMA_TARGET_AGP, - &pushbuf); + DRM_DEBUG("Creating CB in AGP memory\n"); + ret = nouveau_gpuobj_dma_new(dev, channel, + NV_CLASS_DMA_IN_MEMORY, + cb->start - dev_priv->agp_phys, + cb->size, + NV_DMA_ACCESS_RO, NV_DMA_TARGET_AGP, &pushbuf); + } else if ( cb->flags & NOUVEAU_MEM_PCI) { + DRM_DEBUG("Creating CB in PCI memory starting at virt 0x%08llx size %d\n", cb->start, cb->size); + ret = nouveau_gpuobj_dma_new(dev, channel, + NV_CLASS_DMA_IN_MEMORY, + cb->start, + cb->size, + NV_DMA_ACCESS_RO, NV_DMA_TARGET_PCI_NONLINEAR, &pushbuf); } else if (dev_priv->card_type != NV_04) { ret = nouveau_gpuobj_dma_new (dev, channel, NV_CLASS_DMA_IN_MEMORY, diff --git a/shared-core/nouveau_mem.c b/shared-core/nouveau_mem.c index c75a9356..79f94fd4 100644 --- a/shared-core/nouveau_mem.c +++ b/shared-core/nouveau_mem.c @@ -211,6 +211,10 @@ void nouveau_mem_close(struct drm_device *dev) drm_nouveau_private_t *dev_priv = dev->dev_private; nouveau_mem_takedown(&dev_priv->agp_heap); nouveau_mem_takedown(&dev_priv->fb_heap); + if ( dev_priv->pci_heap ) + { + nouveau_mem_takedown(&dev_priv->pci_heap); + } } /* returns the amount of FB ram in bytes */ @@ -283,8 +287,10 @@ int nouveau_mem_init(struct drm_device *dev) { drm_nouveau_private_t *dev_priv = dev->dev_private; uint32_t fb_size; + drm_scatter_gather_t sgreq; dev_priv->agp_phys=0; dev_priv->fb_phys=0; + sgreq . size = 4 << 20; //4MB of PCI scatter-gather zone /* init AGP */ dev_priv->agp_heap=NULL; @@ -340,8 +346,26 @@ int nouveau_mem_init(struct drm_device *dev) dev_priv->agp_phys = info.aperture_base; dev_priv->agp_available_size = info.aperture_size; } -no_agp: +goto have_agp; +no_agp: + dev_priv->pci_heap = NULL; + DRM_DEBUG("Allocating sg memory for PCI DMA\n"); + if ( drm_sg_alloc(dev, &sgreq) ) + { + DRM_ERROR("Unable to allocate 4MB of scatter-gather pages for PCI DMA!"); + goto no_pci; + } + + DRM_DEBUG("Got %d KiB\n", (dev->sg->pages * PAGE_SIZE) >> 10); + if ( nouveau_mem_init_heap(&dev_priv->pci_heap, dev->sg->virtual, dev->sg->pages * PAGE_SIZE)) + { + DRM_ERROR("Unable to initialize pci_heap!"); + goto no_pci; + } + +no_pci: +have_agp: /* setup a mtrr over the FB */ dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1), nouveau_mem_fb_amount(dev), @@ -405,29 +429,40 @@ struct mem_block* nouveau_mem_alloc(struct drm_device *dev, int alignment, uint6 if (size & (~PAGE_MASK)) size = ((size/PAGE_SIZE) + 1) * PAGE_SIZE; - if (flags&NOUVEAU_MEM_AGP) { - type=NOUVEAU_MEM_AGP; - block = nouveau_mem_alloc_block(dev_priv->agp_heap, size, - alignment, filp); - if (block) goto alloc_ok; - } - if (flags&(NOUVEAU_MEM_FB|NOUVEAU_MEM_FB_ACCEPTABLE)) { - type=NOUVEAU_MEM_FB; - if (!(flags&NOUVEAU_MEM_MAPPED)) { - block = nouveau_mem_alloc_block(dev_priv->fb_nomap_heap, - size, alignment, filp); - if (block) goto alloc_ok; - } - block = nouveau_mem_alloc_block(dev_priv->fb_heap, size, - alignment, filp); - if (block) goto alloc_ok; - } - if (flags&NOUVEAU_MEM_AGP_ACCEPTABLE) { - type=NOUVEAU_MEM_AGP; - block = nouveau_mem_alloc_block(dev_priv->agp_heap, size, - alignment, filp); - if (block) goto alloc_ok; - } + +#define NOUVEAU_MEM_ALLOC_AGP {\ + type=NOUVEAU_MEM_AGP;\ + block = nouveau_mem_alloc_block(dev_priv->agp_heap, size,\ + alignment, filp);\ + if (block) goto alloc_ok;\ + } + +#define NOUVEAU_MEM_ALLOC_PCI {\ + type = NOUVEAU_MEM_PCI;\ + block = 
nouveau_mem_alloc_block(dev_priv->pci_heap, size, alignment, filp);\ + if ( block ) goto alloc_ok;\ + } + +#define NOUVEAU_MEM_ALLOC_FB {\ + type=NOUVEAU_MEM_FB;\ + if (!(flags&NOUVEAU_MEM_MAPPED)) {\ + block = nouveau_mem_alloc_block(dev_priv->fb_nomap_heap,\ + size, alignment, filp); \ + if (block) goto alloc_ok;\ + }\ + block = nouveau_mem_alloc_block(dev_priv->fb_heap, size,\ + alignment, filp);\ + if (block) goto alloc_ok;\ + } + + + if (flags&NOUVEAU_MEM_FB) NOUVEAU_MEM_ALLOC_FB + if (flags&NOUVEAU_MEM_AGP) NOUVEAU_MEM_ALLOC_AGP + if (flags&NOUVEAU_MEM_PCI) NOUVEAU_MEM_ALLOC_PCI + if (flags&NOUVEAU_MEM_FB_ACCEPTABLE) NOUVEAU_MEM_ALLOC_FB + if (flags&NOUVEAU_MEM_AGP_ACCEPTABLE) NOUVEAU_MEM_ALLOC_AGP + if (flags&NOUVEAU_MEM_PCI_ACCEPTABLE) NOUVEAU_MEM_ALLOC_PCI + return NULL; @@ -436,15 +471,19 @@ alloc_ok: if (flags&NOUVEAU_MEM_MAPPED) { - int ret; + int ret = 0; block->flags|=NOUVEAU_MEM_MAPPED; if (type == NOUVEAU_MEM_AGP) ret = drm_addmap(dev, block->start - dev->agp->base, block->size, _DRM_AGP, 0, &block->map); - else + else if (type == NOUVEAU_MEM_FB) ret = drm_addmap(dev, block->start, block->size, _DRM_FRAME_BUFFER, 0, &block->map); + else if (type == NOUVEAU_MEM_PCI) + ret = drm_addmap(dev, block->start - (unsigned long int)dev->sg->virtual, block->size, + _DRM_SCATTER_GATHER, 0, &block->map); + if (ret) { nouveau_mem_free_block(block); return NULL; diff --git a/shared-core/nouveau_object.c b/shared-core/nouveau_object.c index bf811b4b..dcb29b40 100644 --- a/shared-core/nouveau_object.c +++ b/shared-core/nouveau_object.c @@ -515,30 +515,6 @@ nouveau_gpuobj_new_fake(drm_device_t *dev, uint32_t offset, uint32_t size, return 0; } -/* - DMA objects are used to reference a piece of memory in the - framebuffer, PCI or AGP address space. Each object is 16 bytes big - and looks as follows: - - entry[0] - 11:0 class (seems like I can always use 0 here) - 12 page table present? - 13 page entry linear? - 15:14 access: 0 rw, 1 ro, 2 wo - 17:16 target: 0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP - 31:20 dma adjust (bits 0-11 of the address) - entry[1] - dma limit - entry[2] - 1 0 readonly, 1 readwrite - 31:12 dma frame address (bits 12-31 of the address) - - Non linear page tables seem to need a list of frame addresses afterwards, - the rivatv project has some info on this. - - The method below creates a DMA object in instance RAM and returns a handle - to it that can be used to set up context objects. -*/ static int nouveau_gpuobj_class_instmem_size(drm_device_t *dev, int class) @@ -553,6 +529,33 @@ nouveau_gpuobj_class_instmem_size(drm_device_t *dev, int class) return 16; } +/* + DMA objects are used to reference a piece of memory in the + framebuffer, PCI or AGP address space. Each object is 16 bytes big + and looks as follows: + + entry[0] + 11:0 class (seems like I can always use 0 here) + 12 page table present? + 13 page entry linear? + 15:14 access: 0 rw, 1 ro, 2 wo + 17:16 target: 0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP + 31:20 dma adjust (bits 0-11 of the address) + entry[1] + dma limit (size of transfer) + entry[X] + 1 0 readonly, 1 readwrite + 31:12 dma frame address of the page (bits 12-31 of the address) + entry[N] + page table terminator, same value as the first pte, as does nvidia + rivatv uses 0xffffffff + + Non linear page tables need a list of frame addresses afterwards, + the rivatv project has some info on this. + + The method below creates a DMA object in instance RAM and returns a handle + to it that can be used to set up context objects. 
+*/ int nouveau_gpuobj_dma_new(drm_device_t *dev, int channel, int class, uint64_t offset, uint64_t size, int access, int target, @@ -560,13 +563,28 @@ nouveau_gpuobj_dma_new(drm_device_t *dev, int channel, int class, { drm_nouveau_private_t *dev_priv = dev->dev_private; int ret; - + uint32_t is_scatter_gather = 0; + DRM_DEBUG("ch%d class=0x%04x offset=0x%llx size=0x%llx\n", channel, class, offset, size); DRM_DEBUG("access=%d target=%d\n", access, target); + switch (target) { + case NV_DMA_TARGET_AGP: + offset += dev_priv->agp_phys; + break; + case NV_DMA_TARGET_PCI_NONLINEAR: + /*assume the "offset" is a virtual memory address*/ + is_scatter_gather = 1; + /*put back the right value*/ + target = NV_DMA_TARGET_PCI; + break; + default: + break; + } + ret = nouveau_gpuobj_new(dev, channel, - nouveau_gpuobj_class_instmem_size(dev, class), + is_scatter_gather ? ((((size + PAGE_SIZE - 1) / PAGE_SIZE) << 2) + 12) : nouveau_gpuobj_class_instmem_size(dev, class), 16, NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, gpuobj); @@ -577,22 +595,53 @@ nouveau_gpuobj_dma_new(drm_device_t *dev, int channel, int class, if (dev_priv->card_type < NV_50) { uint32_t frame, adjust, pte_flags = 0; - - if (target == NV_DMA_TARGET_AGP) - offset += dev_priv->agp_phys; - if (access != NV_DMA_ACCESS_RO) - pte_flags |= (1<<1); - frame = offset & ~0x00000fff; adjust = offset & 0x00000fff; - - INSTANCE_WR(*gpuobj, 0, ((1<<12) | (1<<13) | - (adjust << 20) | + if (access != NV_DMA_ACCESS_RO) + pte_flags |= (1<<1); + + if ( ! is_scatter_gather ) + { + frame = offset & ~0x00000fff; + + INSTANCE_WR(*gpuobj, 0, ((1<<12) | (1<<13) | + (adjust << 20) | (access << 14) | (target << 16) | class)); - INSTANCE_WR(*gpuobj, 1, size - 1); - INSTANCE_WR(*gpuobj, 2, frame | pte_flags); - INSTANCE_WR(*gpuobj, 3, frame | pte_flags); + INSTANCE_WR(*gpuobj, 1, size - 1); + INSTANCE_WR(*gpuobj, 2, frame | pte_flags); + INSTANCE_WR(*gpuobj, 3, frame | pte_flags); + } + else + { + uint32_t instance_offset; + uint32_t bus_addr; + size = (uint32_t) size; + + DRM_DEBUG("Creating PCI DMA object using virtual zone starting at 0x%08x, size %d\n", (uint32_t) offset, (uint32_t)size); + INSTANCE_WR(*gpuobj, 0, ((1<<12) | (0<<13) | + (adjust << 20) | + (access << 14) | + (target << 16) | + class)); + INSTANCE_WR(*gpuobj, 1, size-1); + + /*write starting at the third dword*/ + instance_offset = 2; + + /*for each PAGE, get its bus address, fill in the page table entry, and advance*/ + while ( size > 0 ) { + bus_addr = (uint32_t) page_address(vmalloc_to_page((void *) (uint32_t) offset)); + bus_addr |= (offset & ~PAGE_MASK); + bus_addr = virt_to_bus((void *)bus_addr); + frame = bus_addr & ~0x00000FFF; + INSTANCE_WR(*gpuobj, instance_offset, frame | pte_flags); + offset += PAGE_SIZE; + instance_offset ++; + size -= PAGE_SIZE; + } + + } } else { INSTANCE_WR(*gpuobj, 0, 0x00190000 | class); INSTANCE_WR(*gpuobj, 1, offset + size - 1); @@ -804,24 +853,38 @@ nouveau_gpuobj_channel_init(drm_device_t *dev, int channel, return ret; } - /* non-AGP unimplemented */ - if (dev_priv->agp_heap == NULL) - return 0; - - /* GART ctxdma */ - if ((ret = nouveau_gpuobj_dma_new(dev, channel, NV_CLASS_DMA_IN_MEMORY, - 0, dev_priv->agp_available_size, - NV_DMA_ACCESS_RW, NV_DMA_TARGET_AGP, - &tt))) { - DRM_ERROR("Error creating TT ctxdma: %d\n", ret); - return ret; + if (dev_priv->agp_heap) { + /* AGPGART ctxdma */ + if ((ret = nouveau_gpuobj_dma_new(dev, channel, NV_CLASS_DMA_IN_MEMORY, + 0, dev_priv->agp_available_size, + NV_DMA_ACCESS_RW, + NV_DMA_TARGET_AGP, &tt))) { + 
DRM_ERROR("Error creating AGP TT ctxdma: %d\n", DRM_ERR(ENOMEM)); + return DRM_ERR(ENOMEM); + } + + ret = nouveau_gpuobj_ref_add(dev, channel, tt_h, tt, NULL); + if (ret) { + DRM_ERROR("Error referencing AGP TT ctxdma: %d\n", ret); + return ret; + } } - - if ((ret = nouveau_gpuobj_ref_add(dev, channel, tt_h, tt, NULL))) { - DRM_ERROR("Error referencing TT ctxdma: %d\n", ret); - return ret; + else { + /*PCI*/ + if((ret = nouveau_gpuobj_dma_new(dev, channel, NV_CLASS_DMA_IN_MEMORY, + (unsigned int) dev->sg->virtual, dev->sg->pages * PAGE_SIZE, + NV_DMA_ACCESS_RW, + NV_DMA_TARGET_PCI_NONLINEAR, &tt))) { + DRM_ERROR("Error creating PCI TT ctxdma: %d\n", DRM_ERR(ENOMEM)); + return DRM_ERR(ENOMEM); + } + + ret = nouveau_gpuobj_ref_add(dev, channel, tt_h, tt, NULL); + if (ret) { + DRM_ERROR("Error referencing PCI TT ctxdma: %d\n", ret); + return ret; + } } - return 0; } diff --git a/shared-core/nouveau_reg.h b/shared-core/nouveau_reg.h index c2ebc714..a66d2d34 100644 --- a/shared-core/nouveau_reg.h +++ b/shared-core/nouveau_reg.h @@ -39,6 +39,8 @@ #define NV_DMA_TARGET_VIDMEM 0 #define NV_DMA_TARGET_PCI 2 #define NV_DMA_TARGET_AGP 3 +/*The following is not a real value used by nvidia cards, it's changed by nouveau_object_dma_create*/ +#define NV_DMA_TARGET_PCI_NONLINEAR 8 /* Some object classes we care about in the drm */ #define NV_CLASS_DMA_FROM_MEMORY 0x00000002 diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c index c51d7d5d..14b33a4a 100644 --- a/shared-core/nouveau_state.c +++ b/shared-core/nouveau_state.c @@ -348,6 +348,7 @@ void nouveau_preclose(drm_device_t * dev, DRMFILE filp) nouveau_fifo_cleanup(dev, filp); nouveau_mem_release(filp,dev_priv->fb_heap); nouveau_mem_release(filp,dev_priv->agp_heap); + nouveau_mem_release(filp,dev_priv->pci_heap); } /* first module load, setup the mmio/fb mapping */ @@ -442,6 +443,15 @@ int nouveau_ioctl_getparam(DRM_IOCTL_ARGS) case NOUVEAU_GETPARAM_AGP_PHYSICAL: getparam.value=dev_priv->agp_phys; break; + case NOUVEAU_GETPARAM_PCI_PHYSICAL: + if ( dev -> sg ) + getparam.value=dev->sg->virtual; + else + { + DRM_ERROR("Requested PCIGART address, while no PCIGART was created\n"); + DRM_ERR(EINVAL); + } + break; case NOUVEAU_GETPARAM_FB_SIZE: getparam.value=dev_priv->fb_available_size; break; @@ -472,6 +482,8 @@ int nouveau_ioctl_setparam(DRM_IOCTL_ARGS) switch (setparam.value) { case NOUVEAU_MEM_AGP: case NOUVEAU_MEM_FB: + case NOUVEAU_MEM_PCI: + case NOUVEAU_MEM_AGP | NOUVEAU_MEM_PCI_ACCEPTABLE: break; default: DRM_ERROR("invalid CMDBUF_LOCATION value=%lld\n", From 13e1377044d581d692af77656e3bc32c9eb183f7 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 11 Jul 2007 12:38:48 +1000 Subject: [PATCH 098/437] nouveau: Some checks on userspace object handles. 
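
In outline, the new checks amount to the following (a condensed sketch of
the hunks that follow, using the same names as the driver code):

    /* FIFO allocation: ~0 is reserved, so refuse it as a ctxdma handle. */
    if (init.fb_ctxdma_handle == ~0 || init.tt_ctxdma_handle == ~0)
            return DRM_ERR(EINVAL);

    /* Graphics object allocation: refuse ~0, and refuse any handle that
     * is already present in the channel's RAMHT reference list. */
    if (init.handle == ~0)
            return DRM_ERR(EINVAL);
    if (nouveau_gpuobj_ref_find(dev, init.channel, init.handle, NULL) == 0)
            return DRM_ERR(EEXIST);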
--- shared-core/nouveau_fifo.c | 3 +++ shared-core/nouveau_object.c | 26 +++++++++++++++++++++++++- 2 files changed, 28 insertions(+), 1 deletion(-) diff --git a/shared-core/nouveau_fifo.c b/shared-core/nouveau_fifo.c index bc3a9948..fcdc14c8 100644 --- a/shared-core/nouveau_fifo.c +++ b/shared-core/nouveau_fifo.c @@ -474,6 +474,9 @@ static int nouveau_ioctl_fifo_alloc(DRM_IOCTL_ARGS) DRM_COPY_FROM_USER_IOCTL(init, (drm_nouveau_fifo_alloc_t __user *) data, sizeof(init)); + if (init.fb_ctxdma_handle == ~0 || init.tt_ctxdma_handle == ~0) + return DRM_ERR(EINVAL); + res = nouveau_fifo_alloc(dev, &init.channel, filp, init.fb_ctxdma_handle, init.tt_ctxdma_handle); diff --git a/shared-core/nouveau_object.c b/shared-core/nouveau_object.c index dcb29b40..82944c2b 100644 --- a/shared-core/nouveau_object.c +++ b/shared-core/nouveau_object.c @@ -470,6 +470,26 @@ nouveau_gpuobj_new_ref(drm_device_t *dev, int oc, int rc, uint32_t handle, return 0; } +static int +nouveau_gpuobj_ref_find(drm_device_t *dev, int channel, uint32_t handle, + nouveau_gpuobj_ref_t **ref_ret) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_fifo *chan = dev_priv->fifos[channel]; + nouveau_gpuobj_ref_t *ref = chan->ramht_refs; + + while (ref) { + if (ref->handle == handle) { + if (ref_ret) + *ref_ret = ref; + return 0; + } + ref = ref->next; + } + + return DRM_ERR(EINVAL); +} + int nouveau_gpuobj_new_fake(drm_device_t *dev, uint32_t offset, uint32_t size, uint32_t flags, nouveau_gpuobj_t **pgpuobj, @@ -927,7 +947,11 @@ int nouveau_ioctl_grobj_alloc(DRM_IOCTL_ARGS) } //FIXME: check args, only allow trusted objects to be created - //FIXME: check for pre-existing handle + + if (init.handle == ~0) + return DRM_ERR(EINVAL); + if (nouveau_gpuobj_ref_find(dev, init.channel, init.handle, NULL) == 0) + return DRM_ERR(EEXIST); if ((ret = nouveau_gpuobj_gr_new(dev, init.channel, init.class, &gr))) { DRM_ERROR("Error creating gr object: %d (%d/0x%08x)\n", From 5ccadac9e3b1beb8ac0177c7a39862094fe3b6de Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 11 Jul 2007 14:22:59 +1000 Subject: [PATCH 099/437] nouveau/nv50: G80 fixes. Again, no hardware, so no idea if it'll even work yet. I understand how the PRAMIN setup works now, un-hardcoding stuff will come "RealSoonNow(tm)". 
--- shared-core/nv50_fifo.c | 3 +-- shared-core/nv50_instmem.c | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/shared-core/nv50_fifo.c b/shared-core/nv50_fifo.c index d4c3ca87..ee1fb887 100644 --- a/shared-core/nv50_fifo.c +++ b/shared-core/nv50_fifo.c @@ -235,8 +235,7 @@ nv50_fifo_create_context(drm_device_t *dev, int channel) DRM_DEBUG("ch%d\n", channel); if (IS_G80) { - uint32_t ramfc_offset; - ramfc_offset = chan->ramin->gpuobj->im_pramin->start + 0x1000; + uint32_t ramfc_offset = chan->ramin->gpuobj->im_pramin->start; if ((ret = nouveau_gpuobj_new_fake(dev, ramfc_offset, 0x100, NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, diff --git a/shared-core/nv50_instmem.c b/shared-core/nv50_instmem.c index 4aca9e7d..1ea64b35 100644 --- a/shared-core/nv50_instmem.c +++ b/shared-core/nv50_instmem.c @@ -68,7 +68,7 @@ nv50_instmem_init(drm_device_t *dev) } else { unk = cb + 0x5400; cb0 = cb + 0x5440; - cb1 = cb + 0x1438; + cb1 = cb + 0x1478; } DRM_DEBUG("PRAMIN config:\n"); From d26ae22c2b17e0f193334cefec7d141befcfa1ee Mon Sep 17 00:00:00 2001 From: Arthur Huillet Date: Wed, 11 Jul 2007 14:56:27 +0200 Subject: [PATCH 100/437] fixed bug that prevented PCIE cards from actually using PCIGART - NV50 will probably still have a problem --- shared-core/nouveau_fifo.c | 2 +- shared-core/nouveau_mem.c | 5 ++--- shared-core/nouveau_state.c | 4 ++-- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/shared-core/nouveau_fifo.c b/shared-core/nouveau_fifo.c index fcdc14c8..88f66d70 100644 --- a/shared-core/nouveau_fifo.c +++ b/shared-core/nouveau_fifo.c @@ -217,7 +217,7 @@ nouveau_fifo_cmdbuf_alloc(struct drm_device *dev, int channel) cb->size, NV_DMA_ACCESS_RO, NV_DMA_TARGET_AGP, &pushbuf); } else if ( cb->flags & NOUVEAU_MEM_PCI) { - DRM_DEBUG("Creating CB in PCI memory starting at virt 0x%08llx size %d\n", cb->start, cb->size); + DRM_DEBUG("Creating CB in PCI memory\n", cb->start, cb->size); ret = nouveau_gpuobj_dma_new(dev, channel, NV_CLASS_DMA_IN_MEMORY, cb->start, diff --git a/shared-core/nouveau_mem.c b/shared-core/nouveau_mem.c index 79f94fd4..c545acf2 100644 --- a/shared-core/nouveau_mem.c +++ b/shared-core/nouveau_mem.c @@ -345,9 +345,9 @@ int nouveau_mem_init(struct drm_device *dev) dev_priv->agp_phys = info.aperture_base; dev_priv->agp_available_size = info.aperture_size; + goto have_agp; } -goto have_agp; no_agp: dev_priv->pci_heap = NULL; DRM_DEBUG("Allocating sg memory for PCI DMA\n"); @@ -357,8 +357,7 @@ no_agp: goto no_pci; } - DRM_DEBUG("Got %d KiB\n", (dev->sg->pages * PAGE_SIZE) >> 10); - if ( nouveau_mem_init_heap(&dev_priv->pci_heap, dev->sg->virtual, dev->sg->pages * PAGE_SIZE)) + if ( nouveau_mem_init_heap(&dev_priv->pci_heap, (uint64_t) dev->sg->virtual, dev->sg->pages * PAGE_SIZE)) { DRM_ERROR("Unable to initialize pci_heap!"); goto no_pci; diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c index 14b33a4a..fe3db168 100644 --- a/shared-core/nouveau_state.c +++ b/shared-core/nouveau_state.c @@ -445,11 +445,11 @@ int nouveau_ioctl_getparam(DRM_IOCTL_ARGS) break; case NOUVEAU_GETPARAM_PCI_PHYSICAL: if ( dev -> sg ) - getparam.value=dev->sg->virtual; + getparam.value=(uint64_t) dev->sg->virtual; else { DRM_ERROR("Requested PCIGART address, while no PCIGART was created\n"); - DRM_ERR(EINVAL); + return DRM_ERR(EINVAL); } break; case NOUVEAU_GETPARAM_FB_SIZE: From b301a9051b3fd9ad3dce6bcf32b06da7953a8b91 Mon Sep 17 00:00:00 2001 From: Arthur Huillet Date: Wed, 11 Jul 2007 15:01:37 +0200 Subject: [PATCH 101/437] NV50 will not attempt to use 
PCIGART now --- shared-core/nouveau_mem.c | 3 +++ shared-core/nouveau_object.c | 2 ++ 2 files changed, 5 insertions(+) diff --git a/shared-core/nouveau_mem.c b/shared-core/nouveau_mem.c index c545acf2..a428b813 100644 --- a/shared-core/nouveau_mem.c +++ b/shared-core/nouveau_mem.c @@ -349,6 +349,9 @@ int nouveau_mem_init(struct drm_device *dev) } no_agp: + + if ( dev_priv->card_type >= NV_50 ) goto no_pci; + dev_priv->pci_heap = NULL; DRM_DEBUG("Allocating sg memory for PCI DMA\n"); if ( drm_sg_alloc(dev, &sgreq) ) diff --git a/shared-core/nouveau_object.c b/shared-core/nouveau_object.c index 82944c2b..0fe32fda 100644 --- a/shared-core/nouveau_object.c +++ b/shared-core/nouveau_object.c @@ -890,6 +890,8 @@ nouveau_gpuobj_channel_init(drm_device_t *dev, int channel, } } else { + if ( dev_priv -> card_type >= NV_50 ) return 0; /*no PCIGART for NV50*/ + /*PCI*/ if((ret = nouveau_gpuobj_dma_new(dev, channel, NV_CLASS_DMA_IN_MEMORY, (unsigned int) dev->sg->virtual, dev->sg->pages * PAGE_SIZE, From 5fbdf9da8bda996c0a474d13fe69d260f12ffce7 Mon Sep 17 00:00:00 2001 From: Arthur Huillet Date: Thu, 12 Jul 2007 02:35:39 +0200 Subject: [PATCH 102/437] fixed object creation code to not Oops on 64bits, worked around memalloc not working on 64bit for PCIGART --- shared-core/nouveau_fifo.c | 2 +- shared-core/nouveau_mem.c | 2 ++ shared-core/nouveau_object.c | 39 ++++++++++++++++++++++++++++++------ 3 files changed, 36 insertions(+), 7 deletions(-) diff --git a/shared-core/nouveau_fifo.c b/shared-core/nouveau_fifo.c index 88f66d70..7bcb1c8f 100644 --- a/shared-core/nouveau_fifo.c +++ b/shared-core/nouveau_fifo.c @@ -217,7 +217,7 @@ nouveau_fifo_cmdbuf_alloc(struct drm_device *dev, int channel) cb->size, NV_DMA_ACCESS_RO, NV_DMA_TARGET_AGP, &pushbuf); } else if ( cb->flags & NOUVEAU_MEM_PCI) { - DRM_DEBUG("Creating CB in PCI memory\n", cb->start, cb->size); + DRM_DEBUG("Creating CB in PCI memory\n"); ret = nouveau_gpuobj_dma_new(dev, channel, NV_CLASS_DMA_IN_MEMORY, cb->start, diff --git a/shared-core/nouveau_mem.c b/shared-core/nouveau_mem.c index a428b813..790f6b5b 100644 --- a/shared-core/nouveau_mem.c +++ b/shared-core/nouveau_mem.c @@ -549,6 +549,8 @@ int nouveau_ioctl_mem_free(DRM_IOCTL_ARGS) block = find_block(dev_priv->fb_heap, memfree.region_offset); else if (memfree.flags&NOUVEAU_MEM_AGP) block = find_block(dev_priv->agp_heap, memfree.region_offset); + else if (memfree.flags&NOUVEAU_MEM_PCI) + block = find_block(dev_priv->pci_heap, memfree.region_offset); if (!block) return DRM_ERR(EFAULT); if (block->filp != filp) diff --git a/shared-core/nouveau_object.c b/shared-core/nouveau_object.c index 0fe32fda..4f7ad111 100644 --- a/shared-core/nouveau_object.c +++ b/shared-core/nouveau_object.c @@ -635,10 +635,10 @@ nouveau_gpuobj_dma_new(drm_device_t *dev, int channel, int class, else { uint32_t instance_offset; - uint32_t bus_addr; + uint64_t bus_addr; size = (uint32_t) size; - DRM_DEBUG("Creating PCI DMA object using virtual zone starting at 0x%08x, size %d\n", (uint32_t) offset, (uint32_t)size); + DRM_DEBUG("Creating PCI DMA object using virtual zone starting at %#llx, size %d\n", offset, (uint32_t)size); INSTANCE_WR(*gpuobj, 0, ((1<<12) | (0<<13) | (adjust << 20) | (access << 14) | @@ -651,10 +651,37 @@ nouveau_gpuobj_dma_new(drm_device_t *dev, int channel, int class, /*for each PAGE, get its bus address, fill in the page table entry, and advance*/ while ( size > 0 ) { - bus_addr = (uint32_t) page_address(vmalloc_to_page((void *) (uint32_t) offset)); + bus_addr = vmalloc_to_page(offset); + if ( ! 
bus_addr ) + { + DRM_ERROR("Couldn't map virtual address %#llx to a page number\n", offset); + nouveau_gpuobj_del(dev, gpuobj); + return DRM_ERR(ENOMEM); + } + bus_addr = (uint64_t) page_address(bus_addr); + if ( ! bus_addr ) + { + DRM_ERROR("Couldn't find page address for address %#llx\n", offset); + nouveau_gpuobj_del(dev, gpuobj); + return DRM_ERR(ENOMEM); + } bus_addr |= (offset & ~PAGE_MASK); bus_addr = virt_to_bus((void *)bus_addr); - frame = bus_addr & ~0x00000FFF; + if ( ! bus_addr ) + { + DRM_ERROR("Couldn't get bus address for %#llx\n", offset); + nouveau_gpuobj_del(dev, gpuobj); + return DRM_ERR(ENOMEM); + } + + /*if ( bus_addr >= 1 << 32 ) + { + DRM_ERROR("Bus address %#llx is over 32 bits, Nvidia cards cannot address it !\n", bus_addr); + nouveau_gpuobj_del(dev, gpuobj); + return DRM_ERR(EINVAL); + }*/ + + frame = (uint32_t) bus_addr & ~0x00000FFF; INSTANCE_WR(*gpuobj, instance_offset, frame | pte_flags); offset += PAGE_SIZE; instance_offset ++; @@ -894,11 +921,11 @@ nouveau_gpuobj_channel_init(drm_device_t *dev, int channel, /*PCI*/ if((ret = nouveau_gpuobj_dma_new(dev, channel, NV_CLASS_DMA_IN_MEMORY, - (unsigned int) dev->sg->virtual, dev->sg->pages * PAGE_SIZE, + dev->sg->virtual, dev->sg->pages * PAGE_SIZE, NV_DMA_ACCESS_RW, NV_DMA_TARGET_PCI_NONLINEAR, &tt))) { DRM_ERROR("Error creating PCI TT ctxdma: %d\n", DRM_ERR(ENOMEM)); - return DRM_ERR(ENOMEM); + return 0; //this is noncritical } ret = nouveau_gpuobj_ref_add(dev, channel, tt_h, tt, NULL); From 750371cb6ea9a64c9d4d4d3b9716c3c68d810d48 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 12 Jul 2007 10:15:16 +1000 Subject: [PATCH 103/437] nouveau: separate region_offset into map_handle and offset. --- linux-core/drmP.h | 3 +++ linux-core/drm_bufs.c | 4 +-- shared-core/nouveau_drm.h | 7 ++--- shared-core/nouveau_drv.h | 3 ++- shared-core/nouveau_fifo.c | 20 +++++++++----- shared-core/nouveau_mem.c | 48 +++++++++++++++++++--------------- shared-core/nouveau_notifier.c | 2 -- shared-core/nouveau_object.c | 6 ++--- shared-core/nv50_instmem.c | 2 +- 9 files changed, 55 insertions(+), 40 deletions(-) diff --git a/linux-core/drmP.h b/linux-core/drmP.h index c274f1fa..2bbc6200 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -1048,6 +1048,9 @@ extern unsigned long drm_get_resource_start(drm_device_t *dev, unsigned int resource); extern unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource); +extern drm_map_list_t *drm_find_matching_map(drm_device_t *dev, + drm_local_map_t *map); + /* DMA support (drm_dma.h) */ extern int drm_dma_setup(drm_device_t * dev); diff --git a/linux-core/drm_bufs.c b/linux-core/drm_bufs.c index a2c8a75e..2f3e4b2a 100644 --- a/linux-core/drm_bufs.c +++ b/linux-core/drm_bufs.c @@ -48,8 +48,7 @@ unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource) } EXPORT_SYMBOL(drm_get_resource_len); -static drm_map_list_t *drm_find_matching_map(drm_device_t *dev, - drm_local_map_t *map) +drm_map_list_t *drm_find_matching_map(drm_device_t *dev, drm_local_map_t *map) { drm_map_list_t *entry; list_for_each_entry(entry, &dev->maplist, head) { @@ -62,6 +61,7 @@ static drm_map_list_t *drm_find_matching_map(drm_device_t *dev, return NULL; } +EXPORT_SYMBOL(drm_find_matching_map); static int drm_map_handle(drm_device_t *dev, drm_hash_item_t *hash, unsigned long user_token, int hashed_handle) diff --git a/shared-core/nouveau_drm.h b/shared-core/nouveau_drm.h index 7abe82e0..78ab9508 100644 --- a/shared-core/nouveau_drm.h +++ b/shared-core/nouveau_drm.h @@ -25,7 +25,7 @@ 
#ifndef __NOUVEAU_DRM_H__ #define __NOUVEAU_DRM_H__ -#define NOUVEAU_DRM_HEADER_PATCHLEVEL 8 +#define NOUVEAU_DRM_HEADER_PATCHLEVEL 9 typedef struct drm_nouveau_fifo_alloc { uint32_t fb_ctxdma_handle; @@ -80,12 +80,13 @@ typedef struct drm_nouveau_mem_alloc { int flags; int alignment; uint64_t size; // in bytes - uint64_t region_offset; + uint64_t offset; + drm_handle_t map_handle; } drm_nouveau_mem_alloc_t; typedef struct drm_nouveau_mem_free { - uint64_t region_offset; + uint64_t offset; int flags; } drm_nouveau_mem_free_t; diff --git a/shared-core/nouveau_drv.h b/shared-core/nouveau_drv.h index ea03fe37..99ddb586 100644 --- a/shared-core/nouveau_drv.h +++ b/shared-core/nouveau_drv.h @@ -34,7 +34,7 @@ #define DRIVER_MAJOR 0 #define DRIVER_MINOR 0 -#define DRIVER_PATCHLEVEL 8 +#define DRIVER_PATCHLEVEL 9 #define NOUVEAU_FAMILY 0x0000FFFF #define NOUVEAU_FLAGS 0xFFFF0000 @@ -50,6 +50,7 @@ struct mem_block { DRMFILE filp; /* 0: free, -1: heap, other: real files */ int flags; drm_local_map_t *map; + drm_handle_t map_handle; }; enum nouveau_flags { diff --git a/shared-core/nouveau_fifo.c b/shared-core/nouveau_fifo.c index 7bcb1c8f..7114a931 100644 --- a/shared-core/nouveau_fifo.c +++ b/shared-core/nouveau_fifo.c @@ -213,8 +213,7 @@ nouveau_fifo_cmdbuf_alloc(struct drm_device *dev, int channel) DRM_DEBUG("Creating CB in AGP memory\n"); ret = nouveau_gpuobj_dma_new(dev, channel, NV_CLASS_DMA_IN_MEMORY, - cb->start - dev_priv->agp_phys, - cb->size, + cb->start, cb->size, NV_DMA_ACCESS_RO, NV_DMA_TARGET_AGP, &pushbuf); } else if ( cb->flags & NOUVEAU_MEM_PCI) { DRM_DEBUG("Creating CB in PCI memory\n"); @@ -226,7 +225,7 @@ nouveau_fifo_cmdbuf_alloc(struct drm_device *dev, int channel) } else if (dev_priv->card_type != NV_04) { ret = nouveau_gpuobj_dma_new (dev, channel, NV_CLASS_DMA_IN_MEMORY, - cb->start - drm_get_resource_start(dev, 1), + cb->start, cb->size, NV_DMA_ACCESS_RO, NV_DMA_TARGET_VIDMEM, &pushbuf); } else { @@ -236,7 +235,8 @@ nouveau_fifo_cmdbuf_alloc(struct drm_device *dev, int channel) */ ret = nouveau_gpuobj_dma_new (dev, channel, NV_CLASS_DMA_IN_MEMORY, - cb->start, cb->size, NV_DMA_ACCESS_RO, + cb->start + drm_get_resource_start(dev, 1), + cb->size, NV_DMA_ACCESS_RO, NV_DMA_TARGET_PCI, &pushbuf); } @@ -467,8 +467,9 @@ static int nouveau_ioctl_fifo_alloc(DRM_IOCTL_ARGS) { DRM_DEVICE; drm_nouveau_private_t *dev_priv = dev->dev_private; - struct nouveau_fifo *chan; drm_nouveau_fifo_alloc_t init; + drm_map_list_t *entry; + struct nouveau_fifo *chan; int res; DRM_COPY_FROM_USER_IOCTL(init, (drm_nouveau_fifo_alloc_t __user *) data, @@ -501,12 +502,17 @@ static int nouveau_ioctl_fifo_alloc(DRM_IOCTL_ARGS) if (res != 0) return res; + entry = drm_find_matching_map(dev, chan->regs); + if (!entry) + return DRM_ERR(EINVAL); + init.ctrl = entry->user_token; + /* pass back FIFO map info to the caller */ - init.cmdbuf = chan->pushbuf_mem->start; + init.cmdbuf = chan->pushbuf_mem->map_handle; init.cmdbuf_size = chan->pushbuf_mem->size; /* and the notifier block */ - init.notifier = chan->notifier_block->start; + init.notifier = chan->notifier_block->map_handle; init.notifier_size = chan->notifier_block->size; DRM_COPY_TO_USER_IOCTL((drm_nouveau_fifo_alloc_t __user *)data, diff --git a/shared-core/nouveau_mem.c b/shared-core/nouveau_mem.c index 790f6b5b..d4b2bc04 100644 --- a/shared-core/nouveau_mem.c +++ b/shared-core/nouveau_mem.c @@ -339,8 +339,7 @@ int nouveau_mem_init(struct drm_device *dev) } if (nouveau_mem_init_heap(&dev_priv->agp_heap, - info.aperture_base, - info.aperture_size)) + 0, 
info.aperture_size)) goto no_agp; dev_priv->agp_phys = info.aperture_base; @@ -360,7 +359,8 @@ no_agp: goto no_pci; } - if ( nouveau_mem_init_heap(&dev_priv->pci_heap, (uint64_t) dev->sg->virtual, dev->sg->pages * PAGE_SIZE)) + if ( nouveau_mem_init_heap(&dev_priv->pci_heap, 0, + dev->sg->pages * PAGE_SIZE)) { DRM_ERROR("Unable to initialize pci_heap!"); goto no_pci; @@ -387,18 +387,13 @@ have_agp: /* On cards with > 256Mb, you can't map everything. * So we create a second FB heap for that type of memory */ if (nouveau_mem_init_heap(&dev_priv->fb_heap, - drm_get_resource_start(dev,1), - 256*1024*1024)) + 0, 256*1024*1024)) return DRM_ERR(ENOMEM); if (nouveau_mem_init_heap(&dev_priv->fb_nomap_heap, - drm_get_resource_start(dev,1) + - 256*1024*1024, - fb_size-256*1024*1024)) + 256*1024*1024, fb_size-256*1024*1024)) return DRM_ERR(ENOMEM); } else { - if (nouveau_mem_init_heap(&dev_priv->fb_heap, - drm_get_resource_start(dev,1), - fb_size)) + if (nouveau_mem_init_heap(&dev_priv->fb_heap, 0, fb_size)) return DRM_ERR(ENOMEM); dev_priv->fb_nomap_heap=NULL; } @@ -473,23 +468,33 @@ alloc_ok: if (flags&NOUVEAU_MEM_MAPPED) { + drm_map_list_t *entry; int ret = 0; block->flags|=NOUVEAU_MEM_MAPPED; if (type == NOUVEAU_MEM_AGP) - ret = drm_addmap(dev, block->start - dev->agp->base, block->size, - _DRM_AGP, 0, &block->map); + ret = drm_addmap(dev, block->start + dev->agp->base, + block->size, _DRM_AGP, 0, &block->map); else if (type == NOUVEAU_MEM_FB) - ret = drm_addmap(dev, block->start, block->size, - _DRM_FRAME_BUFFER, 0, &block->map); + ret = drm_addmap(dev, block->start + dev_priv->fb_phys, + block->size, _DRM_FRAME_BUFFER, + 0, &block->map); else if (type == NOUVEAU_MEM_PCI) - ret = drm_addmap(dev, block->start - (unsigned long int)dev->sg->virtual, block->size, - _DRM_SCATTER_GATHER, 0, &block->map); + ret = drm_addmap(dev, block->start, block->size, + _DRM_SCATTER_GATHER, 0, &block->map); if (ret) { nouveau_mem_free_block(block); return NULL; } + + entry = drm_find_matching_map(dev, block->map); + if (!entry) { + nouveau_mem_free_block(block); + return NULL; + } + DRM_ERROR("user_token=0x%08x\n", entry->user_token); + block->map_handle = entry->user_token; } DRM_INFO("allocated 0x%llx\n", block->start); @@ -526,7 +531,8 @@ int nouveau_ioctl_mem_alloc(DRM_IOCTL_ARGS) block=nouveau_mem_alloc(dev, alloc.alignment, alloc.size, alloc.flags, filp); if (!block) return DRM_ERR(ENOMEM); - alloc.region_offset=block->start; + alloc.map_handle=block->map_handle; + alloc.offset=block->start; alloc.flags=block->flags; DRM_COPY_TO_USER_IOCTL((drm_nouveau_mem_alloc_t __user *) data, alloc, sizeof(alloc)); @@ -546,11 +552,11 @@ int nouveau_ioctl_mem_free(DRM_IOCTL_ARGS) block=NULL; if (memfree.flags&NOUVEAU_MEM_FB) - block = find_block(dev_priv->fb_heap, memfree.region_offset); + block = find_block(dev_priv->fb_heap, memfree.offset); else if (memfree.flags&NOUVEAU_MEM_AGP) - block = find_block(dev_priv->agp_heap, memfree.region_offset); + block = find_block(dev_priv->agp_heap, memfree.offset); else if (memfree.flags&NOUVEAU_MEM_PCI) - block = find_block(dev_priv->pci_heap, memfree.region_offset); + block = find_block(dev_priv->pci_heap, memfree.offset); if (!block) return DRM_ERR(EFAULT); if (block->filp != filp) diff --git a/shared-core/nouveau_notifier.c b/shared-core/nouveau_notifier.c index 30216293..7d892064 100644 --- a/shared-core/nouveau_notifier.c +++ b/shared-core/nouveau_notifier.c @@ -95,10 +95,8 @@ nouveau_notifier_alloc(drm_device_t *dev, int channel, uint32_t handle, offset = 
chan->notifier_block->start + mem->start; if (chan->notifier_block->flags & NOUVEAU_MEM_FB) { - offset -= drm_get_resource_start(dev, 1); target = NV_DMA_TARGET_VIDMEM; } else if (chan->notifier_block->flags & NOUVEAU_MEM_AGP) { - offset -= dev_priv->agp_phys; target = NV_DMA_TARGET_AGP; } else { DRM_ERROR("Bad DMA target, flags 0x%08x!\n", diff --git a/shared-core/nouveau_object.c b/shared-core/nouveau_object.c index 4f7ad111..e8ed708f 100644 --- a/shared-core/nouveau_object.c +++ b/shared-core/nouveau_object.c @@ -357,12 +357,12 @@ nouveau_gpuobj_instance_get(drm_device_t *dev, int channel, DRM_ERROR("AII, no VRAM backing gpuobj\n"); return DRM_ERR(EINVAL); } - *inst = gpuobj->im_backing->start - dev_priv->fb_phys; + *inst = gpuobj->im_backing->start; return 0; } else { /* ...from local heap */ cpramin = dev_priv->fifos[gpuobj->im_channel]->ramin->gpuobj; - *inst = (cpramin->im_backing->start - dev_priv->fb_phys) + + *inst = cpramin->im_backing->start + (gpuobj->im_pramin->start - cpramin->im_pramin->start); return 0; } @@ -917,7 +917,7 @@ nouveau_gpuobj_channel_init(drm_device_t *dev, int channel, } } else { - if ( dev_priv -> card_type >= NV_50 ) return 0; /*no PCIGART for NV50*/ + if (dev_priv -> card_type >= NV_50 ) return 0; /*no PCIGART for NV50*/ /*PCI*/ if((ret = nouveau_gpuobj_dma_new(dev, channel, NV_CLASS_DMA_IN_MEMORY, diff --git a/shared-core/nv50_instmem.c b/shared-core/nv50_instmem.c index 1ea64b35..81c60829 100644 --- a/shared-core/nv50_instmem.c +++ b/shared-core/nv50_instmem.c @@ -215,7 +215,7 @@ nv50_instmem_bind(drm_device_t *dev, nouveau_gpuobj_t *gpuobj) pte = (gpuobj->im_pramin->start >> 12) << 3; pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte; - vram = gpuobj->im_backing->start - dev_priv->fb_phys; + vram = gpuobj->im_backing->start; if (pte == pte_end) { DRM_ERROR("WARNING: badness in bind() pte calc\n"); From 522a0c868c79b48c5434f39faab1a02ca4425a90 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 12 Jul 2007 11:39:45 +1000 Subject: [PATCH 104/437] nouveau: nuke left over debug message --- shared-core/nouveau_mem.c | 1 - 1 file changed, 1 deletion(-) diff --git a/shared-core/nouveau_mem.c b/shared-core/nouveau_mem.c index d4b2bc04..9bfa8365 100644 --- a/shared-core/nouveau_mem.c +++ b/shared-core/nouveau_mem.c @@ -493,7 +493,6 @@ alloc_ok: nouveau_mem_free_block(block); return NULL; } - DRM_ERROR("user_token=0x%08x\n", entry->user_token); block->map_handle = entry->user_token; } From af317f1cc7136dbf03b39ced64c42202703c5066 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 12 Jul 2007 11:55:47 +1000 Subject: [PATCH 105/437] nouveau: mem_alloc() returns offsets, not absolute addresses now. 
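
Callers that want an address now have to rebase the returned offset against
the heap it came from; a schematic sketch for a mapped PCI (scatter-gather)
allocation, with vaddr introduced purely for illustration:

    /* nouveau_mem_alloc() hands back a heap-relative offset in block->start
     * plus a map handle for mmap(); add the scatter-gather heap's virtual
     * base to recover a kernel virtual address. */
    block = nouveau_mem_alloc(dev, alloc.alignment, alloc.size,
                              NOUVEAU_MEM_PCI | NOUVEAU_MEM_MAPPED, filp);
    if (block)
            vaddr = (unsigned long)dev->sg->virtual + block->start;

The added "offset += dev->sg->virtual;" line in the ctxdma path below does
the same rebasing before the page table entries are generated.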
--- shared-core/nouveau_object.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/shared-core/nouveau_object.c b/shared-core/nouveau_object.c index e8ed708f..16b38e95 100644 --- a/shared-core/nouveau_object.c +++ b/shared-core/nouveau_object.c @@ -646,6 +646,8 @@ nouveau_gpuobj_dma_new(drm_device_t *dev, int channel, int class, class)); INSTANCE_WR(*gpuobj, 1, size-1); + offset += dev->sg->virtual; + /*write starting at the third dword*/ instance_offset = 2; @@ -921,7 +923,7 @@ nouveau_gpuobj_channel_init(drm_device_t *dev, int channel, /*PCI*/ if((ret = nouveau_gpuobj_dma_new(dev, channel, NV_CLASS_DMA_IN_MEMORY, - dev->sg->virtual, dev->sg->pages * PAGE_SIZE, + 0, dev->sg->pages * PAGE_SIZE, NV_DMA_ACCESS_RW, NV_DMA_TARGET_PCI_NONLINEAR, &tt))) { DRM_ERROR("Error creating PCI TT ctxdma: %d\n", DRM_ERR(ENOMEM)); From 851c950d988e5a47fa6add71427e5ef8d4dcf231 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 13 Jul 2007 02:18:59 +1000 Subject: [PATCH 106/437] nouveau: unbreak AGP --- shared-core/nouveau_mem.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/shared-core/nouveau_mem.c b/shared-core/nouveau_mem.c index 9bfa8365..79d1bb87 100644 --- a/shared-core/nouveau_mem.c +++ b/shared-core/nouveau_mem.c @@ -473,8 +473,8 @@ alloc_ok: block->flags|=NOUVEAU_MEM_MAPPED; if (type == NOUVEAU_MEM_AGP) - ret = drm_addmap(dev, block->start + dev->agp->base, - block->size, _DRM_AGP, 0, &block->map); + ret = drm_addmap(dev, block->start, block->size, + _DRM_AGP, 0, &block->map); else if (type == NOUVEAU_MEM_FB) ret = drm_addmap(dev, block->start + dev_priv->fb_phys, block->size, _DRM_FRAME_BUFFER, From 0029713451af6f5f216079775ff77cae9b423c0e Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 13 Jul 2007 15:09:31 +1000 Subject: [PATCH 107/437] nouveau: nuke internal typedefs, and drm_device_t use. 
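
The conversion is mechanical: the internal typedef aliases are removed and
every user spells out the struct tag instead, as in this pair of
declarations taken from the diff below:

    /* before */
    extern int nouveau_fifo_init(drm_device_t *dev);
    extern int nouveau_gpuobj_del(drm_device_t *, nouveau_gpuobj_t **);

    /* after */
    extern int nouveau_fifo_init(struct drm_device *dev);
    extern int nouveau_gpuobj_del(struct drm_device *, struct nouveau_gpuobj **);

No behavioural change is intended; the large diffstat is the same
substitution applied across the driver.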
--- shared-core/nouveau_drm.h | 40 ++--- shared-core/nouveau_drv.h | 320 +++++++++++++++++---------------- shared-core/nouveau_fifo.c | 49 ++--- shared-core/nouveau_irq.c | 36 ++-- shared-core/nouveau_mem.c | 25 +-- shared-core/nouveau_notifier.c | 23 +-- shared-core/nouveau_object.c | 133 +++++++------- shared-core/nouveau_state.c | 54 +++--- shared-core/nv04_fb.c | 6 +- shared-core/nv04_fifo.c | 16 +- shared-core/nv04_graph.c | 20 +-- shared-core/nv04_instmem.c | 20 +-- shared-core/nv04_mc.c | 6 +- shared-core/nv04_timer.c | 6 +- shared-core/nv10_fb.c | 6 +- shared-core/nv10_fifo.c | 16 +- shared-core/nv10_graph.c | 32 ++-- shared-core/nv20_graph.c | 40 ++--- shared-core/nv30_graph.c | 38 ++-- shared-core/nv40_fb.c | 6 +- shared-core/nv40_fifo.c | 16 +- shared-core/nv40_graph.c | 64 +++---- shared-core/nv40_mc.c | 6 +- shared-core/nv50_fifo.c | 58 +++--- shared-core/nv50_graph.c | 42 ++--- shared-core/nv50_instmem.c | 22 +-- shared-core/nv50_mc.c | 6 +- 27 files changed, 554 insertions(+), 552 deletions(-) diff --git a/shared-core/nouveau_drm.h b/shared-core/nouveau_drm.h index 78ab9508..e2a9ea83 100644 --- a/shared-core/nouveau_drm.h +++ b/shared-core/nouveau_drm.h @@ -27,7 +27,7 @@ #define NOUVEAU_DRM_HEADER_PATCHLEVEL 9 -typedef struct drm_nouveau_fifo_alloc { +struct drm_nouveau_fifo_alloc { uint32_t fb_ctxdma_handle; uint32_t tt_ctxdma_handle; @@ -42,27 +42,24 @@ typedef struct drm_nouveau_fifo_alloc { /* Notifier memory */ drm_handle_t notifier; int notifier_size; -} -drm_nouveau_fifo_alloc_t; +}; -typedef struct drm_nouveau_grobj_alloc { +struct drm_nouveau_grobj_alloc { int channel; uint32_t handle; int class; -} -drm_nouveau_grobj_alloc_t; +}; #define NOUVEAU_MEM_ACCESS_RO 1 #define NOUVEAU_MEM_ACCESS_WO 2 #define NOUVEAU_MEM_ACCESS_RW 3 -typedef struct drm_nouveau_notifier_alloc { +struct drm_nouveau_notifier_alloc { int channel; uint32_t handle; int count; uint32_t offset; -} -drm_nouveau_notifier_alloc_t; +}; #define NOUVEAU_MEM_FB 0x00000001 #define NOUVEAU_MEM_AGP 0x00000002 @@ -76,20 +73,18 @@ drm_nouveau_notifier_alloc_t; #define NOUVEAU_MEM_INSTANCE 0x00000200 /* internal */ #define NOUVEAU_MEM_NOTIFIER 0x00000400 /* internal */ -typedef struct drm_nouveau_mem_alloc { +struct drm_nouveau_mem_alloc { int flags; int alignment; uint64_t size; // in bytes uint64_t offset; drm_handle_t map_handle; -} -drm_nouveau_mem_alloc_t; +}; -typedef struct drm_nouveau_mem_free { +struct drm_nouveau_mem_free { uint64_t offset; int flags; -} -drm_nouveau_mem_free_t; +}; /* FIXME : maybe unify {GET,SET}PARAMs */ #define NOUVEAU_GETPARAM_PCI_VENDOR 3 @@ -100,19 +95,17 @@ drm_nouveau_mem_free_t; #define NOUVEAU_GETPARAM_FB_SIZE 8 #define NOUVEAU_GETPARAM_AGP_SIZE 9 #define NOUVEAU_GETPARAM_PCI_PHYSICAL 10 -typedef struct drm_nouveau_getparam { +struct drm_nouveau_getparam { uint64_t param; uint64_t value; -} -drm_nouveau_getparam_t; +}; #define NOUVEAU_SETPARAM_CMDBUF_LOCATION 1 #define NOUVEAU_SETPARAM_CMDBUF_SIZE 2 -typedef struct drm_nouveau_setparam { +struct drm_nouveau_setparam { uint64_t param; uint64_t value; -} -drm_nouveau_setparam_t; +}; enum nouveau_card_type { NV_UNKNOWN =0, @@ -142,12 +135,11 @@ enum nouveau_bus_type { #define NOUVEAU_MAX_SAREA_CLIPRECTS 16 -typedef struct drm_nouveau_sarea { +struct drm_nouveau_sarea { /* the cliprects */ drm_clip_rect_t boxes[NOUVEAU_MAX_SAREA_CLIPRECTS]; unsigned int nbox; -} -drm_nouveau_sarea_t; +}; #define DRM_NOUVEAU_FIFO_ALLOC 0x00 #define DRM_NOUVEAU_GROBJ_ALLOC 0x01 diff --git a/shared-core/nouveau_drv.h b/shared-core/nouveau_drv.h 
index 99ddb586..4fa979e6 100644 --- a/shared-core/nouveau_drv.h +++ b/shared-core/nouveau_drv.h @@ -66,7 +66,7 @@ enum nouveau_flags { #define NVOBJ_FLAG_ZERO_ALLOC (1 << 1) #define NVOBJ_FLAG_ZERO_FREE (1 << 2) #define NVOBJ_FLAG_FAKE (1 << 3) -typedef struct nouveau_gpuobj { +struct nouveau_gpuobj { struct nouveau_gpuobj *next; struct nouveau_gpuobj *prev; @@ -80,17 +80,17 @@ typedef struct nouveau_gpuobj { uint32_t engine; uint32_t class; -} nouveau_gpuobj_t; +}; -typedef struct nouveau_gpuobj_ref { +struct nouveau_gpuobj_ref { struct nouveau_gpuobj_ref *next; - nouveau_gpuobj_t *gpuobj; + struct nouveau_gpuobj *gpuobj; uint32_t instance; int channel; int handle; -} nouveau_gpuobj_ref_t; +}; struct nouveau_fifo { @@ -102,9 +102,9 @@ struct nouveau_fifo drm_local_map_t *regs; /* DMA push buffer */ - nouveau_gpuobj_ref_t *pushbuf; - struct mem_block *pushbuf_mem; - uint32_t pushbuf_base; + struct nouveau_gpuobj_ref *pushbuf; + struct mem_block *pushbuf_mem; + uint32_t pushbuf_base; /* Notifier memory */ struct mem_block *notifier_block; @@ -112,17 +112,17 @@ struct nouveau_fifo drm_local_map_t *notifier_map; /* PFIFO context */ - nouveau_gpuobj_ref_t *ramfc; + struct nouveau_gpuobj_ref *ramfc; /* PGRAPH context */ - nouveau_gpuobj_ref_t *ramin_grctx; + struct nouveau_gpuobj_ref *ramin_grctx; uint32_t pgraph_ctx [340]; /* XXX dynamic alloc ? */ /* Objects */ - nouveau_gpuobj_ref_t *ramin; /* Private instmem */ - struct mem_block *ramin_heap; /* Private PRAMIN heap */ - nouveau_gpuobj_ref_t *ramht; /* Hash table */ - nouveau_gpuobj_ref_t *ramht_refs; /* Objects referenced by RAMHT */ + struct nouveau_gpuobj_ref *ramin; /* Private instmem */ + struct mem_block *ramin_heap; /* Private PRAMIN heap */ + struct nouveau_gpuobj_ref *ramht; /* Hash table */ + struct nouveau_gpuobj_ref *ramht_refs; /* Objects referenced by RAMHT */ }; struct nouveau_config { @@ -132,59 +132,59 @@ struct nouveau_config { } cmdbuf; }; -typedef struct nouveau_engine_func { +struct nouveau_engine_func { struct { void *priv; - int (*init)(drm_device_t *dev); - void (*takedown)(drm_device_t *dev); + int (*init)(struct drm_device *dev); + void (*takedown)(struct drm_device *dev); - int (*populate)(drm_device_t *, nouveau_gpuobj_t *, + int (*populate)(struct drm_device *, struct nouveau_gpuobj *, uint32_t *size); - void (*clear)(drm_device_t *, nouveau_gpuobj_t *); - int (*bind)(drm_device_t *, nouveau_gpuobj_t *); - int (*unbind)(drm_device_t *, nouveau_gpuobj_t *); + void (*clear)(struct drm_device *, struct nouveau_gpuobj *); + int (*bind)(struct drm_device *, struct nouveau_gpuobj *); + int (*unbind)(struct drm_device *, struct nouveau_gpuobj *); } instmem; struct { - int (*init)(drm_device_t *dev); - void (*takedown)(drm_device_t *dev); + int (*init)(struct drm_device *dev); + void (*takedown)(struct drm_device *dev); } mc; struct { - int (*init)(drm_device_t *dev); - void (*takedown)(drm_device_t *dev); + int (*init)(struct drm_device *dev); + void (*takedown)(struct drm_device *dev); } timer; struct { - int (*init)(drm_device_t *dev); - void (*takedown)(drm_device_t *dev); + int (*init)(struct drm_device *dev); + void (*takedown)(struct drm_device *dev); } fb; struct { - int (*init)(drm_device_t *); - void (*takedown)(drm_device_t *); + int (*init)(struct drm_device *); + void (*takedown)(struct drm_device *); - int (*create_context)(drm_device_t *, int channel); - void (*destroy_context)(drm_device_t *, int channel); - int (*load_context)(drm_device_t *, int channel); - int (*save_context)(drm_device_t *, int 
channel); + int (*create_context)(struct drm_device *, int channel); + void (*destroy_context)(struct drm_device *, int channel); + int (*load_context)(struct drm_device *, int channel); + int (*save_context)(struct drm_device *, int channel); } graph; struct { void *priv; - int (*init)(drm_device_t *); - void (*takedown)(drm_device_t *); + int (*init)(struct drm_device *); + void (*takedown)(struct drm_device *); - int (*create_context)(drm_device_t *, int channel); - void (*destroy_context)(drm_device_t *, int channel); - int (*load_context)(drm_device_t *, int channel); - int (*save_context)(drm_device_t *, int channel); + int (*create_context)(struct drm_device *, int channel); + void (*destroy_context)(struct drm_device *, int channel); + int (*load_context)(struct drm_device *, int channel); + int (*save_context)(struct drm_device *, int channel); } fifo; -} nouveau_engine_func_t; +}; -typedef struct drm_nouveau_private { +struct drm_nouveau_private { enum { NOUVEAU_CARD_INIT_DOWN, NOUVEAU_CARD_INIT_DONE, @@ -207,7 +207,7 @@ typedef struct drm_nouveau_private { struct nouveau_engine_func Engine; /* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */ - nouveau_gpuobj_t *ramht; + struct nouveau_gpuobj *ramht; uint32_t ramin_rsvd_vram; uint32_t ramht_offset; uint32_t ramht_size; @@ -234,16 +234,15 @@ typedef struct drm_nouveau_private { /* context table pointed to be NV_PGRAPH_CHANNEL_CTX_TABLE (0x400780) */ uint32_t ctx_table_size; - nouveau_gpuobj_ref_t *ctx_table; + struct nouveau_gpuobj_ref *ctx_table; struct nouveau_config config; - nouveau_gpuobj_t *gpuobj_all; -} -drm_nouveau_private_t; + struct nouveau_gpuobj *gpuobj_all; +}; /* nouveau_state.c */ -extern void nouveau_preclose(drm_device_t * dev, DRMFILE filp); +extern void nouveau_preclose(struct drm_device * dev, DRMFILE filp); extern int nouveau_load(struct drm_device *dev, unsigned long flags); extern int nouveau_firstopen(struct drm_device *dev); extern void nouveau_lastclose(struct drm_device *dev); @@ -271,172 +270,175 @@ extern int nouveau_mem_init(struct drm_device *dev); extern void nouveau_mem_close(struct drm_device *dev); /* nouveau_notifier.c */ -extern int nouveau_notifier_init_channel(drm_device_t *, int channel, DRMFILE); -extern void nouveau_notifier_takedown_channel(drm_device_t *, int channel); -extern int nouveau_notifier_alloc(drm_device_t *, int channel, +extern int nouveau_notifier_init_channel(struct drm_device *, int channel, DRMFILE); +extern void nouveau_notifier_takedown_channel(struct drm_device *, int channel); +extern int nouveau_notifier_alloc(struct drm_device *, int channel, uint32_t handle, int cout, uint32_t *offset); extern int nouveau_ioctl_notifier_alloc(DRM_IOCTL_ARGS); /* nouveau_fifo.c */ -extern int nouveau_fifo_init(drm_device_t *dev); -extern int nouveau_fifo_number(drm_device_t *dev); -extern int nouveau_fifo_ctx_size(drm_device_t *dev); -extern void nouveau_fifo_cleanup(drm_device_t *dev, DRMFILE filp); -extern int nouveau_fifo_owner(drm_device_t *dev, DRMFILE filp, int channel); -extern void nouveau_fifo_free(drm_device_t *dev, int channel); +extern int nouveau_fifo_init(struct drm_device *dev); +extern int nouveau_fifo_number(struct drm_device *dev); +extern int nouveau_fifo_ctx_size(struct drm_device *dev); +extern void nouveau_fifo_cleanup(struct drm_device *dev, DRMFILE filp); +extern int nouveau_fifo_owner(struct drm_device *dev, DRMFILE filp, int channel); +extern void nouveau_fifo_free(struct drm_device *dev, int channel); /* nouveau_object.c */ -extern void 
nouveau_gpuobj_takedown(drm_device_t *dev); -extern int nouveau_gpuobj_channel_init(drm_device_t *, int channel, +extern void nouveau_gpuobj_takedown(struct drm_device *dev); +extern int nouveau_gpuobj_channel_init(struct drm_device *, int channel, uint32_t vram_h, uint32_t tt_h); -extern void nouveau_gpuobj_channel_takedown(drm_device_t *, int channel); -extern int nouveau_gpuobj_new(drm_device_t *, int channel, int size, int align, - uint32_t flags, nouveau_gpuobj_t **); -extern int nouveau_gpuobj_del(drm_device_t *, nouveau_gpuobj_t **); -extern int nouveau_gpuobj_ref_add(drm_device_t *, int channel, uint32_t handle, - nouveau_gpuobj_t *, nouveau_gpuobj_ref_t **); -extern int nouveau_gpuobj_ref_del(drm_device_t *, nouveau_gpuobj_ref_t **); -extern int nouveau_gpuobj_new_ref(drm_device_t *, int chan_obj, int chan_ref, +extern void nouveau_gpuobj_channel_takedown(struct drm_device *, int channel); +extern int nouveau_gpuobj_new(struct drm_device *, int channel, int size, int align, + uint32_t flags, struct nouveau_gpuobj **); +extern int nouveau_gpuobj_del(struct drm_device *, struct nouveau_gpuobj **); +extern int nouveau_gpuobj_ref_add(struct drm_device *, int channel, uint32_t handle, + struct nouveau_gpuobj *, + struct nouveau_gpuobj_ref **); +extern int nouveau_gpuobj_ref_del(struct drm_device *, struct nouveau_gpuobj_ref **); +extern int nouveau_gpuobj_new_ref(struct drm_device *, int chan_obj, int chan_ref, uint32_t handle, int size, int align, - uint32_t flags, nouveau_gpuobj_ref_t **); -extern int nouveau_gpuobj_new_fake(drm_device_t *, uint32_t offset, + uint32_t flags, struct nouveau_gpuobj_ref **); +extern int nouveau_gpuobj_new_fake(struct drm_device *, uint32_t offset, uint32_t size, uint32_t flags, - nouveau_gpuobj_t**, nouveau_gpuobj_ref_t**); -extern int nouveau_gpuobj_dma_new(drm_device_t *, int channel, int class, + struct nouveau_gpuobj**, + struct nouveau_gpuobj_ref**); +extern int nouveau_gpuobj_dma_new(struct drm_device *, int channel, int class, uint64_t offset, uint64_t size, - int access, int target, nouveau_gpuobj_t **); -extern int nouveau_gpuobj_gr_new(drm_device_t *, int channel, int class, - nouveau_gpuobj_t **); + int access, int target, + struct nouveau_gpuobj **); +extern int nouveau_gpuobj_gr_new(struct drm_device *, int channel, int class, + struct nouveau_gpuobj **); extern int nouveau_ioctl_grobj_alloc(DRM_IOCTL_ARGS); /* nouveau_irq.c */ extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS); -extern void nouveau_irq_preinstall(drm_device_t*); -extern void nouveau_irq_postinstall(drm_device_t*); -extern void nouveau_irq_uninstall(drm_device_t*); +extern void nouveau_irq_preinstall(struct drm_device*); +extern void nouveau_irq_postinstall(struct drm_device*); +extern void nouveau_irq_uninstall(struct drm_device*); /* nv04_fb.c */ -extern int nv04_fb_init(drm_device_t *dev); -extern void nv04_fb_takedown(drm_device_t *dev); +extern int nv04_fb_init(struct drm_device *dev); +extern void nv04_fb_takedown(struct drm_device *dev); /* nv10_fb.c */ -extern int nv10_fb_init(drm_device_t *dev); -extern void nv10_fb_takedown(drm_device_t *dev); +extern int nv10_fb_init(struct drm_device *dev); +extern void nv10_fb_takedown(struct drm_device *dev); /* nv40_fb.c */ -extern int nv40_fb_init(drm_device_t *dev); -extern void nv40_fb_takedown(drm_device_t *dev); +extern int nv40_fb_init(struct drm_device *dev); +extern void nv40_fb_takedown(struct drm_device *dev); /* nv04_fifo.c */ -extern int nv04_fifo_create_context(drm_device_t *dev, int channel); -extern void 
nv04_fifo_destroy_context(drm_device_t *dev, int channel); -extern int nv04_fifo_load_context(drm_device_t *dev, int channel); -extern int nv04_fifo_save_context(drm_device_t *dev, int channel); +extern int nv04_fifo_create_context(struct drm_device *dev, int channel); +extern void nv04_fifo_destroy_context(struct drm_device *dev, int channel); +extern int nv04_fifo_load_context(struct drm_device *dev, int channel); +extern int nv04_fifo_save_context(struct drm_device *dev, int channel); /* nv10_fifo.c */ -extern int nv10_fifo_create_context(drm_device_t *dev, int channel); -extern void nv10_fifo_destroy_context(drm_device_t *dev, int channel); -extern int nv10_fifo_load_context(drm_device_t *dev, int channel); -extern int nv10_fifo_save_context(drm_device_t *dev, int channel); +extern int nv10_fifo_create_context(struct drm_device *dev, int channel); +extern void nv10_fifo_destroy_context(struct drm_device *dev, int channel); +extern int nv10_fifo_load_context(struct drm_device *dev, int channel); +extern int nv10_fifo_save_context(struct drm_device *dev, int channel); /* nv40_fifo.c */ -extern int nv40_fifo_create_context(drm_device_t *, int channel); -extern void nv40_fifo_destroy_context(drm_device_t *, int channel); -extern int nv40_fifo_load_context(drm_device_t *, int channel); -extern int nv40_fifo_save_context(drm_device_t *, int channel); +extern int nv40_fifo_create_context(struct drm_device *, int channel); +extern void nv40_fifo_destroy_context(struct drm_device *, int channel); +extern int nv40_fifo_load_context(struct drm_device *, int channel); +extern int nv40_fifo_save_context(struct drm_device *, int channel); /* nv50_fifo.c */ -extern int nv50_fifo_init(drm_device_t *); -extern void nv50_fifo_takedown(drm_device_t *); -extern int nv50_fifo_create_context(drm_device_t *, int channel); -extern void nv50_fifo_destroy_context(drm_device_t *, int channel); -extern int nv50_fifo_load_context(drm_device_t *, int channel); -extern int nv50_fifo_save_context(drm_device_t *, int channel); +extern int nv50_fifo_init(struct drm_device *); +extern void nv50_fifo_takedown(struct drm_device *); +extern int nv50_fifo_create_context(struct drm_device *, int channel); +extern void nv50_fifo_destroy_context(struct drm_device *, int channel); +extern int nv50_fifo_load_context(struct drm_device *, int channel); +extern int nv50_fifo_save_context(struct drm_device *, int channel); /* nv04_graph.c */ -extern void nouveau_nv04_context_switch(drm_device_t *dev); -extern int nv04_graph_init(drm_device_t *dev); -extern void nv04_graph_takedown(drm_device_t *dev); -extern int nv04_graph_create_context(drm_device_t *dev, int channel); -extern void nv04_graph_destroy_context(drm_device_t *dev, int channel); -extern int nv04_graph_load_context(drm_device_t *dev, int channel); -extern int nv04_graph_save_context(drm_device_t *dev, int channel); +extern void nouveau_nv04_context_switch(struct drm_device *dev); +extern int nv04_graph_init(struct drm_device *dev); +extern void nv04_graph_takedown(struct drm_device *dev); +extern int nv04_graph_create_context(struct drm_device *dev, int channel); +extern void nv04_graph_destroy_context(struct drm_device *dev, int channel); +extern int nv04_graph_load_context(struct drm_device *dev, int channel); +extern int nv04_graph_save_context(struct drm_device *dev, int channel); /* nv10_graph.c */ -extern void nouveau_nv10_context_switch(drm_device_t *dev); -extern int nv10_graph_init(drm_device_t *dev); -extern void nv10_graph_takedown(drm_device_t *dev); -extern 
int nv10_graph_create_context(drm_device_t *dev, int channel); -extern void nv10_graph_destroy_context(drm_device_t *dev, int channel); -extern int nv10_graph_load_context(drm_device_t *dev, int channel); -extern int nv10_graph_save_context(drm_device_t *dev, int channel); +extern void nouveau_nv10_context_switch(struct drm_device *dev); +extern int nv10_graph_init(struct drm_device *dev); +extern void nv10_graph_takedown(struct drm_device *dev); +extern int nv10_graph_create_context(struct drm_device *dev, int channel); +extern void nv10_graph_destroy_context(struct drm_device *dev, int channel); +extern int nv10_graph_load_context(struct drm_device *dev, int channel); +extern int nv10_graph_save_context(struct drm_device *dev, int channel); /* nv20_graph.c */ -extern void nouveau_nv20_context_switch(drm_device_t *dev); -extern int nv20_graph_init(drm_device_t *dev); -extern void nv20_graph_takedown(drm_device_t *dev); -extern int nv20_graph_create_context(drm_device_t *dev, int channel); -extern void nv20_graph_destroy_context(drm_device_t *dev, int channel); -extern int nv20_graph_load_context(drm_device_t *dev, int channel); -extern int nv20_graph_save_context(drm_device_t *dev, int channel); +extern void nouveau_nv20_context_switch(struct drm_device *dev); +extern int nv20_graph_init(struct drm_device *dev); +extern void nv20_graph_takedown(struct drm_device *dev); +extern int nv20_graph_create_context(struct drm_device *dev, int channel); +extern void nv20_graph_destroy_context(struct drm_device *dev, int channel); +extern int nv20_graph_load_context(struct drm_device *dev, int channel); +extern int nv20_graph_save_context(struct drm_device *dev, int channel); /* nv30_graph.c */ -extern int nv30_graph_init(drm_device_t *dev); -extern void nv30_graph_takedown(drm_device_t *dev); -extern int nv30_graph_create_context(drm_device_t *, int channel); -extern void nv30_graph_destroy_context(drm_device_t *, int channel); -extern int nv30_graph_load_context(drm_device_t *, int channel); -extern int nv30_graph_save_context(drm_device_t *, int channel); +extern int nv30_graph_init(struct drm_device *dev); +extern void nv30_graph_takedown(struct drm_device *dev); +extern int nv30_graph_create_context(struct drm_device *, int channel); +extern void nv30_graph_destroy_context(struct drm_device *, int channel); +extern int nv30_graph_load_context(struct drm_device *, int channel); +extern int nv30_graph_save_context(struct drm_device *, int channel); /* nv40_graph.c */ -extern int nv40_graph_init(drm_device_t *); -extern void nv40_graph_takedown(drm_device_t *); -extern int nv40_graph_create_context(drm_device_t *, int channel); -extern void nv40_graph_destroy_context(drm_device_t *, int channel); -extern int nv40_graph_load_context(drm_device_t *, int channel); -extern int nv40_graph_save_context(drm_device_t *, int channel); +extern int nv40_graph_init(struct drm_device *); +extern void nv40_graph_takedown(struct drm_device *); +extern int nv40_graph_create_context(struct drm_device *, int channel); +extern void nv40_graph_destroy_context(struct drm_device *, int channel); +extern int nv40_graph_load_context(struct drm_device *, int channel); +extern int nv40_graph_save_context(struct drm_device *, int channel); /* nv50_graph.c */ -extern int nv50_graph_init(drm_device_t *); -extern void nv50_graph_takedown(drm_device_t *); -extern int nv50_graph_create_context(drm_device_t *, int channel); -extern void nv50_graph_destroy_context(drm_device_t *, int channel); -extern int 
nv50_graph_load_context(drm_device_t *, int channel); -extern int nv50_graph_save_context(drm_device_t *, int channel); +extern int nv50_graph_init(struct drm_device *); +extern void nv50_graph_takedown(struct drm_device *); +extern int nv50_graph_create_context(struct drm_device *, int channel); +extern void nv50_graph_destroy_context(struct drm_device *, int channel); +extern int nv50_graph_load_context(struct drm_device *, int channel); +extern int nv50_graph_save_context(struct drm_device *, int channel); /* nv04_instmem.c */ -extern int nv04_instmem_init(drm_device_t *dev); -extern void nv04_instmem_takedown(drm_device_t *dev); -extern int nv04_instmem_populate(drm_device_t*, nouveau_gpuobj_t*, +extern int nv04_instmem_init(struct drm_device *dev); +extern void nv04_instmem_takedown(struct drm_device *dev); +extern int nv04_instmem_populate(struct drm_device*, struct nouveau_gpuobj*, uint32_t *size); -extern void nv04_instmem_clear(drm_device_t*, nouveau_gpuobj_t*); -extern int nv04_instmem_bind(drm_device_t*, nouveau_gpuobj_t*); -extern int nv04_instmem_unbind(drm_device_t*, nouveau_gpuobj_t*); +extern void nv04_instmem_clear(struct drm_device*, struct nouveau_gpuobj*); +extern int nv04_instmem_bind(struct drm_device*, struct nouveau_gpuobj*); +extern int nv04_instmem_unbind(struct drm_device*, struct nouveau_gpuobj*); /* nv50_instmem.c */ -extern int nv50_instmem_init(drm_device_t *dev); -extern void nv50_instmem_takedown(drm_device_t *dev); -extern int nv50_instmem_populate(drm_device_t*, nouveau_gpuobj_t*, +extern int nv50_instmem_init(struct drm_device *dev); +extern void nv50_instmem_takedown(struct drm_device *dev); +extern int nv50_instmem_populate(struct drm_device*, struct nouveau_gpuobj*, uint32_t *size); -extern void nv50_instmem_clear(drm_device_t*, nouveau_gpuobj_t*); -extern int nv50_instmem_bind(drm_device_t*, nouveau_gpuobj_t*); -extern int nv50_instmem_unbind(drm_device_t*, nouveau_gpuobj_t*); +extern void nv50_instmem_clear(struct drm_device*, struct nouveau_gpuobj*); +extern int nv50_instmem_bind(struct drm_device*, struct nouveau_gpuobj*); +extern int nv50_instmem_unbind(struct drm_device*, struct nouveau_gpuobj*); /* nv04_mc.c */ -extern int nv04_mc_init(drm_device_t *dev); -extern void nv04_mc_takedown(drm_device_t *dev); +extern int nv04_mc_init(struct drm_device *dev); +extern void nv04_mc_takedown(struct drm_device *dev); /* nv40_mc.c */ -extern int nv40_mc_init(drm_device_t *dev); -extern void nv40_mc_takedown(drm_device_t *dev); +extern int nv40_mc_init(struct drm_device *dev); +extern void nv40_mc_takedown(struct drm_device *dev); /* nv50_mc.c */ -extern int nv50_mc_init(drm_device_t *dev); -extern void nv50_mc_takedown(drm_device_t *dev); +extern int nv50_mc_init(struct drm_device *dev); +extern void nv50_mc_takedown(struct drm_device *dev); /* nv04_timer.c */ -extern int nv04_timer_init(drm_device_t *dev); -extern void nv04_timer_takedown(drm_device_t *dev); +extern int nv04_timer_init(struct drm_device *dev); +extern void nv04_timer_takedown(struct drm_device *dev); extern long nouveau_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); diff --git a/shared-core/nouveau_fifo.c b/shared-core/nouveau_fifo.c index 7114a931..c769f58f 100644 --- a/shared-core/nouveau_fifo.c +++ b/shared-core/nouveau_fifo.c @@ -29,9 +29,9 @@ /* returns the number of hw fifos */ -int nouveau_fifo_number(drm_device_t* dev) +int nouveau_fifo_number(struct drm_device *dev) { - drm_nouveau_private_t *dev_priv=dev->dev_private; + struct drm_nouveau_private 
*dev_priv=dev->dev_private; switch(dev_priv->card_type) { case NV_03: @@ -47,9 +47,9 @@ int nouveau_fifo_number(drm_device_t* dev) } /* returns the size of fifo context */ -int nouveau_fifo_ctx_size(drm_device_t* dev) +int nouveau_fifo_ctx_size(struct drm_device *dev) { - drm_nouveau_private_t *dev_priv=dev->dev_private; + struct drm_nouveau_private *dev_priv=dev->dev_private; if (dev_priv->card_type >= NV_40) return 128; @@ -68,9 +68,9 @@ int nouveau_fifo_ctx_size(drm_device_t* dev) * voir nv_driver.c : NVPreInit */ -static int nouveau_fifo_instmem_configure(drm_device_t *dev) +static int nouveau_fifo_instmem_configure(struct drm_device *dev) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; NV_WRITE(NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ | @@ -109,9 +109,9 @@ static int nouveau_fifo_instmem_configure(drm_device_t *dev) return 0; } -int nouveau_fifo_init(drm_device_t *dev) +int nouveau_fifo_init(struct drm_device *dev) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; int ret; NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) & @@ -187,12 +187,12 @@ int nouveau_fifo_init(drm_device_t *dev) static int nouveau_fifo_cmdbuf_alloc(struct drm_device *dev, int channel) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo *chan = dev_priv->fifos[channel]; struct nouveau_config *config = &dev_priv->config; struct mem_block *cb; int cb_min_size = max(NV03_FIFO_SIZE,PAGE_SIZE); - nouveau_gpuobj_t *pushbuf = NULL; + struct nouveau_gpuobj *pushbuf = NULL; int ret; /* Defaults for unconfigured values */ @@ -258,12 +258,12 @@ nouveau_fifo_cmdbuf_alloc(struct drm_device *dev, int channel) } /* allocates and initializes a fifo for user space consumption */ -int nouveau_fifo_alloc(drm_device_t* dev, int *chan_ret, DRMFILE filp, +int nouveau_fifo_alloc(struct drm_device *dev, int *chan_ret, DRMFILE filp, uint32_t vram_handle, uint32_t tt_handle) { int ret; - drm_nouveau_private_t *dev_priv = dev->dev_private; - nouveau_engine_func_t *engine = &dev_priv->Engine; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_engine_func *engine = &dev_priv->Engine; struct nouveau_fifo *chan; int channel; @@ -392,10 +392,10 @@ int nouveau_fifo_alloc(drm_device_t* dev, int *chan_ret, DRMFILE filp, } /* stops a fifo */ -void nouveau_fifo_free(drm_device_t* dev, int channel) +void nouveau_fifo_free(struct drm_device *dev, int channel) { - drm_nouveau_private_t *dev_priv = dev->dev_private; - nouveau_engine_func_t *engine = &dev_priv->Engine; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_engine_func *engine = &dev_priv->Engine; struct nouveau_fifo *chan = dev_priv->fifos[channel]; if (!chan) { @@ -436,10 +436,10 @@ void nouveau_fifo_free(drm_device_t* dev, int channel) } /* cleanups all the fifos from filp */ -void nouveau_fifo_cleanup(drm_device_t* dev, DRMFILE filp) +void nouveau_fifo_cleanup(struct drm_device *dev, DRMFILE filp) { int i; - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; DRM_DEBUG("clearing FIFO enables from filp\n"); for(i=0;idev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; if (channel >= nouveau_fifo_number(dev)) return 0; @@ -466,13 +466,14 @@ nouveau_fifo_owner(drm_device_t *dev, DRMFILE filp, int channel) static int 
nouveau_ioctl_fifo_alloc(DRM_IOCTL_ARGS) { DRM_DEVICE; - drm_nouveau_private_t *dev_priv = dev->dev_private; - drm_nouveau_fifo_alloc_t init; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct drm_nouveau_fifo_alloc init; drm_map_list_t *entry; struct nouveau_fifo *chan; int res; - DRM_COPY_FROM_USER_IOCTL(init, (drm_nouveau_fifo_alloc_t __user *) data, + DRM_COPY_FROM_USER_IOCTL(init, + (struct drm_nouveau_fifo_alloc __user *) data, sizeof(init)); if (init.fb_ctxdma_handle == ~0 || init.tt_ctxdma_handle == ~0) @@ -515,7 +516,7 @@ static int nouveau_ioctl_fifo_alloc(DRM_IOCTL_ARGS) init.notifier = chan->notifier_block->map_handle; init.notifier_size = chan->notifier_block->size; - DRM_COPY_TO_USER_IOCTL((drm_nouveau_fifo_alloc_t __user *)data, + DRM_COPY_TO_USER_IOCTL((struct drm_nouveau_fifo_alloc __user *)data, init, sizeof(init)); return 0; } diff --git a/shared-core/nouveau_irq.c b/shared-core/nouveau_irq.c index b4102dd8..451262a1 100644 --- a/shared-core/nouveau_irq.c +++ b/shared-core/nouveau_irq.c @@ -36,9 +36,9 @@ #include "nouveau_drv.h" #include "nouveau_reg.h" -void nouveau_irq_preinstall(drm_device_t *dev) +void nouveau_irq_preinstall(struct drm_device *dev) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; DRM_DEBUG("IRQ: preinst\n"); @@ -71,9 +71,9 @@ void nouveau_irq_preinstall(drm_device_t *dev) NV_WRITE(NV03_PMC_INTR_EN_0, 0); } -void nouveau_irq_postinstall(drm_device_t *dev) +void nouveau_irq_postinstall(struct drm_device *dev) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; if (!dev_priv) { DRM_ERROR("AIII, no dev_priv\n"); @@ -107,9 +107,9 @@ void nouveau_irq_postinstall(drm_device_t *dev) NV_WRITE(NV03_PMC_INTR_EN_0, NV_PMC_INTR_EN_0_MASTER_ENABLE); } -void nouveau_irq_uninstall(drm_device_t *dev) +void nouveau_irq_uninstall(struct drm_device *dev) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; if (!dev_priv) { DRM_ERROR("AIII, no dev_priv\n"); @@ -138,10 +138,10 @@ void nouveau_irq_uninstall(drm_device_t *dev) NV_WRITE(NV03_PMC_INTR_EN_0, 0); } -static void nouveau_fifo_irq_handler(drm_device_t *dev) +static void nouveau_fifo_irq_handler(struct drm_device *dev) { uint32_t status, chmode, chstat, channel; - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; status = NV_READ(NV03_PFIFO_INTR_0); if (!status) @@ -200,9 +200,9 @@ static void nouveau_fifo_irq_handler(drm_device_t *dev) } #if 0 -static void nouveau_nv04_context_switch(drm_device_t *dev) +static void nouveau_nv04_context_switch(struct drm_device *dev) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; uint32_t channel,i; uint32_t max=0; NV_WRITE(NV04_PGRAPH_FIFO,0x0); @@ -247,9 +247,9 @@ static void nouveau_nv04_context_switch(drm_device_t *dev) #endif static void -nouveau_graph_dump_trap_info(drm_device_t *dev) +nouveau_graph_dump_trap_info(struct drm_device *dev) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; uint32_t address; uint32_t channel, class; uint32_t method, subc, data; @@ -273,10 +273,10 @@ nouveau_graph_dump_trap_info(drm_device_t *dev) ); } -static void nouveau_pgraph_irq_handler(drm_device_t *dev) +static void nouveau_pgraph_irq_handler(struct drm_device *dev) { uint32_t status; - 
drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; status = NV_READ(NV03_PGRAPH_INTR); if (!status) @@ -379,9 +379,9 @@ static void nouveau_pgraph_irq_handler(drm_device_t *dev) NV_WRITE(NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING); } -static void nouveau_crtc_irq_handler(drm_device_t *dev, int crtc) +static void nouveau_crtc_irq_handler(struct drm_device *dev, int crtc) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; if (crtc&1) { NV_WRITE(NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK); } @@ -393,8 +393,8 @@ static void nouveau_crtc_irq_handler(drm_device_t *dev, int crtc) irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS) { - drm_device_t *dev = (drm_device_t*)arg; - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_device *dev = (struct drm_device*)arg; + struct drm_nouveau_private *dev_priv = dev->dev_private; uint32_t status; status = NV_READ(NV03_PMC_INTR_0); diff --git a/shared-core/nouveau_mem.c b/shared-core/nouveau_mem.c index 79d1bb87..2b2418fb 100644 --- a/shared-core/nouveau_mem.c +++ b/shared-core/nouveau_mem.c @@ -208,7 +208,7 @@ void nouveau_mem_takedown(struct mem_block **heap) void nouveau_mem_close(struct drm_device *dev) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; nouveau_mem_takedown(&dev_priv->agp_heap); nouveau_mem_takedown(&dev_priv->fb_heap); if ( dev_priv->pci_heap ) @@ -220,7 +220,7 @@ void nouveau_mem_close(struct drm_device *dev) /* returns the amount of FB ram in bytes */ uint64_t nouveau_mem_fb_amount(struct drm_device *dev) { - drm_nouveau_private_t *dev_priv=dev->dev_private; + struct drm_nouveau_private *dev_priv=dev->dev_private; switch(dev_priv->card_type) { case NV_03: @@ -285,7 +285,7 @@ uint64_t nouveau_mem_fb_amount(struct drm_device *dev) int nouveau_mem_init(struct drm_device *dev) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; uint32_t fb_size; drm_scatter_gather_t sgreq; dev_priv->agp_phys=0; @@ -405,7 +405,7 @@ struct mem_block* nouveau_mem_alloc(struct drm_device *dev, int alignment, uint6 { struct mem_block *block; int type; - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; /* * Make things easier on ourselves: all allocations are page-aligned. 
@@ -515,8 +515,8 @@ void nouveau_mem_free(struct drm_device* dev, struct mem_block* block) int nouveau_ioctl_mem_alloc(DRM_IOCTL_ARGS) { DRM_DEVICE; - drm_nouveau_private_t *dev_priv = dev->dev_private; - drm_nouveau_mem_alloc_t alloc; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct drm_nouveau_mem_alloc alloc; struct mem_block *block; if (!dev_priv) { @@ -524,7 +524,8 @@ int nouveau_ioctl_mem_alloc(DRM_IOCTL_ARGS) return DRM_ERR(EINVAL); } - DRM_COPY_FROM_USER_IOCTL(alloc, (drm_nouveau_mem_alloc_t __user *) data, + DRM_COPY_FROM_USER_IOCTL(alloc, + (struct drm_nouveau_mem_alloc __user *) data, sizeof(alloc)); block=nouveau_mem_alloc(dev, alloc.alignment, alloc.size, alloc.flags, filp); @@ -534,7 +535,8 @@ int nouveau_ioctl_mem_alloc(DRM_IOCTL_ARGS) alloc.offset=block->start; alloc.flags=block->flags; - DRM_COPY_TO_USER_IOCTL((drm_nouveau_mem_alloc_t __user *) data, alloc, sizeof(alloc)); + DRM_COPY_TO_USER_IOCTL((struct drm_nouveau_mem_alloc __user *)data, + alloc, sizeof(alloc)); return 0; } @@ -542,11 +544,12 @@ int nouveau_ioctl_mem_alloc(DRM_IOCTL_ARGS) int nouveau_ioctl_mem_free(DRM_IOCTL_ARGS) { DRM_DEVICE; - drm_nouveau_private_t *dev_priv = dev->dev_private; - drm_nouveau_mem_free_t memfree; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct drm_nouveau_mem_free memfree; struct mem_block *block; - DRM_COPY_FROM_USER_IOCTL(memfree, (drm_nouveau_mem_free_t __user *) data, + DRM_COPY_FROM_USER_IOCTL(memfree, + (struct drm_nouveau_mem_free __user *)data, sizeof(memfree)); block=NULL; diff --git a/shared-core/nouveau_notifier.c b/shared-core/nouveau_notifier.c index 7d892064..36dba654 100644 --- a/shared-core/nouveau_notifier.c +++ b/shared-core/nouveau_notifier.c @@ -30,9 +30,9 @@ #include "nouveau_drv.h" int -nouveau_notifier_init_channel(drm_device_t *dev, int channel, DRMFILE filp) +nouveau_notifier_init_channel(struct drm_device *dev, int channel, DRMFILE filp) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo *chan = dev_priv->fifos[channel]; int flags, ret; @@ -56,9 +56,9 @@ nouveau_notifier_init_channel(drm_device_t *dev, int channel, DRMFILE filp) } void -nouveau_notifier_takedown_channel(drm_device_t *dev, int channel) +nouveau_notifier_takedown_channel(struct drm_device *dev, int channel) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo *chan = dev_priv->fifos[channel]; if (chan->notifier_block) { @@ -70,12 +70,12 @@ nouveau_notifier_takedown_channel(drm_device_t *dev, int channel) } int -nouveau_notifier_alloc(drm_device_t *dev, int channel, uint32_t handle, +nouveau_notifier_alloc(struct drm_device *dev, int channel, uint32_t handle, int count, uint32_t *b_offset) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo *chan = dev_priv->fifos[channel]; - nouveau_gpuobj_t *nobj = NULL; + struct nouveau_gpuobj *nobj = NULL; struct mem_block *mem; uint32_t offset; int target, ret; @@ -127,11 +127,12 @@ int nouveau_ioctl_notifier_alloc(DRM_IOCTL_ARGS) { DRM_DEVICE; - drm_nouveau_notifier_alloc_t na; + struct drm_nouveau_notifier_alloc na; int ret; - DRM_COPY_FROM_USER_IOCTL(na, (drm_nouveau_notifier_alloc_t __user*)data, - sizeof(na)); + DRM_COPY_FROM_USER_IOCTL(na, + (struct drm_nouveau_notifier_alloc __user*)data, + sizeof(na)); if (!nouveau_fifo_owner(dev, filp, na.channel)) {
DRM_ERROR("pid %d doesn't own channel %d\n", @@ -144,7 +145,7 @@ nouveau_ioctl_notifier_alloc(DRM_IOCTL_ARGS) if (ret) return ret; - DRM_COPY_TO_USER_IOCTL((drm_nouveau_notifier_alloc_t __user*)data, + DRM_COPY_TO_USER_IOCTL((struct drm_nouveau_notifier_alloc __user*)data, na, sizeof(na)); return 0; } diff --git a/shared-core/nouveau_object.c b/shared-core/nouveau_object.c index 16b38e95..c5697d8e 100644 --- a/shared-core/nouveau_object.c +++ b/shared-core/nouveau_object.c @@ -66,9 +66,9 @@ is given as: */ static uint32_t -nouveau_ramht_hash_handle(drm_device_t *dev, int channel, uint32_t handle) +nouveau_ramht_hash_handle(struct drm_device *dev, int channel, uint32_t handle) { - drm_nouveau_private_t *dev_priv=dev->dev_private; + struct drm_nouveau_private *dev_priv=dev->dev_private; uint32_t hash = 0; int i; @@ -85,10 +85,10 @@ nouveau_ramht_hash_handle(drm_device_t *dev, int channel, uint32_t handle) } static int -nouveau_ramht_entry_valid(drm_device_t *dev, nouveau_gpuobj_t *ramht, +nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht, uint32_t offset) { - drm_nouveau_private_t *dev_priv=dev->dev_private; + struct drm_nouveau_private *dev_priv=dev->dev_private; uint32_t ctx = INSTANCE_RD(ramht, (offset + 4)/4); if (dev_priv->card_type < NV_40) @@ -97,12 +97,12 @@ nouveau_ramht_entry_valid(drm_device_t *dev, nouveau_gpuobj_t *ramht, } static int -nouveau_ramht_insert(drm_device_t* dev, nouveau_gpuobj_ref_t *ref) +nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) { - drm_nouveau_private_t *dev_priv=dev->dev_private; + struct drm_nouveau_private *dev_priv=dev->dev_private; struct nouveau_fifo *chan = dev_priv->fifos[ref->channel]; - nouveau_gpuobj_t *ramht = chan->ramht ? chan->ramht->gpuobj : NULL; - nouveau_gpuobj_t *gpuobj = ref->gpuobj; + struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL; + struct nouveau_gpuobj *gpuobj = ref->gpuobj; uint32_t ctx, co, ho; if (!ramht) { @@ -146,11 +146,11 @@ nouveau_ramht_insert(drm_device_t* dev, nouveau_gpuobj_ref_t *ref) } static void -nouveau_ramht_remove(drm_device_t* dev, nouveau_gpuobj_ref_t *ref) +nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo *chan = dev_priv->fifos[ref->channel]; - nouveau_gpuobj_t *ramht = chan->ramht ? chan->ramht->gpuobj : NULL; + struct nouveau_gpuobj *ramht = chan->ramht ? 
chan->ramht->gpuobj : NULL; uint32_t co, ho; if (!ramht) { @@ -180,13 +180,13 @@ nouveau_ramht_remove(drm_device_t* dev, nouveau_gpuobj_ref_t *ref) } int -nouveau_gpuobj_new(drm_device_t *dev, int channel, int size, int align, - uint32_t flags, nouveau_gpuobj_t **gpuobj_ret) +nouveau_gpuobj_new(struct drm_device *dev, int channel, int size, int align, + uint32_t flags, struct nouveau_gpuobj **gpuobj_ret) { - drm_nouveau_private_t *dev_priv = dev->dev_private; - nouveau_engine_func_t *engine = &dev_priv->Engine; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_engine_func *engine = &dev_priv->Engine; struct nouveau_fifo *chan = NULL; - nouveau_gpuobj_t *gpuobj; + struct nouveau_gpuobj *gpuobj; struct mem_block *pramin = NULL; int ret; @@ -270,10 +270,10 @@ nouveau_gpuobj_new(drm_device_t *dev, int channel, int size, int align, return 0; } -void nouveau_gpuobj_takedown(drm_device_t *dev) +void nouveau_gpuobj_takedown(struct drm_device *dev) { - drm_nouveau_private_t *dev_priv = dev->dev_private; - nouveau_gpuobj_t *gpuobj = NULL; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *gpuobj = NULL; DRM_DEBUG("\n"); @@ -285,11 +285,11 @@ void nouveau_gpuobj_takedown(drm_device_t *dev) } } -int nouveau_gpuobj_del(drm_device_t *dev, nouveau_gpuobj_t **pgpuobj) +int nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj) { - drm_nouveau_private_t *dev_priv = dev->dev_private; - nouveau_engine_func_t *engine = &dev_priv->Engine; - nouveau_gpuobj_t *gpuobj; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_engine_func *engine = &dev_priv->Engine; + struct nouveau_gpuobj *gpuobj; DRM_DEBUG("gpuobj %p\n", pgpuobj ? *pgpuobj : NULL); @@ -325,11 +325,11 @@ int nouveau_gpuobj_del(drm_device_t *dev, nouveau_gpuobj_t **pgpuobj) } static int -nouveau_gpuobj_instance_get(drm_device_t *dev, int channel, - nouveau_gpuobj_t *gpuobj, uint32_t *inst) +nouveau_gpuobj_instance_get(struct drm_device *dev, int channel, + struct nouveau_gpuobj *gpuobj, uint32_t *inst) { - drm_nouveau_private_t *dev_priv = dev->dev_private; - nouveau_gpuobj_t *cpramin; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *cpramin; /* card_type < NV_50) { @@ -371,12 +371,12 @@ nouveau_gpuobj_instance_get(drm_device_t *dev, int channel, } int -nouveau_gpuobj_ref_add(drm_device_t *dev, int channel, uint32_t handle, - nouveau_gpuobj_t *gpuobj, nouveau_gpuobj_ref_t **ref_ret) +nouveau_gpuobj_ref_add(struct drm_device *dev, int channel, uint32_t handle, + struct nouveau_gpuobj *gpuobj, struct nouveau_gpuobj_ref **ref_ret) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo *chan = NULL; - nouveau_gpuobj_ref_t *ref; + struct nouveau_gpuobj_ref *ref; uint32_t instance; int ret; @@ -424,9 +424,9 @@ nouveau_gpuobj_ref_add(drm_device_t *dev, int channel, uint32_t handle, return 0; } -int nouveau_gpuobj_ref_del(drm_device_t *dev, nouveau_gpuobj_ref_t **pref) +int nouveau_gpuobj_ref_del(struct drm_device *dev, struct nouveau_gpuobj_ref **pref) { - nouveau_gpuobj_ref_t *ref; + struct nouveau_gpuobj_ref *ref; DRM_DEBUG("ref %p\n", pref ? 
*pref : NULL); @@ -452,11 +452,11 @@ int nouveau_gpuobj_ref_del(drm_device_t *dev, nouveau_gpuobj_ref_t **pref) } int -nouveau_gpuobj_new_ref(drm_device_t *dev, int oc, int rc, uint32_t handle, +nouveau_gpuobj_new_ref(struct drm_device *dev, int oc, int rc, uint32_t handle, int size, int align, uint32_t flags, - nouveau_gpuobj_ref_t **ref) + struct nouveau_gpuobj_ref **ref) { - nouveau_gpuobj_t *gpuobj = NULL; + struct nouveau_gpuobj *gpuobj = NULL; int ret; if ((ret = nouveau_gpuobj_new(dev, oc, size, align, flags, &gpuobj))) @@ -471,12 +471,12 @@ nouveau_gpuobj_new_ref(drm_device_t *dev, int oc, int rc, uint32_t handle, } static int -nouveau_gpuobj_ref_find(drm_device_t *dev, int channel, uint32_t handle, - nouveau_gpuobj_ref_t **ref_ret) +nouveau_gpuobj_ref_find(struct drm_device *dev, int channel, uint32_t handle, + struct nouveau_gpuobj_ref **ref_ret) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo *chan = dev_priv->fifos[channel]; - nouveau_gpuobj_ref_t *ref = chan->ramht_refs; + struct nouveau_gpuobj_ref *ref = chan->ramht_refs; while (ref) { if (ref->handle == handle) { @@ -491,12 +491,12 @@ nouveau_gpuobj_ref_find(drm_device_t *dev, int channel, uint32_t handle, } int -nouveau_gpuobj_new_fake(drm_device_t *dev, uint32_t offset, uint32_t size, - uint32_t flags, nouveau_gpuobj_t **pgpuobj, - nouveau_gpuobj_ref_t **pref) +nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t offset, uint32_t size, + uint32_t flags, struct nouveau_gpuobj **pgpuobj, + struct nouveau_gpuobj_ref **pref) { - drm_nouveau_private_t *dev_priv = dev->dev_private; - nouveau_gpuobj_t *gpuobj = NULL; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *gpuobj = NULL; int i; DRM_DEBUG("offset=0x%08x size=0x%08x flags=0x%08x\n", @@ -537,9 +537,9 @@ nouveau_gpuobj_new_fake(drm_device_t *dev, uint32_t offset, uint32_t size, static int -nouveau_gpuobj_class_instmem_size(drm_device_t *dev, int class) +nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; /*XXX: dodgy hack for now */ if (dev_priv->card_type >= NV_50) @@ -577,11 +577,11 @@ nouveau_gpuobj_class_instmem_size(drm_device_t *dev, int class) to it that can be used to set up context objects. */ int -nouveau_gpuobj_dma_new(drm_device_t *dev, int channel, int class, +nouveau_gpuobj_dma_new(struct drm_device *dev, int channel, int class, uint64_t offset, uint64_t size, int access, int target, - nouveau_gpuobj_t **gpuobj) + struct nouveau_gpuobj **gpuobj) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; int ret; uint32_t is_scatter_gather = 0; @@ -755,10 +755,10 @@ nouveau_gpuobj_dma_new(drm_device_t *dev, int channel, int class, set to 0? 
*/ int -nouveau_gpuobj_gr_new(drm_device_t *dev, int channel, int class, - nouveau_gpuobj_t **gpuobj) +nouveau_gpuobj_gr_new(struct drm_device *dev, int channel, int class, + struct nouveau_gpuobj **gpuobj) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; int ret; DRM_DEBUG("ch%d class=0x%04x\n", channel, class); @@ -804,11 +804,11 @@ nouveau_gpuobj_gr_new(drm_device_t *dev, int channel, int class, } static int -nouveau_gpuobj_channel_init_pramin(drm_device_t *dev, int channel) +nouveau_gpuobj_channel_init_pramin(struct drm_device *dev, int channel) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo *chan = dev_priv->fifos[channel]; - nouveau_gpuobj_t *pramin = NULL; + struct nouveau_gpuobj *pramin = NULL; int size, base, ret; DRM_DEBUG("ch%d\n", channel); @@ -854,12 +854,12 @@ nouveau_gpuobj_channel_init_pramin(drm_device_t *dev, int channel) } int -nouveau_gpuobj_channel_init(drm_device_t *dev, int channel, +nouveau_gpuobj_channel_init(struct drm_device *dev, int channel, uint32_t vram_h, uint32_t tt_h) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo *chan = dev_priv->fifos[channel]; - nouveau_gpuobj_t *vram = NULL, *tt = NULL; + struct nouveau_gpuobj *vram = NULL, *tt = NULL; int ret; DRM_DEBUG("ch%d vram=0x%08x tt=0x%08x\n", channel, vram_h, tt_h); @@ -940,11 +940,11 @@ nouveau_gpuobj_channel_init(drm_device_t *dev, int channel, } void -nouveau_gpuobj_channel_takedown(drm_device_t *dev, int channel) +nouveau_gpuobj_channel_takedown(struct drm_device *dev, int channel) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo *chan = dev_priv->fifos[channel]; - nouveau_gpuobj_ref_t *ref; + struct nouveau_gpuobj_ref *ref; DRM_DEBUG("ch%d\n", channel); @@ -964,12 +964,13 @@ nouveau_gpuobj_channel_takedown(drm_device_t *dev, int channel) int nouveau_ioctl_grobj_alloc(DRM_IOCTL_ARGS) { DRM_DEVICE; - drm_nouveau_grobj_alloc_t init; - nouveau_gpuobj_t *gr = NULL; + struct drm_nouveau_grobj_alloc init; + struct nouveau_gpuobj *gr = NULL; int ret; - DRM_COPY_FROM_USER_IOCTL(init, (drm_nouveau_grobj_alloc_t __user *) - data, sizeof(init)); + DRM_COPY_FROM_USER_IOCTL(init, + (struct drm_nouveau_grobj_alloc __user*)data, + sizeof(init)); if (!nouveau_fifo_owner(dev, filp, init.channel)) { DRM_ERROR("pid %d doesn't own channel %d\n", diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c index fe3db168..69e9c221 100644 --- a/shared-core/nouveau_state.c +++ b/shared-core/nouveau_state.c @@ -28,9 +28,9 @@ #include "nouveau_drv.h" #include "nouveau_drm.h" -static int nouveau_init_card_mappings(drm_device_t *dev) +static int nouveau_init_card_mappings(struct drm_device *dev) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; int ret; /* resource 0 is mmio regs */ @@ -86,11 +86,11 @@ static int nouveau_init_card_mappings(drm_device_t *dev) return 0; } -static int nouveau_stub_init(drm_device_t *dev) { return 0; } -static void nouveau_stub_takedown(drm_device_t *dev) {} -static int nouveau_init_engine_ptrs(drm_device_t *dev) +static int nouveau_stub_init(struct drm_device *dev) { return 0; } +static void nouveau_stub_takedown(struct drm_device *dev) {} +static int nouveau_init_engine_ptrs(struct drm_device *dev) { -
drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_engine_func *engine = &dev_priv->Engine; switch (dev_priv->chipset & 0xf0) { @@ -259,9 +259,9 @@ static int nouveau_init_engine_ptrs(drm_device_t *dev) return 0; } -static int nouveau_card_init(drm_device_t *dev) +static int nouveau_card_init(struct drm_device *dev) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_engine_func *engine; int ret; @@ -321,10 +321,10 @@ static int nouveau_card_init(drm_device_t *dev) return 0; } -static void nouveau_card_takedown(drm_device_t *dev) +static void nouveau_card_takedown(struct drm_device *dev) { - drm_nouveau_private_t *dev_priv = dev->dev_private; - nouveau_engine_func_t *engine = &dev_priv->Engine; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_engine_func *engine = &dev_priv->Engine; if (dev_priv->init_state != NOUVEAU_CARD_INIT_DOWN) { engine->fifo.takedown(dev); @@ -341,9 +341,9 @@ static void nouveau_card_takedown(drm_device_t *dev) } /* here a client dies, release the stuff that was allocated for its filp */ -void nouveau_preclose(drm_device_t * dev, DRMFILE filp) +void nouveau_preclose(struct drm_device *dev, DRMFILE filp) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; nouveau_fifo_cleanup(dev, filp); nouveau_mem_release(filp,dev_priv->fb_heap); @@ -367,7 +367,7 @@ int nouveau_firstopen(struct drm_device *dev) int nouveau_load(struct drm_device *dev, unsigned long flags) { - drm_nouveau_private_t *dev_priv; + struct drm_nouveau_private *dev_priv; if (flags==NV_UNKNOWN) return DRM_ERR(EINVAL); @@ -395,7 +395,7 @@ int nouveau_load(struct drm_device *dev, unsigned long flags) void nouveau_lastclose(struct drm_device *dev) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; nouveau_card_takedown(dev); @@ -416,11 +416,12 @@ int nouveau_unload(struct drm_device *dev) int nouveau_ioctl_getparam(DRM_IOCTL_ARGS) { DRM_DEVICE; - drm_nouveau_private_t *dev_priv = dev->dev_private; - drm_nouveau_getparam_t getparam; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct drm_nouveau_getparam getparam; - DRM_COPY_FROM_USER_IOCTL(getparam, (drm_nouveau_getparam_t __user *)data, - sizeof(getparam)); + DRM_COPY_FROM_USER_IOCTL(getparam, + (struct drm_nouveau_getparam __user *)data, + sizeof(getparam)); switch (getparam.param) { case NOUVEAU_GETPARAM_PCI_VENDOR: @@ -463,19 +464,20 @@ int nouveau_ioctl_getparam(DRM_IOCTL_ARGS) return DRM_ERR(EINVAL); } - DRM_COPY_TO_USER_IOCTL((drm_nouveau_getparam_t __user *)data, getparam, - sizeof(getparam)); + DRM_COPY_TO_USER_IOCTL((struct drm_nouveau_getparam __user *)data, + getparam, sizeof(getparam)); return 0; } int nouveau_ioctl_setparam(DRM_IOCTL_ARGS) { DRM_DEVICE; - drm_nouveau_private_t *dev_priv = dev->dev_private; - drm_nouveau_setparam_t setparam; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct drm_nouveau_setparam setparam; - DRM_COPY_FROM_USER_IOCTL(setparam, (drm_nouveau_setparam_t __user *)data, - sizeof(setparam)); + DRM_COPY_FROM_USER_IOCTL(setparam, + (struct drm_nouveau_setparam __user *)data, + sizeof(setparam)); switch (setparam.param) { case NOUVEAU_SETPARAM_CMDBUF_LOCATION: @@ -506,7 +508,7 @@ int nouveau_ioctl_setparam(DRM_IOCTL_ARGS) /* waits for idle */ void nouveau_wait_for_idle(struct drm_device 
*dev) { - drm_nouveau_private_t *dev_priv=dev->dev_private; + struct drm_nouveau_private *dev_priv=dev->dev_private; switch(dev_priv->card_type) { case NV_03: diff --git a/shared-core/nv04_fb.c b/shared-core/nv04_fb.c index 06b1c994..534fb50b 100644 --- a/shared-core/nv04_fb.c +++ b/shared-core/nv04_fb.c @@ -4,9 +4,9 @@ #include "nouveau_drm.h" int -nv04_fb_init(drm_device_t *dev) +nv04_fb_init(struct drm_device *dev) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; /* This is what the DDX did for NV_ARCH_04, but a mmio-trace shows * nvidia reading PFB_CFG_0, then writing back its original value. @@ -18,7 +18,7 @@ nv04_fb_init(drm_device_t *dev) } void -nv04_fb_takedown(drm_device_t *dev) +nv04_fb_takedown(struct drm_device *dev) { } diff --git a/shared-core/nv04_fifo.c b/shared-core/nv04_fifo.c index e2e934d7..564efd0b 100644 --- a/shared-core/nv04_fifo.c +++ b/shared-core/nv04_fifo.c @@ -36,9 +36,9 @@ #define NV04_RAMFC__SIZE 32 int -nv04_fifo_create_context(drm_device_t *dev, int channel) +nv04_fifo_create_context(struct drm_device *dev, int channel) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo *chan = dev_priv->fifos[channel]; int ret; @@ -67,9 +67,9 @@ nv04_fifo_create_context(drm_device_t *dev, int channel) } void -nv04_fifo_destroy_context(drm_device_t *dev, int channel) +nv04_fifo_destroy_context(struct drm_device *dev, int channel) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo *chan = dev_priv->fifos[channel]; NV_WRITE(NV04_PFIFO_MODE, NV_READ(NV04_PFIFO_MODE)&~(1<dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo *chan = dev_priv->fifos[channel]; uint32_t tmp; @@ -107,9 +107,9 @@ nv04_fifo_load_context(drm_device_t *dev, int channel) } int -nv04_fifo_save_context(drm_device_t *dev, int channel) +nv04_fifo_save_context(struct drm_device *dev, int channel) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo *chan = dev_priv->fifos[channel]; uint32_t tmp; diff --git a/shared-core/nv04_graph.c b/shared-core/nv04_graph.c index df23d279..e35e3071 100644 --- a/shared-core/nv04_graph.c +++ b/shared-core/nv04_graph.c @@ -287,9 +287,9 @@ struct reg_interval }; -void nouveau_nv04_context_switch(drm_device_t *dev) +void nouveau_nv04_context_switch(struct drm_device *dev) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; int channel, channel_old, i, j, index; channel=NV_READ(NV03_PFIFO_CACHE1_PUSH1)&(nouveau_fifo_number(dev)-1); @@ -336,8 +336,8 @@ void nouveau_nv04_context_switch(drm_device_t *dev) NV_WRITE(NV04_PGRAPH_FIFO,0x1); } -int nv04_graph_create_context(drm_device_t *dev, int channel) { - drm_nouveau_private_t *dev_priv = dev->dev_private; +int nv04_graph_create_context(struct drm_device *dev, int channel) { + struct drm_nouveau_private *dev_priv = dev->dev_private; DRM_DEBUG("nv04_graph_context_create %d\n", channel); memset(dev_priv->fifos[channel]->pgraph_ctx, 0, sizeof(dev_priv->fifos[channel]->pgraph_ctx)); @@ -351,24 +351,24 @@ int nv04_graph_create_context(drm_device_t *dev, int channel) { return 0; } -void nv04_graph_destroy_context(drm_device_t *dev, int channel) +void nv04_graph_destroy_context(struct drm_device *dev, int channel) { } -int 
nv04_graph_load_context(drm_device_t *dev, int channel) +int nv04_graph_load_context(struct drm_device *dev, int channel) { DRM_ERROR("stub!\n"); return 0; } -int nv04_graph_save_context(drm_device_t *dev, int channel) +int nv04_graph_save_context(struct drm_device *dev, int channel) { DRM_ERROR("stub!\n"); return 0; } -int nv04_graph_init(drm_device_t *dev) { - drm_nouveau_private_t *dev_priv = dev->dev_private; +int nv04_graph_init(struct drm_device *dev) { + struct drm_nouveau_private *dev_priv = dev->dev_private; int i,sum=0; NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) & @@ -406,7 +406,7 @@ int nv04_graph_init(drm_device_t *dev) { return 0; } -void nv04_graph_takedown(drm_device_t *dev) +void nv04_graph_takedown(struct drm_device *dev) { } diff --git a/shared-core/nv04_instmem.c b/shared-core/nv04_instmem.c index ac7d4347..fc3b116d 100644 --- a/shared-core/nv04_instmem.c +++ b/shared-core/nv04_instmem.c @@ -5,7 +5,7 @@ static void nv04_instmem_determine_amount(struct drm_device *dev) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; int i; /* Figure out how much instance memory we need */ @@ -35,7 +35,7 @@ nv04_instmem_determine_amount(struct drm_device *dev) static void nv04_instmem_configure_fixed_tables(struct drm_device *dev) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; /* FIFO hash table (RAMHT) * use 4k hash table at RAMIN+0x10000 @@ -85,7 +85,7 @@ nv04_instmem_configure_fixed_tables(struct drm_device *dev) int nv04_instmem_init(struct drm_device *dev) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; uint32_t offset; int ret = 0; @@ -114,15 +114,15 @@ int nv04_instmem_init(struct drm_device *dev) } void -nv04_instmem_takedown(drm_device_t *dev) +nv04_instmem_takedown(struct drm_device *dev) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; nouveau_gpuobj_del(dev, &dev_priv->ramht); } int -nv04_instmem_populate(drm_device_t *dev, nouveau_gpuobj_t *gpuobj, uint32_t *sz) +nv04_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, uint32_t *sz) { if (gpuobj->im_backing) return DRM_ERR(EINVAL); @@ -131,9 +131,9 @@ nv04_instmem_populate(drm_device_t *dev, nouveau_gpuobj_t *gpuobj, uint32_t *sz) } void -nv04_instmem_clear(drm_device_t *dev, nouveau_gpuobj_t *gpuobj) +nv04_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; if (gpuobj && gpuobj->im_backing) { if (gpuobj->im_bound) @@ -144,7 +144,7 @@ nv04_instmem_clear(drm_device_t *dev, nouveau_gpuobj_t *gpuobj) } int -nv04_instmem_bind(drm_device_t *dev, nouveau_gpuobj_t *gpuobj) +nv04_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) { if (!gpuobj->im_pramin || gpuobj->im_bound) return DRM_ERR(EINVAL); @@ -154,7 +154,7 @@ nv04_instmem_bind(drm_device_t *dev, nouveau_gpuobj_t *gpuobj) } int -nv04_instmem_unbind(drm_device_t *dev, nouveau_gpuobj_t *gpuobj) +nv04_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) { if (gpuobj->im_bound == 0) return DRM_ERR(EINVAL); diff --git a/shared-core/nv04_mc.c b/shared-core/nv04_mc.c index 0e23efb2..1d998851 100644 --- a/shared-core/nv04_mc.c +++ b/shared-core/nv04_mc.c @@ -4,9 +4,9 @@ #include "nouveau_drm.h" int -nv04_mc_init(drm_device_t *dev) 
+nv04_mc_init(struct drm_device *dev) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; /* Power up everything, resetting each individual unit will * be done later if needed. @@ -19,7 +19,7 @@ nv04_mc_init(drm_device_t *dev) } void -nv04_mc_takedown(drm_device_t *dev) +nv04_mc_takedown(struct drm_device *dev) { } diff --git a/shared-core/nv04_timer.c b/shared-core/nv04_timer.c index a4b4e826..efe78da7 100644 --- a/shared-core/nv04_timer.c +++ b/shared-core/nv04_timer.c @@ -4,9 +4,9 @@ #include "nouveau_drm.h" int -nv04_timer_init(drm_device_t *dev) +nv04_timer_init(struct drm_device *dev) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; NV_WRITE(NV04_PTIMER_INTR_EN_0, 0x00000000); NV_WRITE(NV04_PTIMER_INTR_0, 0xFFFFFFFF); @@ -18,7 +18,7 @@ nv04_timer_init(drm_device_t *dev) } void -nv04_timer_takedown(drm_device_t *dev) +nv04_timer_takedown(struct drm_device *dev) { } diff --git a/shared-core/nv10_fb.c b/shared-core/nv10_fb.c index e8336a2d..7fff5b3f 100644 --- a/shared-core/nv10_fb.c +++ b/shared-core/nv10_fb.c @@ -4,9 +4,9 @@ #include "nouveau_drm.h" int -nv10_fb_init(drm_device_t *dev) +nv10_fb_init(struct drm_device *dev) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; uint32_t fb_bar_size; int i; @@ -20,7 +20,7 @@ nv10_fb_init(drm_device_t *dev) } void -nv10_fb_takedown(drm_device_t *dev) +nv10_fb_takedown(struct drm_device *dev) { } diff --git a/shared-core/nv10_fifo.c b/shared-core/nv10_fifo.c index 2d8d5a0d..7b9c665b 100644 --- a/shared-core/nv10_fifo.c +++ b/shared-core/nv10_fifo.c @@ -37,9 +37,9 @@ #define NV10_RAMFC__SIZE ((dev_priv->chipset) >= 0x17 ? 
64 : 32) int -nv10_fifo_create_context(drm_device_t *dev, int channel) +nv10_fifo_create_context(struct drm_device *dev, int channel) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo *chan = dev_priv->fifos[channel]; int ret; @@ -70,9 +70,9 @@ nv10_fifo_create_context(drm_device_t *dev, int channel) } void -nv10_fifo_destroy_context(drm_device_t *dev, int channel) +nv10_fifo_destroy_context(struct drm_device *dev, int channel) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo *chan = dev_priv->fifos[channel]; NV_WRITE(NV04_PFIFO_MODE, NV_READ(NV04_PFIFO_MODE)&~(1<dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo *chan = dev_priv->fifos[channel]; uint32_t tmp; @@ -124,9 +124,9 @@ nv10_fifo_load_context(drm_device_t *dev, int channel) } int -nv10_fifo_save_context(drm_device_t *dev, int channel) +nv10_fifo_save_context(struct drm_device *dev, int channel) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo *chan = dev_priv->fifos[channel]; uint32_t tmp; diff --git a/shared-core/nv10_graph.c b/shared-core/nv10_graph.c index c544afac..930fcbdf 100644 --- a/shared-core/nv10_graph.c +++ b/shared-core/nv10_graph.c @@ -28,8 +28,8 @@ #include "nouveau_drv.h" -static void nv10_praph_pipe(drm_device_t *dev) { - drm_nouveau_private_t *dev_priv = dev->dev_private; +static void nv10_praph_pipe(struct drm_device *dev) { + struct drm_nouveau_private *dev_priv = dev->dev_private; int i; nouveau_wait_for_idle(dev); @@ -527,9 +527,9 @@ NV10_PGRAPH_DEBUG_4, 0x00400a04, }; -static int nv10_graph_ctx_regs_find_offset(drm_device_t *dev, int reg) +static int nv10_graph_ctx_regs_find_offset(struct drm_device *dev, int reg) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; int i, j; for (i = 0; i < sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0]); i++) { if (nv10_graph_ctx_regs[i] == reg) @@ -544,9 +544,9 @@ static int nv10_graph_ctx_regs_find_offset(drm_device_t *dev, int reg) return -1; } -static void restore_ctx_regs(drm_device_t *dev, int channel) +static void restore_ctx_regs(struct drm_device *dev, int channel) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo *fifo = dev_priv->fifos[channel]; int i, j; for (i = 0; i < sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0]); i++) @@ -558,9 +558,9 @@ static void restore_ctx_regs(drm_device_t *dev, int channel) nouveau_wait_for_idle(dev); } -void nouveau_nv10_context_switch(drm_device_t *dev) +void nouveau_nv10_context_switch(struct drm_device *dev) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; int channel, channel_old, i, j; channel=NV_READ(NV03_PFIFO_CACHE1_PUSH1)&(nouveau_fifo_number(dev)-1); @@ -611,8 +611,8 @@ void nouveau_nv10_context_switch(drm_device_t *dev) if (offset > 0) \ fifo->pgraph_ctx[offset] = val; \ } while (0) -int nv10_graph_create_context(drm_device_t *dev, int channel) { - drm_nouveau_private_t *dev_priv = dev->dev_private; +int nv10_graph_create_context(struct drm_device *dev, int channel) { + struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo *fifo = dev_priv->fifos[channel]; uint32_t tmp, 
vramsz; @@ -663,24 +663,24 @@ int nv10_graph_create_context(drm_device_t *dev, int channel) { return 0; } -void nv10_graph_destroy_context(drm_device_t *dev, int channel) +void nv10_graph_destroy_context(struct drm_device *dev, int channel) { } -int nv10_graph_load_context(drm_device_t *dev, int channel) +int nv10_graph_load_context(struct drm_device *dev, int channel) { DRM_ERROR("stub!\n"); return 0; } -int nv10_graph_save_context(drm_device_t *dev, int channel) +int nv10_graph_save_context(struct drm_device *dev, int channel) { DRM_ERROR("stub!\n"); return 0; } -int nv10_graph_init(drm_device_t *dev) { - drm_nouveau_private_t *dev_priv = dev->dev_private; +int nv10_graph_init(struct drm_device *dev) { + struct drm_nouveau_private *dev_priv = dev->dev_private; int i; NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) & @@ -714,7 +714,7 @@ int nv10_graph_init(drm_device_t *dev) { return 0; } -void nv10_graph_takedown(drm_device_t *dev) +void nv10_graph_takedown(struct drm_device *dev) { } diff --git a/shared-core/nv20_graph.c b/shared-core/nv20_graph.c index 06d7e440..e6aa1e2a 100644 --- a/shared-core/nv20_graph.c +++ b/shared-core/nv20_graph.c @@ -29,9 +29,9 @@ #define NV20_GRCTX_SIZE (3529*4) -int nv20_graph_create_context(drm_device_t *dev, int channel) { - drm_nouveau_private_t *dev_priv = - (drm_nouveau_private_t *)dev->dev_private; +int nv20_graph_create_context(struct drm_device *dev, int channel) { + struct drm_nouveau_private *dev_priv = + (struct drm_nouveau_private *)dev->dev_private; struct nouveau_fifo *chan = dev_priv->fifos[channel]; unsigned int ctx_size = NV20_GRCTX_SIZE; int ret; @@ -49,8 +49,8 @@ int nv20_graph_create_context(drm_device_t *dev, int channel) { return 0; } -void nv20_graph_destroy_context(drm_device_t *dev, int channel) { - drm_nouveau_private_t *dev_priv = dev->dev_private; +void nv20_graph_destroy_context(struct drm_device *dev, int channel) { + struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo *chan = dev_priv->fifos[channel]; if (chan->ramin_grctx) @@ -59,9 +59,9 @@ void nv20_graph_destroy_context(drm_device_t *dev, int channel) { INSTANCE_WR(dev_priv->ctx_table->gpuobj, channel, 0); } -static void nv20_graph_rdi(drm_device_t *dev) { - drm_nouveau_private_t *dev_priv = - (drm_nouveau_private_t *)dev->dev_private; +static void nv20_graph_rdi(struct drm_device *dev) { + struct drm_nouveau_private *dev_priv = + (struct drm_nouveau_private *)dev->dev_private; int i; NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x2c80000); @@ -73,9 +73,9 @@ static void nv20_graph_rdi(drm_device_t *dev) { /* Save current context (from PGRAPH) into the channel's context */ -int nv20_graph_save_context(drm_device_t *dev, int channel) { - drm_nouveau_private_t *dev_priv = - (drm_nouveau_private_t *)dev->dev_private; +int nv20_graph_save_context(struct drm_device *dev, int channel) { + struct drm_nouveau_private *dev_priv = + (struct drm_nouveau_private *)dev->dev_private; struct nouveau_fifo *chan = dev_priv->fifos[channel]; uint32_t instance; @@ -94,9 +94,9 @@ int nv20_graph_save_context(drm_device_t *dev, int channel) { /* Restore the context for a specific channel into PGRAPH */ -int nv20_graph_load_context(drm_device_t *dev, int channel) { - drm_nouveau_private_t *dev_priv = - (drm_nouveau_private_t *)dev->dev_private; +int nv20_graph_load_context(struct drm_device *dev, int channel) { + struct drm_nouveau_private *dev_priv = + (struct drm_nouveau_private *)dev->dev_private; struct nouveau_fifo *chan = dev_priv->fifos[channel]; uint32_t instance; @@ -113,9 
+113,9 @@ int nv20_graph_load_context(drm_device_t *dev, int channel) { return 0; } -void nouveau_nv20_context_switch(drm_device_t *dev) +void nouveau_nv20_context_switch(struct drm_device *dev) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; int channel, channel_old; channel=NV_READ(NV03_PFIFO_CACHE1_PUSH1)&(nouveau_fifo_number(dev)-1); @@ -144,9 +144,9 @@ void nouveau_nv20_context_switch(drm_device_t *dev) NV_WRITE(NV04_PGRAPH_FIFO,0x1); } -int nv20_graph_init(drm_device_t *dev) { - drm_nouveau_private_t *dev_priv = - (drm_nouveau_private_t *)dev->dev_private; +int nv20_graph_init(struct drm_device *dev) { + struct drm_nouveau_private *dev_priv = + (struct drm_nouveau_private *)dev->dev_private; uint32_t tmp, vramsz; int ret, i; @@ -240,7 +240,7 @@ int nv20_graph_init(drm_device_t *dev) { return 0; } -void nv20_graph_takedown(drm_device_t *dev) +void nv20_graph_takedown(struct drm_device *dev) { } diff --git a/shared-core/nv30_graph.c b/shared-core/nv30_graph.c index a83ad714..23e0f7f0 100644 --- a/shared-core/nv30_graph.c +++ b/shared-core/nv30_graph.c @@ -16,9 +16,9 @@ * contexts are taken from dumps just after the 3D object is * created. */ -static void nv30_graph_context_init(drm_device_t *dev, nouveau_gpuobj_t *ctx) +static void nv30_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; int i; INSTANCE_WR(ctx, 0x28/4, 0x10000000); @@ -100,12 +100,12 @@ static void nv30_graph_context_init(drm_device_t *dev, nouveau_gpuobj_t *ctx) } -int nv30_graph_create_context(drm_device_t *dev, int channel) +int nv30_graph_create_context(struct drm_device *dev, int channel) { - drm_nouveau_private_t *dev_priv = - (drm_nouveau_private_t *)dev->dev_private; + struct drm_nouveau_private *dev_priv = + (struct drm_nouveau_private *)dev->dev_private; struct nouveau_fifo *chan = dev_priv->fifos[channel]; - void (*ctx_init)(drm_device_t *, nouveau_gpuobj_t *); + void (*ctx_init)(struct drm_device *, struct nouveau_gpuobj *); unsigned int ctx_size; int ret; @@ -131,10 +131,10 @@ int nv30_graph_create_context(drm_device_t *dev, int channel) return 0; } -void nv30_graph_destroy_context(drm_device_t *dev, int channel) +void nv30_graph_destroy_context(struct drm_device *dev, int channel) { - drm_nouveau_private_t *dev_priv = - (drm_nouveau_private_t *)dev->dev_private; + struct drm_nouveau_private *dev_priv = + (struct drm_nouveau_private *)dev->dev_private; struct nouveau_fifo *chan = dev_priv->fifos[channel]; if (chan->ramin_grctx) @@ -144,9 +144,9 @@ void nv30_graph_destroy_context(drm_device_t *dev, int channel) } static int -nouveau_graph_wait_idle(drm_device_t *dev) +nouveau_graph_wait_idle(struct drm_device *dev) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; int tv = 1000; while (tv--) { @@ -161,9 +161,9 @@ nouveau_graph_wait_idle(drm_device_t *dev) return 0; } -int nv30_graph_load_context(drm_device_t *dev, int channel) +int nv30_graph_load_context(struct drm_device *dev, int channel) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo *chan = dev_priv->fifos[channel]; uint32_t inst; @@ -178,9 +178,9 @@ int nv30_graph_load_context(drm_device_t *dev, int channel) return nouveau_graph_wait_idle(dev); } -int nv30_graph_save_context(drm_device_t *dev, int 
channel) +int nv30_graph_save_context(struct drm_device *dev, int channel) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo *chan = dev_priv->fifos[channel]; uint32_t inst; @@ -195,10 +195,10 @@ int nv30_graph_save_context(drm_device_t *dev, int channel) return nouveau_graph_wait_idle(dev); } -int nv30_graph_init(drm_device_t *dev) +int nv30_graph_init(struct drm_device *dev) { - drm_nouveau_private_t *dev_priv = - (drm_nouveau_private_t *)dev->dev_private; + struct drm_nouveau_private *dev_priv = + (struct drm_nouveau_private *)dev->dev_private; uint32_t vramsz, tmp; int ret, i; @@ -279,7 +279,7 @@ int nv30_graph_init(drm_device_t *dev) return 0; } -void nv30_graph_takedown(drm_device_t *dev) +void nv30_graph_takedown(struct drm_device *dev) { } diff --git a/shared-core/nv40_fb.c b/shared-core/nv40_fb.c index 83a7580e..2cbb40e4 100644 --- a/shared-core/nv40_fb.c +++ b/shared-core/nv40_fb.c @@ -4,9 +4,9 @@ #include "nouveau_drm.h" int -nv40_fb_init(drm_device_t *dev) +nv40_fb_init(struct drm_device *dev) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; uint32_t fb_bar_size, tmp; int num_tiles; int i; @@ -50,7 +50,7 @@ nv40_fb_init(drm_device_t *dev) } void -nv40_fb_takedown(drm_device_t *dev) +nv40_fb_takedown(struct drm_device *dev) { } diff --git a/shared-core/nv40_fifo.c b/shared-core/nv40_fifo.c index 818a9024..ecb1d21e 100644 --- a/shared-core/nv40_fifo.c +++ b/shared-core/nv40_fifo.c @@ -37,9 +37,9 @@ #define NV40_RAMFC__SIZE 128 int -nv40_fifo_create_context(drm_device_t *dev, int channel) +nv40_fifo_create_context(struct drm_device *dev, int channel) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo *chan = dev_priv->fifos[channel]; int ret; @@ -73,9 +73,9 @@ nv40_fifo_create_context(drm_device_t *dev, int channel) } void -nv40_fifo_destroy_context(drm_device_t *dev, int channel) +nv40_fifo_destroy_context(struct drm_device *dev, int channel) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo *chan = dev_priv->fifos[channel]; NV_WRITE(NV04_PFIFO_MODE, NV_READ(NV04_PFIFO_MODE)&~(1<dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo *chan = dev_priv->fifos[channel]; uint32_t tmp, tmp2; @@ -144,9 +144,9 @@ nv40_fifo_load_context(drm_device_t *dev, int channel) } int -nv40_fifo_save_context(drm_device_t *dev, int channel) +nv40_fifo_save_context(struct drm_device *dev, int channel) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo *chan = dev_priv->fifos[channel]; uint32_t tmp; diff --git a/shared-core/nv40_graph.c b/shared-core/nv40_graph.c index 94d76505..d8fccb7e 100644 --- a/shared-core/nv40_graph.c +++ b/shared-core/nv40_graph.c @@ -47,9 +47,9 @@ * created. 
*/ static void -nv40_graph_context_init(drm_device_t *dev, nouveau_gpuobj_t *ctx) +nv40_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; int i; /* Always has the "instance address" of itself at offset 0 */ @@ -188,9 +188,9 @@ nv40_graph_context_init(drm_device_t *dev, nouveau_gpuobj_t *ctx) } static void -nv43_graph_context_init(drm_device_t *dev, nouveau_gpuobj_t *ctx) +nv43_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; int i; INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start); @@ -304,9 +304,9 @@ nv43_graph_context_init(drm_device_t *dev, nouveau_gpuobj_t *ctx) }; static void -nv46_graph_context_init(drm_device_t *dev, nouveau_gpuobj_t *ctx) +nv46_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; int i; INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start); @@ -455,9 +455,9 @@ nv46_graph_context_init(drm_device_t *dev, nouveau_gpuobj_t *ctx) } static void -nv49_graph_context_init(drm_device_t *dev, nouveau_gpuobj_t *ctx) +nv49_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; int i; INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start); @@ -678,9 +678,9 @@ nv49_graph_context_init(drm_device_t *dev, nouveau_gpuobj_t *ctx) } static void -nv4a_graph_context_init(drm_device_t *dev, nouveau_gpuobj_t *ctx) +nv4a_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; int i; INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start); @@ -795,9 +795,9 @@ nv4a_graph_context_init(drm_device_t *dev, nouveau_gpuobj_t *ctx) } static void -nv4b_graph_context_init(drm_device_t *dev, nouveau_gpuobj_t *ctx) +nv4b_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; int i; INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start); @@ -1010,9 +1010,9 @@ nv4b_graph_context_init(drm_device_t *dev, nouveau_gpuobj_t *ctx) } static void -nv4c_graph_context_init(drm_device_t *dev, nouveau_gpuobj_t *ctx) +nv4c_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; int i; INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start); @@ -1117,9 +1117,9 @@ nv4c_graph_context_init(drm_device_t *dev, nouveau_gpuobj_t *ctx) } static void -nv4e_graph_context_init(drm_device_t *dev, nouveau_gpuobj_t *ctx) +nv4e_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; int i; INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start); @@ -1224,12 +1224,12 @@ nv4e_graph_context_init(drm_device_t *dev, nouveau_gpuobj_t *ctx) } int -nv40_graph_create_context(drm_device_t *dev, int channel) +nv40_graph_create_context(struct drm_device *dev, int channel) { - drm_nouveau_private_t *dev_priv = - 
(drm_nouveau_private_t *)dev->dev_private; + struct drm_nouveau_private *dev_priv = + (struct drm_nouveau_private *)dev->dev_private; struct nouveau_fifo *chan = dev_priv->fifos[channel]; - void (*ctx_init)(drm_device_t *, nouveau_gpuobj_t *); + void (*ctx_init)(struct drm_device *, struct nouveau_gpuobj *); unsigned int ctx_size; int ret; @@ -1284,9 +1284,9 @@ nv40_graph_create_context(drm_device_t *dev, int channel) } void -nv40_graph_destroy_context(drm_device_t *dev, int channel) +nv40_graph_destroy_context(struct drm_device *dev, int channel) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo *chan = dev_priv->fifos[channel]; if (chan->ramin_grctx) @@ -1294,9 +1294,9 @@ nv40_graph_destroy_context(drm_device_t *dev, int channel) } static int -nv40_graph_transfer_context(drm_device_t *dev, uint32_t inst, int save) +nv40_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; uint32_t old_cp, tv = 1000; int i; @@ -1327,9 +1327,9 @@ nv40_graph_transfer_context(drm_device_t *dev, uint32_t inst, int save) *XXX: fails sometimes, not sure why.. */ int -nv40_graph_save_context(drm_device_t *dev, int channel) +nv40_graph_save_context(struct drm_device *dev, int channel) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo *chan = dev_priv->fifos[channel]; uint32_t inst; @@ -1344,9 +1344,9 @@ nv40_graph_save_context(drm_device_t *dev, int channel) * XXX: fails sometimes.. not sure why */ int -nv40_graph_load_context(drm_device_t *dev, int channel) +nv40_graph_load_context(struct drm_device *dev, int channel) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo *chan = dev_priv->fifos[channel]; uint32_t inst; int ret; @@ -1599,10 +1599,10 @@ static uint32_t nv4e_ctx_voodoo[] = { * C51 0x4e */ int -nv40_graph_init(drm_device_t *dev) +nv40_graph_init(struct drm_device *dev) { - drm_nouveau_private_t *dev_priv = - (drm_nouveau_private_t *)dev->dev_private; + struct drm_nouveau_private *dev_priv = + (struct drm_nouveau_private *)dev->dev_private; uint32_t *ctx_voodoo; uint32_t vramsz, tmp; int i, j; @@ -1829,7 +1829,7 @@ nv40_graph_init(drm_device_t *dev) return 0; } -void nv40_graph_takedown(drm_device_t *dev) +void nv40_graph_takedown(struct drm_device *dev) { } diff --git a/shared-core/nv40_mc.c b/shared-core/nv40_mc.c index 8dbd96fd..8bb6b083 100644 --- a/shared-core/nv40_mc.c +++ b/shared-core/nv40_mc.c @@ -4,9 +4,9 @@ #include "nouveau_drm.h" int -nv40_mc_init(drm_device_t *dev) +nv40_mc_init(struct drm_device *dev) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; uint32_t tmp; /* Power up everything, resetting each individual unit will @@ -35,7 +35,7 @@ nv40_mc_init(drm_device_t *dev) } void -nv40_mc_takedown(drm_device_t *dev) +nv40_mc_takedown(struct drm_device *dev) { } diff --git a/shared-core/nv50_fifo.c b/shared-core/nv50_fifo.c index ee1fb887..feab24c4 100644 --- a/shared-core/nv50_fifo.c +++ b/shared-core/nv50_fifo.c @@ -29,18 +29,18 @@ #include "nouveau_drv.h" typedef struct { - nouveau_gpuobj_ref_t *thingo; - nouveau_gpuobj_ref_t *dummyctx; + struct nouveau_gpuobj_ref *thingo; + struct nouveau_gpuobj_ref *dummyctx; } nv50_fifo_priv; #define IS_G80 
((dev_priv->chipset & 0xf0) == 0x50) static void -nv50_fifo_init_thingo(drm_device_t *dev) +nv50_fifo_init_thingo(struct drm_device *dev) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; nv50_fifo_priv *priv = dev_priv->Engine.fifo.priv; - nouveau_gpuobj_ref_t *thingo = priv->thingo; + struct nouveau_gpuobj_ref *thingo = priv->thingo; int i, fi=2; DRM_DEBUG("\n"); @@ -60,9 +60,9 @@ nv50_fifo_init_thingo(drm_device_t *dev) } static int -nv50_fifo_channel_enable(drm_device_t *dev, int channel) +nv50_fifo_channel_enable(struct drm_device *dev, int channel) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo *chan = dev_priv->fifos[channel]; DRM_DEBUG("ch%d\n", channel); @@ -88,9 +88,9 @@ nv50_fifo_channel_enable(drm_device_t *dev, int channel) } static void -nv50_fifo_channel_disable(drm_device_t *dev, int channel, int nt) +nv50_fifo_channel_disable(struct drm_device *dev, int channel, int nt) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; DRM_DEBUG("ch%d, nt=%d\n", channel, nt); @@ -106,9 +106,9 @@ nv50_fifo_channel_disable(drm_device_t *dev, int channel, int nt) } static void -nv50_fifo_init_reset(drm_device_t *dev) +nv50_fifo_init_reset(struct drm_device *dev) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; uint32_t pmc_e; DRM_DEBUG("\n"); @@ -120,7 +120,7 @@ nv50_fifo_init_reset(drm_device_t *dev) } static void -nv50_fifo_init_context_table(drm_device_t *dev) +nv50_fifo_init_context_table(struct drm_device *dev) { int i; @@ -132,9 +132,9 @@ nv50_fifo_init_context_table(drm_device_t *dev) } static void -nv50_fifo_init_regs__nv(drm_device_t *dev) +nv50_fifo_init_regs__nv(struct drm_device *dev) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; DRM_DEBUG("\n"); @@ -142,9 +142,9 @@ nv50_fifo_init_regs__nv(drm_device_t *dev) } static int -nv50_fifo_init_regs(drm_device_t *dev) +nv50_fifo_init_regs(struct drm_device *dev) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; nv50_fifo_priv *priv = dev_priv->Engine.fifo.priv; int ret; @@ -176,9 +176,9 @@ nv50_fifo_init_regs(drm_device_t *dev) } int -nv50_fifo_init(drm_device_t *dev) +nv50_fifo_init(struct drm_device *dev) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; nv50_fifo_priv *priv; int ret; @@ -207,9 +207,9 @@ nv50_fifo_init(drm_device_t *dev) } void -nv50_fifo_takedown(drm_device_t *dev) +nv50_fifo_takedown(struct drm_device *dev) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; nv50_fifo_priv *priv = dev_priv->Engine.fifo.priv; DRM_DEBUG("\n"); @@ -225,11 +225,11 @@ nv50_fifo_takedown(drm_device_t *dev) } int -nv50_fifo_create_context(drm_device_t *dev, int channel) +nv50_fifo_create_context(struct drm_device *dev, int channel) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo *chan = dev_priv->fifos[channel]; - nouveau_gpuobj_t *ramfc = NULL; + struct nouveau_gpuobj *ramfc = NULL; int ret; DRM_DEBUG("ch%d\n", channel); @@ -283,9 +283,9 @@ nv50_fifo_create_context(drm_device_t *dev, int channel) } void 
-nv50_fifo_destroy_context(drm_device_t *dev, int channel) +nv50_fifo_destroy_context(struct drm_device *dev, int channel) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo *chan = dev_priv->fifos[channel]; DRM_DEBUG("ch%d\n", channel); @@ -295,11 +295,11 @@ nv50_fifo_destroy_context(drm_device_t *dev, int channel) } int -nv50_fifo_load_context(drm_device_t *dev, int channel) +nv50_fifo_load_context(struct drm_device *dev, int channel) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo *chan = dev_priv->fifos[channel]; - nouveau_gpuobj_t *ramfc = chan->ramfc->gpuobj; + struct nouveau_gpuobj *ramfc = chan->ramfc->gpuobj; DRM_DEBUG("ch%d\n", channel); @@ -324,7 +324,7 @@ nv50_fifo_load_context(drm_device_t *dev, int channel) } int -nv50_fifo_save_context(drm_device_t *dev, int channel) +nv50_fifo_save_context(struct drm_device *dev, int channel) { DRM_DEBUG("ch%d\n", channel); DRM_ERROR("stub!\n"); diff --git a/shared-core/nv50_graph.c b/shared-core/nv50_graph.c index 271ed733..54fe498b 100644 --- a/shared-core/nv50_graph.c +++ b/shared-core/nv50_graph.c @@ -31,9 +31,9 @@ #define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50) static void -nv50_graph_init_reset(drm_device_t *dev) +nv50_graph_init_reset(struct drm_device *dev) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; uint32_t pmc_e; DRM_DEBUG("\n"); @@ -45,9 +45,9 @@ nv50_graph_init_reset(drm_device_t *dev) } static void -nv50_graph_init_regs__nv(drm_device_t *dev) +nv50_graph_init_regs__nv(struct drm_device *dev) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; DRM_DEBUG("\n"); @@ -66,9 +66,9 @@ nv50_graph_init_regs__nv(drm_device_t *dev) } static void -nv50_graph_init_regs(drm_device_t *dev) +nv50_graph_init_regs(struct drm_device *dev) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; DRM_DEBUG("\n"); @@ -139,9 +139,9 @@ static uint32_t nv84_ctx_voodoo[] = { }; static void -nv50_graph_init_ctxctl(drm_device_t *dev) +nv50_graph_init_ctxctl(struct drm_device *dev) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; uint32_t *voodoo; DRM_DEBUG("\n"); @@ -169,7 +169,7 @@ nv50_graph_init_ctxctl(drm_device_t *dev) } int -nv50_graph_init(drm_device_t *dev) +nv50_graph_init(struct drm_device *dev) { DRM_DEBUG("\n"); @@ -182,17 +182,17 @@ nv50_graph_init(drm_device_t *dev) } void -nv50_graph_takedown(drm_device_t *dev) +nv50_graph_takedown(struct drm_device *dev) { DRM_DEBUG("\n"); } int -nv50_graph_create_context(drm_device_t *dev, int channel) +nv50_graph_create_context(struct drm_device *dev, int channel) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo *chan = dev_priv->fifos[channel]; - nouveau_gpuobj_t *ramin = chan->ramin->gpuobj; + struct nouveau_gpuobj *ramin = chan->ramin->gpuobj; int grctx_size = 0x60000, hdr; int ret; @@ -218,9 +218,9 @@ nv50_graph_create_context(drm_device_t *dev, int channel) } void -nv50_graph_destroy_context(drm_device_t *dev, int channel) +nv50_graph_destroy_context(struct drm_device *dev, int channel) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = 
dev->dev_private; struct nouveau_fifo *chan = dev_priv->fifos[channel]; int i, hdr; @@ -234,9 +234,9 @@ nv50_graph_destroy_context(drm_device_t *dev, int channel) } static int -nv50_graph_transfer_context(drm_device_t *dev, uint32_t inst, int save) +nv50_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; uint32_t old_cp, tv = 20000; int i; @@ -266,9 +266,9 @@ nv50_graph_transfer_context(drm_device_t *dev, uint32_t inst, int save) } int -nv50_graph_load_context(drm_device_t *dev, int channel) +nv50_graph_load_context(struct drm_device *dev, int channel) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo *chan = dev_priv->fifos[channel]; uint32_t inst = ((chan->ramin->instance >> 12) | (1<<31)); int ret; @@ -288,9 +288,9 @@ nv50_graph_load_context(drm_device_t *dev, int channel) } int -nv50_graph_save_context(drm_device_t *dev, int channel) +nv50_graph_save_context(struct drm_device *dev, int channel) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo *chan = dev_priv->fifos[channel]; uint32_t inst = ((chan->ramin->instance >> 12) | (1<<31)); diff --git a/shared-core/nv50_instmem.c b/shared-core/nv50_instmem.c index 81c60829..027d3ffb 100644 --- a/shared-core/nv50_instmem.c +++ b/shared-core/nv50_instmem.c @@ -39,9 +39,9 @@ typedef struct { #define NV50_INSTMEM_PT_SIZE(a) (((a) >> 12) << 3) int -nv50_instmem_init(drm_device_t *dev) +nv50_instmem_init(struct drm_device *dev) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; nv50_instmem_priv *priv; uint32_t rv, pt, pts, cb, cb0, cb1, unk, as; uint32_t i, v; @@ -150,9 +150,9 @@ nv50_instmem_init(drm_device_t *dev) } void -nv50_instmem_takedown(drm_device_t *dev) +nv50_instmem_takedown(struct drm_device *dev) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; nv50_instmem_priv *priv = dev_priv->Engine.instmem.priv; int i; @@ -168,7 +168,7 @@ nv50_instmem_takedown(drm_device_t *dev) } int -nv50_instmem_populate(drm_device_t *dev, nouveau_gpuobj_t *gpuobj, uint32_t *sz) +nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, uint32_t *sz) { if (gpuobj->im_backing) return DRM_ERR(EINVAL); @@ -189,9 +189,9 @@ nv50_instmem_populate(drm_device_t *dev, nouveau_gpuobj_t *gpuobj, uint32_t *sz) } void -nv50_instmem_clear(drm_device_t *dev, nouveau_gpuobj_t *gpuobj) +nv50_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; if (gpuobj && gpuobj->im_backing) { if (gpuobj->im_bound) @@ -202,9 +202,9 @@ nv50_instmem_clear(drm_device_t *dev, nouveau_gpuobj_t *gpuobj) } int -nv50_instmem_bind(drm_device_t *dev, nouveau_gpuobj_t *gpuobj) +nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; uint32_t pte, pte_end, vram; if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound) @@ -240,9 +240,9 @@ nv50_instmem_bind(drm_device_t *dev, nouveau_gpuobj_t *gpuobj) } int -nv50_instmem_unbind(drm_device_t *dev, nouveau_gpuobj_t *gpuobj) 
+nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; uint32_t pte, pte_end; if (gpuobj->im_bound == 0) diff --git a/shared-core/nv50_mc.c b/shared-core/nv50_mc.c index 7f7537f0..952dea9f 100644 --- a/shared-core/nv50_mc.c +++ b/shared-core/nv50_mc.c @@ -29,14 +29,14 @@ #include "nouveau_drv.h" int -nv50_mc_init(drm_device_t *dev) +nv50_mc_init(struct drm_device *dev) { - drm_nouveau_private_t *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; NV_WRITE(NV03_PMC_ENABLE, 0xFFFFFFFF); return 0; } -void nv50_mc_takedown(drm_device_t *dev) +void nv50_mc_takedown(struct drm_device *dev) { } From 3007b03bdf608708a50b842d4291d3640c30f2c5 Mon Sep 17 00:00:00 2001 From: Arthur Huillet Date: Fri, 13 Jul 2007 15:57:17 +0200 Subject: [PATCH 108/437] now attempting to create PCI object only when there is a pci_heap --- shared-core/nouveau_object.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/shared-core/nouveau_object.c b/shared-core/nouveau_object.c index 16b38e95..de1f0ca2 100644 --- a/shared-core/nouveau_object.c +++ b/shared-core/nouveau_object.c @@ -918,7 +918,7 @@ nouveau_gpuobj_channel_init(drm_device_t *dev, int channel, return ret; } } - else { + else if ( dev_priv->pci_heap) { if (dev_priv -> card_type >= NV_50 ) return 0; /*no PCIGART for NV50*/ /*PCI*/ From 5ae3ad4f015aa072180a0c55255832be4e7557cf Mon Sep 17 00:00:00 2001 From: Arthur Huillet Date: Fri, 13 Jul 2007 15:57:17 +0200 Subject: [PATCH 109/437] now attempting to create PCI object only when there is a pci_heap --- shared-core/nouveau_object.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/shared-core/nouveau_object.c b/shared-core/nouveau_object.c index c5697d8e..aab2e3ac 100644 --- a/shared-core/nouveau_object.c +++ b/shared-core/nouveau_object.c @@ -918,7 +918,7 @@ nouveau_gpuobj_channel_init(struct drm_device *dev, int channel, return ret; } } - else { + else if ( dev_priv->pci_heap) { if (dev_priv -> card_type >= NV_50 ) return 0; /*no PCIGART for NV50*/ /*PCI*/ From aa6d9199fa7b0cbe04a936312db7be75bb53bdc8 Mon Sep 17 00:00:00 2001 From: Arthur Huillet Date: Fri, 13 Jul 2007 20:51:52 +0200 Subject: [PATCH 110/437] applied patch from Ian Romanick fixing PCI DMA object creation code --- shared-core/nouveau_object.c | 77 +++++++++++++++++------------------- 1 file changed, 37 insertions(+), 40 deletions(-) diff --git a/shared-core/nouveau_object.c b/shared-core/nouveau_object.c index aab2e3ac..146c4f1c 100644 --- a/shared-core/nouveau_object.c +++ b/shared-core/nouveau_object.c @@ -585,6 +585,11 @@ nouveau_gpuobj_dma_new(struct drm_device *dev, int channel, int class, int ret; uint32_t is_scatter_gather = 0; + /* Total number of pages covered by the request. + */ + const unsigned int page_count = (size + PAGE_SIZE - 1) / PAGE_SIZE; + + DRM_DEBUG("ch%d class=0x%04x offset=0x%llx size=0x%llx\n", channel, class, offset, size); DRM_DEBUG("access=%d target=%d\n", access, target); @@ -604,7 +609,7 @@ nouveau_gpuobj_dma_new(struct drm_device *dev, int channel, int class, } ret = nouveau_gpuobj_new(dev, channel, - is_scatter_gather ? ((((size + PAGE_SIZE - 1) / PAGE_SIZE) << 2) + 12) : nouveau_gpuobj_class_instmem_size(dev, class), + is_scatter_gather ? 
((page_count << 2) + 12) : nouveau_gpuobj_class_instmem_size(dev, class), 16, NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, gpuobj); @@ -634,9 +639,19 @@ nouveau_gpuobj_dma_new(struct drm_device *dev, int channel, int class, } else { + /* Initial page entry in the scatter-gather area that + * corresponds to the base offset + */ + unsigned int idx = offset / PAGE_SIZE; + uint32_t instance_offset; - uint64_t bus_addr; - size = (uint32_t) size; + unsigned int i; + + if ((idx + page_count) > dev->sg->pages) { + DRM_ERROR("Requested page range exceeds " + "allocated scatter-gather range!"); + return DRM_ERR(E2BIG); + } DRM_DEBUG("Creating PCI DMA object using virtual zone starting at %#llx, size %d\n", offset, (uint32_t)size); INSTANCE_WR(*gpuobj, 0, ((1<<12) | (0<<13) | @@ -644,52 +659,34 @@ nouveau_gpuobj_dma_new(struct drm_device *dev, int channel, int class, (access << 14) | (target << 16) | class)); - INSTANCE_WR(*gpuobj, 1, size-1); + INSTANCE_WR(*gpuobj, 1, (uint32_t) size-1); - offset += dev->sg->virtual; /*write starting at the third dword*/ instance_offset = 2; /*for each PAGE, get its bus address, fill in the page table entry, and advance*/ - while ( size > 0 ) { - bus_addr = vmalloc_to_page(offset); - if ( ! bus_addr ) - { - DRM_ERROR("Couldn't map virtual address %#llx to a page number\n", offset); - nouveau_gpuobj_del(dev, gpuobj); - return DRM_ERR(ENOMEM); - } - bus_addr = (uint64_t) page_address(bus_addr); - if ( ! bus_addr ) - { - DRM_ERROR("Couldn't find page address for address %#llx\n", offset); - nouveau_gpuobj_del(dev, gpuobj); - return DRM_ERR(ENOMEM); - } - bus_addr |= (offset & ~PAGE_MASK); - bus_addr = virt_to_bus((void *)bus_addr); - if ( ! bus_addr ) - { - DRM_ERROR("Couldn't get bus address for %#llx\n", offset); - nouveau_gpuobj_del(dev, gpuobj); - return DRM_ERR(ENOMEM); - } + for (i = 0; i < page_count; i++) { + if (dev->sg->busaddr[idx] == 0) { + dev->sg->busaddr[idx] = + pci_map_page(dev->pdev, + dev->sg->pagelist[idx], + 0, + DMA_31BIT_MASK, + DMA_BIDIRECTIONAL); - /*if ( bus_addr >= 1 << 32 ) - { - DRM_ERROR("Bus address %#llx is over 32 bits, Nvidia cards cannot address it !\n", bus_addr); - nouveau_gpuobj_del(dev, gpuobj); - return DRM_ERR(EINVAL); - }*/ - - frame = (uint32_t) bus_addr & ~0x00000FFF; - INSTANCE_WR(*gpuobj, instance_offset, frame | pte_flags); - offset += PAGE_SIZE; - instance_offset ++; - size -= PAGE_SIZE; + if (dev->sg->busaddr[idx] == 0) { + return DRM_ERR(ENOMEM); + } } + frame = (uint32_t) dev->sg->busaddr[idx]; + INSTANCE_WR(*gpuobj, instance_offset, + frame | pte_flags); + + idx++; + instance_offset ++; + } } } else { INSTANCE_WR(*gpuobj, 0, 0x00190000 | class); From bc7d6c76fab2ff4d2f11b6bd84ca8b8f124729fd Mon Sep 17 00:00:00 2001 From: Patrice Mandin Date: Sat, 14 Jul 2007 18:32:11 +0200 Subject: [PATCH 111/437] nouveau: nv10 and nv11/15 are different --- shared-core/nouveau_drm.h | 4 ++-- shared-core/nouveau_fifo.c | 1 + shared-core/nouveau_irq.c | 1 + shared-core/nouveau_mem.c | 1 + shared-core/nv04_instmem.c | 1 + 5 files changed, 6 insertions(+), 2 deletions(-) diff --git a/shared-core/nouveau_drm.h b/shared-core/nouveau_drm.h index e2a9ea83..b39a7932 100644 --- a/shared-core/nouveau_drm.h +++ b/shared-core/nouveau_drm.h @@ -114,8 +114,8 @@ enum nouveau_card_type { NV_04 =4, NV_05 =5, NV_10 =10, - NV_11 =10, - NV_15 =10, + NV_11 =11, + NV_15 =11, NV_17 =17, NV_20 =20, NV_25 =20, diff --git a/shared-core/nouveau_fifo.c b/shared-core/nouveau_fifo.c index c769f58f..236dd4a1 100644 --- a/shared-core/nouveau_fifo.c +++ 
b/shared-core/nouveau_fifo.c @@ -99,6 +99,7 @@ static int nouveau_fifo_instmem_configure(struct drm_device *dev) (1 << 16) /* 64 Bytes entry*/); /* XXX nvidia blob set bit 18, 21,23 for nv20 & nv30 */ break; + case NV_11: case NV_10: case NV_04: case NV_03: diff --git a/shared-core/nouveau_irq.c b/shared-core/nouveau_irq.c index 451262a1..84319219 100644 --- a/shared-core/nouveau_irq.c +++ b/shared-core/nouveau_irq.c @@ -355,6 +355,7 @@ static void nouveau_pgraph_irq_handler(struct drm_device *dev) nouveau_nv04_context_switch(dev); break; case NV_10: + case NV_11: case NV_17: nouveau_nv10_context_switch(dev); break; diff --git a/shared-core/nouveau_mem.c b/shared-core/nouveau_mem.c index 2b2418fb..f09bcea7 100644 --- a/shared-core/nouveau_mem.c +++ b/shared-core/nouveau_mem.c @@ -253,6 +253,7 @@ uint64_t nouveau_mem_fb_amount(struct drm_device *dev) } break; case NV_10: + case NV_11: case NV_17: case NV_20: case NV_30: diff --git a/shared-core/nv04_instmem.c b/shared-core/nv04_instmem.c index fc3b116d..7cf06269 100644 --- a/shared-core/nv04_instmem.c +++ b/shared-core/nv04_instmem.c @@ -70,6 +70,7 @@ nv04_instmem_configure_fixed_tables(struct drm_device *dev) case NV_30: case NV_20: case NV_17: + case NV_11: case NV_10: case NV_04: case NV_03: From f174f835ffac330bbd373d8ba5091205be28f327 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Mon, 16 Jul 2007 10:13:58 +1000 Subject: [PATCH 112/437] drm: remove typedefs in drm.h to their own section --- shared-core/drm.h | 231 +++++++++++++++++++++++++++------------------- 1 file changed, 138 insertions(+), 93 deletions(-) diff --git a/shared-core/drm.h b/shared-core/drm.h index e017c023..f8912b3b 100644 --- a/shared-core/drm.h +++ b/shared-core/drm.h @@ -152,31 +152,31 @@ typedef unsigned int drm_magic_t; /**< Magic for authentication */ * \note KW: Actually it's illegal to change either for * backwards-compatibility reasons. */ -typedef struct drm_clip_rect { +struct drm_clip_rect { unsigned short x1; unsigned short y1; unsigned short x2; unsigned short y2; -} drm_clip_rect_t; +}; /** * Drawable information. */ -typedef struct drm_drawable_info { +struct drm_drawable_info { unsigned int num_rects; - drm_clip_rect_t *rects; -} drm_drawable_info_t; + struct drm_clip_rect *rects; +}; /** * Texture region, */ -typedef struct drm_tex_region { +struct drm_tex_region { unsigned char next; unsigned char prev; unsigned char in_use; unsigned char padding; unsigned int age; -} drm_tex_region_t; +}; /** * Hardware lock. @@ -185,10 +185,10 @@ typedef struct drm_tex_region { * processor bus contention on a multiprocessor system, there should not be any * other data stored in the same cache line. */ -typedef struct drm_hw_lock { +struct drm_hw_lock { __volatile__ unsigned int lock; /**< lock variable */ char padding[60]; /**< Pad to cache line */ -} drm_hw_lock_t; +}; /* This is beyond ugly, and only works on GCC. However, it allows me to use * drm.h in places (i.e., in the X-server) where I can't use size_t. The real @@ -211,7 +211,7 @@ typedef struct drm_hw_lock { * * \sa drmGetVersion(). */ -typedef struct drm_version { +struct drm_version { int version_major; /**< Major version */ int version_minor; /**< Minor version */ int version_patchlevel; /**< Patch level */ @@ -221,35 +221,35 @@ typedef struct drm_version { char __user *date; /**< User-space buffer to hold date */ DRM_SIZE_T desc_len; /**< Length of desc buffer */ char __user *desc; /**< User-space buffer to hold desc */ -} drm_version_t; +}; /** * DRM_IOCTL_GET_UNIQUE ioctl argument type. 
* * \sa drmGetBusid() and drmSetBusId(). */ -typedef struct drm_unique { +struct drm_unique { DRM_SIZE_T unique_len; /**< Length of unique */ char __user *unique; /**< Unique name for driver instantiation */ -} drm_unique_t; +}; #undef DRM_SIZE_T -typedef struct drm_list { +struct drm_list { int count; /**< Length of user-space structures */ - drm_version_t __user *version; -} drm_list_t; + struct drm_version __user *version; +}; -typedef struct drm_block { +struct drm_block { int unused; -} drm_block_t; +}; /** * DRM_IOCTL_CONTROL ioctl argument type. * * \sa drmCtlInstHandler() and drmCtlUninstHandler(). */ -typedef struct drm_control { +struct drm_control { enum { DRM_ADD_COMMAND, DRM_RM_COMMAND, @@ -257,12 +257,12 @@ typedef struct drm_control { DRM_UNINST_HANDLER } func; int irq; -} drm_control_t; +}; /** * Type of memory to map. */ -typedef enum drm_map_type { +enum drm_map_type { _DRM_FRAME_BUFFER = 0, /**< WC (no caching), no core dump */ _DRM_REGISTERS = 1, /**< no caching, no core dump */ _DRM_SHM = 2, /**< shared, cached */ @@ -270,12 +270,12 @@ typedef enum drm_map_type { _DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */ _DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */ _DRM_TTM = 6 -} drm_map_type_t; +}; /** * Memory mapping flags. */ -typedef enum drm_map_flags { +enum drm_map_flags { _DRM_RESTRICTED = 0x01, /**< Cannot be mapped to user-virtual */ _DRM_READ_ONLY = 0x02, _DRM_LOCKED = 0x04, /**< shared, cached, locked */ @@ -283,12 +283,12 @@ typedef enum drm_map_flags { _DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */ _DRM_CONTAINS_LOCK = 0x20, /**< SHM page that contains lock */ _DRM_REMOVABLE = 0x40 /**< Removable mapping */ -} drm_map_flags_t; +}; -typedef struct drm_ctx_priv_map { +struct drm_ctx_priv_map { unsigned int ctx_id; /**< Context requesting private mapping */ void *handle; /**< Handle of map */ -} drm_ctx_priv_map_t; +}; /** * DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls @@ -296,30 +296,30 @@ typedef struct drm_ctx_priv_map { * * \sa drmAddMap(). */ -typedef struct drm_map { +struct drm_map { unsigned long offset; /**< Requested physical address (0 for SAREA)*/ unsigned long size; /**< Requested physical size (bytes) */ - drm_map_type_t type; /**< Type of memory to map */ - drm_map_flags_t flags; /**< Flags */ + enum drm_map_type type; /**< Type of memory to map */ + enum drm_map_flags flags; /**< Flags */ void *handle; /**< User-space: "Handle" to pass to mmap() */ /**< Kernel-space: kernel-virtual address */ int mtrr; /**< MTRR slot used */ /* Private data */ -} drm_map_t; +}; /** * DRM_IOCTL_GET_CLIENT ioctl argument type. */ -typedef struct drm_client { +struct drm_client { int idx; /**< Which client desired? */ int auth; /**< Is client authenticated? */ unsigned long pid; /**< Process ID */ unsigned long uid; /**< User ID */ unsigned long magic; /**< Magic */ unsigned long iocs; /**< Ioctl count */ -} drm_client_t; +}; -typedef enum { +enum drm_stat_type { _DRM_STAT_LOCK, _DRM_STAT_OPENS, _DRM_STAT_CLOSES, @@ -337,23 +337,23 @@ typedef enum { _DRM_STAT_SPECIAL, /**< Special DMA (e.g., priority or polled) */ _DRM_STAT_MISSED /**< Missed DMA opportunity */ /* Add to the *END* of the list */ -} drm_stat_type_t; +}; /** * DRM_IOCTL_GET_STATS ioctl argument type. */ -typedef struct drm_stats { +struct drm_stats { unsigned long count; struct { unsigned long value; - drm_stat_type_t type; + enum drm_stat_type type; } data[15]; -} drm_stats_t; +}; /** * Hardware locking flags. 
*/ -typedef enum drm_lock_flags { +enum drm_lock_flags { _DRM_LOCK_READY = 0x01, /**< Wait until hardware is ready for DMA */ _DRM_LOCK_QUIESCENT = 0x02, /**< Wait until hardware quiescent */ _DRM_LOCK_FLUSH = 0x04, /**< Flush this context's DMA queue first */ @@ -363,17 +363,17 @@ typedef enum drm_lock_flags { full-screen DGA-like mode. */ _DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */ _DRM_HALT_CUR_QUEUES = 0x20 /**< Halt all current queues */ -} drm_lock_flags_t; +}; /** * DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type. * * \sa drmGetLock() and drmUnlock(). */ -typedef struct drm_lock { +struct drm_lock { int context; - drm_lock_flags_t flags; -} drm_lock_t; + enum drm_lock_flags flags; +}; /** * DMA flags @@ -383,7 +383,7 @@ typedef struct drm_lock { * * \sa drm_dma. */ -typedef enum drm_dma_flags { +enum drm_dma_flags { /* Flags for DMA buffer dispatch */ _DRM_DMA_BLOCK = 0x01, /**< * Block until buffer dispatched. @@ -402,14 +402,14 @@ typedef enum drm_dma_flags { _DRM_DMA_WAIT = 0x10, /**< Wait for free buffers */ _DRM_DMA_SMALLER_OK = 0x20, /**< Smaller-than-requested buffers OK */ _DRM_DMA_LARGER_OK = 0x40 /**< Larger-than-requested buffers OK */ -} drm_dma_flags_t; +}; /** * DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type. * * \sa drmAddBufs(). */ -typedef struct drm_buf_desc { +struct drm_buf_desc { int count; /**< Number of buffers of this size */ int size; /**< Size in bytes */ int low_mark; /**< Low water mark */ @@ -425,48 +425,48 @@ typedef struct drm_buf_desc { * Start address of where the AGP buffers are * in the AGP aperture */ -} drm_buf_desc_t; +}; /** * DRM_IOCTL_INFO_BUFS ioctl argument type. */ -typedef struct drm_buf_info { +struct drm_buf_info { int count; /**< Number of buffers described in list */ - drm_buf_desc_t __user *list; /**< List of buffer descriptions */ -} drm_buf_info_t; + struct drm_buf_desc __user *list; /**< List of buffer descriptions */ +}; /** * DRM_IOCTL_FREE_BUFS ioctl argument type. */ -typedef struct drm_buf_free { +struct drm_buf_free { int count; int __user *list; -} drm_buf_free_t; +}; /** * Buffer information * * \sa drm_buf_map. */ -typedef struct drm_buf_pub { +struct drm_buf_pub { int idx; /**< Index into the master buffer list */ int total; /**< Buffer size */ int used; /**< Amount of buffer in use (for DMA) */ void __user *address; /**< Address of buffer */ -} drm_buf_pub_t; +}; /** * DRM_IOCTL_MAP_BUFS ioctl argument type. */ -typedef struct drm_buf_map { +struct drm_buf_map { int count; /**< Length of the buffer list */ #if defined(__cplusplus) void __user *c_virtual; #else void __user *virtual; /**< Mmap'd area in user-virtual */ #endif - drm_buf_pub_t __user *list; /**< Buffer information */ -} drm_buf_map_t; + struct drm_buf_pub __user *list; /**< Buffer information */ +}; /** * DRM_IOCTL_DMA ioctl argument type. @@ -475,48 +475,48 @@ typedef struct drm_buf_map { * * \sa drmDMA(). 
*/ -typedef struct drm_dma { +struct drm_dma { int context; /**< Context handle */ int send_count; /**< Number of buffers to send */ int __user *send_indices; /**< List of handles to buffers */ int __user *send_sizes; /**< Lengths of data to send */ - drm_dma_flags_t flags; /**< Flags */ + enum drm_dma_flags flags; /**< Flags */ int request_count; /**< Number of buffers requested */ int request_size; /**< Desired size for buffers */ int __user *request_indices; /**< Buffer information */ int __user *request_sizes; int granted_count; /**< Number of buffers granted */ -} drm_dma_t; +}; -typedef enum { +enum drm_ctx_flags { _DRM_CONTEXT_PRESERVED = 0x01, _DRM_CONTEXT_2DONLY = 0x02 -} drm_ctx_flags_t; +}; /** * DRM_IOCTL_ADD_CTX ioctl argument type. * * \sa drmCreateContext() and drmDestroyContext(). */ -typedef struct drm_ctx { +struct drm_ctx { drm_context_t handle; - drm_ctx_flags_t flags; -} drm_ctx_t; + enum drm_ctx_flags flags; +}; /** * DRM_IOCTL_RES_CTX ioctl argument type. */ -typedef struct drm_ctx_res { +struct drm_ctx_res { int count; - drm_ctx_t __user *contexts; -} drm_ctx_res_t; + struct drm_ctx __user *contexts; +}; /** * DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type. */ -typedef struct drm_draw { +struct drm_draw { drm_drawable_t handle; -} drm_draw_t; +}; /** * DRM_IOCTL_UPDATE_DRAW ioctl argument type. @@ -525,53 +525,53 @@ typedef enum { DRM_DRAWABLE_CLIPRECTS, } drm_drawable_info_type_t; -typedef struct drm_update_draw { +struct drm_update_draw { drm_drawable_t handle; unsigned int type; unsigned int num; unsigned long long data; -} drm_update_draw_t; +}; /** * DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type. */ -typedef struct drm_auth { +struct drm_auth { drm_magic_t magic; -} drm_auth_t; +}; /** * DRM_IOCTL_IRQ_BUSID ioctl argument type. * * \sa drmGetInterruptFromBusID(). */ -typedef struct drm_irq_busid { +struct drm_irq_busid { int irq; /**< IRQ number */ int busnum; /**< bus number */ int devnum; /**< device number */ int funcnum; /**< function number */ -} drm_irq_busid_t; +}; -typedef enum { +enum drm_vblank_seq_type { _DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */ _DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */ _DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */ _DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */ _DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */ _DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking */ -} drm_vblank_seq_type_t; +}; #define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE) #define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_SIGNAL | _DRM_VBLANK_SECONDARY | \ _DRM_VBLANK_NEXTONMISS) struct drm_wait_vblank_request { - drm_vblank_seq_type_t type; + enum drm_vblank_seq_type type; unsigned int sequence; unsigned long signal; }; struct drm_wait_vblank_reply { - drm_vblank_seq_type_t type; + enum drm_vblank_seq_type type; unsigned int sequence; long tval_sec; long tval_usec; @@ -582,41 +582,41 @@ struct drm_wait_vblank_reply { * * \sa drmWaitVBlank(). */ -typedef union drm_wait_vblank { +union drm_wait_vblank { struct drm_wait_vblank_request request; struct drm_wait_vblank_reply reply; -} drm_wait_vblank_t; +}; /** * DRM_IOCTL_AGP_ENABLE ioctl argument type. * * \sa drmAgpEnable(). 
*/ -typedef struct drm_agp_mode { +struct drm_agp_mode { unsigned long mode; /**< AGP mode */ -} drm_agp_mode_t; +}; /** * DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type. * * \sa drmAgpAlloc() and drmAgpFree(). */ -typedef struct drm_agp_buffer { +struct drm_agp_buffer { unsigned long size; /**< In bytes -- will round to page boundary */ unsigned long handle; /**< Used for binding / unbinding */ unsigned long type; /**< Type of memory to allocate */ unsigned long physical; /**< Physical used by i810 */ -} drm_agp_buffer_t; +}; /** * DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type. * * \sa drmAgpBind() and drmAgpUnbind(). */ -typedef struct drm_agp_binding { +struct drm_agp_binding { unsigned long handle; /**< From drm_agp_buffer */ unsigned long offset; /**< In bytes -- will round to page boundary */ -} drm_agp_binding_t; +}; /** * DRM_IOCTL_AGP_INFO ioctl argument type. @@ -625,7 +625,7 @@ typedef struct drm_agp_binding { * drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(), * drmAgpVendorId() and drmAgpDeviceId(). */ -typedef struct drm_agp_info { +struct drm_agp_info { int agp_version_major; int agp_version_minor; unsigned long mode; @@ -639,25 +639,25 @@ typedef struct drm_agp_info { unsigned short id_vendor; unsigned short id_device; /*@} */ -} drm_agp_info_t; +}; /** * DRM_IOCTL_SG_ALLOC ioctl argument type. */ -typedef struct drm_scatter_gather { +struct drm_scatter_gather { unsigned long size; /**< In bytes -- will round to page boundary */ unsigned long handle; /**< Used for mapping / unmapping */ -} drm_scatter_gather_t; +}; /** * DRM_IOCTL_SET_VERSION ioctl argument type. */ -typedef struct drm_set_version { +struct drm_set_version { int drm_di_major; int drm_di_minor; int drm_dd_major; int drm_dd_minor; -} drm_set_version_t; +}; #define DRM_FENCE_FLAG_EMIT 0x00000001 @@ -1015,4 +1015,49 @@ typedef struct drm_mm_init_arg { #define DRM_COMMAND_BASE 0x40 #define DRM_COMMAND_END 0xA0 +/* typedef area */ +#if 1 +/*!defined(__KERNEL__) || defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)*/ +typedef struct drm_clip_rect drm_clip_rect_t; +typedef struct drm_drawable_info drm_drawable_info_t; +typedef struct drm_tex_region drm_tex_region_t; +typedef struct drm_hw_lock drm_hw_lock_t; +typedef struct drm_version drm_version_t; +typedef struct drm_unique drm_unique_t; +typedef struct drm_list drm_list_t; +typedef struct drm_block drm_block_t; +typedef struct drm_control drm_control_t; +typedef enum drm_map_type drm_map_type_t; +typedef enum drm_map_flags drm_map_flags_t; +typedef struct drm_ctx_priv_map drm_ctx_priv_map_t; +typedef struct drm_map drm_map_t; +typedef struct drm_client drm_client_t; +typedef enum drm_stat_type drm_stat_type_t; +typedef struct drm_stats drm_stats_t; +typedef enum drm_lock_flags drm_lock_flags_t; +typedef struct drm_lock drm_lock_t; +typedef enum drm_dma_flags drm_dma_flags_t; +typedef struct drm_buf_desc drm_buf_desc_t; +typedef struct drm_buf_info drm_buf_info_t; +typedef struct drm_buf_free drm_buf_free_t; +typedef struct drm_buf_pub drm_buf_pub_t; +typedef struct drm_buf_map drm_buf_map_t; +typedef struct drm_dma drm_dma_t; +typedef union drm_wait_vblank drm_wait_vblank_t; +typedef struct drm_agp_mode drm_agp_mode_t; +typedef enum drm_ctx_flags drm_ctx_flags_t; +typedef struct drm_ctx drm_ctx_t; +typedef struct drm_ctx_res drm_ctx_res_t; +typedef struct drm_draw drm_draw_t; +typedef struct drm_update_draw drm_update_draw_t; +typedef struct drm_auth drm_auth_t; +typedef struct 
drm_irq_busid drm_irq_busid_t; +typedef enum drm_vblank_seq_type drm_vblank_seq_type_t; +typedef struct drm_agp_buffer drm_agp_buffer_t; +typedef struct drm_agp_binding drm_agp_binding_t; +typedef struct drm_agp_info drm_agp_info_t; +typedef struct drm_scatter_gather drm_scatter_gather_t; +typedef struct drm_set_version drm_set_version_t; +#endif + #endif From 4be9554fcdf27bce86d0d69068d284af2793b950 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Mon, 16 Jul 2007 11:13:07 +1000 Subject: [PATCH 113/437] drm: fix typedef in drm_os_linux.h --- linux-core/drm_os_linux.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/linux-core/drm_os_linux.h b/linux-core/drm_os_linux.h index 2ea105c5..9d0d3f69 100644 --- a/linux-core/drm_os_linux.h +++ b/linux-core/drm_os_linux.h @@ -52,8 +52,8 @@ /** Read/write memory barrier */ #define DRM_MEMORYBARRIER() mb() /** DRM device local declaration */ -#define DRM_DEVICE drm_file_t *priv = filp->private_data; \ - drm_device_t *dev = priv->head->dev +#define DRM_DEVICE struct drm_file *priv = filp->private_data; \ + struct drm_device *dev = priv->head->dev /** IRQ handler arguments and return type and values */ #define DRM_IRQ_ARGS int irq, void *arg From b95ac8b7b313ad3eadc9e8bb0ead155303b7fa92 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Mon, 16 Jul 2007 11:22:15 +1000 Subject: [PATCH 114/437] drm: detypedef drm.h and fixup all problems --- linux-core/drmP.h | 96 ++++++++++++++++++------------------- linux-core/drm_agpsupport.c | 36 +++++++------- linux-core/drm_auth.c | 44 ++++++++--------- linux-core/drm_bufs.c | 77 ++++++++++++++--------------- linux-core/drm_context.c | 46 +++++++++--------- linux-core/drm_drawable.c | 26 +++++----- linux-core/drm_drv.c | 4 +- linux-core/drm_ioctl.c | 28 +++++------ linux-core/drm_irq.c | 12 ++--- linux-core/drm_lock.c | 8 ++-- linux-core/drm_proc.c | 4 +- linux-core/drm_scatter.c | 16 +++---- linux-core/drm_vm.c | 14 +++--- linux-core/i810_dma.c | 10 ++-- linux-core/i810_drm.h | 2 +- linux-core/i810_drv.h | 4 +- shared-core/drm.h | 93 ++++++++++++++++++----------------- shared-core/drm_sarea.h | 26 ++++++---- shared-core/i915_dma.c | 10 ++-- shared-core/i915_drm.h | 8 ++-- shared-core/i915_irq.c | 13 ++--- shared-core/i915_mem.c | 2 +- shared-core/mach64_dma.c | 12 ++--- shared-core/mach64_drm.h | 4 +- shared-core/mach64_state.c | 8 ++-- shared-core/mga_dma.c | 26 +++++----- shared-core/mga_drm.h | 6 +-- shared-core/mga_state.c | 12 ++--- shared-core/nouveau_drm.h | 2 +- shared-core/nouveau_mem.c | 10 ++-- shared-core/r128_cce.c | 6 +-- shared-core/r128_drm.h | 4 +- shared-core/r128_state.c | 6 +-- shared-core/r300_cmdbuf.c | 2 +- shared-core/radeon_cp.c | 6 +-- shared-core/radeon_drm.h | 6 +-- shared-core/radeon_drv.h | 2 +- shared-core/radeon_state.c | 12 ++--- shared-core/savage_bci.c | 8 ++-- shared-core/savage_drm.h | 4 +- shared-core/savage_drv.h | 6 +-- shared-core/savage_state.c | 18 +++---- shared-core/via_drm.h | 4 +- 43 files changed, 373 insertions(+), 370 deletions(-) diff --git a/linux-core/drmP.h b/linux-core/drmP.h index 2bbc6200..cf2ed2ed 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -296,13 +296,9 @@ typedef struct drm_ioctl_desc { int flags; } drm_ioctl_desc_t; -typedef struct drm_devstate { - pid_t owner; /**< X server pid holding x_lock */ -} drm_devstate_t; - typedef struct drm_magic_entry { struct list_head head; - drm_hash_item_t hash_item; + struct drm_hash_item hash_item; struct drm_file *priv; } drm_magic_entry_t; @@ -346,10 +342,10 @@ typedef struct drm_buf 
{ /** bufs is one longer than it has to be */ typedef struct drm_waitlist { int count; /**< Number of possible buffers */ - drm_buf_t **bufs; /**< List of pointers to buffers */ - drm_buf_t **rp; /**< Read pointer */ - drm_buf_t **wp; /**< Write pointer */ - drm_buf_t **end; /**< End pointer */ + struct drm_buf **bufs; /**< List of pointers to buffers */ + struct drm_buf **rp; /**< Read pointer */ + struct drm_buf **wp; /**< Write pointer */ + struct drm_buf **end; /**< End pointer */ spinlock_t read_lock; spinlock_t write_lock; } drm_waitlist_t; @@ -357,7 +353,7 @@ typedef struct drm_waitlist { typedef struct drm_freelist { int initialized; /**< Freelist in use */ atomic_t count; /**< Number of free buffers */ - drm_buf_t *next; /**< End pointer */ + struct drm_buf *next; /**< End pointer */ wait_queue_head_t waiting; /**< Processes waiting on free bufs */ int low_mark; /**< Low water mark */ @@ -378,11 +374,11 @@ typedef struct drm_dma_handle { typedef struct drm_buf_entry { int buf_size; /**< size */ int buf_count; /**< number of buffers */ - drm_buf_t *buflist; /**< buffer list */ + struct drm_buf *buflist; /**< buffer list */ int seg_count; int page_order; - drm_dma_handle_t **seglist; - drm_freelist_t freelist; + struct drm_dma_handle **seglist; + struct drm_freelist freelist; } drm_buf_entry_t; /* @@ -440,8 +436,8 @@ typedef struct drm_queue { atomic_t total_flushed; /**< Total flushes statistic */ atomic_t total_locks; /**< Total locks statistics */ #endif - drm_ctx_flags_t flags; /**< Context preserving and 2D-only */ - drm_waitlist_t waitlist; /**< Pending buffers */ + enum drm_ctx_flags flags; /**< Context preserving and 2D-only */ + struct drm_waitlist waitlist; /**< Pending buffers */ wait_queue_head_t flush_queue; /**< Processes waiting until flush */ } drm_queue_t; @@ -449,7 +445,7 @@ typedef struct drm_queue { * Lock data. 
*/ typedef struct drm_lock_data { - drm_hw_lock_t *hw_lock; /**< Hardware lock */ + struct drm_hw_lock *hw_lock; /**< Hardware lock */ struct file *filp; /**< File descr of lock holder (0=kernel) */ wait_queue_head_t lock_queue; /**< Queue of blocked processes */ unsigned long lock_time; /**< Time of last lock in jiffies */ @@ -464,9 +460,9 @@ typedef struct drm_lock_data { */ typedef struct drm_device_dma { - drm_buf_entry_t bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */ + struct drm_buf_entry bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */ int buf_count; /**< total number of buffers */ - drm_buf_t **buflist; /**< Vector of pointers into drm_device_dma::bufs */ + struct drm_buf **buflist; /**< Vector of pointers into drm_device_dma::bufs */ int seg_count; int page_count; /**< number of pages */ unsigned long *pagelist; /**< page list */ @@ -524,7 +520,7 @@ typedef struct drm_sg_mem { typedef struct drm_sigdata { int context; - drm_hw_lock_t *lock; + struct drm_hw_lock *lock; } drm_sigdata_t; @@ -553,13 +549,13 @@ typedef struct drm_mm { */ typedef struct drm_map_list { struct list_head head; /**< list head */ - drm_hash_item_t hash; - drm_map_t *map; /**< mapping */ + struct drm_hash_item hash; + struct drm_map *map; /**< mapping */ drm_u64_t user_token; drm_mm_node_t *file_offset_node; } drm_map_list_t; -typedef drm_map_t drm_local_map_t; +typedef struct drm_map drm_local_map_t; /** * Context handle list @@ -567,7 +563,7 @@ typedef drm_map_t drm_local_map_t; typedef struct drm_ctx_list { struct list_head head; /**< list head */ drm_context_t handle; /**< context handle */ - drm_file_t *tag; /**< associated fd private data */ + struct drm_file *tag; /**< associated fd private data */ } drm_ctx_list_t; typedef struct drm_vbl_sig { @@ -646,9 +642,9 @@ struct drm_driver { struct file * filp); void (*reclaim_buffers_idlelocked) (struct drm_device *dev, struct file * filp); - unsigned long (*get_map_ofs) (drm_map_t * map); + unsigned long (*get_map_ofs) (struct drm_map * map); unsigned long (*get_reg_ofs) (struct drm_device * dev); - void (*set_version) (struct drm_device * dev, drm_set_version_t * sv); + void (*set_version) (struct drm_device * dev, struct drm_set_version * sv); struct drm_fence_driver *fence_driver; struct drm_bo_driver *bo_driver; @@ -713,14 +709,14 @@ typedef struct drm_device { /** \name Performance counters */ /*@{ */ unsigned long counters; - drm_stat_type_t types[15]; + enum drm_stat_type types[15]; atomic_t counts[15]; /*@} */ /** \name Authentication */ /*@{ */ struct list_head filelist; - drm_open_hash_t magiclist; + struct drm_open_hash magiclist; struct list_head magicfree; /*@} */ @@ -728,7 +724,7 @@ typedef struct drm_device { /*@{ */ struct list_head maplist; /**< Linked list of regions */ int map_count; /**< Number of mappable regions */ - drm_open_hash_t map_hash; /**< User token hash table for maps */ + struct drm_open_hash map_hash; /**< User token hash table for maps */ drm_mm_t offset_manager; /**< User token manager */ drm_open_hash_t object_hash; /**< User token hash table for objects */ struct address_space *dev_mapping; /**< For unmap_mapping_range() */ @@ -743,7 +739,7 @@ typedef struct drm_device { struct idr ctx_idr; struct list_head vmalist; /**< List of vmas (for debugging) */ - drm_lock_data_t lock; /**< Information on hardware lock */ + struct drm_lock_data lock; /**< Information on hardware lock */ /*@} */ /** \name DMA queues (contexts) */ @@ -751,8 +747,8 @@ typedef struct drm_device { int 
queue_count; /**< Number of active DMA queues */ int queue_reserved; /**< Number of reserved DMA queues */ int queue_slots; /**< Actual length of queuelist */ - drm_queue_t **queuelist; /**< Vector of pointers to DMA queues */ - drm_device_dma_t *dma; /**< Optional pointer for DMA support */ + struct drm_queue **queuelist; /**< Vector of pointers to DMA queues */ + struct drm_device_dma *dma; /**< Optional pointer for DMA support */ /*@} */ /** \name Context support */ @@ -792,7 +788,7 @@ typedef struct drm_device { wait_queue_head_t buf_readers; /**< Processes waiting to read */ wait_queue_head_t buf_writers; /**< Processes waiting to ctx switch */ - drm_agp_head_t *agp; /**< AGP data */ + struct drm_agp_head *agp; /**< AGP data */ struct pci_dev *pdev; /**< PCI device structure */ int pci_vendor; /**< PCI vendor id */ @@ -800,15 +796,15 @@ typedef struct drm_device { #ifdef __alpha__ struct pci_controller *hose; #endif - drm_sg_mem_t *sg; /**< Scatter gather memory */ + struct drm_sg_mem *sg; /**< Scatter gather memory */ void *dev_private; /**< device private data */ - drm_sigdata_t sigdata; /**< For block_all_signals */ + struct drm_sigdata sigdata; /**< For block_all_signals */ sigset_t sigmask; struct drm_driver *driver; drm_local_map_t *agp_buffer_map; unsigned int agp_buffer_token; - drm_head_t primary; /**< primary screen head */ + struct drm_head primary; /**< primary screen head */ drm_fence_manager_t fm; drm_buffer_manager_t bm; @@ -915,7 +911,7 @@ unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait); /* Mapping support (drm_vm.h) */ extern int drm_mmap(struct file *filp, struct vm_area_struct *vma); -extern unsigned long drm_core_get_map_ofs(drm_map_t * map); +extern unsigned long drm_core_get_map_ofs(struct drm_map * map); extern unsigned long drm_core_get_reg_ofs(struct drm_device *dev); extern pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma); @@ -992,8 +988,8 @@ extern int drm_rmdraw(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); extern int drm_update_drawable_info(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern drm_drawable_info_t *drm_get_drawable_info(drm_device_t *dev, - drm_drawable_t id); +extern struct drm_drawable_info *drm_get_drawable_info(drm_device_t *dev, + drm_drawable_t id); extern void drm_drawable_free_all(drm_device_t *dev); /* Authentication IOCTL support (drm_auth.h) */ @@ -1021,12 +1017,12 @@ extern int drm_i_have_hw_lock(struct file *filp); extern int drm_kernel_take_hw_lock(struct file *filp); /* Buffer management support (drm_bufs.h) */ -extern int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request); -extern int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request); -extern int drm_addbufs_fb (drm_device_t * dev, drm_buf_desc_t * request); +extern int drm_addbufs_agp(drm_device_t * dev, struct drm_buf_desc * request); +extern int drm_addbufs_pci(drm_device_t * dev, struct drm_buf_desc * request); +extern int drm_addbufs_fb (drm_device_t * dev, struct drm_buf_desc * request); extern int drm_addmap(drm_device_t * dev, unsigned int offset, - unsigned int size, drm_map_type_t type, - drm_map_flags_t flags, drm_local_map_t ** map_ptr); + unsigned int size, enum drm_map_type type, + enum drm_map_flags flags, drm_local_map_t ** map_ptr); extern int drm_addmap_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); extern int drm_rmmap(drm_device_t *dev, drm_local_map_t *map); @@ -1081,22 +1077,22 @@ extern 
int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp, extern int drm_agp_release(drm_device_t *dev); extern int drm_agp_release_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_agp_enable(drm_device_t *dev, drm_agp_mode_t mode); +extern int drm_agp_enable(drm_device_t *dev, struct drm_agp_mode mode); extern int drm_agp_enable_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_agp_info(drm_device_t * dev, drm_agp_info_t *info); +extern int drm_agp_info(drm_device_t * dev, struct drm_agp_info *info); extern int drm_agp_info_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_agp_alloc(drm_device_t *dev, drm_agp_buffer_t *request); +extern int drm_agp_alloc(drm_device_t *dev, struct drm_agp_buffer *request); extern int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_agp_free(drm_device_t *dev, drm_agp_buffer_t *request); +extern int drm_agp_free(drm_device_t *dev, struct drm_agp_buffer *request); extern int drm_agp_free_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_agp_unbind(drm_device_t *dev, drm_agp_binding_t *request); +extern int drm_agp_unbind(drm_device_t *dev, struct drm_agp_binding *request); extern int drm_agp_unbind_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_agp_bind(drm_device_t *dev, drm_agp_binding_t *request); +extern int drm_agp_bind(drm_device_t *dev, struct drm_agp_binding *request); extern int drm_agp_bind_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11) @@ -1134,7 +1130,7 @@ extern int drm_proc_cleanup(int minor, extern void drm_sg_cleanup(drm_sg_mem_t * entry); extern int drm_sg_alloc_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_sg_alloc(drm_device_t *dev, drm_scatter_gather_t * request); +extern int drm_sg_alloc(drm_device_t *dev, struct drm_scatter_gather * request); extern int drm_sg_free(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); diff --git a/linux-core/drm_agpsupport.c b/linux-core/drm_agpsupport.c index f134563a..c037defe 100644 --- a/linux-core/drm_agpsupport.c +++ b/linux-core/drm_agpsupport.c @@ -48,7 +48,7 @@ * Verifies the AGP device has been initialized and acquired and fills in the * drm_agp_info structure with the information in drm_agp_head::agp_info. */ -int drm_agp_info(drm_device_t * dev, drm_agp_info_t *info) +int drm_agp_info(drm_device_t * dev, struct drm_agp_info *info) { DRM_AGP_KERN *kern; @@ -75,14 +75,14 @@ int drm_agp_info_ioctl(struct inode *inode, struct file *filp, { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; - drm_agp_info_t info; + struct drm_agp_info info; int err; err = drm_agp_info(dev, &info); if (err) return err; - if (copy_to_user((drm_agp_info_t __user *) arg, &info, sizeof(info))) + if (copy_to_user((struct drm_agp_info __user *) arg, &info, sizeof(info))) return -EFAULT; return 0; } @@ -181,7 +181,7 @@ int drm_agp_release_ioctl(struct inode *inode, struct file *filp, * Verifies the AGP device has been acquired but not enabled, and calls * \c agp_enable. 
*/ -int drm_agp_enable(drm_device_t *dev, drm_agp_mode_t mode) +int drm_agp_enable(drm_device_t *dev, struct drm_agp_mode mode) { if (!dev->agp || !dev->agp->acquired) return -EINVAL; @@ -203,10 +203,10 @@ int drm_agp_enable_ioctl(struct inode *inode, struct file *filp, { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; - drm_agp_mode_t mode; + struct drm_agp_mode mode; - if (copy_from_user(&mode, (drm_agp_mode_t __user *) arg, sizeof(mode))) + if (copy_from_user(&mode, (struct drm_agp_mode __user *) arg, sizeof(mode))) return -EFAULT; return drm_agp_enable(dev, mode); @@ -224,7 +224,7 @@ int drm_agp_enable_ioctl(struct inode *inode, struct file *filp, * Verifies the AGP device is present and has been acquired, allocates the * memory via alloc_agp() and creates a drm_agp_mem entry for it. */ -int drm_agp_alloc(drm_device_t *dev, drm_agp_buffer_t *request) +int drm_agp_alloc(drm_device_t *dev, struct drm_agp_buffer *request) { drm_agp_mem_t *entry; DRM_AGP_MEM *memory; @@ -264,8 +264,8 @@ int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp, { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; - drm_agp_buffer_t request; - drm_agp_buffer_t __user *argp = (void __user *)arg; + struct drm_agp_buffer request; + struct drm_agp_buffer __user *argp = (void __user *)arg; int err; if (copy_from_user(&request, argp, sizeof(request))) @@ -323,7 +323,7 @@ static drm_agp_mem_t *drm_agp_lookup_entry(drm_device_t * dev, * Verifies the AGP device is present and acquired, looks-up the AGP memory * entry and passes it to the unbind_agp() function. */ -int drm_agp_unbind(drm_device_t *dev, drm_agp_binding_t *request) +int drm_agp_unbind(drm_device_t *dev, struct drm_agp_binding *request) { drm_agp_mem_t *entry; int ret; @@ -347,10 +347,10 @@ int drm_agp_unbind_ioctl(struct inode *inode, struct file *filp, { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; - drm_agp_binding_t request; + struct drm_agp_binding request; if (copy_from_user - (&request, (drm_agp_binding_t __user *) arg, sizeof(request))) + (&request, (struct drm_agp_binding __user *) arg, sizeof(request))) return -EFAULT; return drm_agp_unbind(dev, &request); @@ -370,7 +370,7 @@ int drm_agp_unbind_ioctl(struct inode *inode, struct file *filp, * is currently bound into the GATT. Looks-up the AGP memory entry and passes * it to bind_agp() function. */ -int drm_agp_bind(drm_device_t *dev, drm_agp_binding_t *request) +int drm_agp_bind(drm_device_t *dev, struct drm_agp_binding *request) { drm_agp_mem_t *entry; int retcode; @@ -398,10 +398,10 @@ int drm_agp_bind_ioctl(struct inode *inode, struct file *filp, { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; - drm_agp_binding_t request; + struct drm_agp_binding request; if (copy_from_user - (&request, (drm_agp_binding_t __user *) arg, sizeof(request))) + (&request, (struct drm_agp_binding __user *) arg, sizeof(request))) return -EFAULT; return drm_agp_bind(dev, &request); @@ -422,7 +422,7 @@ int drm_agp_bind_ioctl(struct inode *inode, struct file *filp, * unbind_agp(). Frees it via free_agp() as well as the entry itself * and unlinks from the doubly linked list it's inserted in. 
*/ -int drm_agp_free(drm_device_t *dev, drm_agp_buffer_t *request) +int drm_agp_free(drm_device_t *dev, struct drm_agp_buffer *request) { drm_agp_mem_t *entry; @@ -448,10 +448,10 @@ int drm_agp_free_ioctl(struct inode *inode, struct file *filp, { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; - drm_agp_buffer_t request; + struct drm_agp_buffer request; if (copy_from_user - (&request, (drm_agp_buffer_t __user *) arg, sizeof(request))) + (&request, (struct drm_agp_buffer __user *) arg, sizeof(request))) return -EFAULT; return drm_agp_free(dev, &request); diff --git a/linux-core/drm_auth.c b/linux-core/drm_auth.c index 6948d858..4c48d872 100644 --- a/linux-core/drm_auth.c +++ b/linux-core/drm_auth.c @@ -45,15 +45,15 @@ * the one with matching magic number, while holding the drm_device::struct_mutex * lock. */ -static drm_file_t *drm_find_file(drm_device_t * dev, drm_magic_t magic) +static struct drm_file *drm_find_file(struct drm_device * dev, drm_magic_t magic) { - drm_file_t *retval = NULL; - drm_magic_entry_t *pt; - drm_hash_item_t *hash; + struct drm_file *retval = NULL; + struct drm_magic_entry *pt; + struct drm_hash_item *hash; - mutex_lock(&dev->struct_mutex); - if (!drm_ht_find_item(&dev->magiclist, (unsigned long) magic, &hash)) { - pt = drm_hash_entry(hash, drm_magic_entry_t, hash_item); + mutex_lock(&dev->struct_mutex); + if (!drm_ht_find_item(&dev->magiclist, (unsigned long)magic, &hash)) { + pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item); retval = pt->priv; } mutex_unlock(&dev->struct_mutex); @@ -71,10 +71,10 @@ static drm_file_t *drm_find_file(drm_device_t * dev, drm_magic_t magic) * associated the magic number hash key in drm_device::magiclist, while holding * the drm_device::struct_mutex lock. */ -static int drm_add_magic(drm_device_t *dev, drm_file_t *priv, +static int drm_add_magic(struct drm_device * dev, struct drm_file * priv, drm_magic_t magic) { - drm_magic_entry_t *entry; + struct drm_magic_entry *entry; DRM_DEBUG("%d\n", magic); @@ -101,10 +101,10 @@ static int drm_add_magic(drm_device_t *dev, drm_file_t *priv, * Searches and unlinks the entry in drm_device::magiclist with the magic * number hash key, while holding the drm_device::struct_mutex lock. 
*/ -static int drm_remove_magic(drm_device_t * dev, drm_magic_t magic) +static int drm_remove_magic(struct drm_device * dev, drm_magic_t magic) { - drm_magic_entry_t *pt; - drm_hash_item_t *hash; + struct drm_magic_entry *pt; + struct drm_hash_item *hash; DRM_DEBUG("%d\n", magic); @@ -113,7 +113,7 @@ static int drm_remove_magic(drm_device_t * dev, drm_magic_t magic) mutex_unlock(&dev->struct_mutex); return -EINVAL; } - pt = drm_hash_entry(hash, drm_magic_entry_t, hash_item); + pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item); drm_ht_remove_item(&dev->magiclist, hash); list_del(&pt->head); mutex_unlock(&dev->struct_mutex); @@ -141,9 +141,9 @@ int drm_getmagic(struct inode *inode, struct file *filp, { static drm_magic_t sequence = 0; static DEFINE_SPINLOCK(lock); - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_auth_t auth; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_auth auth; /* Find unique magic */ if (priv->magic) { @@ -161,7 +161,7 @@ int drm_getmagic(struct inode *inode, struct file *filp, } DRM_DEBUG("%u\n", auth.magic); - if (copy_to_user((drm_auth_t __user *) arg, &auth, sizeof(auth))) + if (copy_to_user((struct drm_auth __user *) arg, &auth, sizeof(auth))) return -EFAULT; return 0; } @@ -180,12 +180,12 @@ int drm_getmagic(struct inode *inode, struct file *filp, int drm_authmagic(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_auth_t auth; - drm_file_t *file; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_auth auth; + struct drm_file *file; - if (copy_from_user(&auth, (drm_auth_t __user *) arg, sizeof(auth))) + if (copy_from_user(&auth, (struct drm_auth __user *) arg, sizeof(auth))) return -EFAULT; DRM_DEBUG("%u\n", auth.magic); if ((file = drm_find_file(dev, auth.magic))) { diff --git a/linux-core/drm_bufs.c b/linux-core/drm_bufs.c index 2f3e4b2a..3f34de0e 100644 --- a/linux-core/drm_bufs.c +++ b/linux-core/drm_bufs.c @@ -102,11 +102,12 @@ static int drm_map_handle(drm_device_t *dev, drm_hash_item_t *hash, * applicable and if supported by the kernel. 
*/ static int drm_addmap_core(drm_device_t * dev, unsigned int offset, - unsigned int size, drm_map_type_t type, - drm_map_flags_t flags, drm_map_list_t ** maplist) + unsigned int size, enum drm_map_type type, + enum drm_map_flags flags, + struct drm_map_list **maplist) { - drm_map_t *map; - drm_map_list_t *list; + struct drm_map *map; + struct drm_map_list *list; drm_dma_handle_t *dmah; unsigned long user_token; int ret; @@ -311,10 +312,10 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset, } int drm_addmap(drm_device_t * dev, unsigned int offset, - unsigned int size, drm_map_type_t type, - drm_map_flags_t flags, drm_local_map_t ** map_ptr) + unsigned int size, enum drm_map_type type, + enum drm_map_flags flags, drm_local_map_t ** map_ptr) { - drm_map_list_t *list; + struct drm_map_list *list; int rc; rc = drm_addmap_core(dev, offset, size, type, flags, &list); @@ -330,9 +331,9 @@ int drm_addmap_ioctl(struct inode *inode, struct file *filp, { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; - drm_map_t map; + struct drm_map map; drm_map_list_t *maplist; - drm_map_t __user *argp = (void __user *)arg; + struct drm_map __user *argp = (void __user *)arg; int err; if (!(filp->f_mode & 3)) @@ -351,7 +352,7 @@ int drm_addmap_ioctl(struct inode *inode, struct file *filp, if (err) return err; - if (copy_to_user(argp, maplist->map, sizeof(drm_map_t))) + if (copy_to_user(argp, maplist->map, sizeof(struct drm_map))) return -EFAULT; /* avoid a warning on 64-bit, this casting isn't very nice, but the API is set so too late */ @@ -367,7 +368,7 @@ int drm_addmap_ioctl(struct inode *inode, struct file *filp, * \param inode device inode. * \param filp file pointer. * \param cmd command. - * \param arg pointer to a drm_map_t structure. + * \param arg pointer to a struct drm_map structure. * \return zero on success or a negative value on error. * * Searches the map on drm_device::maplist, removes it from the list, see if @@ -459,12 +460,12 @@ int drm_rmmap_ioctl(struct inode *inode, struct file *filp, { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; - drm_map_t request; + struct drm_map request; drm_local_map_t *map = NULL; drm_map_list_t *r_list; int ret; - if (copy_from_user(&request, (drm_map_t __user *) arg, sizeof(request))) { + if (copy_from_user(&request, (struct drm_map __user *) arg, sizeof(request))) { return -EFAULT; } @@ -512,7 +513,7 @@ int drm_rmmap_ioctl(struct inode *inode, struct file *filp, * * Frees any pages and buffers associated with the given entry. */ -static void drm_cleanup_buf_error(drm_device_t * dev, drm_buf_entry_t * entry) +static void drm_cleanup_buf_error(drm_device_t * dev, struct drm_buf_entry * entry) { int i; @@ -550,17 +551,17 @@ static void drm_cleanup_buf_error(drm_device_t * dev, drm_buf_entry_t * entry) * Add AGP buffers for DMA transfers * * \param dev drm_device_t to which the buffers are to be added. - * \param request pointer to a drm_buf_desc_t describing the request. + * \param request pointer to a struct drm_buf_desc describing the request. * \return zero on success or a negative number on failure. * * After some sanity checks creates a drm_buf structure for each buffer and * reallocates the buffer list of the same size order to accommodate the new * buffers. 
*/ -int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request) +int drm_addbufs_agp(drm_device_t * dev, struct drm_buf_desc * request) { drm_device_dma_t *dma = dev->dma; - drm_buf_entry_t *entry; + struct drm_buf_entry *entry; drm_agp_mem_t *agp_entry; drm_buf_t *buf; unsigned long offset; @@ -727,7 +728,7 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request) EXPORT_SYMBOL(drm_addbufs_agp); #endif /* __OS_HAS_AGP */ -int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request) +int drm_addbufs_pci(drm_device_t * dev, struct drm_buf_desc * request) { drm_device_dma_t *dma = dev->dma; int count; @@ -735,7 +736,7 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request) int size; int total; int page_order; - drm_buf_entry_t *entry; + struct drm_buf_entry *entry; drm_dma_handle_t *dmah; drm_buf_t *buf; int alignment; @@ -953,10 +954,10 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request) } EXPORT_SYMBOL(drm_addbufs_pci); -static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request) +static int drm_addbufs_sg(drm_device_t * dev, struct drm_buf_desc * request) { drm_device_dma_t *dma = dev->dma; - drm_buf_entry_t *entry; + struct drm_buf_entry *entry; drm_buf_t *buf; unsigned long offset; unsigned long agp_offset; @@ -1115,10 +1116,10 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request) return 0; } -int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request) +int drm_addbufs_fb(drm_device_t * dev, struct drm_buf_desc * request) { drm_device_dma_t *dma = dev->dma; - drm_buf_entry_t *entry; + struct drm_buf_entry *entry; drm_buf_t *buf; unsigned long offset; unsigned long agp_offset; @@ -1283,7 +1284,7 @@ EXPORT_SYMBOL(drm_addbufs_fb); * \param inode device inode. * \param filp file pointer. * \param cmd command. - * \param arg pointer to a drm_buf_desc_t request. + * \param arg pointer to a struct drm_buf_desc request. * \return zero on success or a negative number on failure. 
* * According with the memory type specified in drm_buf_desc::flags and the @@ -1294,7 +1295,7 @@ EXPORT_SYMBOL(drm_addbufs_fb); int drm_addbufs(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_buf_desc_t request; + struct drm_buf_desc request; drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; int ret; @@ -1302,7 +1303,7 @@ int drm_addbufs(struct inode *inode, struct file *filp, if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) return -EINVAL; - if (copy_from_user(&request, (drm_buf_desc_t __user *) arg, + if (copy_from_user(&request, (struct drm_buf_desc __user *) arg, sizeof(request))) return -EFAULT; @@ -1350,8 +1351,8 @@ int drm_infobufs(struct inode *inode, struct file *filp, drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; drm_device_dma_t *dma = dev->dma; - drm_buf_info_t request; - drm_buf_info_t __user *argp = (void __user *)arg; + struct drm_buf_info request; + struct drm_buf_info __user *argp = (void __user *)arg; int i; int count; @@ -1382,9 +1383,9 @@ int drm_infobufs(struct inode *inode, struct file *filp, if (request.count >= count) { for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) { if (dma->bufs[i].buf_count) { - drm_buf_desc_t __user *to = + struct drm_buf_desc __user *to = &request.list[count]; - drm_buf_entry_t *from = &dma->bufs[i]; + struct drm_buf_entry *from = &dma->bufs[i]; drm_freelist_t *list = &dma->bufs[i].freelist; if (copy_to_user(&to->count, &from->buf_count, @@ -1438,9 +1439,9 @@ int drm_markbufs(struct inode *inode, struct file *filp, drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; drm_device_dma_t *dma = dev->dma; - drm_buf_desc_t request; + struct drm_buf_desc request; int order; - drm_buf_entry_t *entry; + struct drm_buf_entry *entry; if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) return -EINVAL; @@ -1449,7 +1450,7 @@ int drm_markbufs(struct inode *inode, struct file *filp, return -EINVAL; if (copy_from_user(&request, - (drm_buf_desc_t __user *) arg, sizeof(request))) + (struct drm_buf_desc __user *) arg, sizeof(request))) return -EFAULT; DRM_DEBUG("%d, %d, %d\n", @@ -1488,7 +1489,7 @@ int drm_freebufs(struct inode *inode, struct file *filp, drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; drm_device_dma_t *dma = dev->dma; - drm_buf_free_t request; + struct drm_buf_free request; int i; int idx; drm_buf_t *buf; @@ -1500,7 +1501,7 @@ int drm_freebufs(struct inode *inode, struct file *filp, return -EINVAL; if (copy_from_user(&request, - (drm_buf_free_t __user *) arg, sizeof(request))) + (struct drm_buf_free __user *) arg, sizeof(request))) return -EFAULT; DRM_DEBUG("%d\n", request.count); @@ -1544,12 +1545,12 @@ int drm_mapbufs(struct inode *inode, struct file *filp, drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; drm_device_dma_t *dma = dev->dma; - drm_buf_map_t __user *argp = (void __user *)arg; + struct drm_buf_map __user *argp = (void __user *)arg; int retcode = 0; const int zero = 0; unsigned long virtual; unsigned long address; - drm_buf_map_t request; + struct drm_buf_map request; int i; if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) @@ -1575,7 +1576,7 @@ int drm_mapbufs(struct inode *inode, struct file *filp, && (dma->flags & _DRM_DMA_USE_SG)) || (drm_core_check_feature(dev, DRIVER_FB_DMA) && (dma->flags & _DRM_DMA_USE_FB))) { - drm_map_t *map = dev->agp_buffer_map; + struct drm_map *map = dev->agp_buffer_map; unsigned long token = dev->agp_buffer_token; if (!map) { 
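The hunks above all apply the same mechanical conversion: kernel code now refers to the plain struct tags (struct drm_map, struct drm_buf_desc, struct drm_ctx, and so on), while the old *_t spellings survive only as userspace-visible typedefs behind a __KERNEL__ guard, as the shared-core/drm.h and drm_sarea.h hunks further down show. A minimal sketch of that compatibility pattern, using struct drm_clip_rect as the example (the field layout shown here is assumed from the existing header, not something introduced by this patch):

/* Canonical definition: the struct tag is what kernel code now uses. */
struct drm_clip_rect {
	unsigned short x1;
	unsigned short y1;
	unsigned short x2;
	unsigned short y2;
};

#if !defined(__KERNEL__)
/* Legacy spelling kept only so existing userspace keeps compiling. */
typedef struct drm_clip_rect drm_clip_rect_t;
#endif

New in-kernel code is expected to use the struct tag directly; the typedef block exists purely as a transition aid for userspace headers.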
diff --git a/linux-core/drm_context.c b/linux-core/drm_context.c index 195c7fb5..6f066ac4 100644 --- a/linux-core/drm_context.c +++ b/linux-core/drm_context.c @@ -145,10 +145,10 @@ int drm_getsareactx(struct inode *inode, struct file *filp, { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; - drm_ctx_priv_map_t __user *argp = (void __user *)arg; - drm_ctx_priv_map_t request; - drm_map_t *map; - drm_map_list_t *_entry; + struct drm_ctx_priv_map __user *argp = (void __user *)arg; + struct drm_ctx_priv_map request; + struct drm_map *map; + struct drm_map_list *_entry; if (copy_from_user(&request, argp, sizeof(request))) return -EFAULT; @@ -196,12 +196,12 @@ int drm_setsareactx(struct inode *inode, struct file *filp, { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; - drm_ctx_priv_map_t request; - drm_map_t *map = NULL; - drm_map_list_t *r_list = NULL; + struct drm_ctx_priv_map request; + struct drm_map *map = NULL; + struct drm_map_list *r_list = NULL; if (copy_from_user(&request, - (drm_ctx_priv_map_t __user *) arg, sizeof(request))) + (struct drm_ctx_priv_map __user *) arg, sizeof(request))) return -EFAULT; mutex_lock(&dev->struct_mutex); @@ -301,9 +301,9 @@ static int drm_context_switch_complete(drm_device_t * dev, int new) int drm_resctx(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_ctx_res_t res; - drm_ctx_t __user *argp = (void __user *)arg; - drm_ctx_t ctx; + struct drm_ctx_res res; + struct drm_ctx __user *argp = (void __user *)arg; + struct drm_ctx ctx; int i; if (copy_from_user(&res, argp, sizeof(res))) @@ -339,10 +339,10 @@ int drm_addctx(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_ctx_list_t *ctx_entry; - drm_ctx_t __user *argp = (void __user *)arg; - drm_ctx_t ctx; + struct drm_device *dev = priv->head->dev; + struct drm_ctx_list *ctx_entry; + struct drm_ctx __user *argp = (void __user *)arg; + struct drm_ctx ctx; if (copy_from_user(&ctx, argp, sizeof(ctx))) return -EFAULT; @@ -406,8 +406,8 @@ int drm_modctx(struct inode *inode, struct file *filp, int drm_getctx(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_ctx_t __user *argp = (void __user *)arg; - drm_ctx_t ctx; + struct drm_ctx __user *argp = (void __user *)arg; + struct drm_ctx ctx; if (copy_from_user(&ctx, argp, sizeof(ctx))) return -EFAULT; @@ -436,9 +436,9 @@ int drm_switchctx(struct inode *inode, struct file *filp, { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; - drm_ctx_t ctx; + struct drm_ctx ctx; - if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx))) + if (copy_from_user(&ctx, (struct drm_ctx __user *) arg, sizeof(ctx))) return -EFAULT; DRM_DEBUG("%d\n", ctx.handle); @@ -461,9 +461,9 @@ int drm_newctx(struct inode *inode, struct file *filp, { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; - drm_ctx_t ctx; + struct drm_ctx ctx; - if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx))) + if (copy_from_user(&ctx, (struct drm_ctx __user *) arg, sizeof(ctx))) return -EFAULT; DRM_DEBUG("%d\n", ctx.handle); @@ -488,9 +488,9 @@ int drm_rmctx(struct inode *inode, struct file *filp, { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; - drm_ctx_t ctx; + struct drm_ctx ctx; - if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx))) + if (copy_from_user(&ctx, (struct 
drm_ctx __user *) arg, sizeof(ctx))) return -EFAULT; DRM_DEBUG("%d\n", ctx.handle); diff --git a/linux-core/drm_drawable.c b/linux-core/drm_drawable.c index 7129980b..5a2a14f9 100644 --- a/linux-core/drm_drawable.c +++ b/linux-core/drm_drawable.c @@ -44,7 +44,7 @@ int drm_adddraw(DRM_IOCTL_ARGS) { DRM_DEVICE; unsigned long irqflags; - drm_draw_t draw; + struct drm_draw draw; int new_id = 0; int ret; @@ -67,7 +67,7 @@ again: DRM_DEBUG("%d\n", draw.handle); - DRM_COPY_TO_USER_IOCTL((drm_draw_t __user *)data, draw, sizeof(draw)); + DRM_COPY_TO_USER_IOCTL((struct drm_draw __user *)data, draw, sizeof(draw)); return 0; } @@ -78,10 +78,10 @@ again: int drm_rmdraw(DRM_IOCTL_ARGS) { DRM_DEVICE; - drm_draw_t draw; + struct drm_draw draw; unsigned long irqflags; - DRM_COPY_FROM_USER_IOCTL(draw, (drm_draw_t __user *) data, + DRM_COPY_FROM_USER_IOCTL(draw, (struct drm_draw __user *) data, sizeof(draw)); spin_lock_irqsave(&dev->drw_lock, irqflags); @@ -99,13 +99,13 @@ int drm_rmdraw(DRM_IOCTL_ARGS) int drm_update_drawable_info(DRM_IOCTL_ARGS) { DRM_DEVICE; - drm_update_draw_t update; + struct drm_update_draw update; unsigned long irqflags; - drm_clip_rect_t *rects; + struct drm_clip_rect *rects; struct drm_drawable_info *info; int err; - DRM_COPY_FROM_USER_IOCTL(update, (drm_update_draw_t __user *) data, + DRM_COPY_FROM_USER_IOCTL(update, (struct drm_update_draw __user *) data, sizeof(update)); info = idr_find(&dev->drw_idr, update.handle); @@ -123,7 +123,7 @@ int drm_update_drawable_info(DRM_IOCTL_ARGS) switch (update.type) { case DRM_DRAWABLE_CLIPRECTS: if (update.num != info->num_rects) { - rects = drm_alloc(update.num * sizeof(drm_clip_rect_t), + rects = drm_alloc(update.num * sizeof(struct drm_clip_rect), DRM_MEM_BUFS); } else rects = info->rects; @@ -135,7 +135,7 @@ int drm_update_drawable_info(DRM_IOCTL_ARGS) } if (update.num && DRM_COPY_FROM_USER(rects, - (drm_clip_rect_t __user *) + (struct drm_clip_rect __user *) (unsigned long)update.data, update.num * sizeof(*rects))) { @@ -148,7 +148,7 @@ int drm_update_drawable_info(DRM_IOCTL_ARGS) if (rects != info->rects) { drm_free(info->rects, info->num_rects * - sizeof(drm_clip_rect_t), DRM_MEM_BUFS); + sizeof(struct drm_clip_rect), DRM_MEM_BUFS); } info->rects = rects; @@ -168,7 +168,7 @@ int drm_update_drawable_info(DRM_IOCTL_ARGS) error: if (rects != info->rects) - drm_free(rects, update.num * sizeof(drm_clip_rect_t), + drm_free(rects, update.num * sizeof(struct drm_clip_rect), DRM_MEM_BUFS); return err; @@ -177,7 +177,7 @@ error: /** * Caller must hold the drawable spinlock! 
*/ -drm_drawable_info_t *drm_get_drawable_info(drm_device_t *dev, drm_drawable_t id) +struct drm_drawable_info *drm_get_drawable_info(drm_device_t *dev, drm_drawable_t id) { return idr_find(&dev->drw_idr, id); } @@ -189,7 +189,7 @@ static int drm_drawable_free(int idr, void *p, void *data) if (info) { drm_free(info->rects, info->num_rects * - sizeof(drm_clip_rect_t), DRM_MEM_BUFS); + sizeof(struct drm_clip_rect), DRM_MEM_BUFS); drm_free(info, sizeof(*info), DRM_MEM_BUFS); } diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index 30200ca4..fd817f88 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -550,8 +550,8 @@ static int drm_version(struct inode *inode, struct file *filp, { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; - drm_version_t __user *argp = (void __user *)arg; - drm_version_t version; + struct drm_version __user *argp = (void __user *)arg; + struct drm_version version; int len; if (copy_from_user(&version, argp, sizeof(version))) diff --git a/linux-core/drm_ioctl.c b/linux-core/drm_ioctl.c index 97df972f..02f70243 100644 --- a/linux-core/drm_ioctl.c +++ b/linux-core/drm_ioctl.c @@ -54,8 +54,8 @@ int drm_getunique(struct inode *inode, struct file *filp, { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; - drm_unique_t __user *argp = (void __user *)arg; - drm_unique_t u; + struct drm_unique __user *argp = (void __user *)arg; + struct drm_unique u; if (copy_from_user(&u, argp, sizeof(u))) return -EFAULT; @@ -88,13 +88,13 @@ int drm_setunique(struct inode *inode, struct file *filp, { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; - drm_unique_t u; + struct drm_unique u; int domain, bus, slot, func, ret; if (dev->unique_len || dev->unique) return -EBUSY; - if (copy_from_user(&u, (drm_unique_t __user *) arg, sizeof(u))) + if (copy_from_user(&u, (struct drm_unique __user *) arg, sizeof(u))) return -EFAULT; if (!u.unique_len || u.unique_len > 1024) @@ -181,9 +181,9 @@ int drm_getmap(struct inode *inode, struct file *filp, { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; - drm_map_t __user *argp = (void __user *)arg; - drm_map_t map; - drm_map_list_t *r_list = NULL; + struct drm_map __user *argp = (void __user *)arg; + struct drm_map map; + struct drm_map_list *r_list = NULL; struct list_head *list; int idx; int i; @@ -242,8 +242,8 @@ int drm_getclient(struct inode *inode, struct file *filp, { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; - drm_client_t __user *argp = (drm_client_t __user *)arg; - drm_client_t client; + struct drm_client __user *argp = (struct drm_client __user *)arg; + struct drm_client client; drm_file_t *pt; int idx; int i; @@ -291,7 +291,7 @@ int drm_getstats(struct inode *inode, struct file *filp, { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; - drm_stats_t stats; + struct drm_stats stats; int i; memset(&stats, 0, sizeof(stats)); @@ -311,7 +311,7 @@ int drm_getstats(struct inode *inode, struct file *filp, mutex_unlock(&dev->struct_mutex); - if (copy_to_user((drm_stats_t __user *) arg, &stats, sizeof(stats))) + if (copy_to_user((struct drm_stats __user *) arg, &stats, sizeof(stats))) return -EFAULT; return 0; } @@ -330,10 +330,10 @@ int drm_getstats(struct inode *inode, struct file *filp, int drm_setversion(DRM_IOCTL_ARGS) { DRM_DEVICE; - drm_set_version_t sv; - drm_set_version_t retv; + struct drm_set_version sv; + struct drm_set_version retv; int if_version; - 
drm_set_version_t __user *argp = (void __user *)data; + struct drm_set_version __user *argp = (void __user *)data; if (copy_from_user(&sv, argp, sizeof(sv))) return -EFAULT; diff --git a/linux-core/drm_irq.c b/linux-core/drm_irq.c index 88716712..2e2c4d9c 100644 --- a/linux-core/drm_irq.c +++ b/linux-core/drm_irq.c @@ -55,8 +55,8 @@ int drm_irq_by_busid(struct inode *inode, struct file *filp, { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; - drm_irq_busid_t __user *argp = (void __user *)arg; - drm_irq_busid_t p; + struct drm_irq_busid __user *argp = (void __user *)arg; + struct drm_irq_busid p; if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) return -EINVAL; @@ -197,11 +197,11 @@ int drm_control(struct inode *inode, struct file *filp, { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; - drm_control_t ctl; + struct drm_control ctl; /* if we haven't irq we fallback for compatibility reasons - this used to be a separate function in drm_dma.h */ - if (copy_from_user(&ctl, (drm_control_t __user *) arg, sizeof(ctl))) + if (copy_from_user(&ctl, (struct drm_control __user *) arg, sizeof(ctl))) return -EFAULT; switch (ctl.func) { @@ -244,8 +244,8 @@ int drm_wait_vblank(DRM_IOCTL_ARGS) { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; - drm_wait_vblank_t __user *argp = (void __user *)data; - drm_wait_vblank_t vblwait; + union drm_wait_vblank __user *argp = (void __user *)data; + union drm_wait_vblank vblwait; struct timeval now; int ret = 0; unsigned int flags, seq; diff --git a/linux-core/drm_lock.c b/linux-core/drm_lock.c index f02df36b..6d348251 100644 --- a/linux-core/drm_lock.c +++ b/linux-core/drm_lock.c @@ -54,12 +54,12 @@ int drm_lock(struct inode *inode, struct file *filp, drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; DECLARE_WAITQUEUE(entry, current); - drm_lock_t lock; + struct drm_lock lock; int ret = 0; ++priv->lock_count; - if (copy_from_user(&lock, (drm_lock_t __user *) arg, sizeof(lock))) + if (copy_from_user(&lock, (struct drm_lock __user *) arg, sizeof(lock))) return -EFAULT; if (lock.context == DRM_KERNEL_CONTEXT) { @@ -154,10 +154,10 @@ int drm_unlock(struct inode *inode, struct file *filp, { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; - drm_lock_t lock; + struct drm_lock lock; unsigned long irqflags; - if (copy_from_user(&lock, (drm_lock_t __user *) arg, sizeof(lock))) + if (copy_from_user(&lock, (struct drm_lock __user *) arg, sizeof(lock))) return -EFAULT; if (lock.context == DRM_KERNEL_CONTEXT) { diff --git a/linux-core/drm_proc.c b/linux-core/drm_proc.c index e93a0406..e59f2afa 100644 --- a/linux-core/drm_proc.c +++ b/linux-core/drm_proc.c @@ -209,8 +209,8 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request, { drm_device_t *dev = (drm_device_t *) data; int len = 0; - drm_map_t *map; - drm_map_list_t *r_list; + struct drm_map *map; + struct drm_map_list *r_list; /* Hardcoded from _DRM_FRAME_BUFFER, _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, diff --git a/linux-core/drm_scatter.c b/linux-core/drm_scatter.c index c0d6db24..138ae087 100644 --- a/linux-core/drm_scatter.c +++ b/linux-core/drm_scatter.c @@ -36,7 +36,7 @@ #define DEBUG_SCATTER 0 -void drm_sg_cleanup(drm_sg_mem_t * entry) +void drm_sg_cleanup(struct drm_sg_mem *entry) { struct page *page; int i; @@ -63,9 +63,9 @@ EXPORT_SYMBOL(drm_sg_cleanup); # define ScatterHandle(x) (unsigned int)(x) #endif -int drm_sg_alloc(drm_device_t * dev, drm_scatter_gather_t * 
request) +int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request) { - drm_sg_mem_t *entry; + struct drm_sg_mem *entry; unsigned long pages, i, j; DRM_DEBUG("%s\n", __FUNCTION__); @@ -191,8 +191,8 @@ int drm_sg_alloc_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { drm_file_t *priv = filp->private_data; - drm_scatter_gather_t __user *argp = (void __user *)arg; - drm_scatter_gather_t request; + struct drm_scatter_gather __user *argp = (void __user *)arg; + struct drm_scatter_gather request; int ret; if (copy_from_user(&request, argp, sizeof(request))) @@ -216,14 +216,14 @@ int drm_sg_free(struct inode *inode, struct file *filp, { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; - drm_scatter_gather_t request; - drm_sg_mem_t *entry; + struct drm_scatter_gather request; + struct drm_sg_mem *entry; if (!drm_core_check_feature(dev, DRIVER_SG)) return -EINVAL; if (copy_from_user(&request, - (drm_scatter_gather_t __user *) arg, + (struct drm_scatter_gather __user *) arg, sizeof(request))) return -EFAULT; diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c index 72d63c10..7451adc5 100644 --- a/linux-core/drm_vm.c +++ b/linux-core/drm_vm.c @@ -87,7 +87,7 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma, { drm_file_t *priv = vma->vm_file->private_data; drm_device_t *dev = priv->head->dev; - drm_map_t *map = NULL; + struct drm_map *map = NULL; drm_map_list_t *r_list; drm_hash_item_t *hash; @@ -172,7 +172,7 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma, static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma, unsigned long address) { - drm_map_t *map = (drm_map_t *) vma->vm_private_data; + struct drm_map *map = (struct drm_map *) vma->vm_private_data; unsigned long offset; unsigned long i; struct page *page; @@ -206,7 +206,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma) drm_file_t *priv = vma->vm_file->private_data; drm_device_t *dev = priv->head->dev; drm_vma_entry_t *pt, *temp; - drm_map_t *map; + struct drm_map *map; drm_map_list_t *r_list; int found_maps = 0; @@ -321,7 +321,7 @@ static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma, static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma, unsigned long address) { - drm_map_t *map = (drm_map_t *) vma->vm_private_data; + struct drm_map *map = (struct drm_map *) vma->vm_private_data; drm_file_t *priv = vma->vm_file->private_data; drm_device_t *dev = priv->head->dev; drm_sg_mem_t *entry = dev->sg; @@ -524,7 +524,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma) return 0; } -unsigned long drm_core_get_map_ofs(drm_map_t * map) +unsigned long drm_core_get_map_ofs(struct drm_map * map) { return map->offset; } @@ -557,9 +557,9 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; - drm_map_t *map = NULL; + struct drm_map *map = NULL; unsigned long offset = 0; - drm_hash_item_t *hash; + struct drm_hash_item *hash; DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n", vma->vm_start, vma->vm_end, vma->vm_pgoff); diff --git a/linux-core/i810_dma.c b/linux-core/i810_dma.c index 49379434..a4e0c390 100644 --- a/linux-core/i810_dma.c +++ b/linux-core/i810_dma.c @@ -346,7 +346,7 @@ static int i810_dma_initialize(drm_device_t * dev, drm_i810_private_t * dev_priv, drm_i810_init_t * init) { - drm_map_list_t 
*r_list; + struct drm_map_list *r_list; memset(dev_priv, 0, sizeof(drm_i810_private_t)); list_for_each_entry(r_list, &dev->maplist, head) { @@ -692,7 +692,7 @@ static void i810_dma_dispatch_clear(drm_device_t * dev, int flags, drm_i810_private_t *dev_priv = dev->dev_private; drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv; int nbox = sarea_priv->nbox; - drm_clip_rect_t *pbox = sarea_priv->boxes; + struct drm_clip_rect *pbox = sarea_priv->boxes; int pitch = dev_priv->pitch; int cpp = 2; int i; @@ -765,7 +765,7 @@ static void i810_dma_dispatch_swap(drm_device_t * dev) drm_i810_private_t *dev_priv = dev->dev_private; drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv; int nbox = sarea_priv->nbox; - drm_clip_rect_t *pbox = sarea_priv->boxes; + struct drm_clip_rect *pbox = sarea_priv->boxes; int pitch = dev_priv->pitch; int cpp = 2; int i; @@ -812,7 +812,7 @@ static void i810_dma_dispatch_vertex(drm_device_t * dev, drm_i810_private_t *dev_priv = dev->dev_private; drm_i810_buf_priv_t *buf_priv = buf->dev_private; drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_clip_rect_t *box = sarea_priv->boxes; + struct drm_clip_rect *box = sarea_priv->boxes; int nbox = sarea_priv->nbox; unsigned long address = (unsigned long)buf->bus_address; unsigned long start = address - dev->agp->base; @@ -1140,7 +1140,7 @@ static int i810_getbuf(struct inode *inode, struct file *filp, unsigned int cmd, DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n", current->pid, retcode, d.granted); - if (copy_to_user((drm_dma_t __user *) arg, &d, sizeof(d))) + if (copy_to_user((void __user *) arg, &d, sizeof(d))) return -EFAULT; sarea_priv->last_dispatch = (int)hw_status[5]; diff --git a/linux-core/i810_drm.h b/linux-core/i810_drm.h index beec4a2a..db59550d 100644 --- a/linux-core/i810_drm.h +++ b/linux-core/i810_drm.h @@ -163,7 +163,7 @@ typedef struct _drm_i810_sarea { unsigned int dirty; unsigned int nbox; - drm_clip_rect_t boxes[I810_NR_SAREA_CLIPRECTS]; + struct drm_clip_rect boxes[I810_NR_SAREA_CLIPRECTS]; /* Maintain an LRU of contiguous regions of texture space. 
If * you think you own a region of texture memory, and it has an diff --git a/linux-core/i810_drv.h b/linux-core/i810_drv.h index 69d79499..dbe9d708 100644 --- a/linux-core/i810_drv.h +++ b/linux-core/i810_drv.h @@ -77,8 +77,8 @@ typedef struct _drm_i810_ring_buffer { } drm_i810_ring_buffer_t; typedef struct drm_i810_private { - drm_map_t *sarea_map; - drm_map_t *mmio_map; + struct drm_map *sarea_map; + struct drm_map *mmio_map; drm_i810_sarea_t *sarea_priv; drm_i810_ring_buffer_t ring; diff --git a/shared-core/drm.h b/shared-core/drm.h index f8912b3b..816a8ced 100644 --- a/shared-core/drm.h +++ b/shared-core/drm.h @@ -920,61 +920,61 @@ typedef struct drm_mm_init_arg { #define DRM_IOW(nr,type) _IOW(DRM_IOCTL_BASE,nr,type) #define DRM_IOWR(nr,type) _IOWR(DRM_IOCTL_BASE,nr,type) -#define DRM_IOCTL_VERSION DRM_IOWR(0x00, drm_version_t) -#define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, drm_unique_t) -#define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, drm_auth_t) -#define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, drm_irq_busid_t) -#define DRM_IOCTL_GET_MAP DRM_IOWR(0x04, drm_map_t) -#define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, drm_client_t) -#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, drm_stats_t) -#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, drm_set_version_t) +#define DRM_IOCTL_VERSION DRM_IOWR(0x00, struct drm_version) +#define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, struct drm_unique) +#define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, struct drm_auth) +#define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, struct drm_irq_busid) +#define DRM_IOCTL_GET_MAP DRM_IOWR(0x04, struct drm_map) +#define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, struct drm_client) +#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats) +#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version) -#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, drm_unique_t) -#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, drm_auth_t) -#define DRM_IOCTL_BLOCK DRM_IOWR(0x12, drm_block_t) -#define DRM_IOCTL_UNBLOCK DRM_IOWR(0x13, drm_block_t) -#define DRM_IOCTL_CONTROL DRM_IOW( 0x14, drm_control_t) -#define DRM_IOCTL_ADD_MAP DRM_IOWR(0x15, drm_map_t) -#define DRM_IOCTL_ADD_BUFS DRM_IOWR(0x16, drm_buf_desc_t) -#define DRM_IOCTL_MARK_BUFS DRM_IOW( 0x17, drm_buf_desc_t) -#define DRM_IOCTL_INFO_BUFS DRM_IOWR(0x18, drm_buf_info_t) -#define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, drm_buf_map_t) -#define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, drm_buf_free_t) +#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique) +#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth) +#define DRM_IOCTL_BLOCK DRM_IOWR(0x12, struct drm_block) +#define DRM_IOCTL_UNBLOCK DRM_IOWR(0x13, struct drm_block) +#define DRM_IOCTL_CONTROL DRM_IOW( 0x14, struct drm_control) +#define DRM_IOCTL_ADD_MAP DRM_IOWR(0x15, struct drm_map) +#define DRM_IOCTL_ADD_BUFS DRM_IOWR(0x16, struct drm_buf_desc) +#define DRM_IOCTL_MARK_BUFS DRM_IOW( 0x17, struct drm_buf_desc) +#define DRM_IOCTL_INFO_BUFS DRM_IOWR(0x18, struct drm_buf_info) +#define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, struct drm_buf_map) +#define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, struct drm_buf_free) -#define DRM_IOCTL_RM_MAP DRM_IOW( 0x1b, drm_map_t) +#define DRM_IOCTL_RM_MAP DRM_IOW( 0x1b, struct drm_map) -#define DRM_IOCTL_SET_SAREA_CTX DRM_IOW( 0x1c, drm_ctx_priv_map_t) -#define DRM_IOCTL_GET_SAREA_CTX DRM_IOWR(0x1d, drm_ctx_priv_map_t) +#define DRM_IOCTL_SET_SAREA_CTX DRM_IOW( 0x1c, struct drm_ctx_priv_map) +#define DRM_IOCTL_GET_SAREA_CTX DRM_IOWR(0x1d, struct drm_ctx_priv_map) -#define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, drm_ctx_t) -#define DRM_IOCTL_RM_CTX 
DRM_IOWR(0x21, drm_ctx_t) -#define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, drm_ctx_t) -#define DRM_IOCTL_GET_CTX DRM_IOWR(0x23, drm_ctx_t) -#define DRM_IOCTL_SWITCH_CTX DRM_IOW( 0x24, drm_ctx_t) -#define DRM_IOCTL_NEW_CTX DRM_IOW( 0x25, drm_ctx_t) -#define DRM_IOCTL_RES_CTX DRM_IOWR(0x26, drm_ctx_res_t) -#define DRM_IOCTL_ADD_DRAW DRM_IOWR(0x27, drm_draw_t) -#define DRM_IOCTL_RM_DRAW DRM_IOWR(0x28, drm_draw_t) -#define DRM_IOCTL_DMA DRM_IOWR(0x29, drm_dma_t) -#define DRM_IOCTL_LOCK DRM_IOW( 0x2a, drm_lock_t) -#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, drm_lock_t) -#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, drm_lock_t) +#define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, struct drm_ctx) +#define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, struct drm_ctx) +#define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, struct drm_ctx) +#define DRM_IOCTL_GET_CTX DRM_IOWR(0x23, struct drm_ctx) +#define DRM_IOCTL_SWITCH_CTX DRM_IOW( 0x24, struct drm_ctx) +#define DRM_IOCTL_NEW_CTX DRM_IOW( 0x25, struct drm_ctx) +#define DRM_IOCTL_RES_CTX DRM_IOWR(0x26, struct drm_ctx_res) +#define DRM_IOCTL_ADD_DRAW DRM_IOWR(0x27, struct drm_draw) +#define DRM_IOCTL_RM_DRAW DRM_IOWR(0x28, struct drm_draw) +#define DRM_IOCTL_DMA DRM_IOWR(0x29, struct drm_dma) +#define DRM_IOCTL_LOCK DRM_IOW( 0x2a, struct drm_lock) +#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, struct drm_lock) +#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, struct drm_lock) #define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30) #define DRM_IOCTL_AGP_RELEASE DRM_IO( 0x31) -#define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, drm_agp_mode_t) -#define DRM_IOCTL_AGP_INFO DRM_IOR( 0x33, drm_agp_info_t) -#define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, drm_agp_buffer_t) -#define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, drm_agp_buffer_t) -#define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, drm_agp_binding_t) -#define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, drm_agp_binding_t) +#define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, struct drm_agp_mode) +#define DRM_IOCTL_AGP_INFO DRM_IOR( 0x33, struct drm_agp_info) +#define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, struct drm_agp_buffer) +#define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, struct drm_agp_buffer) +#define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, struct drm_agp_binding) +#define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, struct drm_agp_binding) -#define DRM_IOCTL_SG_ALLOC DRM_IOW( 0x38, drm_scatter_gather_t) -#define DRM_IOCTL_SG_FREE DRM_IOW( 0x39, drm_scatter_gather_t) +#define DRM_IOCTL_SG_ALLOC DRM_IOW( 0x38, struct drm_scatter_gather) +#define DRM_IOCTL_SG_FREE DRM_IOW( 0x39, struct drm_scatter_gather) -#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, drm_wait_vblank_t) +#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, union drm_wait_vblank) -#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, drm_update_draw_t) +#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw) #define DRM_IOCTL_MM_INIT DRM_IOWR(0xc0, drm_mm_init_arg_t) #define DRM_IOCTL_MM_TAKEDOWN DRM_IOWR(0xc1, drm_mm_type_arg_t) @@ -1016,8 +1016,7 @@ typedef struct drm_mm_init_arg { #define DRM_COMMAND_END 0xA0 /* typedef area */ -#if 1 -/*!defined(__KERNEL__) || defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)*/ +#if !defined(__KERNEL__) || defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__) typedef struct drm_clip_rect drm_clip_rect_t; typedef struct drm_drawable_info drm_drawable_info_t; typedef struct drm_tex_region drm_tex_region_t; diff --git a/shared-core/drm_sarea.h b/shared-core/drm_sarea.h index 43d1114f..34050a6d 100644 --- a/shared-core/drm_sarea.h +++ b/shared-core/drm_sarea.h @@ -50,29 +50,35 @@ #define SAREA_DRAWABLE_CLAIMED_ENTRY 
0x80000000 /** SAREA drawable */ -typedef struct drm_sarea_drawable { +struct drm_sarea_drawable { unsigned int stamp; unsigned int flags; -} drm_sarea_drawable_t; +}; /** SAREA frame */ -typedef struct drm_sarea_frame { +struct drm_sarea_frame { unsigned int x; unsigned int y; unsigned int width; unsigned int height; unsigned int fullscreen; -} drm_sarea_frame_t; +}; /** SAREA */ -typedef struct drm_sarea { +struct drm_sarea { /** first thing is always the DRM locking structure */ - drm_hw_lock_t lock; + struct drm_hw_lock lock; /** \todo Use readers/writer lock for drm_sarea::drawable_lock */ - drm_hw_lock_t drawable_lock; - drm_sarea_drawable_t drawableTable[SAREA_MAX_DRAWABLES]; /**< drawables */ - drm_sarea_frame_t frame; /**< frame */ + struct drm_hw_lock drawable_lock; + struct drm_sarea_drawable drawableTable[SAREA_MAX_DRAWABLES]; /**< drawables */ + struct drm_sarea_frame frame; /**< frame */ drm_context_t dummy_context; -} drm_sarea_t; +}; + +#ifndef __KERNEL__ +typedef struct drm_sarea_drawable drm_sarea_drawable_t; +typedef struct drm_sarea_frame drm_sarea_frame_t; +typedef struct drm_sarea drm_sarea_t; +#endif #endif /* _DRM_SAREA_H_ */ diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c index dbc5f959..f7d3fab4 100644 --- a/shared-core/i915_dma.c +++ b/shared-core/i915_dma.c @@ -398,11 +398,11 @@ static int i915_emit_cmds(drm_device_t * dev, int __user * buffer, int dwords) } static int i915_emit_box(drm_device_t * dev, - drm_clip_rect_t __user * boxes, + struct drm_clip_rect __user * boxes, int i, int DR1, int DR4) { drm_i915_private_t *dev_priv = dev->dev_private; - drm_clip_rect_t box; + struct drm_clip_rect box; RING_LOCALS; if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) { @@ -524,7 +524,7 @@ static int i915_dispatch_batchbuffer(drm_device_t * dev, drm_i915_batchbuffer_t * batch) { drm_i915_private_t *dev_priv = dev->dev_private; - drm_clip_rect_t __user *boxes = batch->cliprects; + struct drm_clip_rect __user *boxes = batch->cliprects; int nbox = batch->num_cliprects; int i = 0, count; RING_LOCALS; @@ -683,7 +683,7 @@ static int i915_batchbuffer(DRM_IOCTL_ARGS) if (batch.num_cliprects && DRM_VERIFYAREA_READ(batch.cliprects, batch.num_cliprects * - sizeof(drm_clip_rect_t))) + sizeof(struct drm_clip_rect))) return DRM_ERR(EFAULT); ret = i915_dispatch_batchbuffer(dev, &batch); @@ -712,7 +712,7 @@ static int i915_cmdbuffer(DRM_IOCTL_ARGS) if (cmdbuf.num_cliprects && DRM_VERIFYAREA_READ(cmdbuf.cliprects, cmdbuf.num_cliprects * - sizeof(drm_clip_rect_t))) { + sizeof(struct drm_clip_rect))) { DRM_ERROR("Fault accessing cliprects\n"); return DRM_ERR(EFAULT); } diff --git a/shared-core/i915_drm.h b/shared-core/i915_drm.h index 1c6ff4d3..3a90df6e 100644 --- a/shared-core/i915_drm.h +++ b/shared-core/i915_drm.h @@ -64,7 +64,7 @@ typedef struct _drm_i915_init { } drm_i915_init_t; typedef struct _drm_i915_sarea { - drm_tex_region_t texList[I915_NR_TEX_REGIONS + 1]; + struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1]; int last_upload; /* last time texture was uploaded */ int last_enqueue; /* last time a buffer was enqueued */ int last_dispatch; /* age of the most recently dispatched buffer */ @@ -194,7 +194,7 @@ typedef struct _drm_i915_batchbuffer { int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */ int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */ int num_cliprects; /* mulitpass with multiple cliprects? 
*/ - drm_clip_rect_t __user *cliprects; /* pointer to userspace cliprects */ + struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */ } drm_i915_batchbuffer_t; /* As above, but pass a pointer to userspace buffer which can be @@ -206,7 +206,7 @@ typedef struct _drm_i915_cmdbuffer { int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */ int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */ int num_cliprects; /* mulitpass with multiple cliprects? */ - drm_clip_rect_t __user *cliprects; /* pointer to userspace cliprects */ + struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */ } drm_i915_cmdbuffer_t; /* Userspace can request & wait on irq's: @@ -283,7 +283,7 @@ typedef struct drm_i915_vblank_pipe { */ typedef struct drm_i915_vblank_swap { drm_drawable_t drawable; - drm_vblank_seq_type_t seqtype; + enum drm_vblank_seq_type seqtype; unsigned int sequence; } drm_i915_vblank_swap_t; diff --git a/shared-core/i915_irq.c b/shared-core/i915_irq.c index 2f6a6b95..eb32e194 100644 --- a/shared-core/i915_irq.c +++ b/shared-core/i915_irq.c @@ -43,7 +43,8 @@ * This function must be called with the drawable spinlock held. */ static void -i915_dispatch_vsync_flip(drm_device_t *dev, drm_drawable_info_t *drw, int pipe) +i915_dispatch_vsync_flip(drm_device_t *dev, struct drm_drawable_info *drw, + int pipe) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv; @@ -67,7 +68,7 @@ i915_dispatch_vsync_flip(drm_device_t *dev, drm_drawable_info_t *drw, int pipe) if (x2 > 0 && y2 > 0) { int i, num_rects = drw->num_rects; - drm_clip_rect_t *rect = drw->rects; + struct drm_clip_rect *rect = drw->rects; for (i = 0; i < num_rects; i++) if (!(rect[i].x1 >= x2 || rect[i].y1 >= y2 || @@ -94,7 +95,7 @@ static void i915_vblank_tasklet(drm_device_t *dev) int nhits, nrects, slice[2], upper[2], lower[2], i, num_pages; unsigned counter[2] = { atomic_read(&dev->vbl_received), atomic_read(&dev->vbl_received2) }; - drm_drawable_info_t *drw; + struct drm_drawable_info *drw; drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv; u32 cpp = dev_priv->cpp, offsets[3]; u32 cmd = (cpp == 4) ? 
(XY_SRC_COPY_BLT_CMD | @@ -139,7 +140,7 @@ static void i915_vblank_tasklet(drm_device_t *dev) list_for_each(hit, &hits) { drm_i915_vbl_swap_t *swap_cmp = list_entry(hit, drm_i915_vbl_swap_t, head); - drm_drawable_info_t *drw_cmp = + struct drm_drawable_info *drw_cmp = drm_get_drawable_info(dev, swap_cmp->drw_id); if (drw_cmp && @@ -198,7 +199,7 @@ static void i915_vblank_tasklet(drm_device_t *dev) list_for_each(hit, &hits) { drm_i915_vbl_swap_t *swap_hit = list_entry(hit, drm_i915_vbl_swap_t, head); - drm_clip_rect_t *rect; + struct drm_clip_rect *rect; int num_rects, pipe, front, back; unsigned short top, bottom; @@ -625,7 +626,7 @@ int i915_vblank_swap(DRM_IOCTL_ARGS) swap.sequence--; if ((curseq - swap.sequence) <= (1<<23)) { - drm_drawable_info_t *drw; + struct drm_drawable_info *drw; LOCK_TEST_WITH_RETURN(dev, filp); diff --git a/shared-core/i915_mem.c b/shared-core/i915_mem.c index 13f19f3a..c66edfab 100644 --- a/shared-core/i915_mem.c +++ b/shared-core/i915_mem.c @@ -47,7 +47,7 @@ static void mark_block(drm_device_t * dev, struct mem_block *p, int in_use) { drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_tex_region_t *list; + struct drm_tex_region *list; unsigned shift, nr; unsigned start; unsigned end; diff --git a/shared-core/mach64_dma.c b/shared-core/mach64_dma.c index 60f55900..47340165 100644 --- a/shared-core/mach64_dma.c +++ b/shared-core/mach64_dma.c @@ -1462,10 +1462,10 @@ int mach64_freelist_put(drm_mach64_private_t * dev_priv, drm_buf_t * copy_buf) /*@{*/ static int mach64_dma_get_buffers(DRMFILE filp, drm_device_t * dev, - drm_dma_t * d) + struct drm_dma * d) { int i; - drm_buf_t *buf; + struct drm_buf *buf; drm_mach64_private_t *dev_priv = dev->dev_private; for (i = d->granted_count; i < d->request_count; i++) { @@ -1495,13 +1495,13 @@ static int mach64_dma_get_buffers(DRMFILE filp, drm_device_t * dev, int mach64_dma_buffers(DRM_IOCTL_ARGS) { DRM_DEVICE; - drm_device_dma_t *dma = dev->dma; - drm_dma_t d; + struct drm_device_dma *dma = dev->dma; + struct drm_dma d; int ret = 0; LOCK_TEST_WITH_RETURN(dev, filp); - DRM_COPY_FROM_USER_IOCTL(d, (drm_dma_t *) data, sizeof(d)); + DRM_COPY_FROM_USER_IOCTL(d, (struct drm_dma *) data, sizeof(d)); /* Please don't send us buffers. */ @@ -1525,7 +1525,7 @@ int mach64_dma_buffers(DRM_IOCTL_ARGS) ret = mach64_dma_get_buffers(filp, dev, &d); } - DRM_COPY_TO_USER_IOCTL((drm_dma_t *) data, d, sizeof(d)); + DRM_COPY_TO_USER_IOCTL((struct drm_dma *) data, d, sizeof(d)); return ret; } diff --git a/shared-core/mach64_drm.h b/shared-core/mach64_drm.h index 083f959d..1f5fd842 100644 --- a/shared-core/mach64_drm.h +++ b/shared-core/mach64_drm.h @@ -130,7 +130,7 @@ typedef struct drm_mach64_sarea { /* The current cliprects, or a subset thereof. */ - drm_clip_rect_t boxes[MACH64_NR_SAREA_CLIPRECTS]; + struct drm_clip_rect boxes[MACH64_NR_SAREA_CLIPRECTS]; unsigned int nbox; /* Counters for client-side throttling of rendering clients. @@ -139,7 +139,7 @@ typedef struct drm_mach64_sarea { /* Texture memory LRU. 
*/ - drm_tex_region_t tex_list[MACH64_NR_TEX_HEAPS][MACH64_NR_TEX_REGIONS + + struct drm_tex_region tex_list[MACH64_NR_TEX_HEAPS][MACH64_NR_TEX_REGIONS + 1]; unsigned int tex_age[MACH64_NR_TEX_HEAPS]; int ctx_owner; diff --git a/shared-core/mach64_state.c b/shared-core/mach64_state.c index 38cefca9..a1047cbd 100644 --- a/shared-core/mach64_state.c +++ b/shared-core/mach64_state.c @@ -86,10 +86,10 @@ static void mach64_print_dirty(const char *msg, unsigned int flags) * negative for an error */ static int mach64_emit_cliprect(DRMFILE filp, drm_mach64_private_t * dev_priv, - drm_clip_rect_t * box) + struct drm_clip_rect * box) { u32 sc_left_right, sc_top_bottom; - drm_clip_rect_t scissor; + struct drm_clip_rect scissor; drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_mach64_context_regs_t *regs = &sarea_priv->context_state; DMALOCALS; @@ -222,7 +222,7 @@ static int mach64_dma_dispatch_clear(DRMFILE filp, drm_device_t * dev, drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_mach64_context_regs_t *ctx = &sarea_priv->context_state; int nbox = sarea_priv->nbox; - drm_clip_rect_t *pbox = sarea_priv->boxes; + struct drm_clip_rect *pbox = sarea_priv->boxes; u32 fb_bpp, depth_bpp; int i; DMALOCALS; @@ -360,7 +360,7 @@ static int mach64_dma_dispatch_swap(DRMFILE filp, drm_device_t * dev) drm_mach64_private_t *dev_priv = dev->dev_private; drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv; int nbox = sarea_priv->nbox; - drm_clip_rect_t *pbox = sarea_priv->boxes; + struct drm_clip_rect *pbox = sarea_priv->boxes; u32 fb_bpp; int i; DMALOCALS; diff --git a/shared-core/mga_dma.c b/shared-core/mga_dma.c index d48313c7..ea6212fe 100644 --- a/shared-core/mga_dma.c +++ b/shared-core/mga_dma.c @@ -443,11 +443,11 @@ static int mga_do_agp_dma_bootstrap(drm_device_t * dev, const unsigned secondary_size = dma_bs->secondary_bin_count * dma_bs->secondary_bin_size; const unsigned agp_size = (dma_bs->agp_size << 20); - drm_buf_desc_t req; - drm_agp_mode_t mode; - drm_agp_info_t info; - drm_agp_buffer_t agp_req; - drm_agp_binding_t bind_req; + struct drm_buf_desc req; + struct drm_agp_mode mode; + struct drm_agp_info info; + struct drm_agp_buffer agp_req; + struct drm_agp_binding bind_req; /* Acquire AGP. 
*/ err = drm_agp_acquire(dev); @@ -611,7 +611,7 @@ static int mga_do_pci_dma_bootstrap(drm_device_t * dev, unsigned int primary_size; unsigned int bin_count; int err; - drm_buf_desc_t req; + struct drm_buf_desc req; if (dev->dma == NULL) { @@ -967,8 +967,8 @@ static int mga_do_cleanup_dma(drm_device_t * dev, int full_cleanup) if (dev_priv->used_new_dma_init) { if (dev_priv->agp_handle != 0) { - drm_agp_binding_t unbind_req; - drm_agp_buffer_t free_req; + struct drm_agp_binding unbind_req; + struct drm_agp_buffer free_req; unbind_req.handle = dev_priv->agp_handle; drm_agp_unbind(dev, &unbind_req); @@ -1043,11 +1043,11 @@ int mga_dma_flush(DRM_IOCTL_ARGS) { DRM_DEVICE; drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; - drm_lock_t lock; + struct drm_lock lock; LOCK_TEST_WITH_RETURN(dev, filp); - DRM_COPY_FROM_USER_IOCTL(lock, (drm_lock_t __user *) data, + DRM_COPY_FROM_USER_IOCTL(lock, (struct drm_lock __user *) data, sizeof(lock)); DRM_DEBUG("%s%s%s\n", @@ -1089,7 +1089,7 @@ int mga_dma_reset(DRM_IOCTL_ARGS) * DMA buffer management */ -static int mga_dma_get_buffers(DRMFILE filp, drm_device_t * dev, drm_dma_t * d) +static int mga_dma_get_buffers(DRMFILE filp, drm_device_t * dev, struct drm_dma * d) { drm_buf_t *buf; int i; @@ -1118,8 +1118,8 @@ int mga_dma_buffers(DRM_IOCTL_ARGS) DRM_DEVICE; drm_device_dma_t *dma = dev->dma; drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; - drm_dma_t __user *argp = (void __user *)data; - drm_dma_t d; + struct drm_dma __user *argp = (void __user *)data; + struct drm_dma d; int ret = 0; LOCK_TEST_WITH_RETURN(dev, filp); diff --git a/shared-core/mga_drm.h b/shared-core/mga_drm.h index 5bcdbfab..15c2dea2 100644 --- a/shared-core/mga_drm.h +++ b/shared-core/mga_drm.h @@ -181,7 +181,7 @@ typedef struct _drm_mga_sarea { /* The current cliprects, or a subset thereof. */ - drm_clip_rect_t boxes[MGA_NR_SAREA_CLIPRECTS]; + struct drm_clip_rect boxes[MGA_NR_SAREA_CLIPRECTS]; unsigned int nbox; /* Information about the most recently used 3d drawable. The @@ -202,7 +202,7 @@ typedef struct _drm_mga_sarea { unsigned int exported_nback; int exported_back_x, exported_front_x, exported_w; int exported_back_y, exported_front_y, exported_h; - drm_clip_rect_t exported_boxes[MGA_NR_SAREA_CLIPRECTS]; + struct drm_clip_rect exported_boxes[MGA_NR_SAREA_CLIPRECTS]; /* Counters for aging textures and for client-side throttling. */ @@ -216,7 +216,7 @@ typedef struct _drm_mga_sarea { /* LRU lists for texture memory in agp space and on the card. */ - drm_tex_region_t texList[MGA_NR_TEX_HEAPS][MGA_NR_TEX_REGIONS + 1]; + struct drm_tex_region texList[MGA_NR_TEX_HEAPS][MGA_NR_TEX_REGIONS + 1]; unsigned int texAge[MGA_NR_TEX_HEAPS]; /* Mechanism to validate card state. 
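The mga_dma.c hunks above move the AGP bootstrap and cleanup paths onto the struct spellings of the request types. As a hedged sketch of how those request structs are filled in-kernel (the handle field and the drm_agp_unbind() call are taken from the visible cleanup hunk; surrounding error handling is omitted):

	struct drm_agp_binding unbind_req;

	/* Tell the AGP layer which previously bound allocation to release. */
	unbind_req.handle = dev_priv->agp_handle;
	drm_agp_unbind(dev, &unbind_req);

The matching struct drm_agp_buffer free request declared alongside it is used the same way once the memory has been unbound.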
diff --git a/shared-core/mga_state.c b/shared-core/mga_state.c index 8e5cb334..527f6ce7 100644 --- a/shared-core/mga_state.c +++ b/shared-core/mga_state.c @@ -43,7 +43,7 @@ */ static void mga_emit_clip_rect(drm_mga_private_t * dev_priv, - drm_clip_rect_t * box) + struct drm_clip_rect * box) { drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_mga_context_regs_t *ctx = &sarea_priv->context_state; @@ -509,7 +509,7 @@ static void mga_dma_dispatch_clear(drm_device_t * dev, drm_mga_clear_t * clear) drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_mga_context_regs_t *ctx = &sarea_priv->context_state; - drm_clip_rect_t *pbox = sarea_priv->boxes; + struct drm_clip_rect *pbox = sarea_priv->boxes; int nbox = sarea_priv->nbox; int i; DMA_LOCALS; @@ -525,7 +525,7 @@ static void mga_dma_dispatch_clear(drm_device_t * dev, drm_mga_clear_t * clear) ADVANCE_DMA(); for (i = 0; i < nbox; i++) { - drm_clip_rect_t *box = &pbox[i]; + struct drm_clip_rect *box = &pbox[i]; u32 height = box->y2 - box->y1; DRM_DEBUG(" from=%d,%d to=%d,%d\n", @@ -599,7 +599,7 @@ static void mga_dma_dispatch_swap(drm_device_t * dev) drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_mga_context_regs_t *ctx = &sarea_priv->context_state; - drm_clip_rect_t *pbox = sarea_priv->boxes; + struct drm_clip_rect *pbox = sarea_priv->boxes; int nbox = sarea_priv->nbox; int i; DMA_LOCALS; @@ -626,7 +626,7 @@ static void mga_dma_dispatch_swap(drm_device_t * dev) MGA_DWGCTL, MGA_DWGCTL_COPY); for (i = 0; i < nbox; i++) { - drm_clip_rect_t *box = &pbox[i]; + struct drm_clip_rect *box = &pbox[i]; u32 height = box->y2 - box->y1; u32 start = box->y1 * dev_priv->front_pitch; @@ -805,7 +805,7 @@ static void mga_dma_dispatch_blit(drm_device_t * dev, drm_mga_blit_t * blit) drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_mga_context_regs_t *ctx = &sarea_priv->context_state; - drm_clip_rect_t *pbox = sarea_priv->boxes; + struct drm_clip_rect *pbox = sarea_priv->boxes; int nbox = sarea_priv->nbox; u32 scandir = 0, i; DMA_LOCALS; diff --git a/shared-core/nouveau_drm.h b/shared-core/nouveau_drm.h index b39a7932..4016f004 100644 --- a/shared-core/nouveau_drm.h +++ b/shared-core/nouveau_drm.h @@ -137,7 +137,7 @@ enum nouveau_bus_type { struct drm_nouveau_sarea { /* the cliprects */ - drm_clip_rect_t boxes[NOUVEAU_MAX_SAREA_CLIPRECTS]; + struct drm_clip_rect boxes[NOUVEAU_MAX_SAREA_CLIPRECTS]; unsigned int nbox; }; diff --git a/shared-core/nouveau_mem.c b/shared-core/nouveau_mem.c index f09bcea7..ef9df359 100644 --- a/shared-core/nouveau_mem.c +++ b/shared-core/nouveau_mem.c @@ -288,7 +288,7 @@ int nouveau_mem_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; uint32_t fb_size; - drm_scatter_gather_t sgreq; + struct drm_scatter_gather sgreq; dev_priv->agp_phys=0; dev_priv->fb_phys=0; sgreq . 
size = 4 << 20; //4MB of PCI scatter-gather zone @@ -298,10 +298,10 @@ int nouveau_mem_init(struct drm_device *dev) if (drm_device_is_agp(dev)) { int err; - drm_agp_info_t info; - drm_agp_mode_t mode; - drm_agp_buffer_t agp_req; - drm_agp_binding_t bind_req; + struct drm_agp_info info; + struct drm_agp_mode mode; + struct drm_agp_buffer agp_req; + struct drm_agp_binding bind_req; err = drm_agp_acquire(dev); if (err) { diff --git a/shared-core/r128_cce.c b/shared-core/r128_cce.c index a2ee18b7..2d650b47 100644 --- a/shared-core/r128_cce.c +++ b/shared-core/r128_cce.c @@ -884,7 +884,7 @@ int r128_wait_ring(drm_r128_private_t * dev_priv, int n) return DRM_ERR(EBUSY); } -static int r128_cce_get_buffers(DRMFILE filp, drm_device_t * dev, drm_dma_t * d) +static int r128_cce_get_buffers(DRMFILE filp, drm_device_t * dev, struct drm_dma * d) { int i; drm_buf_t *buf; @@ -913,8 +913,8 @@ int r128_cce_buffers(DRM_IOCTL_ARGS) DRM_DEVICE; drm_device_dma_t *dma = dev->dma; int ret = 0; - drm_dma_t __user *argp = (void __user *)data; - drm_dma_t d; + struct drm_dma __user *argp = (void __user *)data; + struct drm_dma d; LOCK_TEST_WITH_RETURN(dev, filp); diff --git a/shared-core/r128_drm.h b/shared-core/r128_drm.h index 6e8af313..e94a39c6 100644 --- a/shared-core/r128_drm.h +++ b/shared-core/r128_drm.h @@ -153,7 +153,7 @@ typedef struct drm_r128_sarea { /* The current cliprects, or a subset thereof. */ - drm_clip_rect_t boxes[R128_NR_SAREA_CLIPRECTS]; + struct drm_clip_rect boxes[R128_NR_SAREA_CLIPRECTS]; unsigned int nbox; /* Counters for client-side throttling of rendering clients. @@ -161,7 +161,7 @@ typedef struct drm_r128_sarea { unsigned int last_frame; unsigned int last_dispatch; - drm_tex_region_t tex_list[R128_NR_TEX_HEAPS][R128_NR_TEX_REGIONS + 1]; + struct drm_tex_region tex_list[R128_NR_TEX_HEAPS][R128_NR_TEX_REGIONS + 1]; unsigned int tex_age[R128_NR_TEX_HEAPS]; int ctx_owner; int pfAllowPageFlip; /* number of 3d windows (0,1,2 or more) */ diff --git a/shared-core/r128_state.c b/shared-core/r128_state.c index 17b11e7d..565e0d4f 100644 --- a/shared-core/r128_state.c +++ b/shared-core/r128_state.c @@ -38,7 +38,7 @@ */ static void r128_emit_clip_rects(drm_r128_private_t * dev_priv, - drm_clip_rect_t * boxes, int count) + struct drm_clip_rect * boxes, int count) { u32 aux_sc_cntl = 0x00000000; RING_LOCALS; @@ -358,7 +358,7 @@ static void r128_cce_dispatch_clear(drm_device_t * dev, drm_r128_private_t *dev_priv = dev->dev_private; drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; int nbox = sarea_priv->nbox; - drm_clip_rect_t *pbox = sarea_priv->boxes; + struct drm_clip_rect *pbox = sarea_priv->boxes; unsigned int flags = clear->flags; int i; RING_LOCALS; @@ -463,7 +463,7 @@ static void r128_cce_dispatch_swap(drm_device_t * dev) drm_r128_private_t *dev_priv = dev->dev_private; drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; int nbox = sarea_priv->nbox; - drm_clip_rect_t *pbox = sarea_priv->boxes; + struct drm_clip_rect *pbox = sarea_priv->boxes; int i; RING_LOCALS; DRM_DEBUG("%s\n", __FUNCTION__); diff --git a/shared-core/r300_cmdbuf.c b/shared-core/r300_cmdbuf.c index 0cd5d7e2..ab4f1cae 100644 --- a/shared-core/r300_cmdbuf.c +++ b/shared-core/r300_cmdbuf.c @@ -55,7 +55,7 @@ static const int r300_cliprect_cntl[4] = { static int r300_emit_cliprects(drm_radeon_private_t *dev_priv, drm_radeon_kcmd_buffer_t *cmdbuf, int n) { - drm_clip_rect_t box; + struct drm_clip_rect box; int nr; int i; RING_LOCALS; diff --git a/shared-core/radeon_cp.c b/shared-core/radeon_cp.c index ec2e688b..ba06443f 100644 
--- a/shared-core/radeon_cp.c +++ b/shared-core/radeon_cp.c @@ -2191,7 +2191,7 @@ int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n) } static int radeon_cp_get_buffers(DRMFILE filp, drm_device_t * dev, - drm_dma_t * d) + struct drm_dma * d) { int i; drm_buf_t *buf; @@ -2220,8 +2220,8 @@ int radeon_cp_buffers(DRM_IOCTL_ARGS) DRM_DEVICE; drm_device_dma_t *dma = dev->dma; int ret = 0; - drm_dma_t __user *argp = (void __user *)data; - drm_dma_t d; + struct drm_dma __user *argp = (void __user *)data; + struct drm_dma d; LOCK_TEST_WITH_RETURN(dev, filp); diff --git a/shared-core/radeon_drm.h b/shared-core/radeon_drm.h index 6a57b804..b0ef702b 100644 --- a/shared-core/radeon_drm.h +++ b/shared-core/radeon_drm.h @@ -417,7 +417,7 @@ typedef struct { /* The current cliprects, or a subset thereof. */ - drm_clip_rect_t boxes[RADEON_NR_SAREA_CLIPRECTS]; + struct drm_clip_rect boxes[RADEON_NR_SAREA_CLIPRECTS]; unsigned int nbox; /* Counters for client-side throttling of rendering clients. @@ -426,7 +426,7 @@ typedef struct { unsigned int last_dispatch; unsigned int last_clear; - drm_tex_region_t tex_list[RADEON_NR_TEX_HEAPS][RADEON_NR_TEX_REGIONS + + struct drm_tex_region tex_list[RADEON_NR_TEX_HEAPS][RADEON_NR_TEX_REGIONS + 1]; unsigned int tex_age[RADEON_NR_TEX_HEAPS]; int ctx_owner; @@ -604,7 +604,7 @@ typedef struct drm_radeon_cmd_buffer { int bufsz; char __user *buf; int nbox; - drm_clip_rect_t __user *boxes; + struct drm_clip_rect __user *boxes; } drm_radeon_cmd_buffer_t; typedef struct drm_radeon_tex_image { diff --git a/shared-core/radeon_drv.h b/shared-core/radeon_drv.h index 92a9b65e..03d2e7f2 100644 --- a/shared-core/radeon_drv.h +++ b/shared-core/radeon_drv.h @@ -312,7 +312,7 @@ typedef struct drm_radeon_kcmd_buffer { int bufsz; char *buf; int nbox; - drm_clip_rect_t __user *boxes; + struct drm_clip_rect __user *boxes; } drm_radeon_kcmd_buffer_t; extern int radeon_no_wb; diff --git a/shared-core/radeon_state.c b/shared-core/radeon_state.c index 8ccd0981..882cd323 100644 --- a/shared-core/radeon_state.c +++ b/shared-core/radeon_state.c @@ -421,7 +421,7 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * */ static __inline__ void radeon_emit_clip_rect(drm_radeon_private_t * dev_priv, - drm_clip_rect_t * box) + struct drm_clip_rect * box) { RING_LOCALS; @@ -852,7 +852,7 @@ static void radeon_cp_dispatch_clear(drm_device_t * dev, drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_radeon_depth_clear_t *depth_clear = &dev_priv->depth_clear; int nbox = sarea_priv->nbox; - drm_clip_rect_t *pbox = sarea_priv->boxes; + struct drm_clip_rect *pbox = sarea_priv->boxes; unsigned int flags = clear->flags; u32 rb3d_cntl = 0, rb3d_stencilrefmask = 0; int i; @@ -1340,7 +1340,7 @@ static void radeon_cp_dispatch_swap(drm_device_t * dev) drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; int nbox = sarea_priv->nbox; - drm_clip_rect_t *pbox = sarea_priv->boxes; + struct drm_clip_rect *pbox = sarea_priv->boxes; int i; RING_LOCALS; DRM_DEBUG("\n"); @@ -1415,7 +1415,7 @@ static void radeon_cp_dispatch_swap(drm_device_t * dev) static void radeon_cp_dispatch_flip(drm_device_t * dev) { drm_radeon_private_t *dev_priv = dev->dev_private; - drm_sarea_t *sarea = (drm_sarea_t *) dev_priv->sarea->handle; + struct drm_sarea *sarea = (struct drm_sarea *) dev_priv->sarea->handle; int offset = (dev_priv->sarea_priv->pfCurrentPage == 1) ? 
dev_priv->front_offset : dev_priv->back_offset; RING_LOCALS; @@ -2795,10 +2795,10 @@ static int radeon_emit_packet3_cliprect(drm_device_t *dev, int orig_nbox) { drm_radeon_private_t *dev_priv = dev->dev_private; - drm_clip_rect_t box; + struct drm_clip_rect box; unsigned int cmdsz; int ret; - drm_clip_rect_t __user *boxes = cmdbuf->boxes; + struct drm_clip_rect __user *boxes = cmdbuf->boxes; int i = 0; RING_LOCALS; diff --git a/shared-core/savage_bci.c b/shared-core/savage_bci.c index 9a3ae1f1..7492a38c 100644 --- a/shared-core/savage_bci.c +++ b/shared-core/savage_bci.c @@ -1006,7 +1006,7 @@ static int savage_bci_event_wait(DRM_IOCTL_ARGS) * DMA buffer management */ -static int savage_bci_get_buffers(DRMFILE filp, drm_device_t *dev, drm_dma_t *d) +static int savage_bci_get_buffers(DRMFILE filp, drm_device_t *dev, struct drm_dma *d) { drm_buf_t *buf; int i; @@ -1034,12 +1034,12 @@ int savage_bci_buffers(DRM_IOCTL_ARGS) { DRM_DEVICE; drm_device_dma_t *dma = dev->dma; - drm_dma_t d; + struct drm_dma d; int ret = 0; LOCK_TEST_WITH_RETURN(dev, filp); - DRM_COPY_FROM_USER_IOCTL(d, (drm_dma_t __user *)data, sizeof(d)); + DRM_COPY_FROM_USER_IOCTL(d, (struct drm_dma __user *)data, sizeof(d)); /* Please don't send us buffers. */ @@ -1063,7 +1063,7 @@ int savage_bci_buffers(DRM_IOCTL_ARGS) ret = savage_bci_get_buffers(filp, dev, &d); } - DRM_COPY_TO_USER_IOCTL((drm_dma_t __user *)data, d, sizeof(d)); + DRM_COPY_TO_USER_IOCTL((struct drm_dma __user *)data, d, sizeof(d)); return ret; } diff --git a/shared-core/savage_drm.h b/shared-core/savage_drm.h index 6526c9aa..b960d557 100644 --- a/shared-core/savage_drm.h +++ b/shared-core/savage_drm.h @@ -47,7 +47,7 @@ typedef struct _drm_savage_sarea { /* LRU lists for texture memory in agp space and on the card. */ - drm_tex_region_t texList[SAVAGE_NR_TEX_HEAPS][SAVAGE_NR_TEX_REGIONS+1]; + struct drm_tex_region texList[SAVAGE_NR_TEX_HEAPS][SAVAGE_NR_TEX_REGIONS+1]; unsigned int texAge[SAVAGE_NR_TEX_HEAPS]; /* Mechanism to validate card state. @@ -112,7 +112,7 @@ typedef struct drm_savage_cmdbuf { unsigned int vb_size; /* size of client vertex buffer in bytes */ unsigned int vb_stride; /* stride of vertices in 32bit words */ /* boxes in client's address space */ - drm_clip_rect_t __user *box_addr; + struct drm_clip_rect __user *box_addr; unsigned int nbox; /* number of clipping boxes */ } drm_savage_cmdbuf_t; diff --git a/shared-core/savage_drv.h b/shared-core/savage_drv.h index 88c571e1..8d04d43b 100644 --- a/shared-core/savage_drv.h +++ b/shared-core/savage_drv.h @@ -192,7 +192,7 @@ typedef struct drm_savage_private { /* Err, there is a macro wait_event in include/linux/wait.h. * Avoid unwanted macro expansion. 
*/ void (*emit_clip_rect)(struct drm_savage_private *dev_priv, - const drm_clip_rect_t *pbox); + const struct drm_clip_rect *pbox); void (*dma_flush)(struct drm_savage_private *dev_priv); } drm_savage_private_t; @@ -216,9 +216,9 @@ extern void savage_reclaim_buffers(drm_device_t *dev, DRMFILE filp); /* state functions */ extern void savage_emit_clip_rect_s3d(drm_savage_private_t *dev_priv, - const drm_clip_rect_t *pbox); + const struct drm_clip_rect *pbox); extern void savage_emit_clip_rect_s4(drm_savage_private_t *dev_priv, - const drm_clip_rect_t *pbox); + const struct drm_clip_rect *pbox); #define SAVAGE_FB_SIZE_S3 0x01000000 /* 16MB */ #define SAVAGE_FB_SIZE_S4 0x02000000 /* 32MB */ diff --git a/shared-core/savage_state.c b/shared-core/savage_state.c index acc98f89..54b9169b 100644 --- a/shared-core/savage_state.c +++ b/shared-core/savage_state.c @@ -27,7 +27,7 @@ #include "savage_drv.h" void savage_emit_clip_rect_s3d(drm_savage_private_t *dev_priv, - const drm_clip_rect_t *pbox) + const struct drm_clip_rect *pbox) { uint32_t scstart = dev_priv->state.s3d.new_scstart; uint32_t scend = dev_priv->state.s3d.new_scend; @@ -53,7 +53,7 @@ void savage_emit_clip_rect_s3d(drm_savage_private_t *dev_priv, } void savage_emit_clip_rect_s4(drm_savage_private_t *dev_priv, - const drm_clip_rect_t *pbox) + const struct drm_clip_rect *pbox) { uint32_t drawctrl0 = dev_priv->state.s4.new_drawctrl0; uint32_t drawctrl1 = dev_priv->state.s4.new_drawctrl1; @@ -790,7 +790,7 @@ static int savage_dispatch_clear(drm_savage_private_t *dev_priv, const drm_savage_cmd_header_t *cmd_header, const drm_savage_cmd_header_t *data, unsigned int nbox, - const drm_clip_rect_t *boxes) + const struct drm_clip_rect *boxes) { unsigned int flags = cmd_header->clear0.flags; unsigned int clear_cmd; @@ -860,7 +860,7 @@ static int savage_dispatch_clear(drm_savage_private_t *dev_priv, } static int savage_dispatch_swap(drm_savage_private_t *dev_priv, - unsigned int nbox, const drm_clip_rect_t *boxes) + unsigned int nbox, const struct drm_clip_rect *boxes) { unsigned int swap_cmd; unsigned int i; @@ -895,7 +895,7 @@ static int savage_dispatch_draw(drm_savage_private_t *dev_priv, const unsigned int *vtxbuf, unsigned int vb_size, unsigned int vb_stride, unsigned int nbox, - const drm_clip_rect_t *boxes) + const struct drm_clip_rect *boxes) { unsigned int i, j; int ret; @@ -962,7 +962,7 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS) drm_savage_cmd_header_t *kcmd_addr = NULL; drm_savage_cmd_header_t *first_draw_cmd; unsigned int *kvb_addr = NULL; - drm_clip_rect_t *kbox_addr = NULL; + struct drm_clip_rect *kbox_addr = NULL; unsigned int i, j; int ret = 0; @@ -1018,7 +1018,7 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS) cmdbuf.vb_addr = kvb_addr; } if (cmdbuf.nbox) { - kbox_addr = drm_alloc(cmdbuf.nbox * sizeof(drm_clip_rect_t), + kbox_addr = drm_alloc(cmdbuf.nbox * sizeof(struct drm_clip_rect), DRM_MEM_DRIVER); if (kbox_addr == NULL) { ret = DRM_ERR(ENOMEM); @@ -1026,7 +1026,7 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS) } if (DRM_COPY_FROM_USER(kbox_addr, cmdbuf.box_addr, - cmdbuf.nbox * sizeof(drm_clip_rect_t))) { + cmdbuf.nbox * sizeof(struct drm_clip_rect))) { ret = DRM_ERR(EFAULT); goto done; } @@ -1157,7 +1157,7 @@ done: /* If we didn't need to allocate them, these'll be NULL */ drm_free(kcmd_addr, cmdbuf.size * 8, DRM_MEM_DRIVER); drm_free(kvb_addr, cmdbuf.vb_size, DRM_MEM_DRIVER); - drm_free(kbox_addr, cmdbuf.nbox * sizeof(drm_clip_rect_t), + drm_free(kbox_addr, cmdbuf.nbox * sizeof(struct drm_clip_rect), DRM_MEM_DRIVER); return ret; diff --git 
a/shared-core/via_drm.h b/shared-core/via_drm.h index 635e4637..b15785b3 100644 --- a/shared-core/via_drm.h +++ b/shared-core/via_drm.h @@ -54,7 +54,7 @@ #define VIA_NR_XVMC_LOCKS 5 #define VIA_MAX_CACHELINE_SIZE 64 #define XVMCLOCKPTR(saPriv,lockNo) \ - ((volatile drm_hw_lock_t *)(((((unsigned long) (saPriv)->XvMCLockArea) + \ + ((volatile struct drm_hw_lock *)(((((unsigned long) (saPriv)->XvMCLockArea) + \ (VIA_MAX_CACHELINE_SIZE - 1)) & \ ~(VIA_MAX_CACHELINE_SIZE - 1)) + \ VIA_MAX_CACHELINE_SIZE*(lockNo))) @@ -187,7 +187,7 @@ typedef struct _drm_via_tex_region { typedef struct _drm_via_sarea { unsigned int dirty; unsigned int nbox; - drm_clip_rect_t boxes[VIA_NR_SAREA_CLIPRECTS]; + struct drm_clip_rect boxes[VIA_NR_SAREA_CLIPRECTS]; drm_via_tex_region_t texList[VIA_NR_TEX_REGIONS + 1]; int texAge; /* last time texture was uploaded */ int ctxOwner; /* last context to upload state */ From 1a07256d601a94466b7905680f5b929bf3f2390a Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Mon, 16 Jul 2007 11:30:53 +1000 Subject: [PATCH 115/437] drm: remove ttm userspace typedefs --- linux-core/drm_bo.c | 2 +- linux-core/drm_fence.c | 18 +++++++-------- linux-core/drm_objects.h | 2 +- shared-core/drm.h | 49 ++++++++++++++++++++++------------------ 4 files changed, 38 insertions(+), 33 deletions(-) diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 2ce3dbca..4a80cf39 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1589,7 +1589,7 @@ static int drm_bo_handle_wait(drm_file_t *priv, uint32_t handle, int drm_buffer_object_create(drm_device_t *dev, unsigned long size, - drm_bo_type_t type, + enum drm_bo_type type, uint64_t mask, uint32_t hint, uint32_t page_alignment, diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c index b6f14249..ccd9b19c 100644 --- a/linux-core/drm_fence.c +++ b/linux-core/drm_fence.c @@ -570,7 +570,7 @@ int drm_fence_create_ioctl(DRM_IOCTL_ARGS) DRM_DEVICE; int ret; drm_fence_manager_t *fm = &dev->fm; - drm_fence_arg_t arg; + struct drm_fence_arg arg; drm_fence_object_t *fence; unsigned long flags; ret = 0; @@ -618,7 +618,7 @@ int drm_fence_destroy_ioctl(DRM_IOCTL_ARGS) DRM_DEVICE; int ret; drm_fence_manager_t *fm = &dev->fm; - drm_fence_arg_t arg; + struct drm_fence_arg arg; drm_user_object_t *uo; ret = 0; @@ -646,7 +646,7 @@ int drm_fence_reference_ioctl(DRM_IOCTL_ARGS) DRM_DEVICE; int ret; drm_fence_manager_t *fm = &dev->fm; - drm_fence_arg_t arg; + struct drm_fence_arg arg; drm_fence_object_t *fence; drm_user_object_t *uo; unsigned long flags; @@ -680,7 +680,7 @@ int drm_fence_unreference_ioctl(DRM_IOCTL_ARGS) DRM_DEVICE; int ret; drm_fence_manager_t *fm = &dev->fm; - drm_fence_arg_t arg; + struct drm_fence_arg arg; ret = 0; if (!fm->initialized) { @@ -697,7 +697,7 @@ int drm_fence_signaled_ioctl(DRM_IOCTL_ARGS) DRM_DEVICE; int ret; drm_fence_manager_t *fm = &dev->fm; - drm_fence_arg_t arg; + struct drm_fence_arg arg; drm_fence_object_t *fence; unsigned long flags; ret = 0; @@ -729,7 +729,7 @@ int drm_fence_flush_ioctl(DRM_IOCTL_ARGS) DRM_DEVICE; int ret; drm_fence_manager_t *fm = &dev->fm; - drm_fence_arg_t arg; + struct drm_fence_arg arg; drm_fence_object_t *fence; unsigned long flags; ret = 0; @@ -763,7 +763,7 @@ int drm_fence_wait_ioctl(DRM_IOCTL_ARGS) DRM_DEVICE; int ret; drm_fence_manager_t *fm = &dev->fm; - drm_fence_arg_t arg; + struct drm_fence_arg arg; drm_fence_object_t *fence; unsigned long flags; ret = 0; @@ -799,7 +799,7 @@ int drm_fence_emit_ioctl(DRM_IOCTL_ARGS) DRM_DEVICE; int ret; drm_fence_manager_t *fm = &dev->fm; - 
drm_fence_arg_t arg; + struct drm_fence_arg arg; drm_fence_object_t *fence; unsigned long flags; ret = 0; @@ -834,7 +834,7 @@ int drm_fence_buffers_ioctl(DRM_IOCTL_ARGS) DRM_DEVICE; int ret; drm_fence_manager_t *fm = &dev->fm; - drm_fence_arg_t arg; + struct drm_fence_arg arg; drm_fence_object_t *fence; unsigned long flags; ret = 0; diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index 8b65f90a..4bd9047c 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -337,7 +337,7 @@ typedef struct drm_buffer_object { atomic_t usage; unsigned long buffer_start; - drm_bo_type_t type; + enum drm_bo_type type; unsigned long offset; atomic_t mapped; drm_bo_mem_reg_t mem; diff --git a/shared-core/drm.h b/shared-core/drm.h index 816a8ced..3ab63d5d 100644 --- a/shared-core/drm.h +++ b/shared-core/drm.h @@ -670,7 +670,7 @@ struct drm_set_version { #define DRM_FENCE_TYPE_EXE 0x00000001 -typedef struct drm_fence_arg { +struct drm_fence_arg { unsigned int handle; unsigned int class; unsigned int type; @@ -678,7 +678,7 @@ typedef struct drm_fence_arg { unsigned int signaled; unsigned int pad64; drm_u64_t expand_pad[3]; /*Future expansion */ -} drm_fence_arg_t; +}; /* Buffer permissions, referring to how the GPU uses the buffers. * these translate to fence types used for the buffers. @@ -784,12 +784,12 @@ typedef struct drm_fence_arg { #define DRM_BO_INIT_MINOR 1 -typedef enum { +enum drm_bo_type { drm_bo_type_dc, drm_bo_type_user, drm_bo_type_fake, drm_bo_type_kernel, /* for initial kernel allocations */ -}drm_bo_type_t; +}; struct drm_bo_info_req { drm_u64_t mask; @@ -806,7 +806,7 @@ struct drm_bo_create_req { drm_u64_t buffer_start; unsigned int hint; unsigned int page_alignment; - drm_bo_type_t type; + enum drm_bo_type type; unsigned int pad64; }; @@ -896,18 +896,18 @@ struct drm_bo_op_arg { #define DRM_BO_MEM_TYPES 8 /* For now. 
*/ -typedef struct drm_mm_type_arg { +struct drm_mm_type_arg { unsigned int mem_type; -} drm_mm_type_arg_t; +}; -typedef struct drm_mm_init_arg { +struct drm_mm_init_arg { unsigned int magic; unsigned int major; unsigned int minor; unsigned int mem_type; drm_u64_t p_offset; drm_u64_t p_size; -} drm_mm_init_arg_t; +}; /** * \name Ioctls Definitions @@ -976,20 +976,20 @@ typedef struct drm_mm_init_arg { #define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw) -#define DRM_IOCTL_MM_INIT DRM_IOWR(0xc0, drm_mm_init_arg_t) -#define DRM_IOCTL_MM_TAKEDOWN DRM_IOWR(0xc1, drm_mm_type_arg_t) -#define DRM_IOCTL_MM_LOCK DRM_IOWR(0xc2, drm_mm_type_arg_t) -#define DRM_IOCTL_MM_UNLOCK DRM_IOWR(0xc3, drm_mm_type_arg_t) +#define DRM_IOCTL_MM_INIT DRM_IOWR(0xc0, struct drm_mm_init_arg) +#define DRM_IOCTL_MM_TAKEDOWN DRM_IOWR(0xc1, struct drm_mm_type_arg) +#define DRM_IOCTL_MM_LOCK DRM_IOWR(0xc2, struct drm_mm_type_arg) +#define DRM_IOCTL_MM_UNLOCK DRM_IOWR(0xc3, struct drm_mm_type_arg) -#define DRM_IOCTL_FENCE_CREATE DRM_IOWR(0xc4, drm_fence_arg_t) -#define DRM_IOCTL_FENCE_DESTROY DRM_IOWR(0xc5, drm_fence_arg_t) -#define DRM_IOCTL_FENCE_REFERENCE DRM_IOWR(0xc6, drm_fence_arg_t) -#define DRM_IOCTL_FENCE_UNREFERENCE DRM_IOWR(0xc7, drm_fence_arg_t) -#define DRM_IOCTL_FENCE_SIGNALED DRM_IOWR(0xc8, drm_fence_arg_t) -#define DRM_IOCTL_FENCE_FLUSH DRM_IOWR(0xc9, drm_fence_arg_t) -#define DRM_IOCTL_FENCE_WAIT DRM_IOWR(0xca, drm_fence_arg_t) -#define DRM_IOCTL_FENCE_EMIT DRM_IOWR(0xcb, drm_fence_arg_t) -#define DRM_IOCTL_FENCE_BUFFERS DRM_IOWR(0xcc, drm_fence_arg_t) +#define DRM_IOCTL_FENCE_CREATE DRM_IOWR(0xc4, struct drm_fence_arg) +#define DRM_IOCTL_FENCE_DESTROY DRM_IOWR(0xc5, struct drm_fence_arg) +#define DRM_IOCTL_FENCE_REFERENCE DRM_IOWR(0xc6, struct drm_fence_arg) +#define DRM_IOCTL_FENCE_UNREFERENCE DRM_IOWR(0xc7, struct drm_fence_arg) +#define DRM_IOCTL_FENCE_SIGNALED DRM_IOWR(0xc8, struct drm_fence_arg) +#define DRM_IOCTL_FENCE_FLUSH DRM_IOWR(0xc9, struct drm_fence_arg) +#define DRM_IOCTL_FENCE_WAIT DRM_IOWR(0xca, struct drm_fence_arg) +#define DRM_IOCTL_FENCE_EMIT DRM_IOWR(0xcb, struct drm_fence_arg) +#define DRM_IOCTL_FENCE_BUFFERS DRM_IOWR(0xcc, struct drm_fence_arg) #define DRM_IOCTL_BO_CREATE DRM_IOWR(0xcd, struct drm_bo_create_arg) #define DRM_IOCTL_BO_DESTROY DRM_IOWR(0xce, struct drm_bo_handle_arg) @@ -1057,6 +1057,11 @@ typedef struct drm_agp_binding drm_agp_binding_t; typedef struct drm_agp_info drm_agp_info_t; typedef struct drm_scatter_gather drm_scatter_gather_t; typedef struct drm_set_version drm_set_version_t; + +typedef struct drm_fence_arg drm_fence_arg_t; +typedef struct drm_mm_type_arg drm_mm_type_arg_t; +typedef struct drm_mm_init_arg drm_mm_init_arg_t; +typedef enum drm_bo_type drm_bo_type_t; #endif #endif From 21ee6fbfb8f2219a454458204afc9c5fcd89f9a8 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Mon, 16 Jul 2007 12:32:51 +1000 Subject: [PATCH 116/437] drm: remove drmP.h internal typedefs --- linux-core/ati_pcigart.c | 8 +- linux-core/drmP.h | 236 ++++++++++++++++++------------------ linux-core/drm_agpsupport.c | 92 +++++++------- linux-core/drm_bo.c | 96 +++++++-------- linux-core/drm_bo_move.c | 10 +- linux-core/drm_bufs.c | 88 +++++++------- linux-core/drm_context.c | 36 +++--- linux-core/drm_dma.c | 12 +- linux-core/drm_drawable.c | 4 +- linux-core/drm_drv.c | 34 +++--- linux-core/drm_fence.c | 24 ++-- linux-core/drm_fops.c | 26 ++-- linux-core/drm_ioctl.c | 26 ++-- linux-core/drm_irq.c | 28 ++--- linux-core/drm_lock.c | 20 +-- linux-core/drm_memory.c | 8 +- 
linux-core/drm_mm.c | 66 +++++----- linux-core/drm_object.c | 32 ++--- linux-core/drm_objects.h | 38 +++--- linux-core/drm_pci.c | 6 +- linux-core/drm_proc.c | 36 +++--- linux-core/drm_scatter.c | 6 +- linux-core/drm_sman.c | 12 +- linux-core/drm_stub.c | 20 +-- linux-core/drm_sysfs.c | 4 +- linux-core/drm_vm.c | 68 +++++------ linux-core/i810_dma.c | 136 ++++++++++----------- linux-core/i810_drv.h | 14 +-- linux-core/i915_buffer.c | 10 +- linux-core/i915_fence.c | 10 +- linux-core/mga_drv.c | 4 +- linux-core/nouveau_drv.c | 2 +- linux-core/sis_drv.c | 4 +- linux-core/sis_mm.c | 14 +-- linux-core/via_buffer.c | 8 +- linux-core/via_dmablit.c | 20 +-- linux-core/via_dmablit.h | 2 +- linux-core/via_fence.c | 8 +- linux-core/via_mm.c | 4 +- shared-core/i915_dma.c | 40 +++--- shared-core/i915_drv.h | 46 +++---- shared-core/i915_irq.c | 24 ++-- shared-core/i915_mem.c | 4 +- shared-core/mach64_dma.c | 16 +-- shared-core/mach64_drv.h | 18 +-- shared-core/mach64_irq.c | 10 +- shared-core/mach64_state.c | 10 +- shared-core/mga_dma.c | 42 +++---- shared-core/mga_drv.h | 22 ++-- shared-core/mga_irq.c | 12 +- shared-core/mga_state.c | 24 ++-- shared-core/nouveau_fifo.c | 4 +- shared-core/nouveau_mem.c | 2 +- shared-core/r128_cce.c | 34 +++--- shared-core/r128_drv.h | 20 +-- shared-core/r128_irq.c | 10 +- shared-core/r128_state.c | 56 ++++----- shared-core/r300_cmdbuf.c | 8 +- shared-core/radeon_cp.c | 34 +++--- shared-core/radeon_drv.h | 36 +++--- shared-core/radeon_irq.c | 24 ++-- shared-core/radeon_state.c | 92 +++++++------- shared-core/savage_bci.c | 32 ++--- shared-core/savage_drv.h | 14 +-- shared-core/savage_state.c | 2 +- shared-core/sis_drv.h | 12 +- shared-core/via_dma.c | 12 +- shared-core/via_drv.h | 42 +++---- shared-core/via_irq.c | 12 +- shared-core/via_map.c | 8 +- shared-core/via_verifier.c | 8 +- shared-core/via_verifier.h | 6 +- 72 files changed, 1004 insertions(+), 1004 deletions(-) diff --git a/linux-core/ati_pcigart.c b/linux-core/ati_pcigart.c index 524618a8..7241c2a8 100644 --- a/linux-core/ati_pcigart.c +++ b/linux-core/ati_pcigart.c @@ -81,9 +81,9 @@ static void drm_ati_free_pcigart_table(void *address, int order) free_pages((unsigned long)address, order); } -int drm_ati_pcigart_cleanup(drm_device_t *dev, drm_ati_pcigart_info *gart_info) +int drm_ati_pcigart_cleanup(struct drm_device *dev, struct ati_pcigart_info *gart_info) { - drm_sg_mem_t *entry = dev->sg; + struct drm_sg_mem *entry = dev->sg; unsigned long pages; int i; int order; @@ -132,9 +132,9 @@ int drm_ati_pcigart_cleanup(drm_device_t *dev, drm_ati_pcigart_info *gart_info) } EXPORT_SYMBOL(drm_ati_pcigart_cleanup); -int drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info) +int drm_ati_pcigart_init(struct drm_device *dev, struct ati_pcigart_info *gart_info) { - drm_sg_mem_t *entry = dev->sg; + struct drm_sg_mem *entry = dev->sg; void *address = NULL; unsigned long pages; u32 *pci_gart, page_base, bus_address = 0; diff --git a/linux-core/drmP.h b/linux-core/drmP.h index cf2ed2ed..df7481fe 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -291,22 +291,22 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd, #define DRM_MASTER 0x2 #define DRM_ROOT_ONLY 0x4 -typedef struct drm_ioctl_desc { +struct drm_ioctl_desc { drm_ioctl_t *func; int flags; -} drm_ioctl_desc_t; +}; -typedef struct drm_magic_entry { +struct drm_magic_entry { struct list_head head; struct drm_hash_item hash_item; struct drm_file *priv; -} drm_magic_entry_t; +}; -typedef struct drm_vma_entry { +struct 
drm_vma_entry { struct list_head head; struct vm_area_struct *vma; pid_t pid; -} drm_vma_entry_t; +}; /** * DMA buffer. @@ -340,7 +340,7 @@ typedef struct drm_buf { } drm_buf_t; /** bufs is one longer than it has to be */ -typedef struct drm_waitlist { +struct drm_waitlist { int count; /**< Number of possible buffers */ struct drm_buf **bufs; /**< List of pointers to buffers */ struct drm_buf **rp; /**< Read pointer */ @@ -348,9 +348,9 @@ typedef struct drm_waitlist { struct drm_buf **end; /**< End pointer */ spinlock_t read_lock; spinlock_t write_lock; -} drm_waitlist_t; +}; -typedef struct drm_freelist { +struct drm_freelist { int initialized; /**< Freelist in use */ atomic_t count; /**< Number of free buffers */ struct drm_buf *next; /**< End pointer */ @@ -360,7 +360,7 @@ typedef struct drm_freelist { int high_mark; /**< High water mark */ atomic_t wfh; /**< If waiting for high mark */ spinlock_t lock; -} drm_freelist_t; +}; typedef struct drm_dma_handle { dma_addr_t busaddr; @@ -371,7 +371,7 @@ typedef struct drm_dma_handle { /** * Buffer entry. There is one of this for each buffer size order. */ -typedef struct drm_buf_entry { +struct drm_buf_entry { int buf_size; /**< size */ int buf_count; /**< number of buffers */ struct drm_buf *buflist; /**< buffer list */ @@ -379,7 +379,7 @@ typedef struct drm_buf_entry { int page_order; struct drm_dma_handle **seglist; struct drm_freelist freelist; -} drm_buf_entry_t; +}; /* * This should be small enough to allow the use of kmalloc for hash tables @@ -395,7 +395,7 @@ typedef enum{ /** File private data */ -typedef struct drm_file { +struct drm_file { int authenticated; int master; int minor; @@ -420,10 +420,10 @@ typedef struct drm_file { drm_open_hash_t refd_object_hash[_DRM_NO_REF_TYPES]; void *driver_priv; -} drm_file_t; +}; /** Wait queue */ -typedef struct drm_queue { +struct drm_queue { atomic_t use_count; /**< Outstanding uses (+1) */ atomic_t finalization; /**< Finalization in progress */ atomic_t block_count; /**< Count of processes waiting */ @@ -439,12 +439,12 @@ typedef struct drm_queue { enum drm_ctx_flags flags; /**< Context preserving and 2D-only */ struct drm_waitlist waitlist; /**< Pending buffers */ wait_queue_head_t flush_queue; /**< Processes waiting until flush */ -} drm_queue_t; +}; /** * Lock data. */ -typedef struct drm_lock_data { +struct drm_lock_data { struct drm_hw_lock *hw_lock; /**< Hardware lock */ struct file *filp; /**< File descr of lock holder (0=kernel) */ wait_queue_head_t lock_queue; /**< Queue of blocked processes */ @@ -453,12 +453,12 @@ typedef struct drm_lock_data { uint32_t kernel_waiters; uint32_t user_waiters; int idle_has_lock; -} drm_lock_data_t; +}; /** * DMA data. */ -typedef struct drm_device_dma { +struct drm_device_dma { struct drm_buf_entry bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */ int buf_count; /**< total number of buffers */ @@ -474,25 +474,25 @@ typedef struct drm_device_dma { _DRM_DMA_USE_PCI_RO = 0x08 } flags; -} drm_device_dma_t; +}; /** * AGP memory entry. Stored as a doubly linked list. */ -typedef struct drm_agp_mem { +struct drm_agp_mem { unsigned long handle; /**< handle */ DRM_AGP_MEM *memory; unsigned long bound; /**< address */ int pages; struct list_head head; -} drm_agp_mem_t; +}; /** * AGP data. * * \sa drm_agp_init)() and drm_device::agp. 
*/ -typedef struct drm_agp_head { +struct drm_agp_head { DRM_AGP_KERN agp_info; /**< AGP device information */ struct list_head memory; unsigned long mode; /**< AGP mode */ @@ -505,30 +505,30 @@ typedef struct drm_agp_head { int agp_mtrr; int cant_use_aperture; unsigned long page_mask; -} drm_agp_head_t; +}; /** * Scatter-gather memory. */ -typedef struct drm_sg_mem { +struct drm_sg_mem { unsigned long handle; void *virtual; int pages; struct page **pagelist; dma_addr_t *busaddr; -} drm_sg_mem_t; +}; -typedef struct drm_sigdata { +struct drm_sigdata { int context; struct drm_hw_lock *lock; -} drm_sigdata_t; +}; /* * Generic memory manager structs */ -typedef struct drm_mm_node { +struct drm_mm_node { struct list_head fl_entry; struct list_head ml_entry; int free; @@ -536,42 +536,42 @@ typedef struct drm_mm_node { unsigned long size; struct drm_mm *mm; void *private; -} drm_mm_node_t; +}; -typedef struct drm_mm { +struct drm_mm { struct list_head fl_entry; struct list_head ml_entry; -} drm_mm_t; +}; /** * Mappings list */ -typedef struct drm_map_list { +struct drm_map_list { struct list_head head; /**< list head */ struct drm_hash_item hash; struct drm_map *map; /**< mapping */ drm_u64_t user_token; - drm_mm_node_t *file_offset_node; -} drm_map_list_t; + struct drm_mm_node *file_offset_node; +}; typedef struct drm_map drm_local_map_t; /** * Context handle list */ -typedef struct drm_ctx_list { +struct drm_ctx_list { struct list_head head; /**< list head */ drm_context_t handle; /**< context handle */ struct drm_file *tag; /**< associated fd private data */ -} drm_ctx_list_t; +}; -typedef struct drm_vbl_sig { +struct drm_vbl_sig { struct list_head head; unsigned int sequence; struct siginfo info; struct task_struct *task; -} drm_vbl_sig_t; +}; /* location of GART table */ #define DRM_ATI_GART_MAIN 1 @@ -581,14 +581,14 @@ typedef struct drm_vbl_sig { #define DRM_ATI_GART_PCIE 2 #define DRM_ATI_GART_IGP 3 -typedef struct ati_pcigart_info { +struct ati_pcigart_info { int gart_table_location; int gart_reg_if; void *addr; dma_addr_t bus_addr; drm_local_map_t mapping; int table_size; -} drm_ati_pcigart_info; +}; #include "drm_objects.h" @@ -602,9 +602,9 @@ struct drm_device; struct drm_driver { int (*load) (struct drm_device *, unsigned long flags); int (*firstopen) (struct drm_device *); - int (*open) (struct drm_device *, drm_file_t *); + int (*open) (struct drm_device *, struct drm_file *); void (*preclose) (struct drm_device *, struct file * filp); - void (*postclose) (struct drm_device *, drm_file_t *); + void (*postclose) (struct drm_device *, struct drm_file *); void (*lastclose) (struct drm_device *); int (*unload) (struct drm_device *); int (*dma_ioctl) (DRM_IOCTL_ARGS); @@ -659,7 +659,7 @@ struct drm_driver { /* variables */ u32 driver_features; int dev_priv_size; - drm_ioctl_desc_t *ioctls; + struct drm_ioctl_desc *ioctls; int num_ioctls; struct file_operations fops; struct pci_driver pci_driver; @@ -670,20 +670,20 @@ struct drm_driver { * that may contain multiple heads. Embed one per head of these in the * private drm_device structure. */ -typedef struct drm_head { +struct drm_head { int minor; /**< Minor device number */ struct drm_device *dev; struct proc_dir_entry *dev_root; /**< proc directory entry */ dev_t device; /**< Device number for mknod */ struct class_device *dev_class; -} drm_head_t; +}; /** * DRM device structure. This structure represent a complete card that * may contain multiple heads. 
*/ -typedef struct drm_device { +struct drm_device { char *unique; /**< Unique identifier: e.g., busid */ int unique_len; /**< Length of unique field */ char *devname; /**< For /proc/interrupts */ @@ -725,8 +725,8 @@ typedef struct drm_device { struct list_head maplist; /**< Linked list of regions */ int map_count; /**< Number of mappable regions */ struct drm_open_hash map_hash; /**< User token hash table for maps */ - drm_mm_t offset_manager; /**< User token manager */ - drm_open_hash_t object_hash; /**< User token hash table for objects */ + struct drm_mm offset_manager; /**< User token manager */ + struct drm_open_hash object_hash; /**< User token hash table for objects */ struct address_space *dev_mapping; /**< For unmap_mapping_range() */ struct page *ttm_dummy_page; @@ -814,15 +814,15 @@ typedef struct drm_device { spinlock_t drw_lock; struct idr drw_idr; /*@} */ -} drm_device_t; +}; #if __OS_HAS_AGP -typedef struct drm_agp_ttm_backend { +struct drm_agp_ttm_backend { drm_ttm_backend_t backend; DRM_AGP_MEM *mem; struct agp_bridge_data *bridge; int populated; -} drm_agp_ttm_backend_t; +}; #endif @@ -900,7 +900,7 @@ extern int drm_ioctl(struct inode *inode, struct file *filp, extern long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_lastclose(drm_device_t * dev); +extern int drm_lastclose(struct drm_device *dev); /* Device support (drm_fops.h) */ extern int drm_open(struct inode *inode, struct file *filp); @@ -924,7 +924,7 @@ extern void *drm_calloc(size_t nmemb, size_t size, int area); extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area); extern unsigned long drm_alloc_pages(int order, int area); extern void drm_free_pages(unsigned long address, int order, int area); -extern DRM_AGP_MEM *drm_alloc_agp(drm_device_t *dev, int pages, u32 type); +extern DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type); extern int drm_free_agp(DRM_AGP_MEM * handle, int pages); extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start); extern int drm_unbind_agp(DRM_AGP_MEM * handle); @@ -972,9 +972,9 @@ extern int drm_newctx(struct inode *inode, struct file *filp, extern int drm_rmctx(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_ctxbitmap_init(drm_device_t * dev); -extern void drm_ctxbitmap_cleanup(drm_device_t * dev); -extern void drm_ctxbitmap_free(drm_device_t * dev, int ctx_handle); +extern int drm_ctxbitmap_init(struct drm_device *dev); +extern void drm_ctxbitmap_cleanup(struct drm_device *dev); +extern void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle); extern int drm_setsareactx(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); @@ -988,9 +988,9 @@ extern int drm_rmdraw(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); extern int drm_update_drawable_info(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern struct drm_drawable_info *drm_get_drawable_info(drm_device_t *dev, +extern struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev, drm_drawable_t id); -extern void drm_drawable_free_all(drm_device_t *dev); +extern void drm_drawable_free_all(struct drm_device *dev); /* Authentication IOCTL support (drm_auth.h) */ extern int drm_getmagic(struct inode *inode, struct file *filp, @@ -1003,10 +1003,10 @@ extern int drm_lock(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); extern int drm_unlock(struct inode *inode, 
struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_lock_take(drm_lock_data_t *lock_data, unsigned int context); -extern int drm_lock_free(drm_lock_data_t *lock_data, unsigned int context); -extern void drm_idlelock_take(drm_lock_data_t *lock_data); -extern void drm_idlelock_release(drm_lock_data_t *lock_data); +extern int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context); +extern int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context); +extern void drm_idlelock_take(struct drm_lock_data *lock_data); +extern void drm_idlelock_release(struct drm_lock_data *lock_data); /* * These are exported to drivers so that they can implement fencing using @@ -1017,16 +1017,16 @@ extern int drm_i_have_hw_lock(struct file *filp); extern int drm_kernel_take_hw_lock(struct file *filp); /* Buffer management support (drm_bufs.h) */ -extern int drm_addbufs_agp(drm_device_t * dev, struct drm_buf_desc * request); -extern int drm_addbufs_pci(drm_device_t * dev, struct drm_buf_desc * request); -extern int drm_addbufs_fb (drm_device_t * dev, struct drm_buf_desc * request); -extern int drm_addmap(drm_device_t * dev, unsigned int offset, +extern int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request); +extern int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request); +extern int drm_addbufs_fb (struct drm_device *dev, struct drm_buf_desc * request); +extern int drm_addmap(struct drm_device *dev, unsigned int offset, unsigned int size, enum drm_map_type type, enum drm_map_flags flags, drm_local_map_t ** map_ptr); extern int drm_addmap_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_rmmap(drm_device_t *dev, drm_local_map_t *map); -extern int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map); +extern int drm_rmmap(struct drm_device *dev, drm_local_map_t *map); +extern int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map); extern int drm_rmmap_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); extern int drm_addbufs(struct inode *inode, struct file *filp, @@ -1040,59 +1040,59 @@ extern int drm_freebufs(struct inode *inode, struct file *filp, extern int drm_mapbufs(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); extern int drm_order(unsigned long size); -extern unsigned long drm_get_resource_start(drm_device_t *dev, +extern unsigned long drm_get_resource_start(struct drm_device *dev, unsigned int resource); -extern unsigned long drm_get_resource_len(drm_device_t *dev, +extern unsigned long drm_get_resource_len(struct drm_device *dev, unsigned int resource); -extern drm_map_list_t *drm_find_matching_map(drm_device_t *dev, - drm_local_map_t *map); +extern struct drm_map_list *drm_find_matching_map(struct drm_device *dev, + drm_local_map_t *map); /* DMA support (drm_dma.h) */ -extern int drm_dma_setup(drm_device_t * dev); -extern void drm_dma_takedown(drm_device_t * dev); -extern void drm_free_buffer(drm_device_t * dev, drm_buf_t * buf); -extern void drm_core_reclaim_buffers(drm_device_t *dev, struct file *filp); +extern int drm_dma_setup(struct drm_device *dev); +extern void drm_dma_takedown(struct drm_device *dev); +extern void drm_free_buffer(struct drm_device *dev, drm_buf_t * buf); +extern void drm_core_reclaim_buffers(struct drm_device *dev, struct file *filp); /* IRQ support (drm_irq.h) */ extern int drm_control(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); 
extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS); -extern int drm_irq_uninstall(drm_device_t *dev); -extern void drm_driver_irq_preinstall(drm_device_t * dev); -extern void drm_driver_irq_postinstall(drm_device_t * dev); -extern void drm_driver_irq_uninstall(drm_device_t * dev); +extern int drm_irq_uninstall(struct drm_device *dev); +extern void drm_driver_irq_preinstall(struct drm_device *dev); +extern void drm_driver_irq_postinstall(struct drm_device *dev); +extern void drm_driver_irq_uninstall(struct drm_device *dev); extern int drm_wait_vblank(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_vblank_wait(drm_device_t * dev, unsigned int *vbl_seq); -extern void drm_vbl_send_signals(drm_device_t * dev); -extern void drm_locked_tasklet(drm_device_t *dev, void(*func)(drm_device_t*)); +extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq); +extern void drm_vbl_send_signals(struct drm_device *dev); +extern void drm_locked_tasklet(struct drm_device *dev, void(*func)(struct drm_device*)); /* AGP/GART support (drm_agpsupport.h) */ -extern drm_agp_head_t *drm_agp_init(drm_device_t *dev); -extern int drm_agp_acquire(drm_device_t * dev); +extern struct drm_agp_head *drm_agp_init(struct drm_device *dev); +extern int drm_agp_acquire(struct drm_device *dev); extern int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_agp_release(drm_device_t *dev); +extern int drm_agp_release(struct drm_device *dev); extern int drm_agp_release_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_agp_enable(drm_device_t *dev, struct drm_agp_mode mode); +extern int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode); extern int drm_agp_enable_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_agp_info(drm_device_t * dev, struct drm_agp_info *info); +extern int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info); extern int drm_agp_info_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_agp_alloc(drm_device_t *dev, struct drm_agp_buffer *request); +extern int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request); extern int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_agp_free(drm_device_t *dev, struct drm_agp_buffer *request); +extern int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request); extern int drm_agp_free_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_agp_unbind(drm_device_t *dev, struct drm_agp_binding *request); +extern int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request); extern int drm_agp_unbind_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_agp_bind(drm_device_t *dev, struct drm_agp_binding *request); +extern int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request); extern int drm_agp_bind_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11) @@ -1107,18 +1107,18 @@ extern drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev); /* Stub support (drm_stub.h) */ extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent, struct drm_driver *driver); -extern int 
drm_put_dev(drm_device_t * dev); -extern int drm_put_head(drm_head_t * head); +extern int drm_put_dev(struct drm_device *dev); +extern int drm_put_head(struct drm_head * head); extern unsigned int drm_debug; /* 1 to enable debug output */ extern unsigned int drm_cards_limit; -extern drm_head_t **drm_heads; +extern struct drm_head **drm_heads; extern struct drm_sysfs_class *drm_class; extern struct proc_dir_entry *drm_proc_root; extern drm_local_map_t *drm_getsarea(struct drm_device *dev); /* Proc support (drm_proc.h) */ -extern int drm_proc_init(drm_device_t * dev, +extern int drm_proc_init(struct drm_device *dev, int minor, struct proc_dir_entry *root, struct proc_dir_entry **dev_root); @@ -1127,21 +1127,21 @@ extern int drm_proc_cleanup(int minor, struct proc_dir_entry *dev_root); /* Scatter Gather Support (drm_scatter.h) */ -extern void drm_sg_cleanup(drm_sg_mem_t * entry); +extern void drm_sg_cleanup(struct drm_sg_mem * entry); extern int drm_sg_alloc_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_sg_alloc(drm_device_t *dev, struct drm_scatter_gather * request); +extern int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request); extern int drm_sg_free(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); /* ATI PCIGART support (ati_pcigart.h) */ -extern int drm_ati_pcigart_init(drm_device_t * dev, drm_ati_pcigart_info *gart_info); -extern int drm_ati_pcigart_cleanup(drm_device_t * dev, drm_ati_pcigart_info *gart_info); +extern int drm_ati_pcigart_init(struct drm_device *dev, struct ati_pcigart_info *gart_info); +extern int drm_ati_pcigart_cleanup(struct drm_device *dev, struct ati_pcigart_info *gart_info); -extern drm_dma_handle_t *drm_pci_alloc(drm_device_t * dev, size_t size, +extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size, size_t align, dma_addr_t maxaddr); -extern void __drm_pci_free(drm_device_t * dev, drm_dma_handle_t *dmah); -extern void drm_pci_free(drm_device_t * dev, drm_dma_handle_t *dmah); +extern void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah); +extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah); /* sysfs support (drm_sysfs.c) */ struct drm_sysfs_class; @@ -1149,26 +1149,26 @@ extern struct drm_sysfs_class *drm_sysfs_create(struct module *owner, char *name); extern void drm_sysfs_destroy(struct drm_sysfs_class *cs); extern struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs, - drm_head_t * head); + struct drm_head * head); extern void drm_sysfs_device_remove(struct class_device *class_dev); /* * Basic memory manager support (drm_mm.c) */ -extern drm_mm_node_t * drm_mm_get_block(drm_mm_node_t * parent, unsigned long size, +extern struct drm_mm_node * drm_mm_get_block(struct drm_mm_node * parent, unsigned long size, unsigned alignment); -extern void drm_mm_put_block(drm_mm_node_t *cur); -extern drm_mm_node_t *drm_mm_search_free(const drm_mm_t *mm, unsigned long size, +extern void drm_mm_put_block(struct drm_mm_node *cur); +extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, unsigned long size, unsigned alignment, int best_match); -extern int drm_mm_init(drm_mm_t *mm, unsigned long start, unsigned long size); -extern void drm_mm_takedown(drm_mm_t *mm); -extern int drm_mm_clean(drm_mm_t *mm); -extern unsigned long drm_mm_tail_space(drm_mm_t *mm); -extern int drm_mm_remove_space_from_tail(drm_mm_t *mm, unsigned long size); -extern int drm_mm_add_space_to_tail(drm_mm_t *mm, 
unsigned long size); +extern int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size); +extern void drm_mm_takedown(struct drm_mm *mm); +extern int drm_mm_clean(struct drm_mm *mm); +extern unsigned long drm_mm_tail_space(struct drm_mm *mm); +extern int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size); +extern int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size); -static inline drm_mm_t *drm_get_mm(drm_mm_node_t *block) +static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block) { return block->mm; } @@ -1179,14 +1179,14 @@ extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev); static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev, unsigned int token) { - drm_map_list_t *_entry; + struct drm_map_list *_entry; list_for_each_entry(_entry, &dev->maplist, head) if (_entry->user_token == token) return _entry->map; return NULL; } -static __inline__ int drm_device_is_agp(drm_device_t *dev) +static __inline__ int drm_device_is_agp(struct drm_device *dev) { if ( dev->driver->device_is_agp != NULL ) { int err = (*dev->driver->device_is_agp)( dev ); @@ -1199,7 +1199,7 @@ static __inline__ int drm_device_is_agp(drm_device_t *dev) return pci_find_capability(dev->pdev, PCI_CAP_ID_AGP); } -static __inline__ int drm_device_is_pcie(drm_device_t *dev) +static __inline__ int drm_device_is_pcie(struct drm_device *dev) { return pci_find_capability(dev->pdev, PCI_CAP_ID_EXP); } diff --git a/linux-core/drm_agpsupport.c b/linux-core/drm_agpsupport.c index c037defe..541d95cd 100644 --- a/linux-core/drm_agpsupport.c +++ b/linux-core/drm_agpsupport.c @@ -48,7 +48,7 @@ * Verifies the AGP device has been initialized and acquired and fills in the * drm_agp_info structure with the information in drm_agp_head::agp_info. */ -int drm_agp_info(drm_device_t * dev, struct drm_agp_info *info) +int drm_agp_info(struct drm_device * dev, struct drm_agp_info *info) { DRM_AGP_KERN *kern; @@ -73,8 +73,8 @@ EXPORT_SYMBOL(drm_agp_info); int drm_agp_info_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; struct drm_agp_info info; int err; @@ -96,7 +96,7 @@ int drm_agp_info_ioctl(struct inode *inode, struct file *filp, * Verifies the AGP device hasn't been acquired before and calls * \c agp_backend_acquire. */ -int drm_agp_acquire(drm_device_t * dev) +int drm_agp_acquire(struct drm_device * dev) { #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11) int retcode; @@ -134,9 +134,9 @@ EXPORT_SYMBOL(drm_agp_acquire); int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; + struct drm_file *priv = filp->private_data; - return drm_agp_acquire( (drm_device_t *) priv->head->dev ); + return drm_agp_acquire( (struct drm_device *) priv->head->dev ); } /** @@ -147,7 +147,7 @@ int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp, * * Verifies the AGP device has been acquired and calls \c agp_backend_release. 
*/ -int drm_agp_release(drm_device_t *dev) +int drm_agp_release(struct drm_device *dev) { if (!dev->agp || !dev->agp->acquired) return -EINVAL; @@ -165,8 +165,8 @@ EXPORT_SYMBOL(drm_agp_release); int drm_agp_release_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; return drm_agp_release(dev); } @@ -181,7 +181,7 @@ int drm_agp_release_ioctl(struct inode *inode, struct file *filp, * Verifies the AGP device has been acquired but not enabled, and calls * \c agp_enable. */ -int drm_agp_enable(drm_device_t *dev, struct drm_agp_mode mode) +int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode) { if (!dev->agp || !dev->agp->acquired) return -EINVAL; @@ -201,8 +201,8 @@ EXPORT_SYMBOL(drm_agp_enable); int drm_agp_enable_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; struct drm_agp_mode mode; @@ -224,9 +224,9 @@ int drm_agp_enable_ioctl(struct inode *inode, struct file *filp, * Verifies the AGP device is present and has been acquired, allocates the * memory via alloc_agp() and creates a drm_agp_mem entry for it. */ -int drm_agp_alloc(drm_device_t *dev, struct drm_agp_buffer *request) +int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request) { - drm_agp_mem_t *entry; + struct drm_agp_mem *entry; DRM_AGP_MEM *memory; unsigned long pages; u32 type; @@ -262,8 +262,8 @@ EXPORT_SYMBOL(drm_agp_alloc); int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; struct drm_agp_buffer request; struct drm_agp_buffer __user *argp = (void __user *)arg; int err; @@ -276,7 +276,7 @@ int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp, return err; if (copy_to_user(argp, &request, sizeof(request))) { - drm_agp_mem_t *entry; + struct drm_agp_mem *entry; list_for_each_entry(entry, &dev->agp->memory, head) { if (entry->handle == request.handle) break; @@ -299,10 +299,10 @@ int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp, * * Walks through drm_agp_head::memory until finding a matching handle. */ -static drm_agp_mem_t *drm_agp_lookup_entry(drm_device_t * dev, +static struct drm_agp_mem *drm_agp_lookup_entry(struct drm_device * dev, unsigned long handle) { - drm_agp_mem_t *entry; + struct drm_agp_mem *entry; list_for_each_entry(entry, &dev->agp->memory, head) { if (entry->handle == handle) @@ -323,9 +323,9 @@ static drm_agp_mem_t *drm_agp_lookup_entry(drm_device_t * dev, * Verifies the AGP device is present and acquired, looks-up the AGP memory * entry and passes it to the unbind_agp() function. 
*/ -int drm_agp_unbind(drm_device_t *dev, struct drm_agp_binding *request) +int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request) { - drm_agp_mem_t *entry; + struct drm_agp_mem *entry; int ret; if (!dev->agp || !dev->agp->acquired) @@ -345,8 +345,8 @@ EXPORT_SYMBOL(drm_agp_unbind); int drm_agp_unbind_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; struct drm_agp_binding request; if (copy_from_user @@ -370,9 +370,9 @@ int drm_agp_unbind_ioctl(struct inode *inode, struct file *filp, * is currently bound into the GATT. Looks-up the AGP memory entry and passes * it to bind_agp() function. */ -int drm_agp_bind(drm_device_t *dev, struct drm_agp_binding *request) +int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request) { - drm_agp_mem_t *entry; + struct drm_agp_mem *entry; int retcode; int page; @@ -396,8 +396,8 @@ EXPORT_SYMBOL(drm_agp_bind); int drm_agp_bind_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; struct drm_agp_binding request; if (copy_from_user @@ -422,9 +422,9 @@ int drm_agp_bind_ioctl(struct inode *inode, struct file *filp, * unbind_agp(). Frees it via free_agp() as well as the entry itself * and unlinks from the doubly linked list it's inserted in. */ -int drm_agp_free(drm_device_t *dev, struct drm_agp_buffer *request) +int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request) { - drm_agp_mem_t *entry; + struct drm_agp_mem *entry; if (!dev->agp || !dev->agp->acquired) return -EINVAL; @@ -446,8 +446,8 @@ EXPORT_SYMBOL(drm_agp_free); int drm_agp_free_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; struct drm_agp_buffer request; if (copy_from_user @@ -467,9 +467,9 @@ int drm_agp_free_ioctl(struct inode *inode, struct file *filp, * via the inter_module_* functions. Creates and initializes a drm_agp_head * structure. 
*/ -drm_agp_head_t *drm_agp_init(drm_device_t *dev) +struct drm_agp_head *drm_agp_init(struct drm_device *dev) { - drm_agp_head_t *head = NULL; + struct drm_agp_head *head = NULL; if (!(head = drm_alloc(sizeof(*head), DRM_MEM_AGPLISTS))) return NULL; @@ -559,11 +559,11 @@ static int drm_agp_needs_unbind_cache_adjust(drm_ttm_backend_t *backend) { } -static int drm_agp_populate(drm_ttm_backend_t *backend, unsigned long num_pages, +static int drm_agp_populate(struct drm_ttm_backend *backend, unsigned long num_pages, struct page **pages) { - drm_agp_ttm_backend_t *agp_be = - container_of(backend, drm_agp_ttm_backend_t, backend); + struct drm_agp_ttm_backend *agp_be = + container_of(backend, struct drm_agp_ttm_backend, backend); struct page **cur_page, **last_page = pages + num_pages; DRM_AGP_MEM *mem; @@ -594,8 +594,8 @@ static int drm_agp_bind_ttm(drm_ttm_backend_t *backend, unsigned long offset, int cached) { - drm_agp_ttm_backend_t *agp_be = - container_of(backend, drm_agp_ttm_backend_t, backend); + struct drm_agp_ttm_backend *agp_be = + container_of(backend, struct drm_agp_ttm_backend, backend); DRM_AGP_MEM *mem = agp_be->mem; int ret; @@ -614,8 +614,8 @@ static int drm_agp_bind_ttm(drm_ttm_backend_t *backend, static int drm_agp_unbind_ttm(drm_ttm_backend_t *backend) { - drm_agp_ttm_backend_t *agp_be = - container_of(backend, drm_agp_ttm_backend_t, backend); + struct drm_agp_ttm_backend *agp_be = + container_of(backend, struct drm_agp_ttm_backend, backend); DRM_DEBUG("drm_agp_unbind_ttm\n"); if (agp_be->mem->is_bound) @@ -626,8 +626,8 @@ static int drm_agp_unbind_ttm(drm_ttm_backend_t *backend) { static void drm_agp_clear_ttm(drm_ttm_backend_t *backend) { - drm_agp_ttm_backend_t *agp_be = - container_of(backend, drm_agp_ttm_backend_t, backend); + struct drm_agp_ttm_backend *agp_be = + container_of(backend, struct drm_agp_ttm_backend, backend); DRM_AGP_MEM *mem = agp_be->mem; DRM_DEBUG("drm_agp_clear_ttm\n"); @@ -642,11 +642,11 @@ static void drm_agp_clear_ttm(drm_ttm_backend_t *backend) { static void drm_agp_destroy_ttm(drm_ttm_backend_t *backend) { - drm_agp_ttm_backend_t *agp_be; + struct drm_agp_ttm_backend *agp_be; if (backend) { DRM_DEBUG("drm_agp_destroy_ttm\n"); - agp_be = container_of(backend, drm_agp_ttm_backend_t, backend); + agp_be = container_of(backend, struct drm_agp_ttm_backend, backend); if (agp_be) { if (agp_be->mem) { backend->func->clear(backend); @@ -666,10 +666,10 @@ static drm_ttm_backend_func_t agp_ttm_backend = .destroy = drm_agp_destroy_ttm, }; -drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev) +struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev) { - drm_agp_ttm_backend_t *agp_be; + struct drm_agp_ttm_backend *agp_be; struct agp_kern_info *info; if (!dev->agp) { diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 4a80cf39..10d928ea 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -135,7 +135,7 @@ static void drm_bo_vm_post_move(drm_buffer_object_t * bo) static int drm_bo_add_ttm(drm_buffer_object_t * bo) { - drm_device_t *dev = bo->dev; + struct drm_device *dev = bo->dev; int ret = 0; bo->ttm = NULL; @@ -168,7 +168,7 @@ static int drm_bo_handle_move_mem(drm_buffer_object_t * bo, drm_bo_mem_reg_t * mem, int evict, int no_wait) { - drm_device_t *dev = bo->dev; + struct drm_device *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem); int new_is_pci = drm_mem_reg_is_pci(dev, mem); @@ -294,7 +294,7 @@ int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals, 
static int drm_bo_expire_fence(drm_buffer_object_t * bo, int allow_errors) { - drm_device_t *dev = bo->dev; + struct drm_device *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; if (bo->fence) { @@ -329,7 +329,7 @@ static int drm_bo_expire_fence(drm_buffer_object_t * bo, int allow_errors) static void drm_bo_cleanup_refs(drm_buffer_object_t * bo, int remove_all) { - drm_device_t *dev = bo->dev; + struct drm_device *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; DRM_ASSERT_LOCKED(&dev->struct_mutex); @@ -391,7 +391,7 @@ static void drm_bo_cleanup_refs(drm_buffer_object_t * bo, int remove_all) static void drm_bo_destroy_locked(drm_buffer_object_t * bo) { - drm_device_t *dev = bo->dev; + struct drm_device *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; DRM_ASSERT_LOCKED(&dev->struct_mutex); @@ -438,7 +438,7 @@ static void drm_bo_destroy_locked(drm_buffer_object_t * bo) * Call dev->struct_mutex locked. */ -static void drm_bo_delayed_delete(drm_device_t * dev, int remove_all) +static void drm_bo_delayed_delete(struct drm_device * dev, int remove_all) { drm_buffer_manager_t *bm = &dev->bm; @@ -470,12 +470,12 @@ static void drm_bo_delayed_workqueue(struct work_struct *work) #endif { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; drm_buffer_manager_t *bm = &dev->bm; #else drm_buffer_manager_t *bm = container_of(work, drm_buffer_manager_t, wq.work); - drm_device_t *dev = container_of(bm, drm_device_t, bm); + struct drm_device *dev = container_of(bm, struct drm_device, bm); #endif DRM_DEBUG("Delayed delete Worker\n"); @@ -505,7 +505,7 @@ void drm_bo_usage_deref_locked(drm_buffer_object_t ** bo) } } -static void drm_bo_base_deref_locked(drm_file_t * priv, drm_user_object_t * uo) +static void drm_bo_base_deref_locked(struct drm_file * priv, drm_user_object_t * uo) { drm_buffer_object_t *bo = drm_user_object_entry(uo, drm_buffer_object_t, base); @@ -519,7 +519,7 @@ static void drm_bo_base_deref_locked(drm_file_t * priv, drm_user_object_t * uo) static void drm_bo_usage_deref_unlocked(drm_buffer_object_t ** bo) { struct drm_buffer_object *tmp_bo = *bo; - drm_device_t *dev = tmp_bo->dev; + struct drm_device *dev = tmp_bo->dev; *bo = NULL; if (atomic_dec_and_test(&tmp_bo->usage)) { @@ -535,13 +535,13 @@ static void drm_bo_usage_deref_unlocked(drm_buffer_object_t ** bo) * and deregister fence object usage. 
*/ -int drm_fence_buffer_objects(drm_file_t * priv, +int drm_fence_buffer_objects(struct drm_file * priv, struct list_head *list, uint32_t fence_flags, drm_fence_object_t * fence, drm_fence_object_t ** used_fence) { - drm_device_t *dev = priv->head->dev; + struct drm_device *dev = priv->head->dev; drm_buffer_manager_t *bm = &dev->bm; drm_buffer_object_t *entry; @@ -639,7 +639,7 @@ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, int no_wait) { int ret = 0; - drm_device_t *dev = bo->dev; + struct drm_device *dev = bo->dev; drm_bo_mem_reg_t evict_mem; /* @@ -705,11 +705,11 @@ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, return ret; } -static int drm_bo_mem_force_space(drm_device_t * dev, +static int drm_bo_mem_force_space(struct drm_device * dev, drm_bo_mem_reg_t * mem, uint32_t mem_type, int no_wait) { - drm_mm_node_t *node; + struct drm_mm_node *node; drm_buffer_manager_t *bm = &dev->bm; drm_buffer_object_t *entry; drm_mem_type_manager_t *man = &bm->man[mem_type]; @@ -794,7 +794,7 @@ static int drm_bo_mt_compatible(drm_mem_type_manager_t * man, int drm_bo_mem_space(drm_buffer_object_t * bo, drm_bo_mem_reg_t * mem, int no_wait) { - drm_device_t *dev = bo->dev; + struct drm_device *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; drm_mem_type_manager_t *man; @@ -806,7 +806,7 @@ int drm_bo_mem_space(drm_buffer_object_t * bo, int type_found = 0; int type_ok = 0; int has_eagain = 0; - drm_mm_node_t *node = NULL; + struct drm_mm_node *node = NULL; int ret; mem->mm_node = NULL; @@ -921,7 +921,7 @@ static int drm_bo_new_mask(drm_buffer_object_t * bo, * Call dev->struct_mutex locked. */ -drm_buffer_object_t *drm_lookup_buffer_object(drm_file_t * priv, +drm_buffer_object_t *drm_lookup_buffer_object(struct drm_file * priv, uint32_t handle, int check_owner) { drm_user_object_t *uo; @@ -1102,12 +1102,12 @@ static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo, * unregistered. */ -static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle, +static int drm_buffer_object_map(struct drm_file * priv, uint32_t handle, uint32_t map_flags, unsigned hint, struct drm_bo_info_rep *rep) { drm_buffer_object_t *bo; - drm_device_t *dev = priv->head->dev; + struct drm_device *dev = priv->head->dev; int ret = 0; int no_wait = hint & DRM_BO_HINT_DONT_BLOCK; @@ -1183,9 +1183,9 @@ static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle, return ret; } -static int drm_buffer_object_unmap(drm_file_t * priv, uint32_t handle) +static int drm_buffer_object_unmap(struct drm_file * priv, uint32_t handle) { - drm_device_t *dev = priv->head->dev; + struct drm_device *dev = priv->head->dev; drm_buffer_object_t *bo; drm_ref_object_t *ro; int ret = 0; @@ -1215,7 +1215,7 @@ static int drm_buffer_object_unmap(drm_file_t * priv, uint32_t handle) * Call struct-sem locked. 
*/ -static void drm_buffer_user_object_unmap(drm_file_t * priv, +static void drm_buffer_user_object_unmap(struct drm_file * priv, drm_user_object_t * uo, drm_ref_t action) { @@ -1241,7 +1241,7 @@ static void drm_buffer_user_object_unmap(drm_file_t * priv, int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, int no_wait, int move_unfenced) { - drm_device_t *dev = bo->dev; + struct drm_device *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; int ret = 0; drm_bo_mem_reg_t mem; @@ -1318,7 +1318,7 @@ static int drm_bo_mem_compat(drm_bo_mem_reg_t * mem) return 1; } -static int drm_bo_check_fake(drm_device_t * dev, drm_bo_mem_reg_t * mem) +static int drm_bo_check_fake(struct drm_device * dev, drm_bo_mem_reg_t * mem) { drm_buffer_manager_t *bm = &dev->bm; drm_mem_type_manager_t *man; @@ -1364,7 +1364,7 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, uint32_t fence_class, int move_unfenced, int no_wait) { - drm_device_t *dev = bo->dev; + struct drm_device *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; drm_bo_driver_t *driver = dev->driver->bo_driver; uint32_t ftype; @@ -1489,7 +1489,7 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, return 0; } -static int drm_bo_handle_validate(drm_file_t * priv, +static int drm_bo_handle_validate(struct drm_file * priv, uint32_t handle, uint32_t fence_class, uint64_t flags, uint64_t mask, uint32_t hint, @@ -1532,7 +1532,7 @@ static int drm_bo_handle_validate(drm_file_t * priv, return ret; } -static int drm_bo_handle_info(drm_file_t *priv, uint32_t handle, +static int drm_bo_handle_info(struct drm_file *priv, uint32_t handle, struct drm_bo_info_rep *rep) { struct drm_device *dev = priv->head->dev; @@ -1554,7 +1554,7 @@ static int drm_bo_handle_info(drm_file_t *priv, uint32_t handle, return 0; } -static int drm_bo_handle_wait(drm_file_t *priv, uint32_t handle, +static int drm_bo_handle_wait(struct drm_file *priv, uint32_t handle, uint32_t hint, struct drm_bo_info_rep *rep) { @@ -1587,7 +1587,7 @@ static int drm_bo_handle_wait(drm_file_t *priv, uint32_t handle, return ret; } -int drm_buffer_object_create(drm_device_t *dev, +int drm_buffer_object_create(struct drm_device *dev, unsigned long size, enum drm_bo_type type, uint64_t mask, @@ -1672,10 +1672,10 @@ int drm_buffer_object_create(drm_device_t *dev, return ret; } -static int drm_bo_add_user_object(drm_file_t * priv, drm_buffer_object_t * bo, +static int drm_bo_add_user_object(struct drm_file * priv, drm_buffer_object_t * bo, int shareable) { - drm_device_t *dev = priv->head->dev; + struct drm_device *dev = priv->head->dev; int ret; mutex_lock(&dev->struct_mutex); @@ -1693,7 +1693,7 @@ static int drm_bo_add_user_object(drm_file_t * priv, drm_buffer_object_t * bo, return ret; } -static int drm_bo_lock_test(drm_device_t * dev, struct file *filp) +static int drm_bo_lock_test(struct drm_device * dev, struct file *filp) { LOCK_TEST_WITH_RETURN(dev, filp); return 0; @@ -1973,7 +1973,7 @@ int drm_bo_wait_idle_ioctl(DRM_IOCTL_ARGS) *Call dev->struct_sem locked. 
*/ -static void drm_bo_clean_unfenced(drm_device_t *dev) +static void drm_bo_clean_unfenced(struct drm_device *dev) { drm_buffer_manager_t *bm = &dev->bm; struct list_head *head, *list; @@ -2003,7 +2003,7 @@ static int drm_bo_leave_list(drm_buffer_object_t * bo, uint32_t mem_type, int free_pinned, int allow_errors) { - drm_device_t *dev = bo->dev; + struct drm_device *dev = bo->dev; int ret = 0; mutex_lock(&bo->mutex); @@ -2063,7 +2063,7 @@ static drm_buffer_object_t *drm_bo_entry(struct list_head *list, * dev->struct_mutex locked. */ -static int drm_bo_force_list_clean(drm_device_t * dev, +static int drm_bo_force_list_clean(struct drm_device * dev, struct list_head *head, unsigned mem_type, int free_pinned, @@ -2128,7 +2128,7 @@ restart: return 0; } -int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type) +int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type) { drm_buffer_manager_t *bm = &dev->bm; drm_mem_type_manager_t *man = &bm->man[mem_type]; @@ -2170,7 +2170,7 @@ int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type) *point since we have the hardware lock. */ -static int drm_bo_lock_mm(drm_device_t * dev, unsigned mem_type) +static int drm_bo_lock_mm(struct drm_device * dev, unsigned mem_type) { int ret; drm_buffer_manager_t *bm = &dev->bm; @@ -2196,7 +2196,7 @@ static int drm_bo_lock_mm(drm_device_t * dev, unsigned mem_type) return ret; } -int drm_bo_init_mm(drm_device_t * dev, +int drm_bo_init_mm(struct drm_device * dev, unsigned type, unsigned long p_offset, unsigned long p_size) { @@ -2245,7 +2245,7 @@ EXPORT_SYMBOL(drm_bo_init_mm); * any clients still running when we set the initialized flag to zero. */ -int drm_bo_driver_finish(drm_device_t * dev) +int drm_bo_driver_finish(struct drm_device * dev) { drm_buffer_manager_t *bm = &dev->bm; int ret = 0; @@ -2296,7 +2296,7 @@ int drm_bo_driver_finish(drm_device_t * dev) return ret; } -int drm_bo_driver_init(drm_device_t * dev) +int drm_bo_driver_init(struct drm_device * dev) { drm_bo_driver_t *driver = dev->driver->bo_driver; drm_buffer_manager_t *bm = &dev->bm; @@ -2492,7 +2492,7 @@ int drm_mm_unlock_ioctl(DRM_IOCTL_ARGS) * buffer object vm functions. */ -int drm_mem_reg_is_pci(drm_device_t * dev, drm_bo_mem_reg_t * mem) +int drm_mem_reg_is_pci(struct drm_device * dev, drm_bo_mem_reg_t * mem) { drm_buffer_manager_t *bm = &dev->bm; drm_mem_type_manager_t *man = &bm->man[mem->mem_type]; @@ -2526,7 +2526,7 @@ EXPORT_SYMBOL(drm_mem_reg_is_pci); * Otherwise returns zero. 
*/ -int drm_bo_pci_offset(drm_device_t * dev, +int drm_bo_pci_offset(struct drm_device * dev, drm_bo_mem_reg_t * mem, unsigned long *bus_base, unsigned long *bus_offset, unsigned long *bus_size) @@ -2557,7 +2557,7 @@ int drm_bo_pci_offset(drm_device_t * dev, void drm_bo_unmap_virtual(drm_buffer_object_t * bo) { - drm_device_t *dev = bo->dev; + struct drm_device *dev = bo->dev; loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT; loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT; @@ -2569,9 +2569,9 @@ void drm_bo_unmap_virtual(drm_buffer_object_t * bo) static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo) { - drm_map_list_t *list = &bo->map_list; + struct drm_map_list *list = &bo->map_list; drm_local_map_t *map; - drm_device_t *dev = bo->dev; + struct drm_device *dev = bo->dev; DRM_ASSERT_LOCKED(&dev->struct_mutex); if (list->user_token) { @@ -2595,9 +2595,9 @@ static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo) static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo) { - drm_map_list_t *list = &bo->map_list; + struct drm_map_list *list = &bo->map_list; drm_local_map_t *map; - drm_device_t *dev = bo->dev; + struct drm_device *dev = bo->dev; DRM_ASSERT_LOCKED(&dev->struct_mutex); list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ); diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c index 8ef2a8ff..1e0d26ce 100644 --- a/linux-core/drm_bo_move.c +++ b/linux-core/drm_bo_move.c @@ -102,7 +102,7 @@ EXPORT_SYMBOL(drm_bo_move_ttm); * Call bo->mutex locked. */ -int drm_mem_reg_ioremap(drm_device_t * dev, drm_bo_mem_reg_t * mem, +int drm_mem_reg_ioremap(struct drm_device * dev, drm_bo_mem_reg_t * mem, void **virtual) { drm_buffer_manager_t *bm = &dev->bm; @@ -137,7 +137,7 @@ int drm_mem_reg_ioremap(drm_device_t * dev, drm_bo_mem_reg_t * mem, * Call bo->mutex locked. 
*/ -void drm_mem_reg_iounmap(drm_device_t * dev, drm_bo_mem_reg_t * mem, +void drm_mem_reg_iounmap(struct drm_device * dev, drm_bo_mem_reg_t * mem, void *virtual) { drm_buffer_manager_t *bm; @@ -203,7 +203,7 @@ static int drm_copy_ttm_io_page(drm_ttm_t * ttm, void *dst, unsigned long page) int drm_bo_move_memcpy(drm_buffer_object_t * bo, int evict, int no_wait, drm_bo_mem_reg_t * new_mem) { - drm_device_t *dev = bo->dev; + struct drm_device *dev = bo->dev; drm_mem_type_manager_t *man = &dev->bm.man[new_mem->mem_type]; drm_ttm_t *ttm = bo->ttm; drm_bo_mem_reg_t *old_mem = &bo->mem; @@ -285,7 +285,7 @@ int drm_buffer_object_transfer(drm_buffer_object_t * bo, drm_buffer_object_t ** new_obj) { drm_buffer_object_t *fbo; - drm_device_t *dev = bo->dev; + struct drm_device *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; fbo = drm_ctl_calloc(1, sizeof(*fbo), DRM_MEM_BUFOBJ); @@ -330,7 +330,7 @@ int drm_bo_move_accel_cleanup(drm_buffer_object_t * bo, uint32_t fence_type, uint32_t fence_flags, drm_bo_mem_reg_t * new_mem) { - drm_device_t *dev = bo->dev; + struct drm_device *dev = bo->dev; drm_mem_type_manager_t *man = &dev->bm.man[new_mem->mem_type]; drm_bo_mem_reg_t *old_mem = &bo->mem; int ret; diff --git a/linux-core/drm_bufs.c b/linux-core/drm_bufs.c index 3f34de0e..75eeafdd 100644 --- a/linux-core/drm_bufs.c +++ b/linux-core/drm_bufs.c @@ -36,21 +36,21 @@ #include #include "drmP.h" -unsigned long drm_get_resource_start(drm_device_t *dev, unsigned int resource) +unsigned long drm_get_resource_start(struct drm_device *dev, unsigned int resource) { return pci_resource_start(dev->pdev, resource); } EXPORT_SYMBOL(drm_get_resource_start); -unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource) +unsigned long drm_get_resource_len(struct drm_device *dev, unsigned int resource) { return pci_resource_len(dev->pdev, resource); } EXPORT_SYMBOL(drm_get_resource_len); -drm_map_list_t *drm_find_matching_map(drm_device_t *dev, drm_local_map_t *map) +struct drm_map_list *drm_find_matching_map(struct drm_device *dev, drm_local_map_t *map) { - drm_map_list_t *entry; + struct drm_map_list *entry; list_for_each_entry(entry, &dev->maplist, head) { if (entry->map && map->type == entry->map->type && ((entry->map->offset == map->offset) || @@ -63,7 +63,7 @@ drm_map_list_t *drm_find_matching_map(drm_device_t *dev, drm_local_map_t *map) } EXPORT_SYMBOL(drm_find_matching_map); -static int drm_map_handle(drm_device_t *dev, drm_hash_item_t *hash, +static int drm_map_handle(struct drm_device *dev, drm_hash_item_t *hash, unsigned long user_token, int hashed_handle) { int use_hashed_handle; @@ -101,7 +101,7 @@ static int drm_map_handle(drm_device_t *dev, drm_hash_item_t *hash, * type. Adds the map to the map list drm_device::maplist. Adds MTRR's where * applicable and if supported by the kernel. 
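drm_mem_reg_ioremap() and drm_mem_reg_iounmap() in the hunk above are a map/unmap pair around CPU access to a buffer region during a move. A hedged caller sketch, assuming an already-filled drm_bo_mem_reg_t and with the actual copy logic omitted:

/* Illustrative only: pair every successful ioremap with an iounmap. */
static int example_cpu_access(struct drm_device *dev, drm_bo_mem_reg_t *mem)
{
	void *virtual;
	int ret;

	ret = drm_mem_reg_ioremap(dev, mem, &virtual);
	if (ret)
		return ret;

	/* ... CPU reads/writes through 'virtual' would go here ... */

	drm_mem_reg_iounmap(dev, mem, virtual);
	return 0;
}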
*/ -static int drm_addmap_core(drm_device_t * dev, unsigned int offset, +static int drm_addmap_core(struct drm_device *dev, unsigned int offset, unsigned int size, enum drm_map_type type, enum drm_map_flags flags, struct drm_map_list **maplist) @@ -213,7 +213,7 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset, } break; case _DRM_AGP: { - drm_agp_mem_t *entry; + struct drm_agp_mem *entry; int valid = 0; if (!drm_core_has_AGP(dev)) { @@ -311,7 +311,7 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset, return 0; } -int drm_addmap(drm_device_t * dev, unsigned int offset, +int drm_addmap(struct drm_device *dev, unsigned int offset, unsigned int size, enum drm_map_type type, enum drm_map_flags flags, drm_local_map_t ** map_ptr) { @@ -329,10 +329,10 @@ EXPORT_SYMBOL(drm_addmap); int drm_addmap_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; struct drm_map map; - drm_map_list_t *maplist; + struct drm_map_list *maplist; struct drm_map __user *argp = (void __user *)arg; int err; @@ -377,9 +377,9 @@ int drm_addmap_ioctl(struct inode *inode, struct file *filp, * * \sa drm_addmap */ -int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map) +int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map) { - drm_map_list_t *r_list = NULL, *list_t; + struct drm_map_list *r_list = NULL, *list_t; drm_dma_handle_t dmah; int found = 0; @@ -434,7 +434,7 @@ int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map) } EXPORT_SYMBOL(drm_rmmap_locked); -int drm_rmmap(drm_device_t *dev, drm_local_map_t *map) +int drm_rmmap(struct drm_device *dev, drm_local_map_t *map) { int ret; @@ -458,11 +458,11 @@ EXPORT_SYMBOL(drm_rmmap); int drm_rmmap_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; struct drm_map request; drm_local_map_t *map = NULL; - drm_map_list_t *r_list; + struct drm_map_list *r_list; int ret; if (copy_from_user(&request, (struct drm_map __user *) arg, sizeof(request))) { @@ -513,7 +513,7 @@ int drm_rmmap_ioctl(struct inode *inode, struct file *filp, * * Frees any pages and buffers associated with the given entry. */ -static void drm_cleanup_buf_error(drm_device_t * dev, struct drm_buf_entry * entry) +static void drm_cleanup_buf_error(struct drm_device *dev, struct drm_buf_entry * entry) { int i; @@ -550,7 +550,7 @@ static void drm_cleanup_buf_error(drm_device_t * dev, struct drm_buf_entry * ent /** * Add AGP buffers for DMA transfers * - * \param dev drm_device_t to which the buffers are to be added. + * \param dev struct drm_device to which the buffers are to be added. * \param request pointer to a struct drm_buf_desc describing the request. * \return zero on success or a negative number on failure. * @@ -558,12 +558,12 @@ static void drm_cleanup_buf_error(drm_device_t * dev, struct drm_buf_entry * ent * reallocates the buffer list of the same size order to accommodate the new * buffers. 
*/ -int drm_addbufs_agp(drm_device_t * dev, struct drm_buf_desc * request) +int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; struct drm_buf_entry *entry; - drm_agp_mem_t *agp_entry; - drm_buf_t *buf; + struct drm_agp_mem *agp_entry; + struct drm_buf *buf; unsigned long offset; unsigned long agp_offset; int count; @@ -728,9 +728,9 @@ int drm_addbufs_agp(drm_device_t * dev, struct drm_buf_desc * request) EXPORT_SYMBOL(drm_addbufs_agp); #endif /* __OS_HAS_AGP */ -int drm_addbufs_pci(drm_device_t * dev, struct drm_buf_desc * request) +int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; int count; int order; int size; @@ -954,9 +954,9 @@ int drm_addbufs_pci(drm_device_t * dev, struct drm_buf_desc * request) } EXPORT_SYMBOL(drm_addbufs_pci); -static int drm_addbufs_sg(drm_device_t * dev, struct drm_buf_desc * request) +static int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc * request) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; struct drm_buf_entry *entry; drm_buf_t *buf; unsigned long offset; @@ -1116,9 +1116,9 @@ static int drm_addbufs_sg(drm_device_t * dev, struct drm_buf_desc * request) return 0; } -int drm_addbufs_fb(drm_device_t * dev, struct drm_buf_desc * request) +int drm_addbufs_fb(struct drm_device *dev, struct drm_buf_desc *request) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; struct drm_buf_entry *entry; drm_buf_t *buf; unsigned long offset; @@ -1296,8 +1296,8 @@ int drm_addbufs(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { struct drm_buf_desc request; - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; int ret; if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) @@ -1348,9 +1348,9 @@ int drm_addbufs(struct inode *inode, struct file *filp, int drm_infobufs(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_device_dma_t *dma = dev->dma; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_device_dma *dma = dev->dma; struct drm_buf_info request; struct drm_buf_info __user *argp = (void __user *)arg; int i; @@ -1386,7 +1386,7 @@ int drm_infobufs(struct inode *inode, struct file *filp, struct drm_buf_desc __user *to = &request.list[count]; struct drm_buf_entry *from = &dma->bufs[i]; - drm_freelist_t *list = &dma->bufs[i].freelist; + struct drm_freelist *list = &dma->bufs[i].freelist; if (copy_to_user(&to->count, &from->buf_count, sizeof(from->buf_count)) || @@ -1436,9 +1436,9 @@ int drm_infobufs(struct inode *inode, struct file *filp, int drm_markbufs(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_device_dma_t *dma = dev->dma; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_device_dma *dma = dev->dma; struct drm_buf_desc request; int order; struct drm_buf_entry *entry; @@ -1486,9 +1486,9 @@ int drm_markbufs(struct inode *inode, struct file *filp, int drm_freebufs(struct inode *inode, struct file *filp, unsigned int cmd, 
unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_device_dma_t *dma = dev->dma; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_device_dma *dma = dev->dma; struct drm_buf_free request; int i; int idx; @@ -1542,9 +1542,9 @@ int drm_freebufs(struct inode *inode, struct file *filp, int drm_mapbufs(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_device_dma_t *dma = dev->dma; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_device_dma *dma = dev->dma; struct drm_buf_map __user *argp = (void __user *)arg; int retcode = 0; const int zero = 0; diff --git a/linux-core/drm_context.c b/linux-core/drm_context.c index 6f066ac4..95d28898 100644 --- a/linux-core/drm_context.c +++ b/linux-core/drm_context.c @@ -56,7 +56,7 @@ * in drm_device::ctx_idr, while holding the drm_device::struct_mutex * lock. */ -void drm_ctxbitmap_free(drm_device_t * dev, int ctx_handle) +void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle) { mutex_lock(&dev->struct_mutex); idr_remove(&dev->ctx_idr, ctx_handle); @@ -72,7 +72,7 @@ void drm_ctxbitmap_free(drm_device_t * dev, int ctx_handle) * Allocate a new idr from drm_device::ctx_idr while holding the * drm_device::struct_mutex lock. */ -static int drm_ctxbitmap_next(drm_device_t * dev) +static int drm_ctxbitmap_next(struct drm_device *dev) { int new_id; int ret; @@ -101,7 +101,7 @@ again: * * Initialise the drm_device::ctx_idr */ -int drm_ctxbitmap_init(drm_device_t * dev) +int drm_ctxbitmap_init(struct drm_device *dev) { idr_init(&dev->ctx_idr); return 0; @@ -115,7 +115,7 @@ int drm_ctxbitmap_init(drm_device_t * dev) * Free all idr members using drm_ctx_sarea_free helper function * while holding the drm_device::struct_mutex lock. */ -void drm_ctxbitmap_cleanup(drm_device_t * dev) +void drm_ctxbitmap_cleanup(struct drm_device *dev) { mutex_lock(&dev->struct_mutex); idr_remove_all(&dev->ctx_idr); @@ -143,8 +143,8 @@ void drm_ctxbitmap_cleanup(drm_device_t * dev) int drm_getsareactx(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; struct drm_ctx_priv_map __user *argp = (void __user *)arg; struct drm_ctx_priv_map request; struct drm_map *map; @@ -194,8 +194,8 @@ int drm_getsareactx(struct inode *inode, struct file *filp, int drm_setsareactx(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; struct drm_ctx_priv_map request; struct drm_map *map = NULL; struct drm_map_list *r_list = NULL; @@ -243,7 +243,7 @@ int drm_setsareactx(struct inode *inode, struct file *filp, * * Attempt to set drm_device::context_flag. 
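The drm_context.c hunks above keep the idr-based context handle scheme intact while switching the device argument to struct drm_device. A rough lifecycle sketch using only the helpers touched by this patch; drm_ctxbitmap_next() is file-local to drm_context.c, so the call is shown purely to illustrate the order of operations:

/* Illustrative lifecycle of a context handle (assumed driver glue, no error paths). */
static void example_ctx_lifecycle(struct drm_device *dev)
{
	int handle;

	drm_ctxbitmap_init(dev);		/* sets up dev->ctx_idr */

	handle = drm_ctxbitmap_next(dev);	/* allocate a fresh handle */
	if (handle >= 0)
		drm_ctxbitmap_free(dev, handle);	/* release it again */

	drm_ctxbitmap_cleanup(dev);		/* drop every remaining idr entry */
}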
*/ -static int drm_context_switch(drm_device_t * dev, int old, int new) +static int drm_context_switch(struct drm_device *dev, int old, int new) { if (test_and_set_bit(0, &dev->context_flag)) { DRM_ERROR("Reentering -- FIXME\n"); @@ -271,7 +271,7 @@ static int drm_context_switch(drm_device_t * dev, int old, int new) * hardware lock is held, clears the drm_device::context_flag and wakes up * drm_device::context_wait. */ -static int drm_context_switch_complete(drm_device_t * dev, int new) +static int drm_context_switch_complete(struct drm_device *dev, int new) { dev->last_context = new; /* PRE/POST: This is the _only_ writer. */ dev->last_switch = jiffies; @@ -338,7 +338,7 @@ int drm_resctx(struct inode *inode, struct file *filp, int drm_addctx(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; + struct drm_file *priv = filp->private_data; struct drm_device *dev = priv->head->dev; struct drm_ctx_list *ctx_entry; struct drm_ctx __user *argp = (void __user *)arg; @@ -434,8 +434,8 @@ int drm_getctx(struct inode *inode, struct file *filp, int drm_switchctx(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; struct drm_ctx ctx; if (copy_from_user(&ctx, (struct drm_ctx __user *) arg, sizeof(ctx))) @@ -459,8 +459,8 @@ int drm_switchctx(struct inode *inode, struct file *filp, int drm_newctx(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; struct drm_ctx ctx; if (copy_from_user(&ctx, (struct drm_ctx __user *) arg, sizeof(ctx))) @@ -486,8 +486,8 @@ int drm_newctx(struct inode *inode, struct file *filp, int drm_rmctx(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; struct drm_ctx ctx; if (copy_from_user(&ctx, (struct drm_ctx __user *) arg, sizeof(ctx))) @@ -505,7 +505,7 @@ int drm_rmctx(struct inode *inode, struct file *filp, mutex_lock(&dev->ctxlist_mutex); if (!list_empty(&dev->ctxlist)) { - drm_ctx_list_t *pos, *n; + struct drm_ctx_list *pos, *n; list_for_each_entry_safe(pos, n, &dev->ctxlist, head) { if (pos->handle == ctx.handle) { diff --git a/linux-core/drm_dma.c b/linux-core/drm_dma.c index a7eee1a4..6990f8d4 100644 --- a/linux-core/drm_dma.c +++ b/linux-core/drm_dma.c @@ -43,7 +43,7 @@ * * Allocate and initialize a drm_device_dma structure. */ -int drm_dma_setup(drm_device_t * dev) +int drm_dma_setup(struct drm_device * dev) { int i; @@ -67,9 +67,9 @@ int drm_dma_setup(drm_device_t * dev) * Free all pages associated with DMA buffers, the buffers and pages lists, and * finally the the drm_device::dma structure itself. */ -void drm_dma_takedown(drm_device_t * dev) +void drm_dma_takedown(struct drm_device * dev) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; int i, j; if (!dma) @@ -129,7 +129,7 @@ void drm_dma_takedown(drm_device_t * dev) * * Resets the fields of \p buf. 
*/ -void drm_free_buffer(drm_device_t * dev, drm_buf_t * buf) +void drm_free_buffer(struct drm_device * dev, drm_buf_t * buf) { if (!buf) return; @@ -152,9 +152,9 @@ void drm_free_buffer(drm_device_t * dev, drm_buf_t * buf) * * Frees each buffer associated with \p filp not already on the hardware. */ -void drm_core_reclaim_buffers(drm_device_t *dev, struct file *filp) +void drm_core_reclaim_buffers(struct drm_device *dev, struct file *filp) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; int i; if (!dma) diff --git a/linux-core/drm_drawable.c b/linux-core/drm_drawable.c index 5a2a14f9..d6cdba56 100644 --- a/linux-core/drm_drawable.c +++ b/linux-core/drm_drawable.c @@ -177,7 +177,7 @@ error: /** * Caller must hold the drawable spinlock! */ -struct drm_drawable_info *drm_get_drawable_info(drm_device_t *dev, drm_drawable_t id) +struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev, drm_drawable_t id) { return idr_find(&dev->drw_idr, id); } @@ -196,7 +196,7 @@ static int drm_drawable_free(int idr, void *p, void *data) return 0; } -void drm_drawable_free_all(drm_device_t *dev) +void drm_drawable_free_all(struct drm_device *dev) { idr_for_each(&dev->drw_idr, drm_drawable_free, NULL); idr_remove_all(&dev->drw_idr); diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index fd817f88..84efbfe7 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -48,14 +48,14 @@ #include "drmP.h" #include "drm_core.h" -static void drm_cleanup(drm_device_t * dev); +static void drm_cleanup(struct drm_device * dev); int drm_fb_loaded = 0; static int drm_version(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); /** Ioctl table */ -static drm_ioctl_desc_t drm_ioctls[] = { +static struct drm_ioctl_desc drm_ioctls[] = { [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = {drm_version, 0}, [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = {drm_getunique, 0}, [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = {drm_getmagic, 0}, @@ -168,11 +168,11 @@ static drm_ioctl_desc_t drm_ioctls[] = { * * \sa drm_device */ -int drm_lastclose(drm_device_t * dev) +int drm_lastclose(struct drm_device * dev) { - drm_magic_entry_t *pt, *next; - drm_map_list_t *r_list, *list_t; - drm_vma_entry_t *vma, *vma_temp; + struct drm_magic_entry *pt, *next; + struct drm_map_list *r_list, *list_t; + struct drm_vma_entry *vma, *vma_temp; int i; DRM_DEBUG("\n"); @@ -220,7 +220,7 @@ int drm_lastclose(drm_device_t * dev) /* Clear AGP information */ if (drm_core_has_AGP(dev) && dev->agp) { - drm_agp_mem_t *entry, *tempe; + struct drm_agp_mem *entry, *tempe; /* Remove AGP resources, but leave dev->agp intact until drv_cleanup is called. 
*/ @@ -288,7 +288,7 @@ int drm_lastclose(drm_device_t * dev) void drm_cleanup_pci(struct pci_dev *pdev) { - drm_device_t *dev = pci_get_drvdata(pdev); + struct drm_device *dev = pci_get_drvdata(pdev); pci_set_drvdata(pdev, NULL); pci_release_regions(pdev); @@ -374,7 +374,7 @@ EXPORT_SYMBOL(drm_init); * * \sa drm_init */ -static void drm_cleanup(drm_device_t * dev) +static void drm_cleanup(struct drm_device * dev) { DRM_DEBUG("\n"); @@ -419,8 +419,8 @@ static void drm_cleanup(drm_device_t * dev) void drm_exit(struct drm_driver *driver) { int i; - drm_device_t *dev = NULL; - drm_head_t *head; + struct drm_device *dev = NULL; + struct drm_head *head; DRM_DEBUG("\n"); if (drm_fb_loaded) { @@ -548,8 +548,8 @@ module_exit(drm_core_exit); static int drm_version(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; struct drm_version __user *argp = (void __user *)arg; struct drm_version version; int len; @@ -584,9 +584,9 @@ static int drm_version(struct inode *inode, struct file *filp, int drm_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_ioctl_desc_t *ioctl; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_ioctl_desc *ioctl; drm_ioctl_t *func; unsigned int nr = DRM_IOCTL_NR(cmd); int retcode = -EINVAL; @@ -635,7 +635,7 @@ EXPORT_SYMBOL(drm_ioctl); drm_local_map_t *drm_getsarea(struct drm_device *dev) { - drm_map_list_t *entry; + struct drm_map_list *entry; list_for_each_entry(entry, &dev->maplist, head) { if (entry->map && entry->map->type == _DRM_SHM && diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c index ccd9b19c..4f24b4b5 100644 --- a/linux-core/drm_fence.c +++ b/linux-core/drm_fence.c @@ -34,7 +34,7 @@ * Typically called by the IRQ handler. */ -void drm_fence_handler(drm_device_t * dev, uint32_t class, +void drm_fence_handler(struct drm_device * dev, uint32_t class, uint32_t sequence, uint32_t type) { int wake = 0; @@ -114,7 +114,7 @@ void drm_fence_handler(drm_device_t * dev, uint32_t class, EXPORT_SYMBOL(drm_fence_handler); -static void drm_fence_unring(drm_device_t * dev, struct list_head *ring) +static void drm_fence_unring(struct drm_device * dev, struct list_head *ring) { drm_fence_manager_t *fm = &dev->fm; unsigned long flags; @@ -180,7 +180,7 @@ void drm_fence_reference_unlocked(struct drm_fence_object **dst, } -static void drm_fence_object_destroy(drm_file_t *priv, drm_user_object_t * base) +static void drm_fence_object_destroy(struct drm_file *priv, drm_user_object_t * base) { drm_fence_object_t *fence = drm_user_object_entry(base, drm_fence_object_t, base); @@ -262,7 +262,7 @@ int drm_fence_object_flush(drm_fence_object_t * fence, * wrapped around and reused. 
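drm_fence_handler() above is documented as typically being called from the IRQ handler once the hardware reports a completed sequence number. A hedged sketch of that call site; the register read, the fence class and the completion type are driver-specific assumptions, not part of this patch:

/* Illustrative driver ISR fragment; hw_read_sequence() is hypothetical. */
static irqreturn_t example_driver_irq(int irq, void *arg)
{
	struct drm_device *dev = arg;
	uint32_t sequence = hw_read_sequence(dev);	/* hypothetical register read */
	uint32_t fence_type = 0x1;			/* driver-defined completion type */

	drm_fence_handler(dev, 0 /* fence class */, sequence, fence_type);
	return IRQ_HANDLED;
}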
*/ -void drm_fence_flush_old(drm_device_t * dev, uint32_t class, uint32_t sequence) +void drm_fence_flush_old(struct drm_device * dev, uint32_t class, uint32_t sequence) { drm_fence_manager_t *fm = &dev->fm; drm_fence_class_manager_t *fc = &fm->class[class]; @@ -435,7 +435,7 @@ int drm_fence_object_emit(drm_fence_object_t * fence, return 0; } -static int drm_fence_object_init(drm_device_t * dev, uint32_t class, +static int drm_fence_object_init(struct drm_device * dev, uint32_t class, uint32_t type, uint32_t fence_flags, drm_fence_object_t * fence) @@ -471,10 +471,10 @@ static int drm_fence_object_init(drm_device_t * dev, uint32_t class, return ret; } -int drm_fence_add_user_object(drm_file_t * priv, drm_fence_object_t * fence, +int drm_fence_add_user_object(struct drm_file * priv, drm_fence_object_t * fence, int shareable) { - drm_device_t *dev = priv->head->dev; + struct drm_device *dev = priv->head->dev; int ret; mutex_lock(&dev->struct_mutex); @@ -491,7 +491,7 @@ out: } EXPORT_SYMBOL(drm_fence_add_user_object); -int drm_fence_object_create(drm_device_t * dev, uint32_t class, uint32_t type, +int drm_fence_object_create(struct drm_device * dev, uint32_t class, uint32_t type, unsigned flags, drm_fence_object_t ** c_fence) { drm_fence_object_t *fence; @@ -514,7 +514,7 @@ int drm_fence_object_create(drm_device_t * dev, uint32_t class, uint32_t type, EXPORT_SYMBOL(drm_fence_object_create); -void drm_fence_manager_init(drm_device_t * dev) +void drm_fence_manager_init(struct drm_device * dev) { drm_fence_manager_t *fm = &dev->fm; drm_fence_class_manager_t *class; @@ -544,13 +544,13 @@ void drm_fence_manager_init(drm_device_t * dev) write_unlock(&fm->lock); } -void drm_fence_manager_takedown(drm_device_t * dev) +void drm_fence_manager_takedown(struct drm_device * dev) { } -drm_fence_object_t *drm_lookup_fence_object(drm_file_t * priv, uint32_t handle) +drm_fence_object_t *drm_lookup_fence_object(struct drm_file * priv, uint32_t handle) { - drm_device_t *dev = priv->head->dev; + struct drm_device *dev = priv->head->dev; drm_user_object_t *uo; drm_fence_object_t *fence; diff --git a/linux-core/drm_fops.c b/linux-core/drm_fops.c index e54d5079..5ea3f9cf 100644 --- a/linux-core/drm_fops.c +++ b/linux-core/drm_fops.c @@ -39,9 +39,9 @@ #include static int drm_open_helper(struct inode *inode, struct file *filp, - drm_device_t * dev); + struct drm_device * dev); -static int drm_setup(drm_device_t * dev) +static int drm_setup(struct drm_device * dev) { drm_local_map_t *map; int i; @@ -128,7 +128,7 @@ static int drm_setup(drm_device_t * dev) */ int drm_open(struct inode *inode, struct file *filp) { - drm_device_t *dev = NULL; + struct drm_device *dev = NULL; int minor = iminor(inode); int retcode = 0; @@ -176,7 +176,7 @@ EXPORT_SYMBOL(drm_open); */ int drm_stub_open(struct inode *inode, struct file *filp) { - drm_device_t *dev = NULL; + struct drm_device *dev = NULL; int minor = iminor(inode); int err = -ENODEV; const struct file_operations *old_fops; @@ -232,10 +232,10 @@ static int drm_cpu_valid(void) * filp and add it into the double linked list in \p dev. */ static int drm_open_helper(struct inode *inode, struct file *filp, - drm_device_t * dev) + struct drm_device * dev) { int minor = iminor(inode); - drm_file_t *priv; + struct drm_file *priv; int ret; int i,j; @@ -320,8 +320,8 @@ static int drm_open_helper(struct inode *inode, struct file *filp, /** No-op. 
*/ int drm_fasync(int fd, struct file *filp, int on) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; int retcode; DRM_DEBUG("fd = %d, device = 0x%lx\n", fd, @@ -335,7 +335,7 @@ EXPORT_SYMBOL(drm_fasync); static void drm_object_release(struct file *filp) { - drm_file_t *priv = filp->private_data; + struct drm_file *priv = filp->private_data; struct list_head *head; drm_user_object_t *user_object; drm_ref_object_t *ref_object; @@ -386,8 +386,8 @@ static void drm_object_release(struct file *filp) { */ int drm_release(struct inode *inode, struct file *filp) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev; int retcode = 0; lock_kernel(); @@ -466,7 +466,7 @@ int drm_release(struct inode *inode, struct file *filp) mutex_lock(&dev->ctxlist_mutex); if (!list_empty(&dev->ctxlist)) { - drm_ctx_list_t *pos, *n; + struct drm_ctx_list *pos, *n; list_for_each_entry_safe(pos, n, &dev->ctxlist, head) { if (pos->tag == priv && @@ -488,7 +488,7 @@ int drm_release(struct inode *inode, struct file *filp) mutex_lock(&dev->struct_mutex); drm_object_release(filp); if (priv->remove_auth_on_close == 1) { - drm_file_t *temp; + struct drm_file *temp; list_for_each_entry(temp, &dev->filelist, lhead) temp->authenticated = 0; diff --git a/linux-core/drm_ioctl.c b/linux-core/drm_ioctl.c index 02f70243..a7bacbb8 100644 --- a/linux-core/drm_ioctl.c +++ b/linux-core/drm_ioctl.c @@ -52,8 +52,8 @@ int drm_getunique(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; struct drm_unique __user *argp = (void __user *)arg; struct drm_unique u; @@ -86,8 +86,8 @@ int drm_getunique(struct inode *inode, struct file *filp, int drm_setunique(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; struct drm_unique u; int domain, bus, slot, func, ret; @@ -134,7 +134,7 @@ int drm_setunique(struct inode *inode, struct file *filp, return 0; } -static int drm_set_busid(drm_device_t * dev) +static int drm_set_busid(struct drm_device * dev) { int len; if (dev->unique != NULL) @@ -179,8 +179,8 @@ static int drm_set_busid(drm_device_t * dev) int drm_getmap(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; struct drm_map __user *argp = (void __user *)arg; struct drm_map map; struct drm_map_list *r_list = NULL; @@ -201,7 +201,7 @@ int drm_getmap(struct inode *inode, struct file *filp, i = 0; list_for_each(list, &dev->maplist) { if (i == idx) { - r_list = list_entry(list, drm_map_list_t, head); + r_list = list_entry(list, struct drm_map_list, head); break; } i++; @@ -240,11 +240,11 @@ int drm_getmap(struct inode *inode, struct file *filp, int drm_getclient(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + 
struct drm_device *dev = priv->head->dev; struct drm_client __user *argp = (struct drm_client __user *)arg; struct drm_client client; - drm_file_t *pt; + struct drm_file *pt; int idx; int i; @@ -289,8 +289,8 @@ int drm_getclient(struct inode *inode, struct file *filp, int drm_getstats(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; struct drm_stats stats; int i; diff --git a/linux-core/drm_irq.c b/linux-core/drm_irq.c index 2e2c4d9c..140ceca6 100644 --- a/linux-core/drm_irq.c +++ b/linux-core/drm_irq.c @@ -53,8 +53,8 @@ int drm_irq_by_busid(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; struct drm_irq_busid __user *argp = (void __user *)arg; struct drm_irq_busid p; @@ -86,7 +86,7 @@ int drm_irq_by_busid(struct inode *inode, struct file *filp, * \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions * before and after the installation. */ -static int drm_irq_install(drm_device_t * dev) +static int drm_irq_install(struct drm_device * dev) { int ret; unsigned long sh_flags = 0; @@ -154,7 +154,7 @@ static int drm_irq_install(drm_device_t * dev) * * Calls the driver's \c drm_driver_irq_uninstall() function, and stops the irq. */ -int drm_irq_uninstall(drm_device_t * dev) +int drm_irq_uninstall(struct drm_device * dev) { int irq_enabled; @@ -195,8 +195,8 @@ EXPORT_SYMBOL(drm_irq_uninstall); int drm_control(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; struct drm_control ctl; /* if we haven't irq we fallback for compatibility reasons - this used to be a separate function in drm_dma.h */ @@ -242,8 +242,8 @@ int drm_control(struct inode *inode, struct file *filp, */ int drm_wait_vblank(DRM_IOCTL_ARGS) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; union drm_wait_vblank __user *argp = (void __user *)data; union drm_wait_vblank vblwait; struct timeval now; @@ -292,7 +292,7 @@ int drm_wait_vblank(DRM_IOCTL_ARGS) unsigned long irqflags; struct list_head *vbl_sigs = (flags & _DRM_VBLANK_SECONDARY) ? &dev->vbl_sigs2 : &dev->vbl_sigs; - drm_vbl_sig_t *vbl_sig; + struct drm_vbl_sig *vbl_sig; spin_lock_irqsave(&dev->vbl_lock, irqflags); @@ -322,7 +322,7 @@ int drm_wait_vblank(DRM_IOCTL_ARGS) if (! (vbl_sig = - drm_alloc(sizeof(drm_vbl_sig_t), DRM_MEM_DRIVER))) { + drm_alloc(sizeof(struct drm_vbl_sig), DRM_MEM_DRIVER))) { return -ENOMEM; } @@ -369,7 +369,7 @@ int drm_wait_vblank(DRM_IOCTL_ARGS) * * If a signal is not requested, then calls vblank_wait(). */ -void drm_vbl_send_signals(drm_device_t * dev) +void drm_vbl_send_signals(struct drm_device * dev) { unsigned long flags; int i; @@ -377,7 +377,7 @@ void drm_vbl_send_signals(drm_device_t * dev) spin_lock_irqsave(&dev->vbl_lock, flags); for (i = 0; i < 2; i++) { - drm_vbl_sig_t *vbl_sig, *tmp; + struct drm_vbl_sig *vbl_sig, *tmp; struct list_head *vbl_sigs = i ? 
&dev->vbl_sigs2 : &dev->vbl_sigs; unsigned int vbl_seq = atomic_read(i ? &dev->vbl_received2 : &dev->vbl_received); @@ -413,7 +413,7 @@ EXPORT_SYMBOL(drm_vbl_send_signals); */ static void drm_locked_tasklet_func(unsigned long data) { - drm_device_t *dev = (drm_device_t*)data; + struct drm_device *dev = (struct drm_device*)data; unsigned long irqflags; spin_lock_irqsave(&dev->tasklet_lock, irqflags); @@ -450,7 +450,7 @@ static void drm_locked_tasklet_func(unsigned long data) * context, it must not make any assumptions about this. Also, the HW lock will * be held with the kernel context or any client context. */ -void drm_locked_tasklet(drm_device_t *dev, void (*func)(drm_device_t*)) +void drm_locked_tasklet(struct drm_device *dev, void (*func)(struct drm_device*)) { unsigned long irqflags; static DECLARE_TASKLET(drm_tasklet, drm_locked_tasklet_func, 0); diff --git a/linux-core/drm_lock.c b/linux-core/drm_lock.c index 6d348251..1ba01aab 100644 --- a/linux-core/drm_lock.c +++ b/linux-core/drm_lock.c @@ -51,8 +51,8 @@ static int drm_notifier(void *priv); int drm_lock(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; DECLARE_WAITQUEUE(entry, current); struct drm_lock lock; int ret = 0; @@ -152,8 +152,8 @@ int drm_lock(struct inode *inode, struct file *filp, int drm_unlock(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; struct drm_lock lock; unsigned long irqflags; @@ -202,7 +202,7 @@ int drm_unlock(struct inode *inode, struct file *filp, * * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction. */ -int drm_lock_take(drm_lock_data_t *lock_data, +int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context) { unsigned int old, new, prev; @@ -252,7 +252,7 @@ int drm_lock_take(drm_lock_data_t *lock_data, * Resets the lock file pointer. * Marks the lock as held by the given context, via the \p cmpxchg instruction. */ -static int drm_lock_transfer(drm_lock_data_t *lock_data, +static int drm_lock_transfer(struct drm_lock_data *lock_data, unsigned int context) { unsigned int old, new, prev; @@ -278,7 +278,7 @@ static int drm_lock_transfer(drm_lock_data_t *lock_data, * Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task * waiting on the lock queue. */ -int drm_lock_free(drm_lock_data_t *lock_data, unsigned int context) +int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context) { unsigned int old, new, prev; volatile unsigned int *lock = &lock_data->hw_lock->lock; @@ -320,7 +320,7 @@ int drm_lock_free(drm_lock_data_t *lock_data, unsigned int context) */ static int drm_notifier(void *priv) { - drm_sigdata_t *s = (drm_sigdata_t *) priv; + struct drm_sigdata *s = (struct drm_sigdata *) priv; unsigned int old, new, prev; /* Allow signal delivery if lock isn't held */ @@ -351,7 +351,7 @@ static int drm_notifier(void *priv) * having to worry about starvation. 
*/ -void drm_idlelock_take(drm_lock_data_t *lock_data) +void drm_idlelock_take(struct drm_lock_data *lock_data) { int ret = 0; @@ -370,7 +370,7 @@ void drm_idlelock_take(drm_lock_data_t *lock_data) } EXPORT_SYMBOL(drm_idlelock_take); -void drm_idlelock_release(drm_lock_data_t *lock_data) +void drm_idlelock_release(struct drm_lock_data *lock_data) { unsigned int old, prev; volatile unsigned int *lock = &lock_data->hw_lock->lock; diff --git a/linux-core/drm_memory.c b/linux-core/drm_memory.c index b1423c12..454c33e8 100644 --- a/linux-core/drm_memory.c +++ b/linux-core/drm_memory.c @@ -214,7 +214,7 @@ void drm_free_pages(unsigned long address, int order, int area) #if __OS_HAS_AGP static void *agp_remap(unsigned long offset, unsigned long size, - drm_device_t * dev) + struct drm_device * dev) { unsigned long *phys_addr_map, i, num_pages = PAGE_ALIGN(size) / PAGE_SIZE; @@ -258,12 +258,12 @@ static void *agp_remap(unsigned long offset, unsigned long size, /** Wrapper around agp_allocate_memory() */ #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11) -DRM_AGP_MEM *drm_alloc_agp(drm_device_t *dev, int pages, u32 type) +DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type) { return drm_agp_allocate_memory(pages, type); } #else -DRM_AGP_MEM *drm_alloc_agp(drm_device_t *dev, int pages, u32 type) +DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type) { return drm_agp_allocate_memory(dev->agp->bridge, pages, type); } @@ -289,7 +289,7 @@ int drm_unbind_agp(DRM_AGP_MEM * handle) #else /* __OS_HAS_AGP*/ static void *agp_remap(unsigned long offset, unsigned long size, - drm_device_t * dev) + struct drm_device * dev) { return NULL; } diff --git a/linux-core/drm_mm.c b/linux-core/drm_mm.c index 2caf596b..cf0d92fa 100644 --- a/linux-core/drm_mm.c +++ b/linux-core/drm_mm.c @@ -44,26 +44,26 @@ #include "drmP.h" #include -unsigned long drm_mm_tail_space(drm_mm_t *mm) +unsigned long drm_mm_tail_space(struct drm_mm *mm) { struct list_head *tail_node; - drm_mm_node_t *entry; + struct drm_mm_node *entry; tail_node = mm->ml_entry.prev; - entry = list_entry(tail_node, drm_mm_node_t, ml_entry); + entry = list_entry(tail_node, struct drm_mm_node, ml_entry); if (!entry->free) return 0; return entry->size; } -int drm_mm_remove_space_from_tail(drm_mm_t *mm, unsigned long size) +int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size) { struct list_head *tail_node; - drm_mm_node_t *entry; + struct drm_mm_node *entry; tail_node = mm->ml_entry.prev; - entry = list_entry(tail_node, drm_mm_node_t, ml_entry); + entry = list_entry(tail_node, struct drm_mm_node, ml_entry); if (!entry->free) return -ENOMEM; @@ -75,13 +75,13 @@ int drm_mm_remove_space_from_tail(drm_mm_t *mm, unsigned long size) } -static int drm_mm_create_tail_node(drm_mm_t *mm, +static int drm_mm_create_tail_node(struct drm_mm *mm, unsigned long start, unsigned long size) { - drm_mm_node_t *child; + struct drm_mm_node *child; - child = (drm_mm_node_t *) + child = (struct drm_mm_node *) drm_ctl_alloc(sizeof(*child), DRM_MEM_MM); if (!child) return -ENOMEM; @@ -98,13 +98,13 @@ static int drm_mm_create_tail_node(drm_mm_t *mm, } -int drm_mm_add_space_to_tail(drm_mm_t *mm, unsigned long size) +int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size) { struct list_head *tail_node; - drm_mm_node_t *entry; + struct drm_mm_node *entry; tail_node = mm->ml_entry.prev; - entry = list_entry(tail_node, drm_mm_node_t, ml_entry); + entry = list_entry(tail_node, struct drm_mm_node, ml_entry); if (!entry->free) { return 
drm_mm_create_tail_node(mm, entry->start + entry->size, size); } @@ -112,12 +112,12 @@ int drm_mm_add_space_to_tail(drm_mm_t *mm, unsigned long size) return 0; } -static drm_mm_node_t *drm_mm_split_at_start(drm_mm_node_t *parent, +static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent, unsigned long size) { - drm_mm_node_t *child; + struct drm_mm_node *child; - child = (drm_mm_node_t *) + child = (struct drm_mm_node *) drm_ctl_alloc(sizeof(*child), DRM_MEM_MM); if (!child) return NULL; @@ -137,12 +137,12 @@ static drm_mm_node_t *drm_mm_split_at_start(drm_mm_node_t *parent, return child; } -drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent, +struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent, unsigned long size, unsigned alignment) { - drm_mm_node_t *align_splitoff = NULL; - drm_mm_node_t *child; + struct drm_mm_node *align_splitoff = NULL; + struct drm_mm_node *child; unsigned tmp = 0; if (alignment) @@ -173,26 +173,26 @@ drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent, * Otherwise add to the free stack. */ -void drm_mm_put_block(drm_mm_node_t * cur) +void drm_mm_put_block(struct drm_mm_node * cur) { - drm_mm_t *mm = cur->mm; + struct drm_mm *mm = cur->mm; struct list_head *cur_head = &cur->ml_entry; struct list_head *root_head = &mm->ml_entry; - drm_mm_node_t *prev_node = NULL; - drm_mm_node_t *next_node; + struct drm_mm_node *prev_node = NULL; + struct drm_mm_node *next_node; int merged = 0; if (cur_head->prev != root_head) { - prev_node = list_entry(cur_head->prev, drm_mm_node_t, ml_entry); + prev_node = list_entry(cur_head->prev, struct drm_mm_node, ml_entry); if (prev_node->free) { prev_node->size += cur->size; merged = 1; } } if (cur_head->next != root_head) { - next_node = list_entry(cur_head->next, drm_mm_node_t, ml_entry); + next_node = list_entry(cur_head->next, struct drm_mm_node, ml_entry); if (next_node->free) { if (merged) { prev_node->size += next_node->size; @@ -217,14 +217,14 @@ void drm_mm_put_block(drm_mm_node_t * cur) } EXPORT_SYMBOL(drm_mm_put_block); -drm_mm_node_t *drm_mm_search_free(const drm_mm_t * mm, +struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm, unsigned long size, unsigned alignment, int best_match) { struct list_head *list; const struct list_head *free_stack = &mm->fl_entry; - drm_mm_node_t *entry; - drm_mm_node_t *best; + struct drm_mm_node *entry; + struct drm_mm_node *best; unsigned long best_size; unsigned wasted; @@ -232,7 +232,7 @@ drm_mm_node_t *drm_mm_search_free(const drm_mm_t * mm, best_size = ~0UL; list_for_each(list, free_stack) { - entry = list_entry(list, drm_mm_node_t, fl_entry); + entry = list_entry(list, struct drm_mm_node, fl_entry); wasted = 0; if (entry->size < size) @@ -258,14 +258,14 @@ drm_mm_node_t *drm_mm_search_free(const drm_mm_t * mm, return best; } -int drm_mm_clean(drm_mm_t * mm) +int drm_mm_clean(struct drm_mm * mm) { struct list_head *head = &mm->ml_entry; return (head->next->next == head); } -int drm_mm_init(drm_mm_t * mm, unsigned long start, unsigned long size) +int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size) { INIT_LIST_HEAD(&mm->ml_entry); INIT_LIST_HEAD(&mm->fl_entry); @@ -275,12 +275,12 @@ int drm_mm_init(drm_mm_t * mm, unsigned long start, unsigned long size) EXPORT_SYMBOL(drm_mm_init); -void drm_mm_takedown(drm_mm_t * mm) +void drm_mm_takedown(struct drm_mm * mm) { struct list_head *bnode = mm->fl_entry.next; - drm_mm_node_t *entry; + struct drm_mm_node *entry; - entry = list_entry(bnode, drm_mm_node_t, fl_entry); + entry = 
list_entry(bnode, struct drm_mm_node, fl_entry); if (entry->ml_entry.next != &mm->ml_entry || entry->fl_entry.next != &mm->fl_entry) { diff --git a/linux-core/drm_object.c b/linux-core/drm_object.c index 567a7d2b..3c60605c 100644 --- a/linux-core/drm_object.c +++ b/linux-core/drm_object.c @@ -30,10 +30,10 @@ #include "drmP.h" -int drm_add_user_object(drm_file_t * priv, drm_user_object_t * item, +int drm_add_user_object(struct drm_file * priv, drm_user_object_t * item, int shareable) { - drm_device_t *dev = priv->head->dev; + struct drm_device *dev = priv->head->dev; int ret; DRM_ASSERT_LOCKED(&dev->struct_mutex); @@ -51,9 +51,9 @@ int drm_add_user_object(drm_file_t * priv, drm_user_object_t * item, return 0; } -drm_user_object_t *drm_lookup_user_object(drm_file_t * priv, uint32_t key) +drm_user_object_t *drm_lookup_user_object(struct drm_file * priv, uint32_t key) { - drm_device_t *dev = priv->head->dev; + struct drm_device *dev = priv->head->dev; drm_hash_item_t *hash; int ret; drm_user_object_t *item; @@ -77,9 +77,9 @@ drm_user_object_t *drm_lookup_user_object(drm_file_t * priv, uint32_t key) return item; } -static void drm_deref_user_object(drm_file_t * priv, drm_user_object_t * item) +static void drm_deref_user_object(struct drm_file * priv, drm_user_object_t * item) { - drm_device_t *dev = priv->head->dev; + struct drm_device *dev = priv->head->dev; int ret; if (atomic_dec_and_test(&item->refcount)) { @@ -90,7 +90,7 @@ static void drm_deref_user_object(drm_file_t * priv, drm_user_object_t * item) } } -int drm_remove_user_object(drm_file_t * priv, drm_user_object_t * item) +int drm_remove_user_object(struct drm_file * priv, drm_user_object_t * item) { DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex); @@ -105,7 +105,7 @@ int drm_remove_user_object(drm_file_t * priv, drm_user_object_t * item) return 0; } -static int drm_object_ref_action(drm_file_t * priv, drm_user_object_t * ro, +static int drm_object_ref_action(struct drm_file * priv, drm_user_object_t * ro, drm_ref_t action) { int ret = 0; @@ -124,7 +124,7 @@ static int drm_object_ref_action(drm_file_t * priv, drm_user_object_t * ro, return ret; } -int drm_add_ref_object(drm_file_t * priv, drm_user_object_t * referenced_object, +int drm_add_ref_object(struct drm_file * priv, drm_user_object_t * referenced_object, drm_ref_t ref_action) { int ret = 0; @@ -181,7 +181,7 @@ int drm_add_ref_object(drm_file_t * priv, drm_user_object_t * referenced_object, return ret; } -drm_ref_object_t *drm_lookup_ref_object(drm_file_t * priv, +drm_ref_object_t *drm_lookup_ref_object(struct drm_file * priv, drm_user_object_t * referenced_object, drm_ref_t ref_action) { @@ -197,7 +197,7 @@ drm_ref_object_t *drm_lookup_ref_object(drm_file_t * priv, return drm_hash_entry(hash, drm_ref_object_t, hash); } -static void drm_remove_other_references(drm_file_t * priv, +static void drm_remove_other_references(struct drm_file * priv, drm_user_object_t * ro) { int i; @@ -214,7 +214,7 @@ static void drm_remove_other_references(drm_file_t * priv, } } -void drm_remove_ref_object(drm_file_t * priv, drm_ref_object_t * item) +void drm_remove_ref_object(struct drm_file * priv, drm_ref_object_t * item) { int ret; drm_user_object_t *user_object = (drm_user_object_t *) item->hash.key; @@ -244,10 +244,10 @@ void drm_remove_ref_object(drm_file_t * priv, drm_ref_object_t * item) } -int drm_user_object_ref(drm_file_t * priv, uint32_t user_token, +int drm_user_object_ref(struct drm_file * priv, uint32_t user_token, drm_object_type_t type, drm_user_object_t ** object) { - 
drm_device_t *dev = priv->head->dev; + struct drm_device *dev = priv->head->dev; drm_user_object_t *uo; drm_hash_item_t *hash; int ret; @@ -274,10 +274,10 @@ int drm_user_object_ref(drm_file_t * priv, uint32_t user_token, return ret; } -int drm_user_object_unref(drm_file_t * priv, uint32_t user_token, +int drm_user_object_unref(struct drm_file * priv, uint32_t user_token, drm_object_type_t type) { - drm_device_t *dev = priv->head->dev; + struct drm_device *dev = priv->head->dev; drm_user_object_t *uo; drm_ref_object_t *ro; int ret; diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index 4bd9047c..cfca5bf0 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -61,13 +61,13 @@ typedef struct drm_user_object { drm_object_type_t type; atomic_t refcount; int shareable; - drm_file_t *owner; - void (*ref_struct_locked) (drm_file_t * priv, + struct drm_file *owner; + void (*ref_struct_locked) (struct drm_file * priv, struct drm_user_object * obj, drm_ref_t ref_action); - void (*unref) (drm_file_t * priv, struct drm_user_object * obj, + void (*unref) (struct drm_file * priv, struct drm_user_object * obj, drm_ref_t unref_action); - void (*remove) (drm_file_t * priv, struct drm_user_object * obj); + void (*remove) (struct drm_file * priv, struct drm_user_object * obj); } drm_user_object_t; /* @@ -88,13 +88,13 @@ typedef struct drm_ref_object { * Must be called with the struct_mutex held. */ -extern int drm_add_user_object(drm_file_t * priv, drm_user_object_t * item, +extern int drm_add_user_object(struct drm_file * priv, drm_user_object_t * item, int shareable); /** * Must be called with the struct_mutex held. */ -extern drm_user_object_t *drm_lookup_user_object(drm_file_t * priv, +extern drm_user_object_t *drm_lookup_user_object(struct drm_file * priv, uint32_t key); /* @@ -104,13 +104,13 @@ extern drm_user_object_t *drm_lookup_user_object(drm_file_t * priv, * This function may temporarily release the struct_mutex. */ -extern int drm_remove_user_object(drm_file_t * priv, drm_user_object_t * item); +extern int drm_remove_user_object(struct drm_file * priv, drm_user_object_t * item); /* * Must be called with the struct_mutex held. May temporarily release it. */ -extern int drm_add_ref_object(drm_file_t * priv, +extern int drm_add_ref_object(struct drm_file * priv, drm_user_object_t * referenced_object, drm_ref_t ref_action); @@ -118,7 +118,7 @@ extern int drm_add_ref_object(drm_file_t * priv, * Must be called with the struct_mutex held. */ -drm_ref_object_t *drm_lookup_ref_object(drm_file_t * priv, +drm_ref_object_t *drm_lookup_ref_object(struct drm_file * priv, drm_user_object_t * referenced_object, drm_ref_t ref_action); /* @@ -128,11 +128,11 @@ drm_ref_object_t *drm_lookup_ref_object(drm_file_t * priv, * This function may temporarily release the struct_mutex. 
*/ -extern void drm_remove_ref_object(drm_file_t * priv, drm_ref_object_t * item); -extern int drm_user_object_ref(drm_file_t * priv, uint32_t user_token, +extern void drm_remove_ref_object(struct drm_file * priv, drm_ref_object_t * item); +extern int drm_user_object_ref(struct drm_file * priv, uint32_t user_token, drm_object_type_t type, drm_user_object_t ** object); -extern int drm_user_object_unref(drm_file_t * priv, uint32_t user_token, +extern int drm_user_object_unref(struct drm_file * priv, uint32_t user_token, drm_object_type_t type); /*************************************************** @@ -210,7 +210,7 @@ extern int drm_fence_object_wait(drm_fence_object_t * fence, extern int drm_fence_object_create(struct drm_device *dev, uint32_t type, uint32_t fence_flags, uint32_t class, drm_fence_object_t ** c_fence); -extern int drm_fence_add_user_object(drm_file_t * priv, +extern int drm_fence_add_user_object(struct drm_file * priv, drm_fence_object_t * fence, int shareable); extern int drm_fence_create_ioctl(DRM_IOCTL_ARGS); @@ -317,7 +317,7 @@ extern int drm_destroy_ttm(drm_ttm_t * ttm); */ typedef struct drm_bo_mem_reg { - drm_mm_node_t *mm_node; + struct drm_mm_node *mm_node; unsigned long size; unsigned long num_pages; uint32_t page_alignment; @@ -353,14 +353,14 @@ typedef struct drm_buffer_object { struct mutex mutex; /* For pinned buffers */ - drm_mm_node_t *pinned_node; + struct drm_mm_node *pinned_node; uint32_t pinned_mem_type; struct list_head pinned_lru; /* For vm */ drm_ttm_t *ttm; - drm_map_list_t map_list; + struct drm_map_list map_list; uint32_t memory_type; unsigned long bus_offset; uint32_t vm_flags; @@ -380,7 +380,7 @@ typedef struct drm_buffer_object { typedef struct drm_mem_type_manager { int has_type; int use_type; - drm_mm_t manager; + struct drm_mm manager; struct list_head lru; struct list_head pinned; uint32_t flags; @@ -403,7 +403,7 @@ typedef struct drm_buffer_manager { struct mutex evict_mutex; int nice_mode; int initialized; - drm_file_t *last_to_validate; + struct drm_file *last_to_validate; drm_mem_type_manager_t man[DRM_BO_MEM_TYPES]; struct list_head unfenced; struct list_head ddestroy; @@ -462,7 +462,7 @@ extern int drm_bo_pci_offset(struct drm_device *dev, extern int drm_mem_reg_is_pci(struct drm_device *dev, drm_bo_mem_reg_t * mem); extern void drm_bo_usage_deref_locked(drm_buffer_object_t ** bo); -extern int drm_fence_buffer_objects(drm_file_t * priv, +extern int drm_fence_buffer_objects(struct drm_file * priv, struct list_head *list, uint32_t fence_flags, drm_fence_object_t * fence, diff --git a/linux-core/drm_pci.c b/linux-core/drm_pci.c index 76252204..a608eed3 100644 --- a/linux-core/drm_pci.c +++ b/linux-core/drm_pci.c @@ -47,7 +47,7 @@ /** * \brief Allocate a PCI consistent memory block, for DMA. */ -drm_dma_handle_t *drm_pci_alloc(drm_device_t * dev, size_t size, size_t align, +drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align, dma_addr_t maxaddr) { drm_dma_handle_t *dmah; @@ -123,7 +123,7 @@ EXPORT_SYMBOL(drm_pci_alloc); * * This function is for internal use in the Linux-specific DRM core code. 
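drm_pci_alloc() and drm_pci_free() in the drm_pci.c hunk form the usual allocate/release pair for PCI-consistent DMA memory. A minimal, hedged usage sketch; the size, alignment and address-mask values are arbitrary examples:

/* Illustrative only: allocate a page of DMA-coherent memory, then free it. */
static int example_pci_dma(struct drm_device *dev)
{
	drm_dma_handle_t *dmah;

	dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
	if (!dmah)
		return -ENOMEM;

	/* ... use the kernel virtual and bus addresses carried in *dmah ... */

	drm_pci_free(dev, dmah);
	return 0;
}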
*/ -void __drm_pci_free(drm_device_t * dev, drm_dma_handle_t *dmah) +void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t *dmah) { unsigned long addr; size_t sz; @@ -167,7 +167,7 @@ void __drm_pci_free(drm_device_t * dev, drm_dma_handle_t *dmah) /** * \brief Free a PCI consistent memory block */ -void drm_pci_free(drm_device_t * dev, drm_dma_handle_t *dmah) +void drm_pci_free(struct drm_device * dev, drm_dma_handle_t *dmah) { __drm_pci_free(dev, dmah); kfree(dmah); diff --git a/linux-core/drm_proc.c b/linux-core/drm_proc.c index e59f2afa..f33bd93d 100644 --- a/linux-core/drm_proc.c +++ b/linux-core/drm_proc.c @@ -90,7 +90,7 @@ static struct drm_proc_list { * "/proc/dri/%minor%/", and each entry in proc_list as * "/proc/dri/%minor%/%name%". */ -int drm_proc_init(drm_device_t * dev, int minor, +int drm_proc_init(struct drm_device * dev, int minor, struct proc_dir_entry *root, struct proc_dir_entry **dev_root) { struct proc_dir_entry *ent; @@ -165,7 +165,7 @@ int drm_proc_cleanup(int minor, struct proc_dir_entry *root, static int drm_name_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; int len = 0; if (offset > DRM_PROC_LIMIT) { @@ -207,7 +207,7 @@ static int drm_name_info(char *buf, char **start, off_t offset, int request, static int drm__vm_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; int len = 0; struct drm_map *map; struct drm_map_list *r_list; @@ -264,7 +264,7 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request, static int drm_vm_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; int ret; mutex_lock(&dev->struct_mutex); @@ -287,10 +287,10 @@ static int drm_vm_info(char *buf, char **start, off_t offset, int request, static int drm__queues_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; int len = 0; int i; - drm_queue_t *q; + struct drm_queue *q; if (offset > DRM_PROC_LIMIT) { *eof = 1; @@ -337,7 +337,7 @@ static int drm__queues_info(char *buf, char **start, off_t offset, static int drm_queues_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; int ret; mutex_lock(&dev->struct_mutex); @@ -360,9 +360,9 @@ static int drm_queues_info(char *buf, char **start, off_t offset, int request, static int drm__bufs_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; int len = 0; - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; int i; if (!dma || offset > DRM_PROC_LIMIT) { @@ -409,7 +409,7 @@ static int drm__bufs_info(char *buf, char **start, off_t offset, int request, static int drm_bufs_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; int ret; mutex_lock(&dev->struct_mutex); @@ -432,7 +432,7 @@ static int drm_bufs_info(char *buf, char 
**start, off_t offset, int request, static int drm__objects_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; int len = 0; drm_buffer_manager_t *bm = &dev->bm; drm_fence_manager_t *fm = &dev->fm; @@ -496,7 +496,7 @@ static int drm__objects_info(char *buf, char **start, off_t offset, int request, static int drm_objects_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; int ret; mutex_lock(&dev->struct_mutex); @@ -519,9 +519,9 @@ static int drm_objects_info(char *buf, char **start, off_t offset, int request, static int drm__clients_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; int len = 0; - drm_file_t *priv; + struct drm_file *priv; if (offset > DRM_PROC_LIMIT) { *eof = 1; @@ -552,7 +552,7 @@ static int drm__clients_info(char *buf, char **start, off_t offset, static int drm_clients_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; int ret; mutex_lock(&dev->struct_mutex); @@ -566,9 +566,9 @@ static int drm_clients_info(char *buf, char **start, off_t offset, static int drm__vma_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; int len = 0; - drm_vma_entry_t *pt; + struct drm_vma_entry *pt; struct vm_area_struct *vma; #if defined(__i386__) unsigned int pgprot; @@ -625,7 +625,7 @@ static int drm__vma_info(char *buf, char **start, off_t offset, int request, static int drm_vma_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; int ret; mutex_lock(&dev->struct_mutex); diff --git a/linux-core/drm_scatter.c b/linux-core/drm_scatter.c index 138ae087..7c13610d 100644 --- a/linux-core/drm_scatter.c +++ b/linux-core/drm_scatter.c @@ -190,7 +190,7 @@ EXPORT_SYMBOL(drm_sg_alloc); int drm_sg_alloc_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; + struct drm_file *priv = filp->private_data; struct drm_scatter_gather __user *argp = (void __user *)arg; struct drm_scatter_gather request; int ret; @@ -214,8 +214,8 @@ int drm_sg_alloc_ioctl(struct inode *inode, struct file *filp, int drm_sg_free(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; struct drm_scatter_gather request; struct drm_sg_mem *entry; diff --git a/linux-core/drm_sman.c b/linux-core/drm_sman.c index e15db6d6..8e4bfbd8 100644 --- a/linux-core/drm_sman.c +++ b/linux-core/drm_sman.c @@ -88,8 +88,8 @@ EXPORT_SYMBOL(drm_sman_init); static void *drm_sman_mm_allocate(void *private, unsigned long size, unsigned alignment) { - drm_mm_t *mm = (drm_mm_t *) private; - drm_mm_node_t *tmp; + struct drm_mm *mm = (struct drm_mm *) private; + struct drm_mm_node *tmp; tmp = drm_mm_search_free(mm, size, alignment, 1); 
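Aside on the pattern these hunks apply: every function that used to take one of the old `_t` typedef names (drm_device_t, drm_mm_t, drm_mm_node_t, ...) now spells out the plain struct tag instead. A minimal, self-contained sketch of that conversion follows; the stand-in struct drm_device body and the transitional drm_device_t compatibility typedef are assumptions made purely for illustration, not the real DRM definitions. Writing the bare struct tag matches kernel coding style, which discourages typedefs for plain aggregates and lets headers get by with a forward declaration such as `struct drm_device;`.

/* Illustration only: simplified stand-ins, not the real DRM types. */
#include <stdio.h>

struct drm_device {
	int primary_minor;		/* stand-in member for this sketch */
};

/* Transitional alias so not-yet-converted callers keep compiling;
 * whether the tree keeps such an alias is an assumption of this sketch. */
typedef struct drm_device drm_device_t;

/* Converted style: the struct tag is written out in the prototype. */
static int example_get_minor(struct drm_device *dev)
{
	return dev->primary_minor;
}

int main(void)
{
	struct drm_device dev = { .primary_minor = 0 };
	drm_device_t *legacy = &dev;	/* old spelling, same underlying type */

	printf("minor %d\n", example_get_minor(legacy));
	return 0;
}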
if (!tmp) { @@ -101,21 +101,21 @@ static void *drm_sman_mm_allocate(void *private, unsigned long size, static void drm_sman_mm_free(void *private, void *ref) { - drm_mm_node_t *node = (drm_mm_node_t *) ref; + struct drm_mm_node *node = (struct drm_mm_node *) ref; drm_mm_put_block(node); } static void drm_sman_mm_destroy(void *private) { - drm_mm_t *mm = (drm_mm_t *) private; + struct drm_mm *mm = (struct drm_mm *) private; drm_mm_takedown(mm); drm_free(mm, sizeof(*mm), DRM_MEM_MM); } static unsigned long drm_sman_mm_offset(void *private, void *ref) { - drm_mm_node_t *node = (drm_mm_node_t *) ref; + struct drm_mm_node *node = (struct drm_mm_node *) ref; return node->start; } @@ -124,7 +124,7 @@ drm_sman_set_range(drm_sman_t * sman, unsigned int manager, unsigned long start, unsigned long size) { drm_sman_mm_t *sman_mm; - drm_mm_t *mm; + struct drm_mm *mm; int ret; BUG_ON(manager >= sman->num_managers); diff --git a/linux-core/drm_stub.c b/linux-core/drm_stub.c index b96408ab..eba6deed 100644 --- a/linux-core/drm_stub.c +++ b/linux-core/drm_stub.c @@ -50,11 +50,11 @@ MODULE_PARM_DESC(debug, "Enable debug output"); module_param_named(cards_limit, drm_cards_limit, int, 0444); module_param_named(debug, drm_debug, int, 0600); -drm_head_t **drm_heads; +struct drm_head **drm_heads; struct drm_sysfs_class *drm_class; struct proc_dir_entry *drm_proc_root; -static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev, +static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev, const struct pci_device_id *ent, struct drm_driver *driver) { @@ -160,9 +160,9 @@ error_out_unreg: * create the proc init entry via proc_init(). This routines assigns * minor numbers to secondary heads of multi-headed cards */ -static int drm_get_head(drm_device_t * dev, drm_head_t * head) +static int drm_get_head(struct drm_device * dev, struct drm_head * head) { - drm_head_t **heads = drm_heads; + struct drm_head **heads = drm_heads; int ret; int minor; @@ -171,7 +171,7 @@ static int drm_get_head(drm_device_t * dev, drm_head_t * head) for (minor = 0; minor < drm_cards_limit; minor++, heads++) { if (!*heads) { - *head = (drm_head_t) { + *head = (struct drm_head) { .dev = dev, .device = MKDEV(DRM_MAJOR, minor), .minor = minor, @@ -202,7 +202,7 @@ static int drm_get_head(drm_device_t * dev, drm_head_t * head) err_g2: drm_proc_cleanup(minor, drm_proc_root, head->dev_root); err_g1: - *head = (drm_head_t) { + *head = (struct drm_head) { .dev = NULL}; return ret; } @@ -221,7 +221,7 @@ err_g1: int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent, struct drm_driver *driver) { - drm_device_t *dev; + struct drm_device *dev; int ret; DRM_DEBUG("\n"); @@ -282,7 +282,7 @@ EXPORT_SYMBOL(drm_get_dev); * "drm" data, otherwise unregisters the "drm" data, frees the dev list and * unregisters the character device. */ -int drm_put_dev(drm_device_t * dev) +int drm_put_dev(struct drm_device * dev) { DRM_DEBUG("release primary %s\n", dev->driver->pci_driver.name); @@ -310,7 +310,7 @@ int drm_put_dev(drm_device_t * dev) * last minor released. 
* */ -int drm_put_head(drm_head_t * head) +int drm_put_head(struct drm_head * head) { int minor = head->minor; @@ -319,7 +319,7 @@ int drm_put_head(drm_head_t * head) drm_proc_cleanup(minor, drm_proc_root, head->dev_root); drm_sysfs_device_remove(head->dev_class); - *head = (drm_head_t){.dev = NULL}; + *head = (struct drm_head){.dev = NULL}; drm_heads[minor] = NULL; return 0; diff --git a/linux-core/drm_sysfs.c b/linux-core/drm_sysfs.c index 9b2f5dce..1090e719 100644 --- a/linux-core/drm_sysfs.c +++ b/linux-core/drm_sysfs.c @@ -123,7 +123,7 @@ void drm_sysfs_destroy(struct drm_sysfs_class *cs) static ssize_t show_dri(struct class_device *class_device, char *buf) { - drm_device_t * dev = ((drm_head_t *)class_get_devdata(class_device))->dev; + struct drm_device * dev = ((struct drm_head *)class_get_devdata(class_device))->dev; if (dev->driver->dri_library_name) return dev->driver->dri_library_name(dev, buf); return snprintf(buf, PAGE_SIZE, "%s\n", dev->driver->pci_driver.name); @@ -148,7 +148,7 @@ static struct class_device_attribute class_device_attrs[] = { * created with a call to drm_sysfs_create(). */ struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs, - drm_head_t * head) + struct drm_head * head) { struct simple_dev *s_dev = NULL; int i, retval; diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c index 7451adc5..de2fba1a 100644 --- a/linux-core/drm_vm.c +++ b/linux-core/drm_vm.c @@ -85,11 +85,11 @@ pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma) static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma, unsigned long address) { - drm_file_t *priv = vma->vm_file->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = vma->vm_file->private_data; + struct drm_device *dev = priv->head->dev; struct drm_map *map = NULL; - drm_map_list_t *r_list; - drm_hash_item_t *hash; + struct drm_map_list *r_list; + struct drm_hash_item *hash; /* * Find the right map @@ -103,7 +103,7 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma, if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) goto vm_nopage_error; - r_list = drm_hash_entry(hash, drm_map_list_t, hash); + r_list = drm_hash_entry(hash, struct drm_map_list, hash); map = r_list->map; if (map && map->type == _DRM_AGP) { @@ -203,11 +203,11 @@ static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma, */ static void drm_vm_shm_close(struct vm_area_struct *vma) { - drm_file_t *priv = vma->vm_file->private_data; - drm_device_t *dev = priv->head->dev; - drm_vma_entry_t *pt, *temp; + struct drm_file *priv = vma->vm_file->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_vma_entry *pt, *temp; struct drm_map *map; - drm_map_list_t *r_list; + struct drm_map_list *r_list; int found_maps = 0; DRM_DEBUG("0x%08lx,0x%08lx\n", @@ -285,9 +285,9 @@ static void drm_vm_shm_close(struct vm_area_struct *vma) static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma, unsigned long address) { - drm_file_t *priv = vma->vm_file->private_data; - drm_device_t *dev = priv->head->dev; - drm_device_dma_t *dma = dev->dma; + struct drm_file *priv = vma->vm_file->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_device_dma *dma = dev->dma; unsigned long offset; unsigned long page_nr; struct page *page; @@ -322,9 +322,9 @@ static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma, unsigned long address) { struct drm_map *map = (struct drm_map *) vma->vm_private_data; 
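The drm_hash_entry(hash, struct drm_map_list, hash) call sites in the hunk above are the one place where the type name is a macro argument rather than part of a declaration: the macro recovers the structure that embeds the returned hash item, in the style of container_of, so it too now takes the struct tag. Below is a compilable sketch of that lookup shape using hypothetical stand-ins; sketch_container_of, the two stand-in structs, and their members are assumptions for illustration, not the real DRM hash-table API.

#include <stddef.h>
#include <stdio.h>

/* Same offset arithmetic the kernel's container_of performs. */
#define sketch_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct drm_hash_item {
	unsigned long key;		/* stand-in: what the table hashes on */
};

struct drm_map_list {
	int map_id;			/* stand-in payload */
	struct drm_hash_item hash;	/* embedded item returned by a lookup */
};

int main(void)
{
	struct drm_map_list entry = { .map_id = 42, .hash = { .key = 0x1000 } };
	struct drm_hash_item *found = &entry.hash;	/* as if a hash lookup returned it */

	/* Converted call sites pass the struct tag:
	 *   r_list = drm_hash_entry(hash, struct drm_map_list, hash);   */
	struct drm_map_list *r_list =
		sketch_container_of(found, struct drm_map_list, hash);

	printf("map_id %d\n", r_list->map_id);
	return 0;
}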
- drm_file_t *priv = vma->vm_file->private_data; - drm_device_t *dev = priv->head->dev; - drm_sg_mem_t *entry = dev->sg; + struct drm_file *priv = vma->vm_file->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_sg_mem *entry = dev->sg; unsigned long offset; unsigned long map_offset; unsigned long page_offset; @@ -418,9 +418,9 @@ static struct vm_operations_struct drm_vm_sg_ops = { */ static void drm_vm_open_locked(struct vm_area_struct *vma) { - drm_file_t *priv = vma->vm_file->private_data; - drm_device_t *dev = priv->head->dev; - drm_vma_entry_t *vma_entry; + struct drm_file *priv = vma->vm_file->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_vma_entry *vma_entry; DRM_DEBUG("0x%08lx,0x%08lx\n", vma->vm_start, vma->vm_end - vma->vm_start); @@ -436,8 +436,8 @@ static void drm_vm_open_locked(struct vm_area_struct *vma) static void drm_vm_open(struct vm_area_struct *vma) { - drm_file_t *priv = vma->vm_file->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = vma->vm_file->private_data; + struct drm_device *dev = priv->head->dev; mutex_lock(&dev->struct_mutex); drm_vm_open_locked(vma); @@ -454,9 +454,9 @@ static void drm_vm_open(struct vm_area_struct *vma) */ static void drm_vm_close(struct vm_area_struct *vma) { - drm_file_t *priv = vma->vm_file->private_data; - drm_device_t *dev = priv->head->dev; - drm_vma_entry_t *pt, *temp; + struct drm_file *priv = vma->vm_file->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_vma_entry *pt, *temp; DRM_DEBUG("0x%08lx,0x%08lx\n", vma->vm_start, vma->vm_end - vma->vm_start); @@ -486,9 +486,9 @@ static void drm_vm_close(struct vm_area_struct *vma) */ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev; - drm_device_dma_t *dma; + struct drm_file *priv = filp->private_data; + struct drm_device *dev; + struct drm_device_dma *dma; unsigned long length = vma->vm_end - vma->vm_start; dev = priv->head->dev; @@ -555,8 +555,8 @@ EXPORT_SYMBOL(drm_core_get_reg_ofs); */ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; struct drm_map *map = NULL; unsigned long offset = 0; struct drm_hash_item *hash; @@ -585,7 +585,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) return -EINVAL; } - map = drm_hash_entry(hash, drm_map_list_t, hash)->map; + map = drm_hash_entry(hash, struct drm_map_list, hash)->map; if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) return -EPERM; @@ -676,8 +676,8 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) int drm_mmap(struct file *filp, struct vm_area_struct *vma) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; int ret; mutex_lock(&dev->struct_mutex); @@ -717,7 +717,7 @@ static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma, unsigned long page_offset; struct page *page = NULL; drm_ttm_t *ttm; - drm_device_t *dev; + struct drm_device *dev; unsigned long pfn; int err; unsigned long bus_base; @@ -816,7 +816,7 @@ static void drm_bo_vm_open_locked(struct vm_area_struct *vma) static void drm_bo_vm_open(struct vm_area_struct *vma) { drm_buffer_object_t *bo = (drm_buffer_object_t 
*) vma->vm_private_data; - drm_device_t *dev = bo->dev; + struct drm_device *dev = bo->dev; mutex_lock(&dev->struct_mutex); drm_bo_vm_open_locked(vma); @@ -832,7 +832,7 @@ static void drm_bo_vm_open(struct vm_area_struct *vma) static void drm_bo_vm_close(struct vm_area_struct *vma) { drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data; - drm_device_t *dev = bo->dev; + struct drm_device *dev = bo->dev; drm_vm_close(vma); if (bo) { diff --git a/linux-core/i810_dma.c b/linux-core/i810_dma.c index a4e0c390..31dc1c86 100644 --- a/linux-core/i810_dma.c +++ b/linux-core/i810_dma.c @@ -46,9 +46,9 @@ #define I810_BUF_UNMAPPED 0 #define I810_BUF_MAPPED 1 -static inline void i810_print_status_page(drm_device_t * dev) +static inline void i810_print_status_page(struct drm_device * dev) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; drm_i810_private_t *dev_priv = dev->dev_private; u32 *temp = dev_priv->hw_status_page; int i; @@ -64,9 +64,9 @@ static inline void i810_print_status_page(drm_device_t * dev) } } -static drm_buf_t *i810_freelist_get(drm_device_t * dev) +static drm_buf_t *i810_freelist_get(struct drm_device * dev) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; int i; int used; @@ -89,7 +89,7 @@ static drm_buf_t *i810_freelist_get(drm_device_t * dev) * yet, the hardware updates in use for us once its on the ring buffer. */ -static int i810_freelist_put(drm_device_t * dev, drm_buf_t * buf) +static int i810_freelist_put(struct drm_device * dev, drm_buf_t * buf) { drm_i810_buf_priv_t *buf_priv = buf->dev_private; int used; @@ -106,8 +106,8 @@ static int i810_freelist_put(drm_device_t * dev, drm_buf_t * buf) static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev; drm_i810_private_t *dev_priv; drm_buf_t *buf; drm_i810_buf_priv_t *buf_priv; @@ -141,8 +141,8 @@ static const struct file_operations i810_buffer_fops = { static int i810_map_buffer(drm_buf_t * buf, struct file *filp) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; drm_i810_buf_priv_t *buf_priv = buf->dev_private; drm_i810_private_t *dev_priv = dev->dev_private; const struct file_operations *old_fops; @@ -191,7 +191,7 @@ static int i810_unmap_buffer(drm_buf_t * buf) return retcode; } -static int i810_dma_get_buffer(drm_device_t * dev, drm_i810_dma_t * d, +static int i810_dma_get_buffer(struct drm_device * dev, drm_i810_dma_t * d, struct file *filp) { drm_buf_t *buf; @@ -221,9 +221,9 @@ static int i810_dma_get_buffer(drm_device_t * dev, drm_i810_dma_t * d, return retcode; } -static int i810_dma_cleanup(drm_device_t * dev) +static int i810_dma_cleanup(struct drm_device * dev) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; /* Make sure interrupts are disabled here because the uninstall ioctl * may not have been called from userspace and after dev_private @@ -262,7 +262,7 @@ static int i810_dma_cleanup(drm_device_t * dev) return 0; } -static int i810_wait_ring(drm_device_t * dev, int n) +static int i810_wait_ring(struct drm_device * dev, int n) { drm_i810_private_t *dev_priv = dev->dev_private; drm_i810_ring_buffer_t *ring = &(dev_priv->ring); @@ -295,7 +295,7 @@ static int i810_wait_ring(drm_device_t * dev, int n) return iters; } -static void 
i810_kernel_lost_context(drm_device_t * dev) +static void i810_kernel_lost_context(struct drm_device * dev) { drm_i810_private_t *dev_priv = dev->dev_private; drm_i810_ring_buffer_t *ring = &(dev_priv->ring); @@ -307,9 +307,9 @@ static void i810_kernel_lost_context(drm_device_t * dev) ring->space += ring->Size; } -static int i810_freelist_init(drm_device_t * dev, drm_i810_private_t * dev_priv) +static int i810_freelist_init(struct drm_device * dev, drm_i810_private_t * dev_priv) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; int my_idx = 24; u32 *hw_status = (u32 *) (dev_priv->hw_status_page + my_idx); int i; @@ -342,7 +342,7 @@ static int i810_freelist_init(drm_device_t * dev, drm_i810_private_t * dev_priv) return 0; } -static int i810_dma_initialize(drm_device_t * dev, +static int i810_dma_initialize(struct drm_device * dev, drm_i810_private_t * dev_priv, drm_i810_init_t * init) { @@ -495,8 +495,8 @@ static int i810_dma_init_compat(drm_i810_init_t * init, unsigned long arg) static int i810_dma_init(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; drm_i810_private_t *dev_priv; drm_i810_init_t init; int retcode = 0; @@ -553,7 +553,7 @@ static int i810_dma_init(struct inode *inode, struct file *filp, * Use 'volatile' & local var tmp to force the emitted values to be * identical to the verified ones. */ -static void i810EmitContextVerified(drm_device_t * dev, +static void i810EmitContextVerified(struct drm_device * dev, volatile unsigned int *code) { drm_i810_private_t *dev_priv = dev->dev_private; @@ -586,7 +586,7 @@ static void i810EmitContextVerified(drm_device_t * dev, ADVANCE_LP_RING(); } -static void i810EmitTexVerified(drm_device_t * dev, volatile unsigned int *code) +static void i810EmitTexVerified(struct drm_device * dev, volatile unsigned int *code) { drm_i810_private_t *dev_priv = dev->dev_private; int i, j = 0; @@ -619,7 +619,7 @@ static void i810EmitTexVerified(drm_device_t * dev, volatile unsigned int *code) /* Need to do some additional checking when setting the dest buffer. 
*/ -static void i810EmitDestVerified(drm_device_t * dev, +static void i810EmitDestVerified(struct drm_device * dev, volatile unsigned int *code) { drm_i810_private_t *dev_priv = dev->dev_private; @@ -654,7 +654,7 @@ static void i810EmitDestVerified(drm_device_t * dev, ADVANCE_LP_RING(); } -static void i810EmitState(drm_device_t * dev) +static void i810EmitState(struct drm_device * dev) { drm_i810_private_t *dev_priv = dev->dev_private; drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv; @@ -685,7 +685,7 @@ static void i810EmitState(drm_device_t * dev) /* need to verify */ -static void i810_dma_dispatch_clear(drm_device_t * dev, int flags, +static void i810_dma_dispatch_clear(struct drm_device * dev, int flags, unsigned int clear_color, unsigned int clear_zval) { @@ -760,7 +760,7 @@ static void i810_dma_dispatch_clear(drm_device_t * dev, int flags, } } -static void i810_dma_dispatch_swap(drm_device_t * dev) +static void i810_dma_dispatch_swap(struct drm_device * dev) { drm_i810_private_t *dev_priv = dev->dev_private; drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv; @@ -806,7 +806,7 @@ static void i810_dma_dispatch_swap(drm_device_t * dev) } } -static void i810_dma_dispatch_vertex(drm_device_t * dev, +static void i810_dma_dispatch_vertex(struct drm_device * dev, drm_buf_t * buf, int discard, int used) { drm_i810_private_t *dev_priv = dev->dev_private; @@ -886,7 +886,7 @@ static void i810_dma_dispatch_vertex(drm_device_t * dev, } } -static void i810_dma_dispatch_flip(drm_device_t * dev) +static void i810_dma_dispatch_flip(struct drm_device * dev) { drm_i810_private_t *dev_priv = dev->dev_private; int pitch = dev_priv->pitch; @@ -933,7 +933,7 @@ static void i810_dma_dispatch_flip(drm_device_t * dev) } -static void i810_dma_quiescent(drm_device_t * dev) +static void i810_dma_quiescent(struct drm_device * dev) { drm_i810_private_t *dev_priv = dev->dev_private; RING_LOCALS; @@ -952,10 +952,10 @@ static void i810_dma_quiescent(drm_device_t * dev) i810_wait_ring(dev, dev_priv->ring.Size - 8); } -static int i810_flush_queue(drm_device_t * dev) +static int i810_flush_queue(struct drm_device * dev) { drm_i810_private_t *dev_priv = dev->dev_private; - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; int i, ret = 0; RING_LOCALS; @@ -987,9 +987,9 @@ static int i810_flush_queue(drm_device_t * dev) } /* Must be called with the lock held */ -static void i810_reclaim_buffers(drm_device_t *dev, struct file *filp) +static void i810_reclaim_buffers(struct drm_device *dev, struct file *filp) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; int i; if (!dma) @@ -1020,8 +1020,8 @@ static void i810_reclaim_buffers(drm_device_t *dev, struct file *filp) static int i810_flush_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; LOCK_TEST_WITH_RETURN(dev, filp); @@ -1032,9 +1032,9 @@ static int i810_flush_ioctl(struct inode *inode, struct file *filp, static int i810_dma_vertex(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_device_dma_t *dma = dev->dma; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_device_dma *dma = dev->dma; drm_i810_private_t *dev_priv = (drm_i810_private_t *) 
dev->dev_private; u32 *hw_status = dev_priv->hw_status_page; drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) @@ -1068,8 +1068,8 @@ static int i810_dma_vertex(struct inode *inode, struct file *filp, static int i810_clear_bufs(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; drm_i810_clear_t clear; if (copy_from_user @@ -1091,8 +1091,8 @@ static int i810_clear_bufs(struct inode *inode, struct file *filp, static int i810_swap_bufs(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; DRM_DEBUG("i810_swap_bufs\n"); @@ -1105,8 +1105,8 @@ static int i810_swap_bufs(struct inode *inode, struct file *filp, static int i810_getage(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; u32 *hw_status = dev_priv->hw_status_page; drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) @@ -1119,8 +1119,8 @@ static int i810_getage(struct inode *inode, struct file *filp, unsigned int cmd, static int i810_getbuf(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; int retcode = 0; drm_i810_dma_t d; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; @@ -1161,7 +1161,7 @@ static int i810_docopy(struct inode *inode, struct file *filp, unsigned int cmd, return 0; } -static void i810_dma_dispatch_mc(drm_device_t * dev, drm_buf_t * buf, int used, +static void i810_dma_dispatch_mc(struct drm_device * dev, drm_buf_t * buf, int used, unsigned int last_render) { drm_i810_private_t *dev_priv = dev->dev_private; @@ -1224,9 +1224,9 @@ static void i810_dma_dispatch_mc(drm_device_t * dev, drm_buf_t * buf, int used, static int i810_dma_mc(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_device_dma_t *dma = dev->dma; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_device_dma *dma = dev->dma; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; u32 *hw_status = dev_priv->hw_status_page; drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) @@ -1255,8 +1255,8 @@ static int i810_dma_mc(struct inode *inode, struct file *filp, static int i810_rstatus(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; return (int)(((u32 *) (dev_priv->hw_status_page))[4]); @@ -1265,8 +1265,8 @@ static int i810_rstatus(struct inode *inode, struct file *filp, static int i810_ov0_info(struct inode *inode, struct file *filp, unsigned int 
cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; drm_i810_overlay_t data; @@ -1281,8 +1281,8 @@ static int i810_ov0_info(struct inode *inode, struct file *filp, static int i810_fstatus(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; LOCK_TEST_WITH_RETURN(dev, filp); @@ -1292,8 +1292,8 @@ static int i810_fstatus(struct inode *inode, struct file *filp, static int i810_ov0_flip(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; LOCK_TEST_WITH_RETURN(dev, filp); @@ -1305,7 +1305,7 @@ static int i810_ov0_flip(struct inode *inode, struct file *filp, /* Not sure why this isn't set all the time: */ -static void i810_do_init_pageflip(drm_device_t * dev) +static void i810_do_init_pageflip(struct drm_device * dev) { drm_i810_private_t *dev_priv = dev->dev_private; @@ -1315,7 +1315,7 @@ static void i810_do_init_pageflip(drm_device_t * dev) dev_priv->sarea_priv->pf_current_page = dev_priv->current_page; } -static int i810_do_cleanup_pageflip(drm_device_t * dev) +static int i810_do_cleanup_pageflip(struct drm_device * dev) { drm_i810_private_t *dev_priv = dev->dev_private; @@ -1330,8 +1330,8 @@ static int i810_do_cleanup_pageflip(drm_device_t * dev) static int i810_flip_bufs(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; drm_i810_private_t *dev_priv = dev->dev_private; DRM_DEBUG("%s\n", __FUNCTION__); @@ -1345,7 +1345,7 @@ static int i810_flip_bufs(struct inode *inode, struct file *filp, return 0; } -int i810_driver_load(drm_device_t *dev, unsigned long flags) +int i810_driver_load(struct drm_device *dev, unsigned long flags) { /* i810 has 4 more counters */ dev->counters += 4; @@ -1357,12 +1357,12 @@ int i810_driver_load(drm_device_t *dev, unsigned long flags) return 0; } -void i810_driver_lastclose(drm_device_t * dev) +void i810_driver_lastclose(struct drm_device * dev) { i810_dma_cleanup(dev); } -void i810_driver_preclose(drm_device_t * dev, DRMFILE filp) +void i810_driver_preclose(struct drm_device * dev, DRMFILE filp) { if (dev->dev_private) { drm_i810_private_t *dev_priv = dev->dev_private; @@ -1372,18 +1372,18 @@ void i810_driver_preclose(drm_device_t * dev, DRMFILE filp) } } -void i810_driver_reclaim_buffers_locked(drm_device_t * dev, struct file *filp) +void i810_driver_reclaim_buffers_locked(struct drm_device * dev, struct file *filp) { i810_reclaim_buffers(dev, filp); } -int i810_driver_dma_quiescent(drm_device_t * dev) +int i810_driver_dma_quiescent(struct drm_device * dev) { i810_dma_quiescent(dev); return 0; } -drm_ioctl_desc_t i810_ioctls[] = { +struct drm_ioctl_desc i810_ioctls[] = { [DRM_IOCTL_NR(DRM_I810_INIT)] = {i810_dma_init, 
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, [DRM_IOCTL_NR(DRM_I810_VERTEX)] = {i810_dma_vertex, DRM_AUTH}, [DRM_IOCTL_NR(DRM_I810_CLEAR)] = {i810_clear_bufs, DRM_AUTH}, @@ -1414,7 +1414,7 @@ int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls); * \returns * A value of 1 is always retured to indictate every i810 is AGP. */ -int i810_driver_device_is_agp(drm_device_t * dev) +int i810_driver_device_is_agp(struct drm_device * dev) { return 1; } diff --git a/linux-core/i810_drv.h b/linux-core/i810_drv.h index dbe9d708..06eac774 100644 --- a/linux-core/i810_drv.h +++ b/linux-core/i810_drv.h @@ -115,17 +115,17 @@ typedef struct drm_i810_private { } drm_i810_private_t; /* i810_dma.c */ -extern int i810_driver_dma_quiescent(drm_device_t * dev); -extern void i810_driver_reclaim_buffers_locked(drm_device_t * dev, +extern int i810_driver_dma_quiescent(struct drm_device * dev); +extern void i810_driver_reclaim_buffers_locked(struct drm_device * dev, struct file *filp); extern int i810_driver_load(struct drm_device *, unsigned long flags); -extern void i810_driver_lastclose(drm_device_t * dev); -extern void i810_driver_preclose(drm_device_t * dev, DRMFILE filp); -extern void i810_driver_reclaim_buffers_locked(drm_device_t * dev, +extern void i810_driver_lastclose(struct drm_device * dev); +extern void i810_driver_preclose(struct drm_device * dev, DRMFILE filp); +extern void i810_driver_reclaim_buffers_locked(struct drm_device * dev, struct file *filp); -extern int i810_driver_device_is_agp(drm_device_t * dev); +extern int i810_driver_device_is_agp(struct drm_device * dev); -extern drm_ioctl_desc_t i810_ioctls[]; +extern struct drm_ioctl_desc i810_ioctls[]; extern int i810_max_ioctl; #define I810_BASE(reg) ((unsigned long) \ diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c index 2850fb94..6aeccfcb 100644 --- a/linux-core/i915_buffer.c +++ b/linux-core/i915_buffer.c @@ -33,7 +33,7 @@ #include "i915_drm.h" #include "i915_drv.h" -drm_ttm_backend_t *i915_create_ttm_backend_entry(drm_device_t * dev) +drm_ttm_backend_t *i915_create_ttm_backend_entry(struct drm_device * dev) { return drm_agp_init_ttm(dev); } @@ -47,7 +47,7 @@ int i915_fence_types(drm_buffer_object_t *bo, uint32_t * type) return 0; } -int i915_invalidate_caches(drm_device_t * dev, uint64_t flags) +int i915_invalidate_caches(struct drm_device * dev, uint64_t flags) { /* * FIXME: Only emit once per batchbuffer submission. @@ -63,7 +63,7 @@ int i915_invalidate_caches(drm_device_t * dev, uint64_t flags) return i915_emit_mi_flush(dev, flush_cmd); } -int i915_init_mem_type(drm_device_t * dev, uint32_t type, +int i915_init_mem_type(struct drm_device * dev, uint32_t type, drm_mem_type_manager_t * man) { switch (type) { @@ -116,7 +116,7 @@ uint32_t i915_evict_mask(drm_buffer_object_t *bo) } } -static void i915_emit_copy_blit(drm_device_t * dev, +static void i915_emit_copy_blit(struct drm_device * dev, uint32_t src_offset, uint32_t dst_offset, uint32_t pages, int direction) @@ -183,7 +183,7 @@ static int i915_move_blit(drm_buffer_object_t * bo, static int i915_move_flip(drm_buffer_object_t * bo, int evict, int no_wait, drm_bo_mem_reg_t * new_mem) { - drm_device_t *dev = bo->dev; + struct drm_device *dev = bo->dev; drm_bo_mem_reg_t tmp_mem; int ret; diff --git a/linux-core/i915_fence.c b/linux-core/i915_fence.c index 00873485..a71e5dac 100644 --- a/linux-core/i915_fence.c +++ b/linux-core/i915_fence.c @@ -38,7 +38,7 @@ * Implements an intel sync flush operation. 
*/ -static void i915_perform_flush(drm_device_t * dev) +static void i915_perform_flush(struct drm_device * dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; drm_fence_manager_t *fm = &dev->fm; @@ -109,7 +109,7 @@ static void i915_perform_flush(drm_device_t * dev) } -void i915_poke_flush(drm_device_t * dev, uint32_t class) +void i915_poke_flush(struct drm_device * dev, uint32_t class) { drm_fence_manager_t *fm = &dev->fm; unsigned long flags; @@ -119,7 +119,7 @@ void i915_poke_flush(drm_device_t * dev, uint32_t class) write_unlock_irqrestore(&fm->lock, flags); } -int i915_fence_emit_sequence(drm_device_t * dev, uint32_t class, uint32_t flags, +int i915_fence_emit_sequence(struct drm_device * dev, uint32_t class, uint32_t flags, uint32_t * sequence, uint32_t * native_type) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; @@ -135,7 +135,7 @@ int i915_fence_emit_sequence(drm_device_t * dev, uint32_t class, uint32_t flags, return 0; } -void i915_fence_handler(drm_device_t * dev) +void i915_fence_handler(struct drm_device * dev) { drm_fence_manager_t *fm = &dev->fm; @@ -144,7 +144,7 @@ void i915_fence_handler(drm_device_t * dev) write_unlock(&fm->lock); } -int i915_fence_has_irq(drm_device_t *dev, uint32_t class, uint32_t flags) +int i915_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t flags) { /* * We have an irq that tells us when we have a new breadcrumb. diff --git a/linux-core/mga_drv.c b/linux-core/mga_drv.c index ef6f1e44..1eb6d9e6 100644 --- a/linux-core/mga_drv.c +++ b/linux-core/mga_drv.c @@ -36,7 +36,7 @@ #include "drm_pciids.h" -static int mga_driver_device_is_agp(drm_device_t * dev); +static int mga_driver_device_is_agp(struct drm_device * dev); static struct pci_device_id pciidlist[] = { mga_PCI_IDS @@ -127,7 +127,7 @@ MODULE_LICENSE("GPL and additional rights"); * \returns * If the device is a PCI G450, zero is returned. Otherwise 2 is returned. 
*/ -static int mga_driver_device_is_agp(drm_device_t * dev) +static int mga_driver_device_is_agp(struct drm_device * dev) { const struct pci_dev * const pdev = dev->pdev; diff --git a/linux-core/nouveau_drv.c b/linux-core/nouveau_drv.c index ac030d89..6c73b0d3 100644 --- a/linux-core/nouveau_drv.c +++ b/linux-core/nouveau_drv.c @@ -32,7 +32,7 @@ static struct pci_device_id pciidlist[] = { nouveau_PCI_IDS }; -extern drm_ioctl_desc_t nouveau_ioctls[]; +extern struct drm_ioctl_desc nouveau_ioctls[]; extern int nouveau_max_ioctl; static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); diff --git a/linux-core/sis_drv.c b/linux-core/sis_drv.c index 114ec8f9..b4c3f93b 100644 --- a/linux-core/sis_drv.c +++ b/linux-core/sis_drv.c @@ -36,7 +36,7 @@ static struct pci_device_id pciidlist[] = { }; -static int sis_driver_load(drm_device_t *dev, unsigned long chipset) +static int sis_driver_load(struct drm_device *dev, unsigned long chipset) { drm_sis_private_t *dev_priv; int ret; @@ -55,7 +55,7 @@ static int sis_driver_load(drm_device_t *dev, unsigned long chipset) return ret; } -static int sis_driver_unload(drm_device_t *dev) +static int sis_driver_unload(struct drm_device *dev) { drm_sis_private_t *dev_priv = dev->dev_private; diff --git a/linux-core/sis_mm.c b/linux-core/sis_mm.c index 21c1f2d7..306ed453 100644 --- a/linux-core/sis_mm.c +++ b/linux-core/sis_mm.c @@ -122,7 +122,7 @@ static int sis_fb_init(DRM_IOCTL_ARGS) return 0; } -static int sis_drm_alloc(drm_device_t * dev, drm_file_t * priv, +static int sis_drm_alloc(struct drm_device * dev, struct drm_file * priv, unsigned long data, int pool) { drm_sis_private_t *dev_priv = dev->dev_private; @@ -228,9 +228,9 @@ static int sis_ioctl_agp_alloc(DRM_IOCTL_ARGS) return sis_drm_alloc(dev, priv, data, AGP_TYPE); } -static drm_local_map_t *sis_reg_init(drm_device_t *dev) +static drm_local_map_t *sis_reg_init(struct drm_device *dev) { - drm_map_list_t *entry; + struct drm_map_list *entry; drm_local_map_t *map; list_for_each_entry(entry, &dev->maplist, head) { @@ -245,7 +245,7 @@ static drm_local_map_t *sis_reg_init(drm_device_t *dev) } int -sis_idle(drm_device_t *dev) +sis_idle(struct drm_device *dev) { drm_sis_private_t *dev_priv = dev->dev_private; uint32_t idle_reg; @@ -314,10 +314,10 @@ void sis_lastclose(struct drm_device *dev) mutex_unlock(&dev->struct_mutex); } -void sis_reclaim_buffers_locked(drm_device_t * dev, struct file *filp) +void sis_reclaim_buffers_locked(struct drm_device * dev, struct file *filp) { drm_sis_private_t *dev_priv = dev->dev_private; - drm_file_t *priv = filp->private_data; + struct drm_file *priv = filp->private_data; mutex_lock(&dev->struct_mutex); if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)priv)) { @@ -334,7 +334,7 @@ void sis_reclaim_buffers_locked(drm_device_t * dev, struct file *filp) return; } -drm_ioctl_desc_t sis_ioctls[] = { +struct drm_ioctl_desc sis_ioctls[] = { [DRM_IOCTL_NR(DRM_SIS_FB_ALLOC)] = {sis_fb_alloc, DRM_AUTH}, [DRM_IOCTL_NR(DRM_SIS_FB_FREE)] = {sis_drm_free, DRM_AUTH}, [DRM_IOCTL_NR(DRM_SIS_AGP_INIT)] = diff --git a/linux-core/via_buffer.c b/linux-core/via_buffer.c index 86883998..e452611d 100644 --- a/linux-core/via_buffer.c +++ b/linux-core/via_buffer.c @@ -32,7 +32,7 @@ #include "via_drm.h" #include "via_drv.h" -drm_ttm_backend_t *via_create_ttm_backend_entry(drm_device_t * dev) +drm_ttm_backend_t *via_create_ttm_backend_entry(struct drm_device * dev) { return drm_agp_init_ttm(dev); } @@ -43,7 +43,7 @@ int via_fence_types(drm_buffer_object_t *bo, uint32_t * type) 
return 0; } -int via_invalidate_caches(drm_device_t * dev, uint64_t flags) +int via_invalidate_caches(struct drm_device * dev, uint64_t flags) { /* * FIXME: Invalidate texture caches here. @@ -53,7 +53,7 @@ int via_invalidate_caches(drm_device_t * dev, uint64_t flags) } -static int via_vram_info(drm_device_t *dev, +static int via_vram_info(struct drm_device *dev, unsigned long *offset, unsigned long *size) { @@ -81,7 +81,7 @@ static int via_vram_info(drm_device_t *dev, return 0; } -int via_init_mem_type(drm_device_t * dev, uint32_t type, +int via_init_mem_type(struct drm_device * dev, uint32_t type, drm_mem_type_manager_t * man) { switch (type) { diff --git a/linux-core/via_dmablit.c b/linux-core/via_dmablit.c index 2f508374..5108c867 100644 --- a/linux-core/via_dmablit.c +++ b/linux-core/via_dmablit.c @@ -206,7 +206,7 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg) */ static void -via_fire_dmablit(drm_device_t *dev, drm_via_sg_info_t *vsg, int engine) +via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine) { drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; @@ -286,7 +286,7 @@ via_alloc_desc_pages(drm_via_sg_info_t *vsg) } static void -via_abort_dmablit(drm_device_t *dev, int engine) +via_abort_dmablit(struct drm_device *dev, int engine) { drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; @@ -294,7 +294,7 @@ via_abort_dmablit(drm_device_t *dev, int engine) } static void -via_dmablit_engine_off(drm_device_t *dev, int engine) +via_dmablit_engine_off(struct drm_device *dev, int engine) { drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; @@ -311,7 +311,7 @@ via_dmablit_engine_off(drm_device_t *dev, int engine) */ void -via_dmablit_handler(drm_device_t *dev, int engine, int from_irq) +via_dmablit_handler(struct drm_device *dev, int engine, int from_irq) { drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; drm_via_blitq_t *blitq = dev_priv->blit_queues + engine; @@ -432,7 +432,7 @@ via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_que */ static int -via_dmablit_sync(drm_device_t *dev, uint32_t handle, int engine) +via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine) { drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; @@ -465,7 +465,7 @@ static void via_dmablit_timer(unsigned long data) { drm_via_blitq_t *blitq = (drm_via_blitq_t *) data; - drm_device_t *dev = blitq->dev; + struct drm_device *dev = blitq->dev; int engine = (int) (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues); @@ -509,7 +509,7 @@ via_dmablit_workqueue(struct work_struct *work) #else drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq); #endif - drm_device_t *dev = blitq->dev; + struct drm_device *dev = blitq->dev; unsigned long irqsave; drm_via_sg_info_t *cur_sg; int cur_released; @@ -552,7 +552,7 @@ via_dmablit_workqueue(struct work_struct *work) void -via_init_dmablit(drm_device_t *dev) +via_init_dmablit(struct drm_device *dev) { int i,j; drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; @@ -594,7 +594,7 @@ via_init_dmablit(drm_device_t *dev) static int -via_build_sg_info(drm_device_t *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer) +via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer) { int draw = xfer->to_fb; int ret = 0; @@ -740,7 +740,7 @@ via_dmablit_release_slot(drm_via_blitq_t *blitq) static int -via_dmablit(drm_device_t *dev, drm_via_dmablit_t *xfer) 
+via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer) { drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; drm_via_sg_info_t *vsg; diff --git a/linux-core/via_dmablit.h b/linux-core/via_dmablit.h index f6ae03ec..726ad25d 100644 --- a/linux-core/via_dmablit.h +++ b/linux-core/via_dmablit.h @@ -59,7 +59,7 @@ typedef struct _drm_via_sg_info { } drm_via_sg_info_t; typedef struct _drm_via_blitq { - drm_device_t *dev; + struct drm_device *dev; uint32_t cur_blit_handle; uint32_t done_blit_handle; unsigned serviced; diff --git a/linux-core/via_fence.c b/linux-core/via_fence.c index 02249939..ce4366d2 100644 --- a/linux-core/via_fence.c +++ b/linux-core/via_fence.c @@ -39,7 +39,7 @@ */ -static uint32_t via_perform_flush(drm_device_t *dev, uint32_t class) +static uint32_t via_perform_flush(struct drm_device *dev, uint32_t class) { drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; drm_fence_class_manager_t *fc = &dev->fm.class[class]; @@ -113,7 +113,7 @@ static uint32_t via_perform_flush(drm_device_t *dev, uint32_t class) * Emit a fence sequence. */ -int via_fence_emit_sequence(drm_device_t * dev, uint32_t class, uint32_t flags, +int via_fence_emit_sequence(struct drm_device * dev, uint32_t class, uint32_t flags, uint32_t * sequence, uint32_t * native_type) { drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; @@ -152,7 +152,7 @@ int via_fence_emit_sequence(drm_device_t * dev, uint32_t class, uint32_t flags, * Manual poll (from the fence manager). */ -void via_poke_flush(drm_device_t * dev, uint32_t class) +void via_poke_flush(struct drm_device * dev, uint32_t class) { drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; drm_fence_manager_t *fm = &dev->fm; @@ -200,7 +200,7 @@ int via_fence_has_irq(struct drm_device * dev, uint32_t class, void via_fence_timer(unsigned long data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; drm_fence_manager_t *fm = &dev->fm; uint32_t pending_flush; diff --git a/linux-core/via_mm.c b/linux-core/via_mm.c index d97269f5..48f5fd09 100644 --- a/linux-core/via_mm.c +++ b/linux-core/via_mm.c @@ -190,10 +190,10 @@ int via_mem_free(DRM_IOCTL_ARGS) } -void via_reclaim_buffers_locked(drm_device_t * dev, struct file *filp) +void via_reclaim_buffers_locked(struct drm_device * dev, struct file *filp) { drm_via_private_t *dev_priv = dev->dev_private; - drm_file_t *priv = filp->private_data; + struct drm_file *priv = filp->private_data; mutex_lock(&dev->struct_mutex); if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)priv)) { diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c index f7d3fab4..fbad27c0 100644 --- a/shared-core/i915_dma.c +++ b/shared-core/i915_dma.c @@ -47,7 +47,7 @@ * the head pointer changes, so that EBUSY only happens if the ring * actually stalls for (eg) 3 seconds. 
*/ -int i915_wait_ring(drm_device_t * dev, int n, const char *caller) +int i915_wait_ring(struct drm_device * dev, int n, const char *caller) { drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_ring_buffer_t *ring = &(dev_priv->ring); @@ -74,7 +74,7 @@ int i915_wait_ring(drm_device_t * dev, int n, const char *caller) return DRM_ERR(EBUSY); } -void i915_kernel_lost_context(drm_device_t * dev) +void i915_kernel_lost_context(struct drm_device * dev) { drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_ring_buffer_t *ring = &(dev_priv->ring); @@ -89,7 +89,7 @@ void i915_kernel_lost_context(drm_device_t * dev) dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY; } -static int i915_dma_cleanup(drm_device_t * dev) +static int i915_dma_cleanup(struct drm_device * dev) { /* Make sure interrupts are disabled here because the uninstall ioctl * may not have been called from userspace and after dev_private @@ -125,7 +125,7 @@ static int i915_dma_cleanup(drm_device_t * dev) return 0; } -static int i915_initialize(drm_device_t * dev, +static int i915_initialize(struct drm_device * dev, drm_i915_private_t * dev_priv, drm_i915_init_t * init) { @@ -212,7 +212,7 @@ static int i915_initialize(drm_device_t * dev, return 0; } -static int i915_dma_resume(drm_device_t * dev) +static int i915_dma_resume(struct drm_device * dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; @@ -358,7 +358,7 @@ static int validate_cmd(int cmd) return ret; } -static int i915_emit_cmds(drm_device_t * dev, int __user * buffer, int dwords) +static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwords) { drm_i915_private_t *dev_priv = dev->dev_private; int i; @@ -397,7 +397,7 @@ static int i915_emit_cmds(drm_device_t * dev, int __user * buffer, int dwords) return 0; } -static int i915_emit_box(drm_device_t * dev, +static int i915_emit_box(struct drm_device * dev, struct drm_clip_rect __user * boxes, int i, int DR1, int DR4) { @@ -440,7 +440,7 @@ static int i915_emit_box(drm_device_t * dev, * emit. 
For now, do it in both places: */ -void i915_emit_breadcrumb(drm_device_t *dev) +void i915_emit_breadcrumb(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; RING_LOCALS; @@ -461,7 +461,7 @@ void i915_emit_breadcrumb(drm_device_t *dev) } -int i915_emit_mi_flush(drm_device_t *dev, uint32_t flush) +int i915_emit_mi_flush(struct drm_device *dev, uint32_t flush) { drm_i915_private_t *dev_priv = dev->dev_private; uint32_t flush_cmd = CMD_MI_FLUSH; @@ -482,7 +482,7 @@ int i915_emit_mi_flush(drm_device_t *dev, uint32_t flush) } -static int i915_dispatch_cmdbuffer(drm_device_t * dev, +static int i915_dispatch_cmdbuffer(struct drm_device * dev, drm_i915_cmdbuffer_t * cmd) { #ifdef I915_HAVE_FENCE @@ -520,7 +520,7 @@ static int i915_dispatch_cmdbuffer(drm_device_t * dev, return 0; } -static int i915_dispatch_batchbuffer(drm_device_t * dev, +static int i915_dispatch_batchbuffer(struct drm_device * dev, drm_i915_batchbuffer_t * batch) { drm_i915_private_t *dev_priv = dev->dev_private; @@ -568,7 +568,7 @@ static int i915_dispatch_batchbuffer(drm_device_t * dev, return 0; } -static void i915_do_dispatch_flip(drm_device_t * dev, int pipe, int sync) +static void i915_do_dispatch_flip(struct drm_device * dev, int pipe, int sync) { drm_i915_private_t *dev_priv = dev->dev_private; u32 num_pages, current_page, next_page, dspbase; @@ -620,7 +620,7 @@ static void i915_do_dispatch_flip(drm_device_t * dev, int pipe, int sync) dev_priv->sarea_priv->pf_current_page |= next_page << shift; } -void i915_dispatch_flip(drm_device_t * dev, int pipes, int sync) +void i915_dispatch_flip(struct drm_device * dev, int pipes, int sync) { drm_i915_private_t *dev_priv = dev->dev_private; int i; @@ -642,7 +642,7 @@ void i915_dispatch_flip(drm_device_t * dev, int pipes, int sync) #endif } -static int i915_quiescent(drm_device_t * dev) +static int i915_quiescent(struct drm_device * dev) { drm_i915_private_t *dev_priv = dev->dev_private; @@ -727,7 +727,7 @@ static int i915_cmdbuffer(DRM_IOCTL_ARGS) return 0; } -static int i915_do_cleanup_pageflip(drm_device_t * dev) +static int i915_do_cleanup_pageflip(struct drm_device * dev) { drm_i915_private_t *dev_priv = dev->dev_private; int i, pipes, num_pages = dev_priv->sarea_priv->third_handle ? 
3 : 2; @@ -939,7 +939,7 @@ static int i915_set_status_page(DRM_IOCTL_ARGS) return 0; } -int i915_driver_load(drm_device_t *dev, unsigned long flags) +int i915_driver_load(struct drm_device *dev, unsigned long flags) { /* i915 has 4 more counters */ dev->counters += 4; @@ -951,7 +951,7 @@ int i915_driver_load(drm_device_t *dev, unsigned long flags) return 0; } -void i915_driver_lastclose(drm_device_t * dev) +void i915_driver_lastclose(struct drm_device * dev) { if (dev->dev_private) { drm_i915_private_t *dev_priv = dev->dev_private; @@ -961,7 +961,7 @@ void i915_driver_lastclose(drm_device_t * dev) i915_dma_cleanup(dev); } -void i915_driver_preclose(drm_device_t * dev, DRMFILE filp) +void i915_driver_preclose(struct drm_device * dev, DRMFILE filp) { if (dev->dev_private) { drm_i915_private_t *dev_priv = dev->dev_private; @@ -969,7 +969,7 @@ void i915_driver_preclose(drm_device_t * dev, DRMFILE filp) } } -drm_ioctl_desc_t i915_ioctls[] = { +struct drm_ioctl_desc i915_ioctls[] = { [DRM_IOCTL_NR(DRM_I915_INIT)] = {i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, [DRM_IOCTL_NR(DRM_I915_FLUSH)] = {i915_flush_ioctl, DRM_AUTH}, [DRM_IOCTL_NR(DRM_I915_FLIP)] = {i915_flip_bufs, DRM_AUTH}, @@ -1003,7 +1003,7 @@ int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); * \returns * A value of 1 is always retured to indictate every i9x5 is AGP. */ -int i915_driver_device_is_agp(drm_device_t * dev) +int i915_driver_device_is_agp(struct drm_device * dev) { return 1; } diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h index e19d372a..1ed37c63 100644 --- a/shared-core/i915_drv.h +++ b/shared-core/i915_drv.h @@ -141,35 +141,35 @@ enum intel_chip_family { CHIP_I965 = 0x08, }; -extern drm_ioctl_desc_t i915_ioctls[]; +extern struct drm_ioctl_desc i915_ioctls[]; extern int i915_max_ioctl; /* i915_dma.c */ -extern void i915_kernel_lost_context(drm_device_t * dev); +extern void i915_kernel_lost_context(struct drm_device * dev); extern int i915_driver_load(struct drm_device *, unsigned long flags); -extern void i915_driver_lastclose(drm_device_t * dev); -extern void i915_driver_preclose(drm_device_t * dev, DRMFILE filp); -extern int i915_driver_device_is_agp(drm_device_t * dev); +extern void i915_driver_lastclose(struct drm_device * dev); +extern void i915_driver_preclose(struct drm_device * dev, DRMFILE filp); +extern int i915_driver_device_is_agp(struct drm_device * dev); extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); -extern void i915_emit_breadcrumb(drm_device_t *dev); -extern void i915_dispatch_flip(drm_device_t * dev, int pipes, int sync); -extern int i915_emit_mi_flush(drm_device_t *dev, uint32_t flush); +extern void i915_emit_breadcrumb(struct drm_device *dev); +extern void i915_dispatch_flip(struct drm_device * dev, int pipes, int sync); +extern int i915_emit_mi_flush(struct drm_device *dev, uint32_t flush); extern int i915_driver_firstopen(struct drm_device *dev); /* i915_irq.c */ extern int i915_irq_emit(DRM_IOCTL_ARGS); extern int i915_irq_wait(DRM_IOCTL_ARGS); -extern int i915_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence); -extern int i915_driver_vblank_wait2(drm_device_t *dev, unsigned int *sequence); +extern int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence); +extern int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence); extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS); -extern void i915_driver_irq_preinstall(drm_device_t * dev); -extern void i915_driver_irq_postinstall(drm_device_t * 
dev); -extern void i915_driver_irq_uninstall(drm_device_t * dev); +extern void i915_driver_irq_preinstall(struct drm_device * dev); +extern void i915_driver_irq_postinstall(struct drm_device * dev); +extern void i915_driver_irq_uninstall(struct drm_device * dev); extern int i915_vblank_pipe_set(DRM_IOCTL_ARGS); extern int i915_vblank_pipe_get(DRM_IOCTL_ARGS); -extern int i915_emit_irq(drm_device_t * dev); +extern int i915_emit_irq(struct drm_device * dev); extern void i915_user_irq_on(drm_i915_private_t *dev_priv); extern void i915_user_irq_off(drm_i915_private_t *dev_priv); extern int i915_vblank_swap(DRM_IOCTL_ARGS); @@ -180,27 +180,27 @@ extern int i915_mem_free(DRM_IOCTL_ARGS); extern int i915_mem_init_heap(DRM_IOCTL_ARGS); extern int i915_mem_destroy_heap(DRM_IOCTL_ARGS); extern void i915_mem_takedown(struct mem_block **heap); -extern void i915_mem_release(drm_device_t * dev, +extern void i915_mem_release(struct drm_device * dev, DRMFILE filp, struct mem_block *heap); #ifdef I915_HAVE_FENCE /* i915_fence.c */ -extern void i915_fence_handler(drm_device_t *dev); -extern int i915_fence_emit_sequence(drm_device_t *dev, uint32_t class, +extern void i915_fence_handler(struct drm_device *dev); +extern int i915_fence_emit_sequence(struct drm_device *dev, uint32_t class, uint32_t flags, uint32_t *sequence, uint32_t *native_type); -extern void i915_poke_flush(drm_device_t *dev, uint32_t class); -extern int i915_fence_has_irq(drm_device_t *dev, uint32_t class, uint32_t flags); +extern void i915_poke_flush(struct drm_device *dev, uint32_t class); +extern int i915_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t flags); #endif #ifdef I915_HAVE_BUFFER /* i915_buffer.c */ -extern drm_ttm_backend_t *i915_create_ttm_backend_entry(drm_device_t *dev); +extern drm_ttm_backend_t *i915_create_ttm_backend_entry(struct drm_device *dev); extern int i915_fence_types(drm_buffer_object_t *bo, uint32_t *type); -extern int i915_invalidate_caches(drm_device_t *dev, uint64_t buffer_flags); -extern int i915_init_mem_type(drm_device_t *dev, uint32_t type, +extern int i915_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags); +extern int i915_init_mem_type(struct drm_device *dev, uint32_t type, drm_mem_type_manager_t *man); extern uint32_t i915_evict_mask(drm_buffer_object_t *bo); extern int i915_move(drm_buffer_object_t *bo, int evict, @@ -245,7 +245,7 @@ extern int i915_move(drm_buffer_object_t *bo, int evict, I915_WRITE(LP_RING + RING_TAIL, outring); \ } while(0) -extern int i915_wait_ring(drm_device_t * dev, int n, const char *caller); +extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); #define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23)) #define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23)) diff --git a/shared-core/i915_irq.c b/shared-core/i915_irq.c index eb32e194..17cccac3 100644 --- a/shared-core/i915_irq.c +++ b/shared-core/i915_irq.c @@ -43,7 +43,7 @@ * This function must be called with the drawable spinlock held. */ static void -i915_dispatch_vsync_flip(drm_device_t *dev, struct drm_drawable_info *drw, +i915_dispatch_vsync_flip(struct drm_device *dev, struct drm_drawable_info *drw, int pipe) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; @@ -87,7 +87,7 @@ i915_dispatch_vsync_flip(drm_device_t *dev, struct drm_drawable_info *drw, * * This function will be called with the HW lock held. 
*/ -static void i915_vblank_tasklet(drm_device_t *dev) +static void i915_vblank_tasklet(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; unsigned long irqflags; @@ -277,7 +277,7 @@ static void i915_vblank_tasklet(drm_device_t *dev) irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) { - drm_device_t *dev = (drm_device_t *) arg; + struct drm_device *dev = (struct drm_device *) arg; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; u16 temp; u32 pipea_stats, pipeb_stats; @@ -339,7 +339,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) return IRQ_HANDLED; } -int i915_emit_irq(drm_device_t * dev) +int i915_emit_irq(struct drm_device * dev) { drm_i915_private_t *dev_priv = dev->dev_private; @@ -383,7 +383,7 @@ void i915_user_irq_off(drm_i915_private_t *dev_priv) } -static int i915_wait_irq(drm_device_t * dev, int irq_nr) +static int i915_wait_irq(struct drm_device * dev, int irq_nr) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; int ret = 0; @@ -411,7 +411,7 @@ static int i915_wait_irq(drm_device_t * dev, int irq_nr) return ret; } -static int i915_driver_vblank_do_wait(drm_device_t *dev, unsigned int *sequence, +static int i915_driver_vblank_do_wait(struct drm_device *dev, unsigned int *sequence, atomic_t *counter) { drm_i915_private_t *dev_priv = dev->dev_private; @@ -432,12 +432,12 @@ static int i915_driver_vblank_do_wait(drm_device_t *dev, unsigned int *sequence, return ret; } -int i915_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence) +int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence) { return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received); } -int i915_driver_vblank_wait2(drm_device_t *dev, unsigned int *sequence) +int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence) { return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received2); } @@ -490,7 +490,7 @@ int i915_irq_wait(DRM_IOCTL_ARGS) return i915_wait_irq(dev, irqwait.irq_seq); } -static void i915_enable_interrupt (drm_device_t *dev) +static void i915_enable_interrupt (struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; @@ -703,7 +703,7 @@ int i915_vblank_swap(DRM_IOCTL_ARGS) /* drm_dma.h hooks */ -void i915_driver_irq_preinstall(drm_device_t * dev) +void i915_driver_irq_preinstall(struct drm_device * dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; @@ -712,7 +712,7 @@ void i915_driver_irq_preinstall(drm_device_t * dev) I915_WRITE16(I915REG_INT_ENABLE_R, 0x0); } -void i915_driver_irq_postinstall(drm_device_t * dev) +void i915_driver_irq_postinstall(struct drm_device * dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; @@ -733,7 +733,7 @@ void i915_driver_irq_postinstall(drm_device_t * dev) I915_WRITE(I915REG_INSTPM, ( 1 << 5) | ( 1 << 21)); } -void i915_driver_irq_uninstall(drm_device_t * dev) +void i915_driver_irq_uninstall(struct drm_device * dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; u16 temp; diff --git a/shared-core/i915_mem.c b/shared-core/i915_mem.c index c66edfab..582687ad 100644 --- a/shared-core/i915_mem.c +++ b/shared-core/i915_mem.c @@ -43,7 +43,7 @@ * block to allocate, and the ring is drained prior to allocations -- * in other words allocation is expensive. 
*/ -static void mark_block(drm_device_t * dev, struct mem_block *p, int in_use) +static void mark_block(struct drm_device * dev, struct mem_block *p, int in_use) { drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv; @@ -208,7 +208,7 @@ static int init_heap(struct mem_block **heap, int start, int size) /* Free all blocks associated with the releasing file. */ -void i915_mem_release(drm_device_t * dev, DRMFILE filp, struct mem_block *heap) +void i915_mem_release(struct drm_device * dev, DRMFILE filp, struct mem_block *heap) { struct mem_block *p; diff --git a/shared-core/mach64_dma.c b/shared-core/mach64_dma.c index 47340165..c787260a 100644 --- a/shared-core/mach64_dma.c +++ b/shared-core/mach64_dma.c @@ -572,7 +572,7 @@ void mach64_dump_ring_info(drm_mach64_private_t * dev_priv) * DMA operation. It is left here since it so tricky to get DMA operating * properly in some architectures and hardware. */ -static int mach64_bm_dma_test(drm_device_t * dev) +static int mach64_bm_dma_test(struct drm_device * dev) { drm_mach64_private_t *dev_priv = dev->dev_private; drm_dma_handle_t *cpu_addr_dmah; @@ -752,7 +752,7 @@ static int mach64_bm_dma_test(drm_device_t * dev) * Called during the DMA initialization ioctl to initialize all the necessary * software and hardware state for DMA operation. */ -static int mach64_do_dma_init(drm_device_t * dev, drm_mach64_init_t * init) +static int mach64_do_dma_init(struct drm_device * dev, drm_mach64_init_t * init) { drm_mach64_private_t *dev_priv; u32 tmp; @@ -1117,7 +1117,7 @@ int mach64_do_dispatch_pseudo_dma(drm_mach64_private_t * dev_priv) /** \name DMA cleanup */ /*@{*/ -int mach64_do_cleanup_dma(drm_device_t * dev) +int mach64_do_cleanup_dma(struct drm_device * dev) { DRM_DEBUG("%s\n", __FUNCTION__); @@ -1223,9 +1223,9 @@ int mach64_engine_reset(DRM_IOCTL_ARGS) /** \name Freelist management */ /*@{*/ -int mach64_init_freelist(drm_device_t * dev) +int mach64_init_freelist(struct drm_device * dev) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; drm_mach64_private_t *dev_priv = dev->dev_private; drm_mach64_freelist_t *entry; struct list_head *ptr; @@ -1249,7 +1249,7 @@ int mach64_init_freelist(drm_device_t * dev) return 0; } -void mach64_destroy_freelist(drm_device_t * dev) +void mach64_destroy_freelist(struct drm_device * dev) { drm_mach64_private_t *dev_priv = dev->dev_private; drm_mach64_freelist_t *entry; @@ -1461,7 +1461,7 @@ int mach64_freelist_put(drm_mach64_private_t * dev_priv, drm_buf_t * copy_buf) /** \name DMA buffer request and submission IOCTL handler */ /*@{*/ -static int mach64_dma_get_buffers(DRMFILE filp, drm_device_t * dev, +static int mach64_dma_get_buffers(DRMFILE filp, struct drm_device * dev, struct drm_dma * d) { int i; @@ -1530,7 +1530,7 @@ int mach64_dma_buffers(DRM_IOCTL_ARGS) return ret; } -void mach64_driver_lastclose(drm_device_t * dev) +void mach64_driver_lastclose(struct drm_device * dev) { mach64_do_cleanup_dma(dev); } diff --git a/shared-core/mach64_drv.h b/shared-core/mach64_drv.h index bb8b309e..31b8247a 100644 --- a/shared-core/mach64_drv.h +++ b/shared-core/mach64_drv.h @@ -108,7 +108,7 @@ typedef struct drm_mach64_private { drm_local_map_t *agp_textures; } drm_mach64_private_t; -extern drm_ioctl_desc_t mach64_ioctls[]; +extern struct drm_ioctl_desc mach64_ioctls[]; extern int mach64_max_ioctl; /* mach64_dma.c */ @@ -117,10 +117,10 @@ extern int mach64_dma_idle(DRM_IOCTL_ARGS); extern int mach64_dma_flush(DRM_IOCTL_ARGS); extern int 
mach64_engine_reset(DRM_IOCTL_ARGS); extern int mach64_dma_buffers(DRM_IOCTL_ARGS); -extern void mach64_driver_lastclose(drm_device_t * dev); +extern void mach64_driver_lastclose(struct drm_device * dev); -extern int mach64_init_freelist(drm_device_t * dev); -extern void mach64_destroy_freelist(drm_device_t * dev); +extern int mach64_init_freelist(struct drm_device * dev); +extern void mach64_destroy_freelist(struct drm_device * dev); extern drm_buf_t *mach64_freelist_get(drm_mach64_private_t * dev_priv); extern int mach64_freelist_put(drm_mach64_private_t * dev_priv, drm_buf_t * copy_buf); @@ -137,7 +137,7 @@ extern int mach64_do_engine_reset(drm_mach64_private_t * dev_priv); extern int mach64_do_dma_idle(drm_mach64_private_t * dev_priv); extern int mach64_do_dma_flush(drm_mach64_private_t * dev_priv); -extern int mach64_do_cleanup_dma(drm_device_t * dev); +extern int mach64_do_cleanup_dma(struct drm_device * dev); /* mach64_state.c */ extern int mach64_dma_clear(DRM_IOCTL_ARGS); @@ -145,13 +145,13 @@ extern int mach64_dma_swap(DRM_IOCTL_ARGS); extern int mach64_dma_vertex(DRM_IOCTL_ARGS); extern int mach64_dma_blit(DRM_IOCTL_ARGS); extern int mach64_get_param(DRM_IOCTL_ARGS); -extern int mach64_driver_vblank_wait(drm_device_t * dev, +extern int mach64_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence); extern irqreturn_t mach64_driver_irq_handler(DRM_IRQ_ARGS); -extern void mach64_driver_irq_preinstall(drm_device_t * dev); -extern void mach64_driver_irq_postinstall(drm_device_t * dev); -extern void mach64_driver_irq_uninstall(drm_device_t * dev); +extern void mach64_driver_irq_preinstall(struct drm_device * dev); +extern void mach64_driver_irq_postinstall(struct drm_device * dev); +extern void mach64_driver_irq_uninstall(struct drm_device * dev); /* ================================================================ * Registers diff --git a/shared-core/mach64_irq.c b/shared-core/mach64_irq.c index 663642db..4122dd91 100644 --- a/shared-core/mach64_irq.c +++ b/shared-core/mach64_irq.c @@ -42,7 +42,7 @@ irqreturn_t mach64_driver_irq_handler(DRM_IRQ_ARGS) { - drm_device_t *dev = (drm_device_t *) arg; + struct drm_device *dev = (struct drm_device *) arg; drm_mach64_private_t *dev_priv = (drm_mach64_private_t *) dev->dev_private; int status; @@ -70,7 +70,7 @@ irqreturn_t mach64_driver_irq_handler(DRM_IRQ_ARGS) return IRQ_NONE; } -int mach64_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence) +int mach64_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence) { unsigned int cur_vblank; int ret = 0; @@ -90,7 +90,7 @@ int mach64_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence) /* drm_dma.h hooks */ -void mach64_driver_irq_preinstall(drm_device_t * dev) +void mach64_driver_irq_preinstall(struct drm_device * dev) { drm_mach64_private_t *dev_priv = (drm_mach64_private_t *) dev->dev_private; @@ -104,7 +104,7 @@ void mach64_driver_irq_preinstall(drm_device_t * dev) | MACH64_CRTC_VBLANK_INT); } -void mach64_driver_irq_postinstall(drm_device_t * dev) +void mach64_driver_irq_postinstall(struct drm_device * dev) { drm_mach64_private_t *dev_priv = (drm_mach64_private_t *) dev->dev_private; @@ -118,7 +118,7 @@ void mach64_driver_irq_postinstall(drm_device_t * dev) } -void mach64_driver_irq_uninstall(drm_device_t * dev) +void mach64_driver_irq_uninstall(struct drm_device * dev) { drm_mach64_private_t *dev_priv = (drm_mach64_private_t *) dev->dev_private; diff --git a/shared-core/mach64_state.c b/shared-core/mach64_state.c index a1047cbd..4e8291af 100644 --- 
a/shared-core/mach64_state.c +++ b/shared-core/mach64_state.c @@ -40,7 +40,7 @@ * 1.0 - Initial mach64 DRM * */ -drm_ioctl_desc_t mach64_ioctls[] = { +struct drm_ioctl_desc mach64_ioctls[] = { [DRM_IOCTL_NR(DRM_MACH64_INIT)] = {mach64_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, [DRM_IOCTL_NR(DRM_MACH64_CLEAR)] = {mach64_dma_clear, DRM_AUTH}, [DRM_IOCTL_NR(DRM_MACH64_SWAP)] = {mach64_dma_swap, DRM_AUTH}, @@ -212,7 +212,7 @@ static __inline__ int mach64_emit_state(DRMFILE filp, * DMA command dispatch functions */ -static int mach64_dma_dispatch_clear(DRMFILE filp, drm_device_t * dev, +static int mach64_dma_dispatch_clear(DRMFILE filp, struct drm_device * dev, unsigned int flags, int cx, int cy, int cw, int ch, unsigned int clear_color, @@ -355,7 +355,7 @@ static int mach64_dma_dispatch_clear(DRMFILE filp, drm_device_t * dev, return 0; } -static int mach64_dma_dispatch_swap(DRMFILE filp, drm_device_t * dev) +static int mach64_dma_dispatch_swap(DRMFILE filp, struct drm_device * dev) { drm_mach64_private_t *dev_priv = dev->dev_private; drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv; @@ -545,7 +545,7 @@ static __inline__ int copy_from_user_vertex(u32 *to, } } -static int mach64_dma_dispatch_vertex(DRMFILE filp, drm_device_t * dev, +static int mach64_dma_dispatch_vertex(DRMFILE filp, struct drm_device * dev, drm_mach64_vertex_t * vertex) { drm_mach64_private_t *dev_priv = dev->dev_private; @@ -640,7 +640,7 @@ static __inline__ int copy_from_user_blit(u32 *to, return 0; } -static int mach64_dma_dispatch_blit(DRMFILE filp, drm_device_t * dev, +static int mach64_dma_dispatch_blit(DRMFILE filp, struct drm_device * dev, drm_mach64_blit_t * blit) { drm_mach64_private_t *dev_priv = dev->dev_private; diff --git a/shared-core/mga_dma.c b/shared-core/mga_dma.c index ea6212fe..0a3c2729 100644 --- a/shared-core/mga_dma.c +++ b/shared-core/mga_dma.c @@ -46,7 +46,7 @@ #define MINIMAL_CLEANUP 0 #define FULL_CLEANUP 1 -static int mga_do_cleanup_dma(drm_device_t * dev, int full_cleanup); +static int mga_do_cleanup_dma(struct drm_device * dev, int full_cleanup); /* ================================================================ * Engine control @@ -224,7 +224,7 @@ void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv) #define MGA_BUFFER_FREE 0 #if MGA_FREELIST_DEBUG -static void mga_freelist_print(drm_device_t * dev) +static void mga_freelist_print(struct drm_device * dev) { drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_freelist_t *entry; @@ -245,10 +245,10 @@ static void mga_freelist_print(drm_device_t * dev) } #endif -static int mga_freelist_init(drm_device_t * dev, drm_mga_private_t * dev_priv) +static int mga_freelist_init(struct drm_device * dev, drm_mga_private_t * dev_priv) { - drm_device_dma_t *dma = dev->dma; - drm_buf_t *buf; + struct drm_device_dma *dma = dev->dma; + struct drm_buf *buf; drm_mga_buf_priv_t *buf_priv; drm_mga_freelist_t *entry; int i; @@ -291,7 +291,7 @@ static int mga_freelist_init(drm_device_t * dev, drm_mga_private_t * dev_priv) return 0; } -static void mga_freelist_cleanup(drm_device_t * dev) +static void mga_freelist_cleanup(struct drm_device * dev) { drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_freelist_t *entry; @@ -311,7 +311,7 @@ static void mga_freelist_cleanup(drm_device_t * dev) #if 0 /* FIXME: Still needed? 
*/ -static void mga_freelist_reset(drm_device_t * dev) +static void mga_freelist_reset(struct drm_device * dev) { drm_device_dma_t *dma = dev->dma; drm_buf_t *buf; @@ -326,7 +326,7 @@ static void mga_freelist_reset(drm_device_t * dev) } #endif -static drm_buf_t *mga_freelist_get(drm_device_t * dev) +static drm_buf_t *mga_freelist_get(struct drm_device * dev) { drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_freelist_t *next; @@ -359,7 +359,7 @@ static drm_buf_t *mga_freelist_get(drm_device_t * dev) return NULL; } -int mga_freelist_put(drm_device_t * dev, drm_buf_t * buf) +int mga_freelist_put(struct drm_device * dev, drm_buf_t * buf) { drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_buf_priv_t *buf_priv = buf->dev_private; @@ -393,7 +393,7 @@ int mga_freelist_put(drm_device_t * dev, drm_buf_t * buf) * DMA initialization, cleanup */ -int mga_driver_load(drm_device_t *dev, unsigned long flags) +int mga_driver_load(struct drm_device *dev, unsigned long flags) { drm_mga_private_t * dev_priv; @@ -433,7 +433,7 @@ int mga_driver_load(drm_device_t *dev, unsigned long flags) * * \sa mga_do_dma_bootstrap, mga_do_pci_dma_bootstrap */ -static int mga_do_agp_dma_bootstrap(drm_device_t * dev, +static int mga_do_agp_dma_bootstrap(struct drm_device *dev, drm_mga_dma_bootstrap_t * dma_bs) { drm_mga_private_t * const dev_priv = (drm_mga_private_t *) dev->dev_private; @@ -548,7 +548,7 @@ static int mga_do_agp_dma_bootstrap(drm_device_t * dev, #ifdef __linux__ { - drm_map_list_t *_entry; + struct drm_map_list *_entry; unsigned long agp_token = 0; list_for_each_entry(_entry, &dev->maplist, head) { @@ -603,7 +603,7 @@ static int mga_do_agp_dma_bootstrap(drm_device_t * dev, * * \sa mga_do_dma_bootstrap, mga_do_agp_dma_bootstrap */ -static int mga_do_pci_dma_bootstrap(drm_device_t * dev, +static int mga_do_pci_dma_bootstrap(struct drm_device * dev, drm_mga_dma_bootstrap_t * dma_bs) { drm_mga_private_t * const dev_priv = (drm_mga_private_t *) dev->dev_private; @@ -696,7 +696,7 @@ static int mga_do_pci_dma_bootstrap(drm_device_t * dev, } -static int mga_do_dma_bootstrap(drm_device_t * dev, +static int mga_do_dma_bootstrap(struct drm_device * dev, drm_mga_dma_bootstrap_t * dma_bs) { const int is_agp = (dma_bs->agp_mode != 0) && drm_device_is_agp(dev); @@ -799,7 +799,7 @@ int mga_dma_bootstrap(DRM_IOCTL_ARGS) } -static int mga_do_init_dma(drm_device_t * dev, drm_mga_init_t * init) +static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init) { drm_mga_private_t *dev_priv; int ret; @@ -939,7 +939,7 @@ static int mga_do_init_dma(drm_device_t * dev, drm_mga_init_t * init) return 0; } -static int mga_do_cleanup_dma(drm_device_t * dev, int full_cleanup) +static int mga_do_cleanup_dma(struct drm_device * dev, int full_cleanup) { int err = 0; DRM_DEBUG("\n"); @@ -1089,7 +1089,7 @@ int mga_dma_reset(DRM_IOCTL_ARGS) * DMA buffer management */ -static int mga_dma_get_buffers(DRMFILE filp, drm_device_t * dev, struct drm_dma * d) +static int mga_dma_get_buffers(DRMFILE filp, struct drm_device * dev, struct drm_dma * d) { drm_buf_t *buf; int i; @@ -1116,7 +1116,7 @@ static int mga_dma_get_buffers(DRMFILE filp, drm_device_t * dev, struct drm_dma int mga_dma_buffers(DRM_IOCTL_ARGS) { DRM_DEVICE; - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; struct drm_dma __user *argp = (void __user *)data; struct drm_dma d; @@ -1158,7 +1158,7 @@ int mga_dma_buffers(DRM_IOCTL_ARGS) /** * Called just before the module is 
unloaded. */ -int mga_driver_unload(drm_device_t * dev) +int mga_driver_unload(struct drm_device * dev) { drm_free(dev->dev_private, sizeof(drm_mga_private_t), DRM_MEM_DRIVER); dev->dev_private = NULL; @@ -1169,12 +1169,12 @@ int mga_driver_unload(drm_device_t * dev) /** * Called when the last opener of the device is closed. */ -void mga_driver_lastclose(drm_device_t * dev) +void mga_driver_lastclose(struct drm_device * dev) { mga_do_cleanup_dma(dev, FULL_CLEANUP); } -int mga_driver_dma_quiescent(drm_device_t * dev) +int mga_driver_dma_quiescent(struct drm_device * dev) { drm_mga_private_t *dev_priv = dev->dev_private; return mga_do_wait_for_idle(dev_priv); diff --git a/shared-core/mga_drv.h b/shared-core/mga_drv.h index bce82135..e1fdf403 100644 --- a/shared-core/mga_drv.h +++ b/shared-core/mga_drv.h @@ -148,7 +148,7 @@ typedef struct drm_mga_private { unsigned int agp_size; } drm_mga_private_t; -extern drm_ioctl_desc_t mga_ioctls[]; +extern struct drm_ioctl_desc mga_ioctls[]; extern int mga_max_ioctl; /* mga_dma.c */ @@ -157,10 +157,10 @@ extern int mga_dma_init(DRM_IOCTL_ARGS); extern int mga_dma_flush(DRM_IOCTL_ARGS); extern int mga_dma_reset(DRM_IOCTL_ARGS); extern int mga_dma_buffers(DRM_IOCTL_ARGS); -extern int mga_driver_load(drm_device_t *dev, unsigned long flags); -extern int mga_driver_unload(drm_device_t * dev); -extern void mga_driver_lastclose(drm_device_t * dev); -extern int mga_driver_dma_quiescent(drm_device_t * dev); +extern int mga_driver_load(struct drm_device *dev, unsigned long flags); +extern int mga_driver_unload(struct drm_device * dev); +extern void mga_driver_lastclose(struct drm_device * dev); +extern int mga_driver_dma_quiescent(struct drm_device * dev); extern int mga_do_wait_for_idle(drm_mga_private_t * dev_priv); @@ -168,7 +168,7 @@ extern void mga_do_dma_flush(drm_mga_private_t * dev_priv); extern void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv); extern void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv); -extern int mga_freelist_put(drm_device_t * dev, drm_buf_t * buf); +extern int mga_freelist_put(struct drm_device * dev, drm_buf_t * buf); /* mga_warp.c */ extern unsigned int mga_warp_microcode_size(const drm_mga_private_t * dev_priv); @@ -176,12 +176,12 @@ extern int mga_warp_install_microcode(drm_mga_private_t * dev_priv); extern int mga_warp_init(drm_mga_private_t * dev_priv); /* mga_irq.c */ -extern int mga_driver_fence_wait(drm_device_t * dev, unsigned int *sequence); -extern int mga_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence); +extern int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence); +extern int mga_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence); extern irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS); -extern void mga_driver_irq_preinstall(drm_device_t * dev); -extern void mga_driver_irq_postinstall(drm_device_t * dev); -extern void mga_driver_irq_uninstall(drm_device_t * dev); +extern void mga_driver_irq_preinstall(struct drm_device * dev); +extern void mga_driver_irq_postinstall(struct drm_device * dev); +extern void mga_driver_irq_uninstall(struct drm_device * dev); extern long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); diff --git a/shared-core/mga_irq.c b/shared-core/mga_irq.c index 490d1fbb..8b555e2e 100644 --- a/shared-core/mga_irq.c +++ b/shared-core/mga_irq.c @@ -38,7 +38,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS) { - drm_device_t *dev = (drm_device_t *) arg; + struct drm_device *dev = (struct drm_device *) arg; 
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; int status; int handled = 0; @@ -80,7 +80,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS) return IRQ_NONE; } -int mga_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence) +int mga_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence) { unsigned int cur_vblank; int ret = 0; @@ -98,7 +98,7 @@ int mga_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence) return ret; } -int mga_driver_fence_wait(drm_device_t * dev, unsigned int *sequence) +int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence) { drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; unsigned int cur_fence; @@ -117,7 +117,7 @@ int mga_driver_fence_wait(drm_device_t * dev, unsigned int *sequence) return ret; } -void mga_driver_irq_preinstall(drm_device_t * dev) +void mga_driver_irq_preinstall(struct drm_device * dev) { drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; @@ -127,7 +127,7 @@ void mga_driver_irq_preinstall(drm_device_t * dev) MGA_WRITE(MGA_ICLEAR, ~0); } -void mga_driver_irq_postinstall(drm_device_t * dev) +void mga_driver_irq_postinstall(struct drm_device * dev) { drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; @@ -137,7 +137,7 @@ void mga_driver_irq_postinstall(drm_device_t * dev) MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN); } -void mga_driver_irq_uninstall(drm_device_t * dev) +void mga_driver_irq_uninstall(struct drm_device * dev) { drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; if (!dev_priv) diff --git a/shared-core/mga_state.c b/shared-core/mga_state.c index 527f6ce7..72db0ced 100644 --- a/shared-core/mga_state.c +++ b/shared-core/mga_state.c @@ -504,7 +504,7 @@ static int mga_verify_blit(drm_mga_private_t * dev_priv, * */ -static void mga_dma_dispatch_clear(drm_device_t * dev, drm_mga_clear_t * clear) +static void mga_dma_dispatch_clear(struct drm_device * dev, drm_mga_clear_t * clear) { drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; @@ -594,7 +594,7 @@ static void mga_dma_dispatch_clear(drm_device_t * dev, drm_mga_clear_t * clear) FLUSH_DMA(); } -static void mga_dma_dispatch_swap(drm_device_t * dev) +static void mga_dma_dispatch_swap(struct drm_device * dev) { drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; @@ -651,7 +651,7 @@ static void mga_dma_dispatch_swap(drm_device_t * dev) DRM_DEBUG("%s... done.\n", __FUNCTION__); } -static void mga_dma_dispatch_vertex(drm_device_t * dev, drm_buf_t * buf) +static void mga_dma_dispatch_vertex(struct drm_device * dev, drm_buf_t * buf) { drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_buf_priv_t *buf_priv = buf->dev_private; @@ -698,7 +698,7 @@ static void mga_dma_dispatch_vertex(drm_device_t * dev, drm_buf_t * buf) FLUSH_DMA(); } -static void mga_dma_dispatch_indices(drm_device_t * dev, drm_buf_t * buf, +static void mga_dma_dispatch_indices(struct drm_device * dev, drm_buf_t * buf, unsigned int start, unsigned int end) { drm_mga_private_t *dev_priv = dev->dev_private; @@ -747,7 +747,7 @@ static void mga_dma_dispatch_indices(drm_device_t * dev, drm_buf_t * buf, /* This copies a 64 byte aligned agp region to the frambuffer with a * standard blit, the ioctl needs to do checking. 
*/ -static void mga_dma_dispatch_iload(drm_device_t * dev, drm_buf_t * buf, +static void mga_dma_dispatch_iload(struct drm_device * dev, drm_buf_t * buf, unsigned int dstorg, unsigned int length) { drm_mga_private_t *dev_priv = dev->dev_private; @@ -800,7 +800,7 @@ static void mga_dma_dispatch_iload(drm_device_t * dev, drm_buf_t * buf, FLUSH_DMA(); } -static void mga_dma_dispatch_blit(drm_device_t * dev, drm_mga_blit_t * blit) +static void mga_dma_dispatch_blit(struct drm_device * dev, drm_mga_blit_t * blit) { drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; @@ -917,8 +917,8 @@ static int mga_dma_vertex(DRM_IOCTL_ARGS) { DRM_DEVICE; drm_mga_private_t *dev_priv = dev->dev_private; - drm_device_dma_t *dma = dev->dma; - drm_buf_t *buf; + struct drm_device_dma *dma = dev->dma; + struct drm_buf *buf; drm_mga_buf_priv_t *buf_priv; drm_mga_vertex_t vertex; @@ -957,8 +957,8 @@ static int mga_dma_indices(DRM_IOCTL_ARGS) { DRM_DEVICE; drm_mga_private_t *dev_priv = dev->dev_private; - drm_device_dma_t *dma = dev->dma; - drm_buf_t *buf; + struct drm_device_dma *dma = dev->dma; + struct drm_buf *buf; drm_mga_buf_priv_t *buf_priv; drm_mga_indices_t indices; @@ -996,7 +996,7 @@ static int mga_dma_indices(DRM_IOCTL_ARGS) static int mga_dma_iload(DRM_IOCTL_ARGS) { DRM_DEVICE; - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; drm_mga_private_t *dev_priv = dev->dev_private; drm_buf_t *buf; drm_mga_buf_priv_t *buf_priv; @@ -1158,7 +1158,7 @@ static int mga_wait_fence(DRM_IOCTL_ARGS) return 0; } -drm_ioctl_desc_t mga_ioctls[] = { +struct drm_ioctl_desc mga_ioctls[] = { [DRM_IOCTL_NR(DRM_MGA_INIT)] = {mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, [DRM_IOCTL_NR(DRM_MGA_FLUSH)] = {mga_dma_flush, DRM_AUTH}, [DRM_IOCTL_NR(DRM_MGA_RESET)] = {mga_dma_reset, DRM_AUTH}, diff --git a/shared-core/nouveau_fifo.c b/shared-core/nouveau_fifo.c index 236dd4a1..56c25a6e 100644 --- a/shared-core/nouveau_fifo.c +++ b/shared-core/nouveau_fifo.c @@ -469,7 +469,7 @@ static int nouveau_ioctl_fifo_alloc(DRM_IOCTL_ARGS) DRM_DEVICE; struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_fifo_alloc init; - drm_map_list_t *entry; + struct drm_map_list *entry; struct nouveau_fifo *chan; int res; @@ -526,7 +526,7 @@ static int nouveau_ioctl_fifo_alloc(DRM_IOCTL_ARGS) * finally, the ioctl table ***********************************/ -drm_ioctl_desc_t nouveau_ioctls[] = { +struct drm_ioctl_desc nouveau_ioctls[] = { [DRM_IOCTL_NR(DRM_NOUVEAU_FIFO_ALLOC)] = {nouveau_ioctl_fifo_alloc, DRM_AUTH}, [DRM_IOCTL_NR(DRM_NOUVEAU_GROBJ_ALLOC)] = {nouveau_ioctl_grobj_alloc, DRM_AUTH}, [DRM_IOCTL_NR(DRM_NOUVEAU_NOTIFIER_ALLOC)] = {nouveau_ioctl_notifier_alloc, DRM_AUTH}, diff --git a/shared-core/nouveau_mem.c b/shared-core/nouveau_mem.c index ef9df359..e5906867 100644 --- a/shared-core/nouveau_mem.c +++ b/shared-core/nouveau_mem.c @@ -469,7 +469,7 @@ alloc_ok: if (flags&NOUVEAU_MEM_MAPPED) { - drm_map_list_t *entry; + struct drm_map_list *entry; int ret = 0; block->flags|=NOUVEAU_MEM_MAPPED; diff --git a/shared-core/r128_cce.c b/shared-core/r128_cce.c index 2d650b47..167fc070 100644 --- a/shared-core/r128_cce.c +++ b/shared-core/r128_cce.c @@ -81,7 +81,7 @@ static u32 r128_cce_microcode[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; -static int R128_READ_PLL(drm_device_t * dev, int addr) +static int R128_READ_PLL(struct drm_device * dev, int addr) { drm_r128_private_t *dev_priv = dev->dev_private; @@ -271,7 +271,7 @@ static void 
r128_do_cce_stop(drm_r128_private_t * dev_priv) /* Reset the engine. This will stop the CCE if it is running. */ -static int r128_do_engine_reset(drm_device_t * dev) +static int r128_do_engine_reset(struct drm_device * dev) { drm_r128_private_t *dev_priv = dev->dev_private; u32 clock_cntl_index, mclk_cntl, gen_reset_cntl; @@ -308,7 +308,7 @@ static int r128_do_engine_reset(drm_device_t * dev) return 0; } -static void r128_cce_init_ring_buffer(drm_device_t * dev, +static void r128_cce_init_ring_buffer(struct drm_device * dev, drm_r128_private_t * dev_priv) { u32 ring_start; @@ -347,7 +347,7 @@ static void r128_cce_init_ring_buffer(drm_device_t * dev, R128_WRITE(R128_BUS_CNTL, tmp); } -static int r128_do_init_cce(drm_device_t * dev, drm_r128_init_t * init) +static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init) { drm_r128_private_t *dev_priv; @@ -584,7 +584,7 @@ static int r128_do_init_cce(drm_device_t * dev, drm_r128_init_t * init) return 0; } -int r128_do_cleanup_cce(drm_device_t * dev) +int r128_do_cleanup_cce(struct drm_device * dev) { /* Make sure interrupts are disabled here because the uninstall ioctl @@ -767,11 +767,11 @@ int r128_fullscreen(DRM_IOCTL_ARGS) #define R128_BUFFER_FREE 0 #if 0 -static int r128_freelist_init(drm_device_t * dev) +static int r128_freelist_init(struct drm_device * dev) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; drm_r128_private_t *dev_priv = dev->dev_private; - drm_buf_t *buf; + struct drm_buf *buf; drm_r128_buf_priv_t *buf_priv; drm_r128_freelist_t *entry; int i; @@ -813,12 +813,12 @@ static int r128_freelist_init(drm_device_t * dev) } #endif -static drm_buf_t *r128_freelist_get(drm_device_t * dev) +static struct drm_buf *r128_freelist_get(struct drm_device * dev) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; drm_r128_private_t *dev_priv = dev->dev_private; drm_r128_buf_priv_t *buf_priv; - drm_buf_t *buf; + struct drm_buf *buf; int i, t; /* FIXME: Optimize -- use freelist code */ @@ -851,13 +851,13 @@ static drm_buf_t *r128_freelist_get(drm_device_t * dev) return NULL; } -void r128_freelist_reset(drm_device_t * dev) +void r128_freelist_reset(struct drm_device * dev) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; int i; for (i = 0; i < dma->buf_count; i++) { - drm_buf_t *buf = dma->buflist[i]; + struct drm_buf *buf = dma->buflist[i]; drm_r128_buf_priv_t *buf_priv = buf->dev_private; buf_priv->age = 0; } @@ -884,10 +884,10 @@ int r128_wait_ring(drm_r128_private_t * dev_priv, int n) return DRM_ERR(EBUSY); } -static int r128_cce_get_buffers(DRMFILE filp, drm_device_t * dev, struct drm_dma * d) +static int r128_cce_get_buffers(DRMFILE filp, struct drm_device * dev, struct drm_dma * d) { int i; - drm_buf_t *buf; + struct drm_buf *buf; for (i = d->granted_count; i < d->request_count; i++) { buf = r128_freelist_get(dev); @@ -911,7 +911,7 @@ static int r128_cce_get_buffers(DRMFILE filp, drm_device_t * dev, struct drm_dma int r128_cce_buffers(DRM_IOCTL_ARGS) { DRM_DEVICE; - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; int ret = 0; struct drm_dma __user *argp = (void __user *)data; struct drm_dma d; diff --git a/shared-core/r128_drv.h b/shared-core/r128_drv.h index 90868356..0791713a 100644 --- a/shared-core/r128_drv.h +++ b/shared-core/r128_drv.h @@ -118,7 +118,7 @@ typedef struct drm_r128_private { drm_local_map_t *cce_ring; drm_local_map_t *ring_rptr; drm_local_map_t *agp_textures; - drm_ati_pcigart_info gart_info; 
+ struct ati_pcigart_info gart_info; } drm_r128_private_t; typedef struct drm_r128_buf_priv { @@ -129,7 +129,7 @@ typedef struct drm_r128_buf_priv { drm_r128_freelist_t *list_entry; } drm_r128_buf_priv_t; -extern drm_ioctl_desc_t r128_ioctls[]; +extern struct drm_ioctl_desc r128_ioctls[]; extern int r128_max_ioctl; /* r128_cce.c */ @@ -142,21 +142,21 @@ extern int r128_engine_reset(DRM_IOCTL_ARGS); extern int r128_fullscreen(DRM_IOCTL_ARGS); extern int r128_cce_buffers(DRM_IOCTL_ARGS); -extern void r128_freelist_reset(drm_device_t * dev); +extern void r128_freelist_reset(struct drm_device * dev); extern int r128_wait_ring(drm_r128_private_t * dev_priv, int n); extern int r128_do_cce_idle(drm_r128_private_t * dev_priv); -extern int r128_do_cleanup_cce(drm_device_t * dev); +extern int r128_do_cleanup_cce(struct drm_device * dev); -extern int r128_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence); +extern int r128_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence); extern irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS); -extern void r128_driver_irq_preinstall(drm_device_t * dev); -extern void r128_driver_irq_postinstall(drm_device_t * dev); -extern void r128_driver_irq_uninstall(drm_device_t * dev); -extern void r128_driver_lastclose(drm_device_t * dev); -extern void r128_driver_preclose(drm_device_t * dev, DRMFILE filp); +extern void r128_driver_irq_preinstall(struct drm_device * dev); +extern void r128_driver_irq_postinstall(struct drm_device * dev); +extern void r128_driver_irq_uninstall(struct drm_device * dev); +extern void r128_driver_lastclose(struct drm_device * dev); +extern void r128_driver_preclose(struct drm_device * dev, DRMFILE filp); extern long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); diff --git a/shared-core/r128_irq.c b/shared-core/r128_irq.c index 87f8ca2b..c76fdca7 100644 --- a/shared-core/r128_irq.c +++ b/shared-core/r128_irq.c @@ -37,7 +37,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS) { - drm_device_t *dev = (drm_device_t *) arg; + struct drm_device *dev = (struct drm_device *) arg; drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private; int status; @@ -54,7 +54,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS) return IRQ_NONE; } -int r128_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence) +int r128_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence) { unsigned int cur_vblank; int ret = 0; @@ -72,7 +72,7 @@ int r128_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence) return ret; } -void r128_driver_irq_preinstall(drm_device_t * dev) +void r128_driver_irq_preinstall(struct drm_device * dev) { drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private; @@ -82,7 +82,7 @@ void r128_driver_irq_preinstall(drm_device_t * dev) R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK); } -void r128_driver_irq_postinstall(drm_device_t * dev) +void r128_driver_irq_postinstall(struct drm_device * dev) { drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private; @@ -90,7 +90,7 @@ void r128_driver_irq_postinstall(drm_device_t * dev) R128_WRITE(R128_GEN_INT_CNTL, R128_CRTC_VBLANK_INT_EN); } -void r128_driver_irq_uninstall(drm_device_t * dev) +void r128_driver_irq_uninstall(struct drm_device * dev) { drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private; if (!dev_priv) diff --git a/shared-core/r128_state.c b/shared-core/r128_state.c index 565e0d4f..b793d94b 100644 --- a/shared-core/r128_state.c +++ 
b/shared-core/r128_state.c @@ -352,7 +352,7 @@ static void r128_print_dirty(const char *msg, unsigned int flags) (flags & R128_REQUIRE_QUIESCENCE) ? "quiescence, " : ""); } -static void r128_cce_dispatch_clear(drm_device_t * dev, +static void r128_cce_dispatch_clear(struct drm_device * dev, drm_r128_clear_t * clear) { drm_r128_private_t *dev_priv = dev->dev_private; @@ -458,7 +458,7 @@ static void r128_cce_dispatch_clear(drm_device_t * dev, } } -static void r128_cce_dispatch_swap(drm_device_t * dev) +static void r128_cce_dispatch_swap(struct drm_device * dev) { drm_r128_private_t *dev_priv = dev->dev_private; drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; @@ -524,7 +524,7 @@ static void r128_cce_dispatch_swap(drm_device_t * dev) ADVANCE_RING(); } -static void r128_cce_dispatch_flip(drm_device_t * dev) +static void r128_cce_dispatch_flip(struct drm_device * dev) { drm_r128_private_t *dev_priv = dev->dev_private; RING_LOCALS; @@ -567,7 +567,7 @@ static void r128_cce_dispatch_flip(drm_device_t * dev) ADVANCE_RING(); } -static void r128_cce_dispatch_vertex(drm_device_t * dev, drm_buf_t * buf) +static void r128_cce_dispatch_vertex(struct drm_device * dev, struct drm_buf * buf) { drm_r128_private_t *dev_priv = dev->dev_private; drm_r128_buf_priv_t *buf_priv = buf->dev_private; @@ -637,8 +637,8 @@ static void r128_cce_dispatch_vertex(drm_device_t * dev, drm_buf_t * buf) sarea_priv->nbox = 0; } -static void r128_cce_dispatch_indirect(drm_device_t * dev, - drm_buf_t * buf, int start, int end) +static void r128_cce_dispatch_indirect(struct drm_device * dev, + struct drm_buf * buf, int start, int end) { drm_r128_private_t *dev_priv = dev->dev_private; drm_r128_buf_priv_t *buf_priv = buf->dev_private; @@ -692,8 +692,8 @@ static void r128_cce_dispatch_indirect(drm_device_t * dev, dev_priv->sarea_priv->last_dispatch++; } -static void r128_cce_dispatch_indices(drm_device_t * dev, - drm_buf_t * buf, +static void r128_cce_dispatch_indices(struct drm_device * dev, + struct drm_buf * buf, int start, int end, int count) { drm_r128_private_t *dev_priv = dev->dev_private; @@ -777,11 +777,11 @@ static void r128_cce_dispatch_indices(drm_device_t * dev, } static int r128_cce_dispatch_blit(DRMFILE filp, - drm_device_t * dev, drm_r128_blit_t * blit) + struct drm_device * dev, drm_r128_blit_t * blit) { drm_r128_private_t *dev_priv = dev->dev_private; - drm_device_dma_t *dma = dev->dma; - drm_buf_t *buf; + struct drm_device_dma *dma = dev->dma; + struct drm_buf *buf; drm_r128_buf_priv_t *buf_priv; u32 *data; int dword_shift, dwords; @@ -887,7 +887,7 @@ static int r128_cce_dispatch_blit(DRMFILE filp, * have hardware stencil support. 
*/ -static int r128_cce_dispatch_write_span(drm_device_t * dev, +static int r128_cce_dispatch_write_span(struct drm_device * dev, drm_r128_depth_t * depth) { drm_r128_private_t *dev_priv = dev->dev_private; @@ -983,7 +983,7 @@ static int r128_cce_dispatch_write_span(drm_device_t * dev, return 0; } -static int r128_cce_dispatch_write_pixels(drm_device_t * dev, +static int r128_cce_dispatch_write_pixels(struct drm_device * dev, drm_r128_depth_t * depth) { drm_r128_private_t *dev_priv = dev->dev_private; @@ -1105,7 +1105,7 @@ static int r128_cce_dispatch_write_pixels(drm_device_t * dev, return 0; } -static int r128_cce_dispatch_read_span(drm_device_t * dev, +static int r128_cce_dispatch_read_span(struct drm_device * dev, drm_r128_depth_t * depth) { drm_r128_private_t *dev_priv = dev->dev_private; @@ -1148,7 +1148,7 @@ static int r128_cce_dispatch_read_span(drm_device_t * dev, return 0; } -static int r128_cce_dispatch_read_pixels(drm_device_t * dev, +static int r128_cce_dispatch_read_pixels(struct drm_device * dev, drm_r128_depth_t * depth) { drm_r128_private_t *dev_priv = dev->dev_private; @@ -1220,7 +1220,7 @@ static int r128_cce_dispatch_read_pixels(drm_device_t * dev, * Polygon stipple */ -static void r128_cce_dispatch_stipple(drm_device_t * dev, u32 * stipple) +static void r128_cce_dispatch_stipple(struct drm_device * dev, u32 * stipple) { drm_r128_private_t *dev_priv = dev->dev_private; int i; @@ -1269,7 +1269,7 @@ static int r128_cce_clear(DRM_IOCTL_ARGS) return 0; } -static int r128_do_init_pageflip(drm_device_t * dev) +static int r128_do_init_pageflip(struct drm_device * dev) { drm_r128_private_t *dev_priv = dev->dev_private; DRM_DEBUG("\n"); @@ -1288,7 +1288,7 @@ static int r128_do_init_pageflip(drm_device_t * dev) return 0; } -static int r128_do_cleanup_pageflip(drm_device_t * dev) +static int r128_do_cleanup_pageflip(struct drm_device * dev) { drm_r128_private_t *dev_priv = dev->dev_private; DRM_DEBUG("\n"); @@ -1354,8 +1354,8 @@ static int r128_cce_vertex(DRM_IOCTL_ARGS) { DRM_DEVICE; drm_r128_private_t *dev_priv = dev->dev_private; - drm_device_dma_t *dma = dev->dma; - drm_buf_t *buf; + struct drm_device_dma *dma = dev->dma; + struct drm_buf *buf; drm_r128_buf_priv_t *buf_priv; drm_r128_vertex_t vertex; @@ -1413,8 +1413,8 @@ static int r128_cce_indices(DRM_IOCTL_ARGS) { DRM_DEVICE; drm_r128_private_t *dev_priv = dev->dev_private; - drm_device_dma_t *dma = dev->dma; - drm_buf_t *buf; + struct drm_device_dma *dma = dev->dma; + struct drm_buf *buf; drm_r128_buf_priv_t *buf_priv; drm_r128_indices_t elts; int count; @@ -1483,7 +1483,7 @@ static int r128_cce_indices(DRM_IOCTL_ARGS) static int r128_cce_blit(DRM_IOCTL_ARGS) { DRM_DEVICE; - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; drm_r128_private_t *dev_priv = dev->dev_private; drm_r128_blit_t blit; int ret; @@ -1571,8 +1571,8 @@ static int r128_cce_indirect(DRM_IOCTL_ARGS) { DRM_DEVICE; drm_r128_private_t *dev_priv = dev->dev_private; - drm_device_dma_t *dma = dev->dma; - drm_buf_t *buf; + struct drm_device_dma *dma = dev->dma; + struct drm_buf *buf; drm_r128_buf_priv_t *buf_priv; drm_r128_indirect_t indirect; #if 0 @@ -1675,7 +1675,7 @@ static int r128_getparam(DRM_IOCTL_ARGS) return 0; } -void r128_driver_preclose(drm_device_t * dev, DRMFILE filp) +void r128_driver_preclose(struct drm_device * dev, DRMFILE filp) { if (dev->dev_private) { drm_r128_private_t *dev_priv = dev->dev_private; @@ -1685,12 +1685,12 @@ void r128_driver_preclose(drm_device_t * dev, DRMFILE filp) } } -void 
r128_driver_lastclose(drm_device_t * dev) +void r128_driver_lastclose(struct drm_device * dev) { r128_do_cleanup_cce(dev); } -drm_ioctl_desc_t r128_ioctls[] = { +struct drm_ioctl_desc r128_ioctls[] = { [DRM_IOCTL_NR(DRM_R128_INIT)] = {r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, [DRM_IOCTL_NR(DRM_R128_CCE_START)] = {r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, [DRM_IOCTL_NR(DRM_R128_CCE_STOP)] = {r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, diff --git a/shared-core/r300_cmdbuf.c b/shared-core/r300_cmdbuf.c index ab4f1cae..08015ecf 100644 --- a/shared-core/r300_cmdbuf.c +++ b/shared-core/r300_cmdbuf.c @@ -706,7 +706,7 @@ static __inline__ void r300_pacify(drm_radeon_private_t *dev_priv) * The actual age emit is done by r300_do_cp_cmdbuf, which is why you must * be careful about how this function is called. */ -static void r300_discard_buffer(drm_device_t * dev, drm_buf_t * buf) +static void r300_discard_buffer(struct drm_device * dev, drm_buf_t * buf) { drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_buf_priv_t *buf_priv = buf->dev_private; @@ -778,13 +778,13 @@ static int r300_scratch(drm_radeon_private_t *dev_priv, * commands on the DMA ring buffer. * Called by the ioctl handler function radeon_cp_cmdbuf. */ -int r300_do_cp_cmdbuf(drm_device_t *dev, +int r300_do_cp_cmdbuf(struct drm_device *dev, DRMFILE filp, - drm_file_t *filp_priv, + struct drm_file *filp_priv, drm_radeon_kcmd_buffer_t *cmdbuf) { drm_radeon_private_t *dev_priv = dev->dev_private; - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; drm_buf_t *buf = NULL; int emit_dispatch_age = 0; int ret = 0; diff --git a/shared-core/radeon_cp.c b/shared-core/radeon_cp.c index ba06443f..cef47ca0 100644 --- a/shared-core/radeon_cp.c +++ b/shared-core/radeon_cp.c @@ -36,7 +36,7 @@ #define RADEON_FIFO_DEBUG 0 -static int radeon_do_cleanup_cp(drm_device_t * dev); +static int radeon_do_cleanup_cp(struct drm_device * dev); /* CP microcode (from ATI) */ static const u32 R200_cp_microcode[][2] = { @@ -816,7 +816,7 @@ static const u32 R300_cp_microcode[][2] = { { 0000000000, 0000000000 }, }; -static int RADEON_READ_PLL(drm_device_t * dev, int addr) +static int RADEON_READ_PLL(struct drm_device * dev, int addr) { drm_radeon_private_t *dev_priv = dev->dev_private; @@ -1066,7 +1066,7 @@ static void radeon_do_cp_stop(drm_radeon_private_t * dev_priv) /* Reset the engine. This will stop the CP if it is running. 
*/ -static int radeon_do_engine_reset(drm_device_t * dev) +static int radeon_do_engine_reset(struct drm_device * dev) { drm_radeon_private_t *dev_priv = dev->dev_private; u32 clock_cntl_index, mclk_cntl, rbbm_soft_reset; @@ -1122,7 +1122,7 @@ static int radeon_do_engine_reset(drm_device_t * dev) return 0; } -static void radeon_cp_init_ring_buffer(drm_device_t * dev, +static void radeon_cp_init_ring_buffer(struct drm_device * dev, drm_radeon_private_t * dev_priv) { u32 ring_start, cur_read_ptr; @@ -1174,7 +1174,7 @@ static void radeon_cp_init_ring_buffer(drm_device_t * dev, } else #endif { - drm_sg_mem_t *entry = dev->sg; + struct drm_sg_mem *entry = dev->sg; unsigned long tmp_ofs, page_ofs; tmp_ofs = dev_priv->ring_rptr->offset - @@ -1390,7 +1390,7 @@ static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on) } } -static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init) +static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init) { drm_radeon_private_t *dev_priv = dev->dev_private; @@ -1750,7 +1750,7 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init) return 0; } -static int radeon_do_cleanup_cp(drm_device_t * dev) +static int radeon_do_cleanup_cp(struct drm_device * dev) { drm_radeon_private_t *dev_priv = dev->dev_private; DRM_DEBUG("\n"); @@ -1806,7 +1806,7 @@ static int radeon_do_cleanup_cp(drm_device_t * dev) * * Charl P. Botha */ -static int radeon_do_resume_cp(drm_device_t * dev) +static int radeon_do_resume_cp(struct drm_device * dev) { drm_radeon_private_t *dev_priv = dev->dev_private; @@ -1933,7 +1933,7 @@ int radeon_cp_stop(DRM_IOCTL_ARGS) return 0; } -void radeon_do_release(drm_device_t * dev) +void radeon_do_release(struct drm_device * dev) { drm_radeon_private_t *dev_priv = dev->dev_private; int i, ret; @@ -2066,9 +2066,9 @@ int radeon_fullscreen(DRM_IOCTL_ARGS) * they can't get the lock. 
*/ -drm_buf_t *radeon_freelist_get(drm_device_t * dev) +drm_buf_t *radeon_freelist_get(struct drm_device * dev) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_buf_priv_t *buf_priv; drm_buf_t *buf; @@ -2106,9 +2106,9 @@ drm_buf_t *radeon_freelist_get(drm_device_t * dev) } #if 0 -drm_buf_t *radeon_freelist_get(drm_device_t * dev) +drm_buf_t *radeon_freelist_get(struct drm_device * dev) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_buf_priv_t *buf_priv; drm_buf_t *buf; @@ -2140,9 +2140,9 @@ drm_buf_t *radeon_freelist_get(drm_device_t * dev) } #endif -void radeon_freelist_reset(drm_device_t * dev) +void radeon_freelist_reset(struct drm_device * dev) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; drm_radeon_private_t *dev_priv = dev->dev_private; int i; @@ -2190,7 +2190,7 @@ int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n) return DRM_ERR(EBUSY); } -static int radeon_cp_get_buffers(DRMFILE filp, drm_device_t * dev, +static int radeon_cp_get_buffers(DRMFILE filp, struct drm_device * dev, struct drm_dma * d) { int i; @@ -2218,7 +2218,7 @@ static int radeon_cp_get_buffers(DRMFILE filp, drm_device_t * dev, int radeon_cp_buffers(DRM_IOCTL_ARGS) { DRM_DEVICE; - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; int ret = 0; struct drm_dma __user *argp = (void __user *)data; struct drm_dma d; diff --git a/shared-core/radeon_drv.h b/shared-core/radeon_drv.h index 03d2e7f2..dfa811c2 100644 --- a/shared-core/radeon_drv.h +++ b/shared-core/radeon_drv.h @@ -295,7 +295,7 @@ typedef struct drm_radeon_private { unsigned long pcigart_offset; unsigned int pcigart_offset_set; - drm_ati_pcigart_info gart_info; + struct ati_pcigart_info gart_info; u32 scratch_ages[5]; @@ -316,7 +316,7 @@ typedef struct drm_radeon_kcmd_buffer { } drm_radeon_kcmd_buffer_t; extern int radeon_no_wb; -extern drm_ioctl_desc_t radeon_ioctls[]; +extern struct drm_ioctl_desc radeon_ioctls[]; extern int radeon_max_ioctl; /* Check whether the given hardware address is inside the framebuffer or the @@ -345,8 +345,8 @@ extern int radeon_engine_reset(DRM_IOCTL_ARGS); extern int radeon_fullscreen(DRM_IOCTL_ARGS); extern int radeon_cp_buffers(DRM_IOCTL_ARGS); -extern void radeon_freelist_reset(drm_device_t * dev); -extern drm_buf_t *radeon_freelist_get(drm_device_t * dev); +extern void radeon_freelist_reset(struct drm_device * dev); +extern drm_buf_t *radeon_freelist_get(struct drm_device * dev); extern int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n); @@ -362,33 +362,33 @@ extern void radeon_mem_release(DRMFILE filp, struct mem_block *heap); extern int radeon_irq_emit(DRM_IOCTL_ARGS); extern int radeon_irq_wait(DRM_IOCTL_ARGS); -extern void radeon_do_release(drm_device_t * dev); -extern int radeon_driver_vblank_wait(drm_device_t * dev, +extern void radeon_do_release(struct drm_device * dev); +extern int radeon_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence); -extern int radeon_driver_vblank_wait2(drm_device_t * dev, +extern int radeon_driver_vblank_wait2(struct drm_device * dev, unsigned int *sequence); extern irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS); -extern void radeon_driver_irq_preinstall(drm_device_t * dev); -extern void radeon_driver_irq_postinstall(drm_device_t * dev); -extern void radeon_driver_irq_uninstall(drm_device_t * dev); -extern int 
radeon_vblank_crtc_get(drm_device_t *dev); -extern int radeon_vblank_crtc_set(drm_device_t *dev, int64_t value); +extern void radeon_driver_irq_preinstall(struct drm_device * dev); +extern void radeon_driver_irq_postinstall(struct drm_device * dev); +extern void radeon_driver_irq_uninstall(struct drm_device * dev); +extern int radeon_vblank_crtc_get(struct drm_device *dev); +extern int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value); extern int radeon_driver_load(struct drm_device *dev, unsigned long flags); extern int radeon_driver_unload(struct drm_device *dev); extern int radeon_driver_firstopen(struct drm_device *dev); -extern void radeon_driver_preclose(drm_device_t * dev, DRMFILE filp); -extern void radeon_driver_postclose(drm_device_t * dev, drm_file_t * filp); -extern void radeon_driver_lastclose(drm_device_t * dev); -extern int radeon_driver_open(drm_device_t * dev, drm_file_t * filp_priv); +extern void radeon_driver_preclose(struct drm_device * dev, DRMFILE filp); +extern void radeon_driver_postclose(struct drm_device * dev, struct drm_file * filp); +extern void radeon_driver_lastclose(struct drm_device * dev); +extern int radeon_driver_open(struct drm_device * dev, struct drm_file * filp_priv); extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); /* r300_cmdbuf.c */ extern void r300_init_reg_flags(void); -extern int r300_do_cp_cmdbuf(drm_device_t *dev, DRMFILE filp, - drm_file_t* filp_priv, +extern int r300_do_cp_cmdbuf(struct drm_device *dev, DRMFILE filp, + struct drm_file* filp_priv, drm_radeon_kcmd_buffer_t* cmdbuf); /* Flags for stats.boxes diff --git a/shared-core/radeon_irq.c b/shared-core/radeon_irq.c index a4be86e3..ad8a0ac7 100644 --- a/shared-core/radeon_irq.c +++ b/shared-core/radeon_irq.c @@ -64,7 +64,7 @@ static __inline__ u32 radeon_acknowledge_irqs(drm_radeon_private_t * dev_priv, irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS) { - drm_device_t *dev = (drm_device_t *) arg; + struct drm_device *dev = (struct drm_device *) arg; drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private; u32 stat; @@ -109,7 +109,7 @@ irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS) return IRQ_HANDLED; } -static int radeon_emit_irq(drm_device_t * dev) +static int radeon_emit_irq(struct drm_device * dev) { drm_radeon_private_t *dev_priv = dev->dev_private; unsigned int ret; @@ -127,7 +127,7 @@ static int radeon_emit_irq(drm_device_t * dev) return ret; } -static int radeon_wait_irq(drm_device_t * dev, int swi_nr) +static int radeon_wait_irq(struct drm_device * dev, int swi_nr) { drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private; @@ -144,7 +144,7 @@ static int radeon_wait_irq(drm_device_t * dev, int swi_nr) return ret; } -int radeon_driver_vblank_do_wait(drm_device_t * dev, unsigned int *sequence, +int radeon_driver_vblank_do_wait(struct drm_device * dev, unsigned int *sequence, int crtc) { drm_radeon_private_t *dev_priv = @@ -184,12 +184,12 @@ int radeon_driver_vblank_do_wait(drm_device_t * dev, unsigned int *sequence, return ret; } -int radeon_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence) +int radeon_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence) { return radeon_driver_vblank_do_wait(dev, sequence, DRM_RADEON_VBLANK_CRTC1); } -int radeon_driver_vblank_wait2(drm_device_t *dev, unsigned int *sequence) +int radeon_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence) { return radeon_driver_vblank_do_wait(dev, sequence, 
DRM_RADEON_VBLANK_CRTC2); } @@ -242,7 +242,7 @@ int radeon_irq_wait(DRM_IOCTL_ARGS) return radeon_wait_irq(dev, irqwait.irq_seq); } -static void radeon_enable_interrupt(drm_device_t *dev) +static void radeon_enable_interrupt(struct drm_device *dev) { drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private; @@ -259,7 +259,7 @@ static void radeon_enable_interrupt(drm_device_t *dev) /* drm_dma.h hooks */ -void radeon_driver_irq_preinstall(drm_device_t * dev) +void radeon_driver_irq_preinstall(struct drm_device * dev) { drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private; @@ -273,7 +273,7 @@ void radeon_driver_irq_preinstall(drm_device_t * dev) RADEON_CRTC2_VBLANK_STAT)); } -void radeon_driver_irq_postinstall(drm_device_t * dev) +void radeon_driver_irq_postinstall(struct drm_device * dev) { drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private; @@ -284,7 +284,7 @@ void radeon_driver_irq_postinstall(drm_device_t * dev) radeon_enable_interrupt(dev); } -void radeon_driver_irq_uninstall(drm_device_t * dev) +void radeon_driver_irq_uninstall(struct drm_device * dev) { drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private; @@ -298,7 +298,7 @@ void radeon_driver_irq_uninstall(drm_device_t * dev) } -int radeon_vblank_crtc_get(drm_device_t *dev) +int radeon_vblank_crtc_get(struct drm_device *dev) { drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private; u32 flag; @@ -315,7 +315,7 @@ int radeon_vblank_crtc_get(drm_device_t *dev) return value; } -int radeon_vblank_crtc_set(drm_device_t *dev, int64_t value) +int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value) { drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private; if (value & ~(DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) { diff --git a/shared-core/radeon_state.c b/shared-core/radeon_state.c index 882cd323..13b09d44 100644 --- a/shared-core/radeon_state.c +++ b/shared-core/radeon_state.c @@ -39,7 +39,7 @@ static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t * dev_priv, - drm_file_t * filp_priv, + struct drm_file * filp_priv, u32 * offset) { u64 off = *offset; @@ -90,7 +90,7 @@ static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t * static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t * dev_priv, - drm_file_t * filp_priv, + struct drm_file * filp_priv, int id, u32 *data) { switch (id) { @@ -264,7 +264,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t * static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * dev_priv, - drm_file_t *filp_priv, + struct drm_file *filp_priv, drm_radeon_kcmd_buffer_t * cmdbuf, unsigned int *cmdsz) @@ -439,7 +439,7 @@ static __inline__ void radeon_emit_clip_rect(drm_radeon_private_t * dev_priv, /* Emit 1.1 state */ static int radeon_emit_state(drm_radeon_private_t * dev_priv, - drm_file_t * filp_priv, + struct drm_file * filp_priv, drm_radeon_context_regs_t * ctx, drm_radeon_texture_regs_t * tex, unsigned int dirty) @@ -608,7 +608,7 @@ static int radeon_emit_state(drm_radeon_private_t * dev_priv, /* Emit 1.2 state */ static int radeon_emit_state2(drm_radeon_private_t * dev_priv, - drm_file_t * filp_priv, + struct drm_file * filp_priv, drm_radeon_state_t * state) { RING_LOCALS; @@ -844,7 +844,7 @@ static void radeon_cp_performance_boxes(drm_radeon_private_t * dev_priv) * CP command dispatch functions */ -static void radeon_cp_dispatch_clear(drm_device_t * dev, +static void 
radeon_cp_dispatch_clear(struct drm_device * dev, drm_radeon_clear_t * clear, drm_radeon_clear_rect_t * depth_boxes) { @@ -1335,7 +1335,7 @@ static void radeon_cp_dispatch_clear(drm_device_t * dev, ADVANCE_RING(); } -static void radeon_cp_dispatch_swap(drm_device_t * dev) +static void radeon_cp_dispatch_swap(struct drm_device * dev) { drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; @@ -1412,7 +1412,7 @@ static void radeon_cp_dispatch_swap(drm_device_t * dev) ADVANCE_RING(); } -static void radeon_cp_dispatch_flip(drm_device_t * dev) +static void radeon_cp_dispatch_flip(struct drm_device * dev) { drm_radeon_private_t *dev_priv = dev->dev_private; struct drm_sarea *sarea = (struct drm_sarea *) dev_priv->sarea->handle; @@ -1491,8 +1491,8 @@ typedef struct { unsigned int vc_format; } drm_radeon_tcl_prim_t; -static void radeon_cp_dispatch_vertex(drm_device_t * dev, - drm_buf_t * buf, +static void radeon_cp_dispatch_vertex(struct drm_device * dev, + struct drm_buf * buf, drm_radeon_tcl_prim_t * prim) { drm_radeon_private_t *dev_priv = dev->dev_private; @@ -1537,7 +1537,7 @@ static void radeon_cp_dispatch_vertex(drm_device_t * dev, } while (i < nbox); } -static void radeon_cp_discard_buffer(drm_device_t * dev, drm_buf_t * buf) +static void radeon_cp_discard_buffer(struct drm_device * dev, struct drm_buf * buf) { drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_buf_priv_t *buf_priv = buf->dev_private; @@ -1554,8 +1554,8 @@ static void radeon_cp_discard_buffer(drm_device_t * dev, drm_buf_t * buf) buf->used = 0; } -static void radeon_cp_dispatch_indirect(drm_device_t * dev, - drm_buf_t * buf, int start, int end) +static void radeon_cp_dispatch_indirect(struct drm_device * dev, + struct drm_buf * buf, int start, int end) { drm_radeon_private_t *dev_priv = dev->dev_private; RING_LOCALS; @@ -1588,8 +1588,8 @@ static void radeon_cp_dispatch_indirect(drm_device_t * dev, } } -static void radeon_cp_dispatch_indices(drm_device_t * dev, - drm_buf_t * elt_buf, +static void radeon_cp_dispatch_indices(struct drm_device * dev, + struct drm_buf * elt_buf, drm_radeon_tcl_prim_t * prim) { drm_radeon_private_t *dev_priv = dev->dev_private; @@ -1647,13 +1647,13 @@ static void radeon_cp_dispatch_indices(drm_device_t * dev, #define RADEON_MAX_TEXTURE_SIZE RADEON_BUFFER_SIZE static int radeon_cp_dispatch_texture(DRMFILE filp, - drm_device_t * dev, + struct drm_device * dev, drm_radeon_texture_t * tex, drm_radeon_tex_image_t * image) { drm_radeon_private_t *dev_priv = dev->dev_private; - drm_file_t *filp_priv; - drm_buf_t *buf; + struct drm_file *filp_priv; + struct drm_buf *buf; u32 format; u32 *buffer; const u8 __user *data; @@ -1881,7 +1881,7 @@ static int radeon_cp_dispatch_texture(DRMFILE filp, return 0; } -static void radeon_cp_dispatch_stipple(drm_device_t * dev, u32 * stipple) +static void radeon_cp_dispatch_stipple(struct drm_device * dev, u32 * stipple) { drm_radeon_private_t *dev_priv = dev->dev_private; int i; @@ -2144,7 +2144,7 @@ static int radeon_cp_clear(DRM_IOCTL_ARGS) /* Not sure why this isn't set all the time: */ -static int radeon_do_init_pageflip(drm_device_t * dev) +static int radeon_do_init_pageflip(struct drm_device * dev) { drm_radeon_private_t *dev_priv = dev->dev_private; RING_LOCALS; @@ -2216,10 +2216,10 @@ static int radeon_cp_vertex(DRM_IOCTL_ARGS) { DRM_DEVICE; drm_radeon_private_t *dev_priv = dev->dev_private; - drm_file_t *filp_priv; + struct drm_file *filp_priv; drm_radeon_sarea_t *sarea_priv; - drm_device_dma_t *dma 
= dev->dma; - drm_buf_t *buf; + struct drm_device_dma *dma = dev->dma; + struct drm_buf *buf; drm_radeon_vertex_t vertex; drm_radeon_tcl_prim_t prim; @@ -2306,10 +2306,10 @@ static int radeon_cp_indices(DRM_IOCTL_ARGS) { DRM_DEVICE; drm_radeon_private_t *dev_priv = dev->dev_private; - drm_file_t *filp_priv; + struct drm_file *filp_priv; drm_radeon_sarea_t *sarea_priv; - drm_device_dma_t *dma = dev->dma; - drm_buf_t *buf; + struct drm_device_dma *dma = dev->dma; + struct drm_buf *buf; drm_radeon_indices_t elts; drm_radeon_tcl_prim_t prim; int count; @@ -2461,8 +2461,8 @@ static int radeon_cp_indirect(DRM_IOCTL_ARGS) { DRM_DEVICE; drm_radeon_private_t *dev_priv = dev->dev_private; - drm_device_dma_t *dma = dev->dma; - drm_buf_t *buf; + struct drm_device_dma *dma = dev->dma; + struct drm_buf *buf; drm_radeon_indirect_t indirect; RING_LOCALS; @@ -2535,10 +2535,10 @@ static int radeon_cp_vertex2(DRM_IOCTL_ARGS) { DRM_DEVICE; drm_radeon_private_t *dev_priv = dev->dev_private; - drm_file_t *filp_priv; + struct drm_file *filp_priv; drm_radeon_sarea_t *sarea_priv; - drm_device_dma_t *dma = dev->dma; - drm_buf_t *buf; + struct drm_device_dma *dma = dev->dma; + struct drm_buf *buf; drm_radeon_vertex2_t vertex; int i; unsigned char laststate; @@ -2638,7 +2638,7 @@ static int radeon_cp_vertex2(DRM_IOCTL_ARGS) } static int radeon_emit_packets(drm_radeon_private_t * dev_priv, - drm_file_t * filp_priv, + struct drm_file * filp_priv, drm_radeon_cmd_header_t header, drm_radeon_kcmd_buffer_t *cmdbuf) { @@ -2763,8 +2763,8 @@ static __inline__ int radeon_emit_veclinear(drm_radeon_private_t *dev_priv, return 0; } -static int radeon_emit_packet3(drm_device_t * dev, - drm_file_t * filp_priv, +static int radeon_emit_packet3(struct drm_device * dev, + struct drm_file * filp_priv, drm_radeon_kcmd_buffer_t *cmdbuf) { drm_radeon_private_t *dev_priv = dev->dev_private; @@ -2789,8 +2789,8 @@ static int radeon_emit_packet3(drm_device_t * dev, return 0; } -static int radeon_emit_packet3_cliprect(drm_device_t *dev, - drm_file_t *filp_priv, +static int radeon_emit_packet3_cliprect(struct drm_device *dev, + struct drm_file *filp_priv, drm_radeon_kcmd_buffer_t *cmdbuf, int orig_nbox) { @@ -2851,7 +2851,7 @@ static int radeon_emit_packet3_cliprect(drm_device_t *dev, return 0; } -static int radeon_emit_wait(drm_device_t * dev, int flags) +static int radeon_emit_wait(struct drm_device * dev, int flags) { drm_radeon_private_t *dev_priv = dev->dev_private; RING_LOCALS; @@ -2884,9 +2884,9 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS) { DRM_DEVICE; drm_radeon_private_t *dev_priv = dev->dev_private; - drm_file_t *filp_priv; - drm_device_dma_t *dma = dev->dma; - drm_buf_t *buf = NULL; + struct drm_file *filp_priv; + struct drm_device_dma *dma = dev->dma; + struct drm_buf *buf = NULL; int idx; drm_radeon_kcmd_buffer_t cmdbuf; drm_radeon_cmd_header_t header; @@ -3151,7 +3151,7 @@ static int radeon_cp_setparam(DRM_IOCTL_ARGS) { DRM_DEVICE; drm_radeon_private_t *dev_priv = dev->dev_private; - drm_file_t *filp_priv; + struct drm_file *filp_priv; drm_radeon_setparam_t sp; struct drm_radeon_driver_file_fields *radeon_priv; @@ -3213,7 +3213,7 @@ static int radeon_cp_setparam(DRM_IOCTL_ARGS) * * DRM infrastructure takes care of reclaiming dma buffers. 
*/ -void radeon_driver_preclose(drm_device_t * dev, DRMFILE filp) +void radeon_driver_preclose(struct drm_device * dev, DRMFILE filp) { if (dev->dev_private) { drm_radeon_private_t *dev_priv = dev->dev_private; @@ -3224,7 +3224,7 @@ void radeon_driver_preclose(drm_device_t * dev, DRMFILE filp) } } -void radeon_driver_lastclose(drm_device_t * dev) +void radeon_driver_lastclose(struct drm_device * dev) { if (dev->dev_private) { drm_radeon_private_t *dev_priv = dev->dev_private; @@ -3237,7 +3237,7 @@ void radeon_driver_lastclose(drm_device_t * dev) radeon_do_release(dev); } -int radeon_driver_open(drm_device_t * dev, drm_file_t * filp_priv) +int radeon_driver_open(struct drm_device * dev, struct drm_file * filp_priv) { drm_radeon_private_t *dev_priv = dev->dev_private; struct drm_radeon_driver_file_fields *radeon_priv; @@ -3259,7 +3259,7 @@ int radeon_driver_open(drm_device_t * dev, drm_file_t * filp_priv) return 0; } -void radeon_driver_postclose(drm_device_t * dev, drm_file_t * filp_priv) +void radeon_driver_postclose(struct drm_device * dev, struct drm_file * filp_priv) { struct drm_radeon_driver_file_fields *radeon_priv = filp_priv->driver_priv; @@ -3267,7 +3267,7 @@ void radeon_driver_postclose(drm_device_t * dev, drm_file_t * filp_priv) drm_free(radeon_priv, sizeof(*radeon_priv), DRM_MEM_FILES); } -drm_ioctl_desc_t radeon_ioctls[] = { +struct drm_ioctl_desc radeon_ioctls[] = { [DRM_IOCTL_NR(DRM_RADEON_CP_INIT)] = {radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, [DRM_IOCTL_NR(DRM_RADEON_CP_START)] = {radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, [DRM_IOCTL_NR(DRM_RADEON_CP_STOP)] = {radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, diff --git a/shared-core/savage_bci.c b/shared-core/savage_bci.c index 7492a38c..5a41b238 100644 --- a/shared-core/savage_bci.c +++ b/shared-core/savage_bci.c @@ -32,7 +32,7 @@ #define SAVAGE_EVENT_USEC_TIMEOUT 5000000 /* 5s */ #define SAVAGE_FREELIST_DEBUG 0 -static int savage_do_cleanup_bci(drm_device_t *dev); +static int savage_do_cleanup_bci(struct drm_device *dev); static int savage_bci_wait_fifo_shadow(drm_savage_private_t *dev_priv, unsigned int n) @@ -203,10 +203,10 @@ uint16_t savage_bci_emit_event(drm_savage_private_t *dev_priv, /* * Freelist management */ -static int savage_freelist_init(drm_device_t *dev) +static int savage_freelist_init(struct drm_device *dev) { drm_savage_private_t *dev_priv = dev->dev_private; - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; drm_buf_t *buf; drm_savage_buf_priv_t *entry; int i; @@ -236,7 +236,7 @@ static int savage_freelist_init(drm_device_t *dev) return 0; } -static drm_buf_t *savage_freelist_get(drm_device_t *dev) +static drm_buf_t *savage_freelist_get(struct drm_device *dev) { drm_savage_private_t *dev_priv = dev->dev_private; drm_savage_buf_priv_t *tail = dev_priv->tail.prev; @@ -269,7 +269,7 @@ static drm_buf_t *savage_freelist_get(drm_device_t *dev) return NULL; } -void savage_freelist_put(drm_device_t *dev, drm_buf_t *buf) +void savage_freelist_put(struct drm_device *dev, drm_buf_t *buf) { drm_savage_private_t *dev_priv = dev->dev_private; drm_savage_buf_priv_t *entry = buf->dev_private, *prev, *next; @@ -535,7 +535,7 @@ static void savage_fake_dma_flush(drm_savage_private_t *dev_priv) dev_priv->first_dma_page = dev_priv->current_dma_page = 0; } -int savage_driver_load(drm_device_t *dev, unsigned long chipset) +int savage_driver_load(struct drm_device *dev, unsigned long chipset) { drm_savage_private_t *dev_priv; @@ -557,7 +557,7 @@ int 
savage_driver_load(drm_device_t *dev, unsigned long chipset) * in drm_addmap. Therefore we add them manually before the maps are * initialized, and tear them down on last close. */ -int savage_driver_firstopen(drm_device_t *dev) +int savage_driver_firstopen(struct drm_device *dev) { drm_savage_private_t *dev_priv = dev->dev_private; unsigned long mmio_base, fb_base, fb_size, aperture_base; @@ -654,7 +654,7 @@ int savage_driver_firstopen(drm_device_t *dev) /* * Delete MTRRs and free device-private data. */ -void savage_driver_lastclose(drm_device_t *dev) +void savage_driver_lastclose(struct drm_device *dev) { drm_savage_private_t *dev_priv = dev->dev_private; int i; @@ -666,7 +666,7 @@ void savage_driver_lastclose(drm_device_t *dev) dev_priv->mtrr[i].size, DRM_MTRR_WC); } -int savage_driver_unload(drm_device_t *dev) +int savage_driver_unload(struct drm_device *dev) { drm_savage_private_t *dev_priv = dev->dev_private; @@ -675,7 +675,7 @@ int savage_driver_unload(drm_device_t *dev) return 0; } -static int savage_do_init_bci(drm_device_t *dev, drm_savage_init_t *init) +static int savage_do_init_bci(struct drm_device *dev, drm_savage_init_t *init) { drm_savage_private_t *dev_priv = dev->dev_private; @@ -897,7 +897,7 @@ static int savage_do_init_bci(drm_device_t *dev, drm_savage_init_t *init) return 0; } -static int savage_do_cleanup_bci(drm_device_t *dev) +static int savage_do_cleanup_bci(struct drm_device *dev) { drm_savage_private_t *dev_priv = dev->dev_private; @@ -1006,7 +1006,7 @@ static int savage_bci_event_wait(DRM_IOCTL_ARGS) * DMA buffer management */ -static int savage_bci_get_buffers(DRMFILE filp, drm_device_t *dev, struct drm_dma *d) +static int savage_bci_get_buffers(DRMFILE filp, struct drm_device *dev, struct drm_dma *d) { drm_buf_t *buf; int i; @@ -1033,7 +1033,7 @@ static int savage_bci_get_buffers(DRMFILE filp, drm_device_t *dev, struct drm_dm int savage_bci_buffers(DRM_IOCTL_ARGS) { DRM_DEVICE; - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; struct drm_dma d; int ret = 0; @@ -1068,9 +1068,9 @@ int savage_bci_buffers(DRM_IOCTL_ARGS) return ret; } -void savage_reclaim_buffers(drm_device_t *dev, DRMFILE filp) +void savage_reclaim_buffers(struct drm_device *dev, DRMFILE filp) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; drm_savage_private_t *dev_priv = dev->dev_private; int i; @@ -1100,7 +1100,7 @@ void savage_reclaim_buffers(drm_device_t *dev, DRMFILE filp) drm_core_reclaim_buffers(dev, filp); } -drm_ioctl_desc_t savage_ioctls[] = { +struct drm_ioctl_desc savage_ioctls[] = { [DRM_IOCTL_NR(DRM_SAVAGE_BCI_INIT)] = {savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, [DRM_IOCTL_NR(DRM_SAVAGE_BCI_CMDBUF)] = {savage_bci_cmdbuf, DRM_AUTH}, [DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_EMIT)] = {savage_bci_event_emit, DRM_AUTH}, diff --git a/shared-core/savage_drv.h b/shared-core/savage_drv.h index 8d04d43b..39c2c751 100644 --- a/shared-core/savage_drv.h +++ b/shared-core/savage_drv.h @@ -104,7 +104,7 @@ enum savage_family { S3_LAST }; -extern drm_ioctl_desc_t savage_ioctls[]; +extern struct drm_ioctl_desc savage_ioctls[]; extern int savage_max_ioctl; #define S3_SAVAGE3D_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX)) @@ -203,16 +203,16 @@ extern int savage_bci_buffers(DRM_IOCTL_ARGS); /* BCI functions */ extern uint16_t savage_bci_emit_event(drm_savage_private_t *dev_priv, unsigned int flags); -extern void savage_freelist_put(drm_device_t *dev, drm_buf_t *buf); +extern void savage_freelist_put(struct drm_device *dev, 
drm_buf_t *buf); extern void savage_dma_reset(drm_savage_private_t *dev_priv); extern void savage_dma_wait(drm_savage_private_t *dev_priv, unsigned int page); extern uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv, unsigned int n); -extern int savage_driver_load(drm_device_t *dev, unsigned long chipset); -extern int savage_driver_firstopen(drm_device_t *dev); -extern void savage_driver_lastclose(drm_device_t *dev); -extern int savage_driver_unload(drm_device_t *dev); -extern void savage_reclaim_buffers(drm_device_t *dev, DRMFILE filp); +extern int savage_driver_load(struct drm_device *dev, unsigned long chipset); +extern int savage_driver_firstopen(struct drm_device *dev); +extern void savage_driver_lastclose(struct drm_device *dev); +extern int savage_driver_unload(struct drm_device *dev); +extern void savage_reclaim_buffers(struct drm_device *dev, DRMFILE filp); /* state functions */ extern void savage_emit_clip_rect_s3d(drm_savage_private_t *dev_priv, diff --git a/shared-core/savage_state.c b/shared-core/savage_state.c index 54b9169b..93d2081b 100644 --- a/shared-core/savage_state.c +++ b/shared-core/savage_state.c @@ -956,7 +956,7 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS) { DRM_DEVICE; drm_savage_private_t *dev_priv = dev->dev_private; - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; drm_buf_t *dmabuf; drm_savage_cmdbuf_t cmdbuf; drm_savage_cmd_header_t *kcmd_addr = NULL; diff --git a/shared-core/sis_drv.h b/shared-core/sis_drv.h index ec572ad4..c174e294 100644 --- a/shared-core/sis_drv.h +++ b/shared-core/sis_drv.h @@ -66,9 +66,9 @@ typedef struct drm_sis_private { unsigned long agp_offset; } drm_sis_private_t; -extern int sis_idle(drm_device_t *dev); -extern void sis_reclaim_buffers_locked(drm_device_t *dev, struct file *filp); -extern void sis_lastclose(drm_device_t *dev); +extern int sis_idle(struct drm_device *dev); +extern void sis_reclaim_buffers_locked(struct drm_device *dev, struct file *filp); +extern void sis_lastclose(struct drm_device *dev); #else #include "sis_ds.h" @@ -78,14 +78,14 @@ typedef struct drm_sis_private { memHeap_t *FBHeap; } drm_sis_private_t; -extern int sis_init_context(drm_device_t * dev, int context); -extern int sis_final_context(drm_device_t * dev, int context); +extern int sis_init_context(struct drm_device * dev, int context); +extern int sis_final_context(struct drm_device * dev, int context); #endif -extern drm_ioctl_desc_t sis_ioctls[]; +extern struct drm_ioctl_desc sis_ioctls[]; extern int sis_max_ioctl; #endif diff --git a/shared-core/via_dma.c b/shared-core/via_dma.c index 333c4bcf..48f46938 100644 --- a/shared-core/via_dma.c +++ b/shared-core/via_dma.c @@ -139,7 +139,7 @@ static inline uint32_t *via_check_dma(drm_via_private_t * dev_priv, return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low); } -int via_dma_cleanup(drm_device_t * dev) +int via_dma_cleanup(struct drm_device * dev) { if (dev->dev_private) { drm_via_private_t *dev_priv = @@ -157,7 +157,7 @@ int via_dma_cleanup(drm_device_t * dev) return 0; } -static int via_initialize(drm_device_t * dev, +static int via_initialize(struct drm_device * dev, drm_via_private_t * dev_priv, drm_via_dma_init_t * init) { @@ -252,7 +252,7 @@ static int via_dma_init(DRM_IOCTL_ARGS) -static int via_dispatch_cmdbuffer(drm_device_t * dev, drm_via_cmdbuffer_t * cmd) +static int via_dispatch_cmdbuffer(struct drm_device * dev, drm_via_cmdbuffer_t * cmd) { drm_via_private_t *dev_priv; uint32_t *vb; @@ -306,7 +306,7 @@ static int via_dispatch_cmdbuffer(drm_device_t * dev, 
drm_via_cmdbuffer_t * cmd) return 0; } -int via_driver_dma_quiescent(drm_device_t * dev) +int via_driver_dma_quiescent(struct drm_device * dev) { drm_via_private_t *dev_priv = dev->dev_private; @@ -346,7 +346,7 @@ static int via_cmdbuffer(DRM_IOCTL_ARGS) return 0; } -static int via_dispatch_pci_cmdbuffer(drm_device_t * dev, +static int via_dispatch_pci_cmdbuffer(struct drm_device * dev, drm_via_cmdbuffer_t * cmd) { drm_via_private_t *dev_priv = dev->dev_private; @@ -718,7 +718,7 @@ via_dma_blit( DRM_IOCTL_ARGS ) { } #endif -drm_ioctl_desc_t via_ioctls[] = { +struct drm_ioctl_desc via_ioctls[] = { [DRM_IOCTL_NR(DRM_VIA_ALLOCMEM)] = {via_mem_alloc, DRM_AUTH}, [DRM_IOCTL_NR(DRM_VIA_FREEMEM)] = {via_mem_free, DRM_AUTH}, [DRM_IOCTL_NR(DRM_VIA_AGP_INIT)] = {via_agp_init, DRM_AUTH|DRM_MASTER}, diff --git a/shared-core/via_drv.h b/shared-core/via_drv.h index b6dbf6c1..85ddc53a 100644 --- a/shared-core/via_drv.h +++ b/shared-core/via_drv.h @@ -148,7 +148,7 @@ enum via_family { #define VIA_READ8(reg) DRM_READ8(VIA_BASE, reg) #define VIA_WRITE8(reg,val) DRM_WRITE8(VIA_BASE, reg, val) -extern drm_ioctl_desc_t via_ioctls[]; +extern struct drm_ioctl_desc via_ioctls[]; extern int via_max_ioctl; extern int via_fb_init(DRM_IOCTL_ARGS); @@ -161,41 +161,41 @@ extern int via_wait_irq(DRM_IOCTL_ARGS); extern int via_dma_blit_sync( DRM_IOCTL_ARGS ); extern int via_dma_blit( DRM_IOCTL_ARGS ); -extern int via_driver_load(drm_device_t *dev, unsigned long chipset); -extern int via_driver_unload(drm_device_t *dev); -extern int via_final_context(drm_device_t * dev, int context); +extern int via_driver_load(struct drm_device *dev, unsigned long chipset); +extern int via_driver_unload(struct drm_device *dev); +extern int via_final_context(struct drm_device * dev, int context); -extern int via_do_cleanup_map(drm_device_t * dev); -extern int via_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence); +extern int via_do_cleanup_map(struct drm_device * dev); +extern int via_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence); extern irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS); -extern void via_driver_irq_preinstall(drm_device_t * dev); -extern void via_driver_irq_postinstall(drm_device_t * dev); -extern void via_driver_irq_uninstall(drm_device_t * dev); +extern void via_driver_irq_preinstall(struct drm_device * dev); +extern void via_driver_irq_postinstall(struct drm_device * dev); +extern void via_driver_irq_uninstall(struct drm_device * dev); -extern int via_dma_cleanup(drm_device_t * dev); +extern int via_dma_cleanup(struct drm_device * dev); extern void via_init_command_verifier(void); -extern int via_driver_dma_quiescent(drm_device_t * dev); +extern int via_driver_dma_quiescent(struct drm_device * dev); extern void via_init_futex(drm_via_private_t *dev_priv); extern void via_cleanup_futex(drm_via_private_t *dev_priv); extern void via_release_futex(drm_via_private_t *dev_priv, int context); #ifdef VIA_HAVE_CORE_MM -extern void via_reclaim_buffers_locked(drm_device_t *dev, struct file *filp); -extern void via_lastclose(drm_device_t *dev); +extern void via_reclaim_buffers_locked(struct drm_device *dev, struct file *filp); +extern void via_lastclose(struct drm_device *dev); #else -extern int via_init_context(drm_device_t * dev, int context); +extern int via_init_context(struct drm_device * dev, int context); #endif #ifdef VIA_HAVE_DMABLIT -extern void via_dmablit_handler(drm_device_t *dev, int engine, int from_irq); -extern void via_init_dmablit(drm_device_t *dev); +extern void 
via_dmablit_handler(struct drm_device *dev, int engine, int from_irq); +extern void via_init_dmablit(struct drm_device *dev); #endif #ifdef VIA_HAVE_FENCE extern void via_fence_timer(unsigned long data); -extern void via_poke_flush(drm_device_t * dev, uint32_t class); -extern int via_fence_emit_sequence(drm_device_t * dev, uint32_t class, +extern void via_poke_flush(struct drm_device * dev, uint32_t class); +extern int via_fence_emit_sequence(struct drm_device * dev, uint32_t class, uint32_t flags, uint32_t * sequence, uint32_t * native_type); @@ -204,10 +204,10 @@ extern int via_fence_has_irq(struct drm_device * dev, uint32_t class, #endif #ifdef VIA_HAVE_BUFFER -extern drm_ttm_backend_t *via_create_ttm_backend_entry(drm_device_t *dev); +extern drm_ttm_backend_t *via_create_ttm_backend_entry(struct drm_device *dev); extern int via_fence_types(drm_buffer_object_t *bo, uint32_t *type); -extern int via_invalidate_caches(drm_device_t *dev, uint64_t buffer_flags); -extern int via_init_mem_type(drm_device_t *dev, uint32_t type, +extern int via_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags); +extern int via_init_mem_type(struct drm_device *dev, uint32_t type, drm_mem_type_manager_t *man); extern uint32_t via_evict_mask(drm_buffer_object_t *bo); extern int via_move(drm_buffer_object_t *bo, int evict, diff --git a/shared-core/via_irq.c b/shared-core/via_irq.c index 2ac86970..040df548 100644 --- a/shared-core/via_irq.c +++ b/shared-core/via_irq.c @@ -99,7 +99,7 @@ static unsigned time_diff(struct timeval *now,struct timeval *then) irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS) { - drm_device_t *dev = (drm_device_t *) arg; + struct drm_device *dev = (struct drm_device *) arg; drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; u32 status; int handled = 0; @@ -171,7 +171,7 @@ static __inline__ void viadrv_acknowledge_irqs(drm_via_private_t * dev_priv) } } -int via_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence) +int via_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence) { drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; unsigned int cur_vblank; @@ -199,7 +199,7 @@ int via_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence) } static int -via_driver_irq_wait(drm_device_t * dev, unsigned int irq, int force_sequence, +via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequence, unsigned int *sequence) { drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; @@ -253,7 +253,7 @@ via_driver_irq_wait(drm_device_t * dev, unsigned int irq, int force_sequence, * drm_dma.h hooks */ -void via_driver_irq_preinstall(drm_device_t * dev) +void via_driver_irq_preinstall(struct drm_device * dev) { drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; u32 status; @@ -302,7 +302,7 @@ void via_driver_irq_preinstall(drm_device_t * dev) } } -void via_driver_irq_postinstall(drm_device_t * dev) +void via_driver_irq_postinstall(struct drm_device * dev) { drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; u32 status; @@ -321,7 +321,7 @@ void via_driver_irq_postinstall(drm_device_t * dev) } } -void via_driver_irq_uninstall(drm_device_t * dev) +void via_driver_irq_uninstall(struct drm_device * dev) { drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; u32 status; diff --git a/shared-core/via_map.c b/shared-core/via_map.c index 037a1c2c..2381eaa9 100644 --- a/shared-core/via_map.c +++ b/shared-core/via_map.c @@ -25,7 +25,7 @@ #include 
"via_drm.h" #include "via_drv.h" -static int via_do_init_map(drm_device_t * dev, drm_via_init_t * init) +static int via_do_init_map(struct drm_device * dev, drm_via_init_t * init) { drm_via_private_t *dev_priv = dev->dev_private; int ret = 0; @@ -83,7 +83,7 @@ static int via_do_init_map(drm_device_t * dev, drm_via_init_t * init) } -int via_do_cleanup_map(drm_device_t * dev) +int via_do_cleanup_map(struct drm_device * dev) { via_dma_cleanup(dev); @@ -111,7 +111,7 @@ int via_map_init(DRM_IOCTL_ARGS) return -EINVAL; } -int via_driver_load(drm_device_t *dev, unsigned long chipset) +int via_driver_load(struct drm_device *dev, unsigned long chipset) { drm_via_private_t *dev_priv; int ret = 0; @@ -133,7 +133,7 @@ int via_driver_load(drm_device_t *dev, unsigned long chipset) return ret; } -int via_driver_unload(drm_device_t *dev) +int via_driver_unload(struct drm_device *dev) { drm_via_private_t *dev_priv = dev->dev_private; diff --git a/shared-core/via_verifier.c b/shared-core/via_verifier.c index 4b844af0..038bea2f 100644 --- a/shared-core/via_verifier.c +++ b/shared-core/via_verifier.c @@ -252,10 +252,10 @@ eat_words(const uint32_t ** buf, const uint32_t * buf_end, unsigned num_words) static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t * seq, unsigned long offset, unsigned long size, - drm_device_t * dev) + struct drm_device * dev) { #ifdef __linux__ - drm_map_list_t *r_list; + struct drm_map_list *r_list; #endif drm_local_map_t *map = seq->map_cache; @@ -967,7 +967,7 @@ via_parse_vheader6(drm_via_private_t * dev_priv, uint32_t const **buffer, int via_verify_command_stream(const uint32_t * buf, unsigned int size, - drm_device_t * dev, int agp) + struct drm_device * dev, int agp) { drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; @@ -1042,7 +1042,7 @@ via_verify_command_stream(const uint32_t * buf, unsigned int size, } int -via_parse_command_stream(drm_device_t * dev, const uint32_t * buf, +via_parse_command_stream(struct drm_device * dev, const uint32_t * buf, unsigned int size) { diff --git a/shared-core/via_verifier.h b/shared-core/via_verifier.h index 84497c44..dac1db91 100644 --- a/shared-core/via_verifier.h +++ b/shared-core/via_verifier.h @@ -49,7 +49,7 @@ typedef struct { drm_via_sequence_t unfinished; int agp_texture; int multitex; - drm_device_t *dev; + struct drm_device *dev; drm_local_map_t *map_cache; uint32_t vertex_count; int agp; @@ -57,8 +57,8 @@ typedef struct { } drm_via_state_t; extern int via_verify_command_stream(const uint32_t * buf, unsigned int size, - drm_device_t *dev, int agp); -extern int via_parse_command_stream(drm_device_t *dev, const uint32_t * buf, + struct drm_device *dev, int agp); +extern int via_parse_command_stream(struct drm_device *dev, const uint32_t * buf, unsigned int size); #endif From 6dce9e07352e14d2e03d26b8a64a40e111ecab2b Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Mon, 16 Jul 2007 12:48:44 +1000 Subject: [PATCH 117/437] drm: remove hashtab/sman and object typedefs --- linux-core/drmP.h | 2 +- linux-core/drm_bo.c | 12 ++++---- linux-core/drm_bufs.c | 2 +- linux-core/drm_fence.c | 8 +++--- linux-core/drm_fops.c | 8 +++--- linux-core/drm_hashtab.c | 34 +++++++++++----------- linux-core/drm_hashtab.h | 24 ++++++++-------- linux-core/drm_object.c | 62 ++++++++++++++++++++-------------------- linux-core/drm_objects.h | 42 +++++++++++++-------------- linux-core/drm_sman.c | 56 ++++++++++++++++++------------------ linux-core/drm_sman.h | 50 ++++++++++++++++---------------- linux-core/sis_mm.c | 4 +-- 
 linux-core/drmP.h | 2 +-
 linux-core/drm_bo.c | 12 ++++----
 linux-core/drm_bufs.c | 2 +-
 linux-core/drm_fence.c | 8 +++---
 linux-core/drm_fops.c | 8 +++---
 linux-core/drm_hashtab.c | 34 +++++++++++-----------
 linux-core/drm_hashtab.h | 24 ++++++++--------
 linux-core/drm_object.c | 62 ++++++++++++++++++++--------------------
 linux-core/drm_objects.h | 42 +++++++++++++--------------
 linux-core/drm_sman.c | 56 ++++++++++++++++++------------------
 linux-core/drm_sman.h | 50 ++++++++++++++++----------------
 linux-core/sis_mm.c | 4 +--
 linux-core/via_mm.c | 2 +-
 shared-core/sis_drv.h | 2 +-
 shared-core/via_drv.h | 2 +-
 15 files changed, 155 insertions(+), 155 deletions(-)

diff --git a/linux-core/drmP.h b/linux-core/drmP.h
index df7481fe..87a194af 100644
--- a/linux-core/drmP.h
+++ b/linux-core/drmP.h
@@ -418,7 +418,7 @@ struct drm_file {
 struct list_head refd_objects;
 struct list_head user_objects;
- drm_open_hash_t refd_object_hash[_DRM_NO_REF_TYPES];
+ struct drm_open_hash refd_object_hash[_DRM_NO_REF_TYPES];
 void *driver_priv;
 };
diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c
index 10d928ea..30664632 100644
--- a/linux-core/drm_bo.c
+++ b/linux-core/drm_bo.c
@@ -505,7 +505,7 @@ void drm_bo_usage_deref_locked(drm_buffer_object_t ** bo)
 }
 }
-static void drm_bo_base_deref_locked(struct drm_file * priv, drm_user_object_t * uo)
+static void drm_bo_base_deref_locked(struct drm_file * priv, struct drm_user_object * uo)
 {
 drm_buffer_object_t *bo =
 drm_user_object_entry(uo, drm_buffer_object_t, base);
@@ -924,7 +924,7 @@ static int drm_bo_new_mask(drm_buffer_object_t * bo,
 drm_buffer_object_t *drm_lookup_buffer_object(struct drm_file * priv,
 uint32_t handle, int check_owner)
 {
- drm_user_object_t *uo;
+ struct drm_user_object *uo;
 drm_buffer_object_t *bo;
 uo = drm_lookup_user_object(priv, handle);
@@ -1187,7 +1187,7 @@ static int drm_buffer_object_unmap(struct drm_file * priv, uint32_t handle)
 {
 struct drm_device *dev = priv->head->dev;
 drm_buffer_object_t *bo;
- drm_ref_object_t *ro;
+ struct drm_ref_object *ro;
 int ret = 0;
 mutex_lock(&dev->struct_mutex);
@@ -1216,7 +1216,7 @@ static int drm_buffer_object_unmap(struct drm_file * priv, uint32_t handle)
 */
 static void drm_buffer_user_object_unmap(struct drm_file * priv,
- drm_user_object_t * uo,
+ struct drm_user_object * uo,
 drm_ref_t action)
 {
 drm_buffer_object_t *bo =
@@ -1811,7 +1811,7 @@ int drm_bo_destroy_ioctl(DRM_IOCTL_ARGS)
 {
 DRM_DEVICE;
 struct drm_bo_handle_arg arg;
- drm_user_object_t *uo;
+ struct drm_user_object *uo;
 int ret = 0;
 if (!dev->bm.initialized) {
@@ -1879,7 +1879,7 @@ int drm_bo_reference_ioctl(DRM_IOCTL_ARGS)
 struct drm_bo_reference_info_arg arg;
 struct drm_bo_handle_arg *req = &arg.d.req;
 struct drm_bo_info_rep *rep = &arg.d.rep;
- drm_user_object_t *uo;
+ struct drm_user_object *uo;
 int ret;
 if (!dev->bm.initialized) {
diff --git a/linux-core/drm_bufs.c b/linux-core/drm_bufs.c
index 75eeafdd..f766597b 100644
--- a/linux-core/drm_bufs.c
+++ b/linux-core/drm_bufs.c
@@ -63,7 +63,7 @@ struct drm_map_list *drm_find_matching_map(struct drm_device *dev, drm_local_map
 }
 EXPORT_SYMBOL(drm_find_matching_map);
-static int drm_map_handle(struct drm_device *dev, drm_hash_item_t *hash,
+static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
 unsigned long user_token, int hashed_handle)
 {
 int use_hashed_handle;
diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c
index 4f24b4b5..f925621a 100644
--- a/linux-core/drm_fence.c
+++ b/linux-core/drm_fence.c
@@ -180,7 +180,7 @@ void drm_fence_reference_unlocked(struct drm_fence_object **dst,
 }
-static void drm_fence_object_destroy(struct drm_file *priv, drm_user_object_t * base)
+static void drm_fence_object_destroy(struct drm_file *priv, struct drm_user_object * base)
 {
 drm_fence_object_t *fence =
 drm_user_object_entry(base, drm_fence_object_t, base);
@@ -551,7 +551,7 @@ void drm_fence_manager_takedown(struct drm_device * dev)
 drm_fence_object_t *drm_lookup_fence_object(struct drm_file * priv, uint32_t handle)
 {
 struct drm_device *dev = priv->head->dev;
- drm_user_object_t *uo;
+
struct drm_user_object *uo; drm_fence_object_t *fence; mutex_lock(&dev->struct_mutex); @@ -619,7 +619,7 @@ int drm_fence_destroy_ioctl(DRM_IOCTL_ARGS) int ret; drm_fence_manager_t *fm = &dev->fm; struct drm_fence_arg arg; - drm_user_object_t *uo; + struct drm_user_object *uo; ret = 0; if (!fm->initialized) { @@ -648,7 +648,7 @@ int drm_fence_reference_ioctl(DRM_IOCTL_ARGS) drm_fence_manager_t *fm = &dev->fm; struct drm_fence_arg arg; drm_fence_object_t *fence; - drm_user_object_t *uo; + struct drm_user_object *uo; unsigned long flags; ret = 0; diff --git a/linux-core/drm_fops.c b/linux-core/drm_fops.c index 5ea3f9cf..98e581fe 100644 --- a/linux-core/drm_fops.c +++ b/linux-core/drm_fops.c @@ -337,8 +337,8 @@ static void drm_object_release(struct file *filp) { struct drm_file *priv = filp->private_data; struct list_head *head; - drm_user_object_t *user_object; - drm_ref_object_t *ref_object; + struct drm_user_object *user_object; + struct drm_ref_object *ref_object; int i; /* @@ -351,7 +351,7 @@ static void drm_object_release(struct file *filp) { head = &priv->refd_objects; while (head->next != head) { - ref_object = list_entry(head->next, drm_ref_object_t, list); + ref_object = list_entry(head->next, struct drm_ref_object, list); drm_remove_ref_object(priv, ref_object); head = &priv->refd_objects; } @@ -362,7 +362,7 @@ static void drm_object_release(struct file *filp) { head = &priv->user_objects; while (head->next != head) { - user_object = list_entry(head->next, drm_user_object_t, list); + user_object = list_entry(head->next, struct drm_user_object, list); drm_remove_user_object(priv, user_object); head = &priv->user_objects; } diff --git a/linux-core/drm_hashtab.c b/linux-core/drm_hashtab.c index 6f17e114..a8ec8468 100644 --- a/linux-core/drm_hashtab.c +++ b/linux-core/drm_hashtab.c @@ -36,7 +36,7 @@ #include "drm_hashtab.h" #include -int drm_ht_create(drm_open_hash_t * ht, unsigned int order) +int drm_ht_create(struct drm_open_hash * ht, unsigned int order) { unsigned int i; @@ -63,9 +63,9 @@ int drm_ht_create(drm_open_hash_t * ht, unsigned int order) return 0; } -void drm_ht_verbose_list(drm_open_hash_t * ht, unsigned long key) +void drm_ht_verbose_list(struct drm_open_hash * ht, unsigned long key) { - drm_hash_item_t *entry; + struct drm_hash_item *entry; struct hlist_head *h_list; struct hlist_node *list; unsigned int hashed_key; @@ -75,15 +75,15 @@ void drm_ht_verbose_list(drm_open_hash_t * ht, unsigned long key) DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key); h_list = &ht->table[hashed_key]; hlist_for_each(list, h_list) { - entry = hlist_entry(list, drm_hash_item_t, head); + entry = hlist_entry(list, struct drm_hash_item, head); DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key); } } -static struct hlist_node *drm_ht_find_key(drm_open_hash_t * ht, +static struct hlist_node *drm_ht_find_key(struct drm_open_hash * ht, unsigned long key) { - drm_hash_item_t *entry; + struct drm_hash_item *entry; struct hlist_head *h_list; struct hlist_node *list; unsigned int hashed_key; @@ -91,7 +91,7 @@ static struct hlist_node *drm_ht_find_key(drm_open_hash_t * ht, hashed_key = hash_long(key, ht->order); h_list = &ht->table[hashed_key]; hlist_for_each(list, h_list) { - entry = hlist_entry(list, drm_hash_item_t, head); + entry = hlist_entry(list, struct drm_hash_item, head); if (entry->key == key) return list; if (entry->key > key) @@ -100,9 +100,9 @@ static struct hlist_node *drm_ht_find_key(drm_open_hash_t * ht, return NULL; } -int drm_ht_insert_item(drm_open_hash_t * 
ht, drm_hash_item_t * item) +int drm_ht_insert_item(struct drm_open_hash * ht, struct drm_hash_item * item) { - drm_hash_item_t *entry; + struct drm_hash_item *entry; struct hlist_head *h_list; struct hlist_node *list, *parent; unsigned int hashed_key; @@ -112,7 +112,7 @@ int drm_ht_insert_item(drm_open_hash_t * ht, drm_hash_item_t * item) h_list = &ht->table[hashed_key]; parent = NULL; hlist_for_each(list, h_list) { - entry = hlist_entry(list, drm_hash_item_t, head); + entry = hlist_entry(list, struct drm_hash_item, head); if (entry->key == key) return -EINVAL; if (entry->key > key) @@ -131,7 +131,7 @@ int drm_ht_insert_item(drm_open_hash_t * ht, drm_hash_item_t * item) * Just insert an item and return any "bits" bit key that hasn't been * used before. */ -int drm_ht_just_insert_please(drm_open_hash_t * ht, drm_hash_item_t * item, +int drm_ht_just_insert_please(struct drm_open_hash * ht, struct drm_hash_item * item, unsigned long seed, int bits, int shift, unsigned long add) { @@ -155,8 +155,8 @@ int drm_ht_just_insert_please(drm_open_hash_t * ht, drm_hash_item_t * item, return 0; } -int drm_ht_find_item(drm_open_hash_t * ht, unsigned long key, - drm_hash_item_t ** item) +int drm_ht_find_item(struct drm_open_hash * ht, unsigned long key, + struct drm_hash_item ** item) { struct hlist_node *list; @@ -164,11 +164,11 @@ int drm_ht_find_item(drm_open_hash_t * ht, unsigned long key, if (!list) return -EINVAL; - *item = hlist_entry(list, drm_hash_item_t, head); + *item = hlist_entry(list, struct drm_hash_item, head); return 0; } -int drm_ht_remove_key(drm_open_hash_t * ht, unsigned long key) +int drm_ht_remove_key(struct drm_open_hash * ht, unsigned long key) { struct hlist_node *list; @@ -181,14 +181,14 @@ int drm_ht_remove_key(drm_open_hash_t * ht, unsigned long key) return -EINVAL; } -int drm_ht_remove_item(drm_open_hash_t * ht, drm_hash_item_t * item) +int drm_ht_remove_item(struct drm_open_hash * ht, struct drm_hash_item * item) { hlist_del_init(&item->head); ht->fill--; return 0; } -void drm_ht_remove(drm_open_hash_t * ht) +void drm_ht_remove(struct drm_open_hash * ht) { if (ht->table) { if (ht->use_vmalloc) diff --git a/linux-core/drm_hashtab.h b/linux-core/drm_hashtab.h index 613091c9..0f137677 100644 --- a/linux-core/drm_hashtab.h +++ b/linux-core/drm_hashtab.h @@ -37,31 +37,31 @@ #define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member) -typedef struct drm_hash_item{ +struct drm_hash_item { struct hlist_node head; unsigned long key; -} drm_hash_item_t; +}; -typedef struct drm_open_hash{ +struct drm_open_hash { unsigned int size; unsigned int order; unsigned int fill; struct hlist_head *table; int use_vmalloc; -} drm_open_hash_t; +}; -extern int drm_ht_create(drm_open_hash_t *ht, unsigned int order); -extern int drm_ht_insert_item(drm_open_hash_t *ht, drm_hash_item_t *item); -extern int drm_ht_just_insert_please(drm_open_hash_t *ht, drm_hash_item_t *item, +extern int drm_ht_create(struct drm_open_hash *ht, unsigned int order); +extern int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item); +extern int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item, unsigned long seed, int bits, int shift, unsigned long add); -extern int drm_ht_find_item(drm_open_hash_t *ht, unsigned long key, drm_hash_item_t **item); +extern int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item); -extern void drm_ht_verbose_list(drm_open_hash_t *ht, unsigned long key); -extern int 
drm_ht_remove_key(drm_open_hash_t *ht, unsigned long key); -extern int drm_ht_remove_item(drm_open_hash_t *ht, drm_hash_item_t *item); -extern void drm_ht_remove(drm_open_hash_t *ht); +extern void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key); +extern int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key); +extern int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item); +extern void drm_ht_remove(struct drm_open_hash *ht); #endif diff --git a/linux-core/drm_object.c b/linux-core/drm_object.c index 3c60605c..00627725 100644 --- a/linux-core/drm_object.c +++ b/linux-core/drm_object.c @@ -30,7 +30,7 @@ #include "drmP.h" -int drm_add_user_object(struct drm_file * priv, drm_user_object_t * item, +int drm_add_user_object(struct drm_file * priv, struct drm_user_object * item, int shareable) { struct drm_device *dev = priv->head->dev; @@ -51,12 +51,12 @@ int drm_add_user_object(struct drm_file * priv, drm_user_object_t * item, return 0; } -drm_user_object_t *drm_lookup_user_object(struct drm_file * priv, uint32_t key) +struct drm_user_object *drm_lookup_user_object(struct drm_file * priv, uint32_t key) { struct drm_device *dev = priv->head->dev; - drm_hash_item_t *hash; + struct drm_hash_item *hash; int ret; - drm_user_object_t *item; + struct drm_user_object *item; DRM_ASSERT_LOCKED(&dev->struct_mutex); @@ -64,10 +64,10 @@ drm_user_object_t *drm_lookup_user_object(struct drm_file * priv, uint32_t key) if (ret) { return NULL; } - item = drm_hash_entry(hash, drm_user_object_t, hash); + item = drm_hash_entry(hash, struct drm_user_object, hash); if (priv != item->owner) { - drm_open_hash_t *ht = &priv->refd_object_hash[_DRM_REF_USE]; + struct drm_open_hash *ht = &priv->refd_object_hash[_DRM_REF_USE]; ret = drm_ht_find_item(ht, (unsigned long)item, &hash); if (ret) { DRM_ERROR("Object not registered for usage\n"); @@ -77,7 +77,7 @@ drm_user_object_t *drm_lookup_user_object(struct drm_file * priv, uint32_t key) return item; } -static void drm_deref_user_object(struct drm_file * priv, drm_user_object_t * item) +static void drm_deref_user_object(struct drm_file * priv, struct drm_user_object * item) { struct drm_device *dev = priv->head->dev; int ret; @@ -90,7 +90,7 @@ static void drm_deref_user_object(struct drm_file * priv, drm_user_object_t * it } } -int drm_remove_user_object(struct drm_file * priv, drm_user_object_t * item) +int drm_remove_user_object(struct drm_file * priv, struct drm_user_object * item) { DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex); @@ -105,7 +105,7 @@ int drm_remove_user_object(struct drm_file * priv, drm_user_object_t * item) return 0; } -static int drm_object_ref_action(struct drm_file * priv, drm_user_object_t * ro, +static int drm_object_ref_action(struct drm_file * priv, struct drm_user_object * ro, drm_ref_t action) { int ret = 0; @@ -124,12 +124,12 @@ static int drm_object_ref_action(struct drm_file * priv, drm_user_object_t * ro, return ret; } -int drm_add_ref_object(struct drm_file * priv, drm_user_object_t * referenced_object, +int drm_add_ref_object(struct drm_file * priv, struct drm_user_object * referenced_object, drm_ref_t ref_action) { int ret = 0; - drm_ref_object_t *item; - drm_open_hash_t *ht = &priv->refd_object_hash[ref_action]; + struct drm_ref_object *item; + struct drm_open_hash *ht = &priv->refd_object_hash[ref_action]; DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex); if (!referenced_object->shareable && priv != referenced_object->owner) { @@ -181,11 +181,11 @@ int drm_add_ref_object(struct 
drm_file * priv, drm_user_object_t * referenced_ob return ret; } -drm_ref_object_t *drm_lookup_ref_object(struct drm_file * priv, - drm_user_object_t * referenced_object, +struct drm_ref_object *drm_lookup_ref_object(struct drm_file * priv, + struct drm_user_object * referenced_object, drm_ref_t ref_action) { - drm_hash_item_t *hash; + struct drm_hash_item *hash; int ret; DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex); @@ -194,31 +194,31 @@ drm_ref_object_t *drm_lookup_ref_object(struct drm_file * priv, if (ret) return NULL; - return drm_hash_entry(hash, drm_ref_object_t, hash); + return drm_hash_entry(hash, struct drm_ref_object, hash); } static void drm_remove_other_references(struct drm_file * priv, - drm_user_object_t * ro) + struct drm_user_object * ro) { int i; - drm_open_hash_t *ht; - drm_hash_item_t *hash; - drm_ref_object_t *item; + struct drm_open_hash *ht; + struct drm_hash_item *hash; + struct drm_ref_object *item; for (i = _DRM_REF_USE + 1; i < _DRM_NO_REF_TYPES; ++i) { ht = &priv->refd_object_hash[i]; while (!drm_ht_find_item(ht, (unsigned long)ro, &hash)) { - item = drm_hash_entry(hash, drm_ref_object_t, hash); + item = drm_hash_entry(hash, struct drm_ref_object, hash); drm_remove_ref_object(priv, item); } } } -void drm_remove_ref_object(struct drm_file * priv, drm_ref_object_t * item) +void drm_remove_ref_object(struct drm_file * priv, struct drm_ref_object * item) { int ret; - drm_user_object_t *user_object = (drm_user_object_t *) item->hash.key; - drm_open_hash_t *ht = &priv->refd_object_hash[item->unref_action]; + struct drm_user_object *user_object = (struct drm_user_object *) item->hash.key; + struct drm_open_hash *ht = &priv->refd_object_hash[item->unref_action]; drm_ref_t unref_action; DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex); @@ -245,11 +245,11 @@ void drm_remove_ref_object(struct drm_file * priv, drm_ref_object_t * item) } int drm_user_object_ref(struct drm_file * priv, uint32_t user_token, - drm_object_type_t type, drm_user_object_t ** object) + enum drm_object_type type, struct drm_user_object ** object) { struct drm_device *dev = priv->head->dev; - drm_user_object_t *uo; - drm_hash_item_t *hash; + struct drm_user_object *uo; + struct drm_hash_item *hash; int ret; mutex_lock(&dev->struct_mutex); @@ -258,7 +258,7 @@ int drm_user_object_ref(struct drm_file * priv, uint32_t user_token, DRM_ERROR("Could not find user object to reference.\n"); goto out_err; } - uo = drm_hash_entry(hash, drm_user_object_t, hash); + uo = drm_hash_entry(hash, struct drm_user_object, hash); if (uo->type != type) { ret = -EINVAL; goto out_err; @@ -275,11 +275,11 @@ int drm_user_object_ref(struct drm_file * priv, uint32_t user_token, } int drm_user_object_unref(struct drm_file * priv, uint32_t user_token, - drm_object_type_t type) + enum drm_object_type type) { struct drm_device *dev = priv->head->dev; - drm_user_object_t *uo; - drm_ref_object_t *ro; + struct drm_user_object *uo; + struct drm_ref_object *ro; int ret; mutex_lock(&dev->struct_mutex); diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index cfca5bf0..c4428a7b 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -39,14 +39,14 @@ struct drm_device; #define drm_user_object_entry(_ptr, _type, _member) container_of(_ptr, _type, _member) -typedef enum { +enum drm_object_type { drm_fence_type, drm_buffer_type, drm_ttm_type /* * Add other user space object types here. 
*/ -} drm_object_type_t; +}; /* * A user object is a structure that helps the drm give out user handles @@ -55,10 +55,10 @@ typedef enum { * Designed to be accessible using a user space 32-bit handle. */ -typedef struct drm_user_object { - drm_hash_item_t hash; +struct drm_user_object { + struct drm_hash_item hash; struct list_head list; - drm_object_type_t type; + enum drm_object_type type; atomic_t refcount; int shareable; struct drm_file *owner; @@ -68,7 +68,7 @@ typedef struct drm_user_object { void (*unref) (struct drm_file * priv, struct drm_user_object * obj, drm_ref_t unref_action); void (*remove) (struct drm_file * priv, struct drm_user_object * obj); -} drm_user_object_t; +}; /* * A ref object is a structure which is used to @@ -77,24 +77,24 @@ typedef struct drm_user_object { * process exits. Designed to be accessible using a pointer to the _user_ object. */ -typedef struct drm_ref_object { - drm_hash_item_t hash; +struct drm_ref_object { + struct drm_hash_item hash; struct list_head list; atomic_t refcount; drm_ref_t unref_action; -} drm_ref_object_t; +}; /** * Must be called with the struct_mutex held. */ -extern int drm_add_user_object(struct drm_file * priv, drm_user_object_t * item, +extern int drm_add_user_object(struct drm_file * priv, struct drm_user_object * item, int shareable); /** * Must be called with the struct_mutex held. */ -extern drm_user_object_t *drm_lookup_user_object(struct drm_file * priv, +extern struct drm_user_object *drm_lookup_user_object(struct drm_file * priv, uint32_t key); /* @@ -104,22 +104,22 @@ extern drm_user_object_t *drm_lookup_user_object(struct drm_file * priv, * This function may temporarily release the struct_mutex. */ -extern int drm_remove_user_object(struct drm_file * priv, drm_user_object_t * item); +extern int drm_remove_user_object(struct drm_file * priv, struct drm_user_object * item); /* * Must be called with the struct_mutex held. May temporarily release it. */ extern int drm_add_ref_object(struct drm_file * priv, - drm_user_object_t * referenced_object, + struct drm_user_object * referenced_object, drm_ref_t ref_action); /* * Must be called with the struct_mutex held. */ -drm_ref_object_t *drm_lookup_ref_object(struct drm_file * priv, - drm_user_object_t * referenced_object, +struct drm_ref_object *drm_lookup_ref_object(struct drm_file * priv, + struct drm_user_object * referenced_object, drm_ref_t ref_action); /* * Must be called with the struct_mutex held. @@ -128,19 +128,19 @@ drm_ref_object_t *drm_lookup_ref_object(struct drm_file * priv, * This function may temporarily release the struct_mutex. */ -extern void drm_remove_ref_object(struct drm_file * priv, drm_ref_object_t * item); +extern void drm_remove_ref_object(struct drm_file * priv, struct drm_ref_object * item); extern int drm_user_object_ref(struct drm_file * priv, uint32_t user_token, - drm_object_type_t type, - drm_user_object_t ** object); + enum drm_object_type type, + struct drm_user_object ** object); extern int drm_user_object_unref(struct drm_file * priv, uint32_t user_token, - drm_object_type_t type); + enum drm_object_type type); /*************************************************** * Fence objects. 
(drm_fence.c) */ typedef struct drm_fence_object { - drm_user_object_t base; + struct drm_user_object base; struct drm_device *dev; atomic_t usage; @@ -328,7 +328,7 @@ typedef struct drm_bo_mem_reg { typedef struct drm_buffer_object { struct drm_device *dev; - drm_user_object_t base; + struct drm_user_object base; /* * If there is a possibility that the usage variable is zero, diff --git a/linux-core/drm_sman.c b/linux-core/drm_sman.c index 8e4bfbd8..ece80bed 100644 --- a/linux-core/drm_sman.c +++ b/linux-core/drm_sman.c @@ -39,12 +39,12 @@ #include "drm_sman.h" typedef struct drm_owner_item { - drm_hash_item_t owner_hash; + struct drm_hash_item owner_hash; struct list_head sman_list; struct list_head mem_blocks; } drm_owner_item_t; -void drm_sman_takedown(drm_sman_t * sman) +void drm_sman_takedown(struct drm_sman * sman) { drm_ht_remove(&sman->user_hash_tab); drm_ht_remove(&sman->owner_hash_tab); @@ -56,12 +56,12 @@ void drm_sman_takedown(drm_sman_t * sman) EXPORT_SYMBOL(drm_sman_takedown); int -drm_sman_init(drm_sman_t * sman, unsigned int num_managers, +drm_sman_init(struct drm_sman * sman, unsigned int num_managers, unsigned int user_order, unsigned int owner_order) { int ret = 0; - sman->mm = (drm_sman_mm_t *) drm_calloc(num_managers, sizeof(*sman->mm), + sman->mm = (struct drm_sman_mm *) drm_calloc(num_managers, sizeof(*sman->mm), DRM_MEM_MM); if (!sman->mm) { ret = -ENOMEM; @@ -120,10 +120,10 @@ static unsigned long drm_sman_mm_offset(void *private, void *ref) } int -drm_sman_set_range(drm_sman_t * sman, unsigned int manager, +drm_sman_set_range(struct drm_sman * sman, unsigned int manager, unsigned long start, unsigned long size) { - drm_sman_mm_t *sman_mm; + struct drm_sman_mm *sman_mm; struct drm_mm *mm; int ret; @@ -153,8 +153,8 @@ drm_sman_set_range(drm_sman_t * sman, unsigned int manager, EXPORT_SYMBOL(drm_sman_set_range); int -drm_sman_set_manager(drm_sman_t * sman, unsigned int manager, - drm_sman_mm_t * allocator) +drm_sman_set_manager(struct drm_sman * sman, unsigned int manager, + struct drm_sman_mm * allocator) { BUG_ON(manager >= sman->num_managers); sman->mm[manager] = *allocator; @@ -163,11 +163,11 @@ drm_sman_set_manager(drm_sman_t * sman, unsigned int manager, } EXPORT_SYMBOL(drm_sman_set_manager); -static drm_owner_item_t *drm_sman_get_owner_item(drm_sman_t * sman, +static drm_owner_item_t *drm_sman_get_owner_item(struct drm_sman * sman, unsigned long owner) { int ret; - drm_hash_item_t *owner_hash_item; + struct drm_hash_item *owner_hash_item; drm_owner_item_t *owner_item; ret = drm_ht_find_item(&sman->owner_hash_tab, owner, &owner_hash_item); @@ -194,14 +194,14 @@ out: return NULL; } -drm_memblock_item_t *drm_sman_alloc(drm_sman_t *sman, unsigned int manager, +struct drm_memblock_item *drm_sman_alloc(struct drm_sman *sman, unsigned int manager, unsigned long size, unsigned alignment, unsigned long owner) { void *tmp; - drm_sman_mm_t *sman_mm; + struct drm_sman_mm *sman_mm; drm_owner_item_t *owner_item; - drm_memblock_item_t *memblock; + struct drm_memblock_item *memblock; BUG_ON(manager >= sman->num_managers); @@ -246,9 +246,9 @@ out: EXPORT_SYMBOL(drm_sman_alloc); -static void drm_sman_free(drm_memblock_item_t *item) +static void drm_sman_free(struct drm_memblock_item *item) { - drm_sman_t *sman = item->sman; + struct drm_sman *sman = item->sman; list_del(&item->owner_list); drm_ht_remove_item(&sman->user_hash_tab, &item->user_hash); @@ -256,22 +256,22 @@ static void drm_sman_free(drm_memblock_item_t *item) drm_free(item, sizeof(*item), DRM_MEM_MM); } -int 
drm_sman_free_key(drm_sman_t *sman, unsigned int key) +int drm_sman_free_key(struct drm_sman *sman, unsigned int key) { - drm_hash_item_t *hash_item; - drm_memblock_item_t *memblock_item; + struct drm_hash_item *hash_item; + struct drm_memblock_item *memblock_item; if (drm_ht_find_item(&sman->user_hash_tab, key, &hash_item)) return -EINVAL; - memblock_item = drm_hash_entry(hash_item, drm_memblock_item_t, user_hash); + memblock_item = drm_hash_entry(hash_item, struct drm_memblock_item, user_hash); drm_sman_free(memblock_item); return 0; } EXPORT_SYMBOL(drm_sman_free_key); -static void drm_sman_remove_owner(drm_sman_t *sman, +static void drm_sman_remove_owner(struct drm_sman *sman, drm_owner_item_t *owner_item) { list_del(&owner_item->sman_list); @@ -279,10 +279,10 @@ static void drm_sman_remove_owner(drm_sman_t *sman, drm_free(owner_item, sizeof(*owner_item), DRM_MEM_MM); } -int drm_sman_owner_clean(drm_sman_t *sman, unsigned long owner) +int drm_sman_owner_clean(struct drm_sman *sman, unsigned long owner) { - drm_hash_item_t *hash_item; + struct drm_hash_item *hash_item; drm_owner_item_t *owner_item; if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) { @@ -300,10 +300,10 @@ int drm_sman_owner_clean(drm_sman_t *sman, unsigned long owner) EXPORT_SYMBOL(drm_sman_owner_clean); -static void drm_sman_do_owner_cleanup(drm_sman_t *sman, +static void drm_sman_do_owner_cleanup(struct drm_sman *sman, drm_owner_item_t *owner_item) { - drm_memblock_item_t *entry, *next; + struct drm_memblock_item *entry, *next; list_for_each_entry_safe(entry, next, &owner_item->mem_blocks, owner_list) { @@ -312,10 +312,10 @@ static void drm_sman_do_owner_cleanup(drm_sman_t *sman, drm_sman_remove_owner(sman, owner_item); } -void drm_sman_owner_cleanup(drm_sman_t *sman, unsigned long owner) +void drm_sman_owner_cleanup(struct drm_sman *sman, unsigned long owner) { - drm_hash_item_t *hash_item; + struct drm_hash_item *hash_item; drm_owner_item_t *owner_item; if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) { @@ -329,11 +329,11 @@ void drm_sman_owner_cleanup(drm_sman_t *sman, unsigned long owner) EXPORT_SYMBOL(drm_sman_owner_cleanup); -void drm_sman_cleanup(drm_sman_t *sman) +void drm_sman_cleanup(struct drm_sman *sman) { drm_owner_item_t *entry, *next; unsigned int i; - drm_sman_mm_t *sman_mm; + struct drm_sman_mm *sman_mm; list_for_each_entry_safe(entry, next, &sman->owner_items, sman_list) { drm_sman_do_owner_cleanup(sman, entry); diff --git a/linux-core/drm_sman.h b/linux-core/drm_sman.h index ddc732a1..39a39fef 100644 --- a/linux-core/drm_sman.h +++ b/linux-core/drm_sman.h @@ -50,7 +50,7 @@ * for memory management. */ -typedef struct drm_sman_mm { +struct drm_sman_mm { /* private info. 
If allocated, needs to be destroyed by the destroy function */ void *private; @@ -74,30 +74,30 @@ typedef struct drm_sman_mm { "alloc" function */ unsigned long (*offset) (void *private, void *ref); -} drm_sman_mm_t; +}; -typedef struct drm_memblock_item { +struct drm_memblock_item { struct list_head owner_list; - drm_hash_item_t user_hash; + struct drm_hash_item user_hash; void *mm_info; - drm_sman_mm_t *mm; + struct drm_sman_mm *mm; struct drm_sman *sman; -} drm_memblock_item_t; +}; -typedef struct drm_sman { - drm_sman_mm_t *mm; +struct drm_sman { + struct drm_sman_mm *mm; int num_managers; - drm_open_hash_t owner_hash_tab; - drm_open_hash_t user_hash_tab; + struct drm_open_hash owner_hash_tab; + struct drm_open_hash user_hash_tab; struct list_head owner_items; -} drm_sman_t; +}; /* * Take down a memory manager. This function should only be called after a * successful init and after a call to drm_sman_cleanup. */ -extern void drm_sman_takedown(drm_sman_t * sman); +extern void drm_sman_takedown(struct drm_sman * sman); /* * Allocate structures for a manager. @@ -112,7 +112,7 @@ extern void drm_sman_takedown(drm_sman_t * sman); * */ -extern int drm_sman_init(drm_sman_t * sman, unsigned int num_managers, +extern int drm_sman_init(struct drm_sman * sman, unsigned int num_managers, unsigned int user_order, unsigned int owner_order); /* @@ -120,7 +120,7 @@ extern int drm_sman_init(drm_sman_t * sman, unsigned int num_managers, * manager unless a customized allogator is used. */ -extern int drm_sman_set_range(drm_sman_t * sman, unsigned int manager, +extern int drm_sman_set_range(struct drm_sman * sman, unsigned int manager, unsigned long start, unsigned long size); /* @@ -129,23 +129,23 @@ extern int drm_sman_set_range(drm_sman_t * sman, unsigned int manager, * so it can be destroyed after this call. */ -extern int drm_sman_set_manager(drm_sman_t * sman, unsigned int mananger, - drm_sman_mm_t * allocator); +extern int drm_sman_set_manager(struct drm_sman * sman, unsigned int mananger, + struct drm_sman_mm * allocator); /* * Allocate a memory block. Aligment is not implemented yet. */ -extern drm_memblock_item_t *drm_sman_alloc(drm_sman_t * sman, - unsigned int manager, - unsigned long size, - unsigned alignment, - unsigned long owner); +extern struct drm_memblock_item *drm_sman_alloc(struct drm_sman * sman, + unsigned int manager, + unsigned long size, + unsigned alignment, + unsigned long owner); /* * Free a memory block identified by its user hash key. */ -extern int drm_sman_free_key(drm_sman_t * sman, unsigned int key); +extern int drm_sman_free_key(struct drm_sman * sman, unsigned int key); /* * returns 1 iff there are no stale memory blocks associated with this owner. @@ -154,7 +154,7 @@ extern int drm_sman_free_key(drm_sman_t * sman, unsigned int key); * resources associated with owner. */ -extern int drm_sman_owner_clean(drm_sman_t * sman, unsigned long owner); +extern int drm_sman_owner_clean(struct drm_sman * sman, unsigned long owner); /* * Frees all stale memory blocks associated with this owner. Note that this @@ -164,13 +164,13 @@ extern int drm_sman_owner_clean(drm_sman_t * sman, unsigned long owner); * is not going to be referenced anymore. */ -extern void drm_sman_owner_cleanup(drm_sman_t * sman, unsigned long owner); +extern void drm_sman_owner_cleanup(struct drm_sman * sman, unsigned long owner); /* * Frees all stale memory blocks associated with the memory manager. * See idling above. 
 */
-extern void drm_sman_cleanup(drm_sman_t * sman);
+extern void drm_sman_cleanup(struct drm_sman * sman);
 #endif
diff --git a/linux-core/sis_mm.c b/linux-core/sis_mm.c
index 306ed453..edbf8bf4 100644
--- a/linux-core/sis_mm.c
+++ b/linux-core/sis_mm.c
@@ -93,7 +93,7 @@ static int sis_fb_init(DRM_IOCTL_ARGS)
 mutex_lock(&dev->struct_mutex);
 #if defined(__linux__) && defined(CONFIG_FB_SIS)
 {
- drm_sman_mm_t sman_mm;
+ struct drm_sman_mm sman_mm;
 sman_mm.private = (void *)0xFFFFFFFF;
 sman_mm.allocate = sis_sman_mm_allocate;
 sman_mm.free = sis_sman_mm_free;
@@ -129,7 +129,7 @@ static int sis_drm_alloc(struct drm_device * dev, struct drm_file * priv,
 drm_sis_mem_t __user *argp = (drm_sis_mem_t __user *) data;
 drm_sis_mem_t mem;
 int retval = 0;
- drm_memblock_item_t *item;
+ struct drm_memblock_item *item;
 DRM_COPY_FROM_USER_IOCTL(mem, argp, sizeof(mem));
diff --git a/linux-core/via_mm.c b/linux-core/via_mm.c
index 48f5fd09..1ac51050 100644
--- a/linux-core/via_mm.c
+++ b/linux-core/via_mm.c
@@ -129,7 +129,7 @@ int via_mem_alloc(DRM_IOCTL_ARGS)
 drm_via_mem_t mem;
 int retval = 0;
- drm_memblock_item_t *item;
+ struct drm_memblock_item *item;
 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
 unsigned long tmpSize;
diff --git a/shared-core/sis_drv.h b/shared-core/sis_drv.h
index c174e294..57d60133 100644
--- a/shared-core/sis_drv.h
+++ b/shared-core/sis_drv.h
@@ -58,7 +58,7 @@ enum sis_family {
 typedef struct drm_sis_private {
 drm_local_map_t *mmio;
 unsigned int idle_fault;
- drm_sman_t sman;
+ struct drm_sman sman;
 unsigned int chipset;
 int vram_initialized;
 int agp_initialized;
diff --git a/shared-core/via_drv.h b/shared-core/via_drv.h
index 85ddc53a..9ffc7a51 100644
--- a/shared-core/via_drv.h
+++ b/shared-core/via_drv.h
@@ -116,7 +116,7 @@ typedef struct drm_via_private {
 /* Memory manager stuff */
 #ifdef VIA_HAVE_CORE_MM
 unsigned int idle_fault;
- drm_sman_t sman;
+ struct drm_sman sman;
 int vram_initialized;
 int agp_initialized;
 unsigned long vram_offset;

From be85ad0333b0c28129c2e4635f92780816308aa6 Mon Sep 17 00:00:00 2001
From: Dave Airlie
Date: Mon, 16 Jul 2007 13:37:02 +1000
Subject: [PATCH 118/437] drm: detypedef ttm/bo/fence code

---
 linux-core/drmP.h | 8 +-
 linux-core/drm_agpsupport.c | 12 +-
 linux-core/drm_bo.c | 242 ++++++++++++++++++------------------
 linux-core/drm_bo_move.c | 58 ++++-----
 linux-core/drm_compat.c | 18 +--
 linux-core/drm_fence.c | 124 +++++++++---------
 linux-core/drm_objects.h | 126 +++++++++----------
 linux-core/drm_proc.c | 4 +-
 linux-core/drm_ttm.c | 40 +++---
 linux-core/drm_vm.c | 12 +-
 linux-core/i915_buffer.c | 26 ++--
 linux-core/i915_drv.c | 4 +-
 linux-core/i915_fence.c | 10 +-
 linux-core/via_buffer.c | 8 +-
 linux-core/via_fence.c | 8 +-
 shared-core/i915_drv.h | 12 +-
 shared-core/via_drv.c | 4 +-
 shared-core/via_drv.h | 12 +-
 18 files changed, 364 insertions(+), 364 deletions(-)

diff --git a/linux-core/drmP.h b/linux-core/drmP.h
index 87a194af..142a04a1 100644
--- a/linux-core/drmP.h
+++ b/linux-core/drmP.h
@@ -806,8 +806,8 @@ struct drm_device {
 unsigned int agp_buffer_token;
 struct drm_head primary; /**< primary screen head */
- drm_fence_manager_t fm;
- drm_buffer_manager_t bm;
+ struct drm_fence_manager fm;
+ struct drm_buffer_manager bm;
 /** \name Drawable information */
 /*@{ */
@@ -818,7 +818,7 @@ struct drm_device {
 #if __OS_HAS_AGP
 struct drm_agp_ttm_backend {
- drm_ttm_backend_t backend;
+ struct drm_ttm_backend backend;
 DRM_AGP_MEM *mem;
 struct agp_bridge_data *bridge;
 int populated;
@@ -1103,7 +1103,7 @@ extern DRM_AGP_MEM
*drm_agp_allocate_memory(struct agp_bridge_data *bridge, size extern int drm_agp_free_memory(DRM_AGP_MEM * handle); extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start); extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle); -extern drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev); +extern struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev); /* Stub support (drm_stub.h) */ extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent, struct drm_driver *driver); diff --git a/linux-core/drm_agpsupport.c b/linux-core/drm_agpsupport.c index 541d95cd..57c88638 100644 --- a/linux-core/drm_agpsupport.c +++ b/linux-core/drm_agpsupport.c @@ -554,7 +554,7 @@ int drm_agp_unbind_memory(DRM_AGP_MEM * handle) #define AGP_REQUIRED_MAJOR 0 #define AGP_REQUIRED_MINOR 102 -static int drm_agp_needs_unbind_cache_adjust(drm_ttm_backend_t *backend) { +static int drm_agp_needs_unbind_cache_adjust(struct drm_ttm_backend *backend) { return ((backend->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1); } @@ -590,7 +590,7 @@ static int drm_agp_populate(struct drm_ttm_backend *backend, unsigned long num_p return 0; } -static int drm_agp_bind_ttm(drm_ttm_backend_t *backend, +static int drm_agp_bind_ttm(struct drm_ttm_backend *backend, unsigned long offset, int cached) { @@ -612,7 +612,7 @@ static int drm_agp_bind_ttm(drm_ttm_backend_t *backend, return ret; } -static int drm_agp_unbind_ttm(drm_ttm_backend_t *backend) { +static int drm_agp_unbind_ttm(struct drm_ttm_backend *backend) { struct drm_agp_ttm_backend *agp_be = container_of(backend, struct drm_agp_ttm_backend, backend); @@ -624,7 +624,7 @@ static int drm_agp_unbind_ttm(drm_ttm_backend_t *backend) { return 0; } -static void drm_agp_clear_ttm(drm_ttm_backend_t *backend) { +static void drm_agp_clear_ttm(struct drm_ttm_backend *backend) { struct drm_agp_ttm_backend *agp_be = container_of(backend, struct drm_agp_ttm_backend, backend); @@ -640,7 +640,7 @@ static void drm_agp_clear_ttm(drm_ttm_backend_t *backend) { agp_be->mem = NULL; } -static void drm_agp_destroy_ttm(drm_ttm_backend_t *backend) { +static void drm_agp_destroy_ttm(struct drm_ttm_backend *backend) { struct drm_agp_ttm_backend *agp_be; @@ -656,7 +656,7 @@ static void drm_agp_destroy_ttm(drm_ttm_backend_t *backend) { } } -static drm_ttm_backend_func_t agp_ttm_backend = +static struct drm_ttm_backend_func agp_ttm_backend = { .needs_ub_cache_adjust = drm_agp_needs_unbind_cache_adjust, .populate = drm_agp_populate, diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 30664632..a81dfbde 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -49,10 +49,10 @@ * */ -static void drm_bo_destroy_locked(drm_buffer_object_t * bo); -static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo); -static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo); -static void drm_bo_unmap_virtual(drm_buffer_object_t * bo); +static void drm_bo_destroy_locked(struct drm_buffer_object * bo); +static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo); +static void drm_bo_takedown_vm_locked(struct drm_buffer_object * bo); +static void drm_bo_unmap_virtual(struct drm_buffer_object * bo); static inline uint32_t drm_bo_type_flags(unsigned type) { @@ -63,9 +63,9 @@ static inline uint32_t drm_bo_type_flags(unsigned type) * bo locked. dev->struct_mutex locked. 
*/ -void drm_bo_add_to_pinned_lru(drm_buffer_object_t * bo) +void drm_bo_add_to_pinned_lru(struct drm_buffer_object * bo) { - drm_mem_type_manager_t *man; + struct drm_mem_type_manager *man; DRM_ASSERT_LOCKED(&bo->dev->struct_mutex); DRM_ASSERT_LOCKED(&bo->mutex); @@ -74,9 +74,9 @@ void drm_bo_add_to_pinned_lru(drm_buffer_object_t * bo) list_add_tail(&bo->pinned_lru, &man->pinned); } -void drm_bo_add_to_lru(drm_buffer_object_t * bo) +void drm_bo_add_to_lru(struct drm_buffer_object * bo) { - drm_mem_type_manager_t *man; + struct drm_mem_type_manager *man; DRM_ASSERT_LOCKED(&bo->dev->struct_mutex); @@ -89,7 +89,7 @@ void drm_bo_add_to_lru(drm_buffer_object_t * bo) } } -static int drm_bo_vm_pre_move(drm_buffer_object_t * bo, int old_is_pci) +static int drm_bo_vm_pre_move(struct drm_buffer_object * bo, int old_is_pci) { #ifdef DRM_ODD_MM_COMPAT int ret; @@ -112,7 +112,7 @@ static int drm_bo_vm_pre_move(drm_buffer_object_t * bo, int old_is_pci) return 0; } -static void drm_bo_vm_post_move(drm_buffer_object_t * bo) +static void drm_bo_vm_post_move(struct drm_buffer_object * bo) { #ifdef DRM_ODD_MM_COMPAT int ret; @@ -133,7 +133,7 @@ static void drm_bo_vm_post_move(drm_buffer_object_t * bo) * Call bo->mutex locked. */ -static int drm_bo_add_ttm(drm_buffer_object_t * bo) +static int drm_bo_add_ttm(struct drm_buffer_object * bo) { struct drm_device *dev = bo->dev; int ret = 0; @@ -164,16 +164,16 @@ static int drm_bo_add_ttm(drm_buffer_object_t * bo) return ret; } -static int drm_bo_handle_move_mem(drm_buffer_object_t * bo, - drm_bo_mem_reg_t * mem, +static int drm_bo_handle_move_mem(struct drm_buffer_object * bo, + struct drm_bo_mem_reg * mem, int evict, int no_wait) { struct drm_device *dev = bo->dev; - drm_buffer_manager_t *bm = &dev->bm; + struct drm_buffer_manager *bm = &dev->bm; int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem); int new_is_pci = drm_mem_reg_is_pci(dev, mem); - drm_mem_type_manager_t *old_man = &bm->man[bo->mem.mem_type]; - drm_mem_type_manager_t *new_man = &bm->man[mem->mem_type]; + struct drm_mem_type_manager *old_man = &bm->man[bo->mem.mem_type]; + struct drm_mem_type_manager *new_man = &bm->man[mem->mem_type]; int ret = 0; if (old_is_pci || new_is_pci) @@ -201,7 +201,7 @@ static int drm_bo_handle_move_mem(drm_buffer_object_t * bo, if ((bo->mem.mem_type == DRM_BO_MEM_LOCAL) && bo->ttm == NULL) { - drm_bo_mem_reg_t *old_mem = &bo->mem; + struct drm_bo_mem_reg *old_mem = &bo->mem; uint64_t save_flags = old_mem->flags; uint64_t save_mask = old_mem->mask; @@ -266,7 +266,7 @@ static int drm_bo_handle_move_mem(drm_buffer_object_t * bo, * Wait until the buffer is idle. */ -int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals, +int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals, int no_wait) { int ret; @@ -292,10 +292,10 @@ int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals, return 0; } -static int drm_bo_expire_fence(drm_buffer_object_t * bo, int allow_errors) +static int drm_bo_expire_fence(struct drm_buffer_object * bo, int allow_errors) { struct drm_device *dev = bo->dev; - drm_buffer_manager_t *bm = &dev->bm; + struct drm_buffer_manager *bm = &dev->bm; if (bo->fence) { if (bm->nice_mode) { @@ -327,10 +327,10 @@ static int drm_bo_expire_fence(drm_buffer_object_t * bo, int allow_errors) * fence object and removing from lru lists and memory managers. 
*/ -static void drm_bo_cleanup_refs(drm_buffer_object_t * bo, int remove_all) +static void drm_bo_cleanup_refs(struct drm_buffer_object * bo, int remove_all) { struct drm_device *dev = bo->dev; - drm_buffer_manager_t *bm = &dev->bm; + struct drm_buffer_manager *bm = &dev->bm; DRM_ASSERT_LOCKED(&dev->struct_mutex); @@ -389,10 +389,10 @@ static void drm_bo_cleanup_refs(drm_buffer_object_t * bo, int remove_all) * to the buffer object. Then destroy it. */ -static void drm_bo_destroy_locked(drm_buffer_object_t * bo) +static void drm_bo_destroy_locked(struct drm_buffer_object * bo) { struct drm_device *dev = bo->dev; - drm_buffer_manager_t *bm = &dev->bm; + struct drm_buffer_manager *bm = &dev->bm; DRM_ASSERT_LOCKED(&dev->struct_mutex); @@ -440,17 +440,17 @@ static void drm_bo_destroy_locked(drm_buffer_object_t * bo) static void drm_bo_delayed_delete(struct drm_device * dev, int remove_all) { - drm_buffer_manager_t *bm = &dev->bm; + struct drm_buffer_manager *bm = &dev->bm; - drm_buffer_object_t *entry, *nentry; + struct drm_buffer_object *entry, *nentry; struct list_head *list, *next; list_for_each_safe(list, next, &bm->ddestroy) { - entry = list_entry(list, drm_buffer_object_t, ddestroy); + entry = list_entry(list, struct drm_buffer_object, ddestroy); nentry = NULL; if (next != &bm->ddestroy) { - nentry = list_entry(next, drm_buffer_object_t, + nentry = list_entry(next, struct drm_buffer_object, ddestroy); atomic_inc(&nentry->usage); } @@ -471,10 +471,10 @@ static void drm_bo_delayed_workqueue(struct work_struct *work) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) struct drm_device *dev = (struct drm_device *) data; - drm_buffer_manager_t *bm = &dev->bm; + struct drm_buffer_manager *bm = &dev->bm; #else - drm_buffer_manager_t *bm = - container_of(work, drm_buffer_manager_t, wq.work); + struct drm_buffer_manager *bm = + container_of(work, struct drm_buffer_manager, wq.work); struct drm_device *dev = container_of(bm, struct drm_device, bm); #endif @@ -493,7 +493,7 @@ static void drm_bo_delayed_workqueue(struct work_struct *work) mutex_unlock(&dev->struct_mutex); } -void drm_bo_usage_deref_locked(drm_buffer_object_t ** bo) +void drm_bo_usage_deref_locked(struct drm_buffer_object ** bo) { struct drm_buffer_object *tmp_bo = *bo; bo = NULL; @@ -507,8 +507,8 @@ void drm_bo_usage_deref_locked(drm_buffer_object_t ** bo) static void drm_bo_base_deref_locked(struct drm_file * priv, struct drm_user_object * uo) { - drm_buffer_object_t *bo = - drm_user_object_entry(uo, drm_buffer_object_t, base); + struct drm_buffer_object *bo = + drm_user_object_entry(uo, struct drm_buffer_object, base); DRM_ASSERT_LOCKED(&bo->dev->struct_mutex); @@ -516,7 +516,7 @@ static void drm_bo_base_deref_locked(struct drm_file * priv, struct drm_user_obj drm_bo_usage_deref_locked(&bo); } -static void drm_bo_usage_deref_unlocked(drm_buffer_object_t ** bo) +static void drm_bo_usage_deref_unlocked(struct drm_buffer_object ** bo) { struct drm_buffer_object *tmp_bo = *bo; struct drm_device *dev = tmp_bo->dev; @@ -538,13 +538,13 @@ static void drm_bo_usage_deref_unlocked(drm_buffer_object_t ** bo) int drm_fence_buffer_objects(struct drm_file * priv, struct list_head *list, uint32_t fence_flags, - drm_fence_object_t * fence, - drm_fence_object_t ** used_fence) + struct drm_fence_object * fence, + struct drm_fence_object ** used_fence) { struct drm_device *dev = priv->head->dev; - drm_buffer_manager_t *bm = &dev->bm; + struct drm_buffer_manager *bm = &dev->bm; - drm_buffer_object_t *entry; + struct drm_buffer_object *entry; uint32_t 
fence_type = 0; int count = 0; int ret = 0; @@ -602,7 +602,7 @@ int drm_fence_buffer_objects(struct drm_file * priv, l = f_list.next; while (l != &f_list) { prefetch(l->next); - entry = list_entry(l, drm_buffer_object_t, lru); + entry = list_entry(l, struct drm_buffer_object, lru); atomic_inc(&entry->usage); mutex_unlock(&dev->struct_mutex); mutex_lock(&entry->mutex); @@ -635,12 +635,12 @@ EXPORT_SYMBOL(drm_fence_buffer_objects); * bo->mutex locked */ -static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, +static int drm_bo_evict(struct drm_buffer_object * bo, unsigned mem_type, int no_wait) { int ret = 0; struct drm_device *dev = bo->dev; - drm_bo_mem_reg_t evict_mem; + struct drm_bo_mem_reg evict_mem; /* * Someone might have modified the buffer before we took the buffer mutex. @@ -706,13 +706,13 @@ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, } static int drm_bo_mem_force_space(struct drm_device * dev, - drm_bo_mem_reg_t * mem, + struct drm_bo_mem_reg * mem, uint32_t mem_type, int no_wait) { struct drm_mm_node *node; - drm_buffer_manager_t *bm = &dev->bm; - drm_buffer_object_t *entry; - drm_mem_type_manager_t *man = &bm->man[mem_type]; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_buffer_object *entry; + struct drm_mem_type_manager *man = &bm->man[mem_type]; struct list_head *lru; unsigned long num_pages = mem->num_pages; int ret; @@ -728,7 +728,7 @@ static int drm_bo_mem_force_space(struct drm_device * dev, if (lru->next == lru) break; - entry = list_entry(lru->next, drm_buffer_object_t, lru); + entry = list_entry(lru->next, struct drm_buffer_object, lru); atomic_inc(&entry->usage); mutex_unlock(&dev->struct_mutex); mutex_lock(&entry->mutex); @@ -754,7 +754,7 @@ static int drm_bo_mem_force_space(struct drm_device * dev, return 0; } -static int drm_bo_mt_compatible(drm_mem_type_manager_t * man, +static int drm_bo_mt_compatible(struct drm_mem_type_manager * man, uint32_t mem_type, uint32_t mask, uint32_t * res_mask) { @@ -791,12 +791,12 @@ static int drm_bo_mt_compatible(drm_mem_type_manager_t * man, return 1; } -int drm_bo_mem_space(drm_buffer_object_t * bo, - drm_bo_mem_reg_t * mem, int no_wait) +int drm_bo_mem_space(struct drm_buffer_object * bo, + struct drm_bo_mem_reg * mem, int no_wait) { struct drm_device *dev = bo->dev; - drm_buffer_manager_t *bm = &dev->bm; - drm_mem_type_manager_t *man; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_mem_type_manager *man; uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio; const uint32_t *prios = dev->driver->bo_driver->mem_type_prio; @@ -883,7 +883,7 @@ int drm_bo_mem_space(drm_buffer_object_t * bo, EXPORT_SYMBOL(drm_bo_mem_space); -static int drm_bo_new_mask(drm_buffer_object_t * bo, +static int drm_bo_new_mask(struct drm_buffer_object * bo, uint64_t new_mask, uint32_t hint) { uint32_t new_props; @@ -921,11 +921,11 @@ static int drm_bo_new_mask(drm_buffer_object_t * bo, * Call dev->struct_mutex locked. 
*/ -drm_buffer_object_t *drm_lookup_buffer_object(struct drm_file * priv, +struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file * priv, uint32_t handle, int check_owner) { struct drm_user_object *uo; - drm_buffer_object_t *bo; + struct drm_buffer_object *bo; uo = drm_lookup_user_object(priv, handle); @@ -939,7 +939,7 @@ drm_buffer_object_t *drm_lookup_buffer_object(struct drm_file * priv, return NULL; } - bo = drm_user_object_entry(uo, drm_buffer_object_t, base); + bo = drm_user_object_entry(uo, struct drm_buffer_object, base); atomic_inc(&bo->usage); return bo; } @@ -950,9 +950,9 @@ drm_buffer_object_t *drm_lookup_buffer_object(struct drm_file * priv, * Doesn't do any fence flushing as opposed to the drm_bo_busy function. */ -static int drm_bo_quick_busy(drm_buffer_object_t * bo) +static int drm_bo_quick_busy(struct drm_buffer_object * bo) { - drm_fence_object_t *fence = bo->fence; + struct drm_fence_object *fence = bo->fence; BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED); if (fence) { @@ -970,9 +970,9 @@ static int drm_bo_quick_busy(drm_buffer_object_t * bo) * Returns 1 if the buffer is currently rendered to or from. 0 otherwise. */ -static int drm_bo_busy(drm_buffer_object_t * bo) +static int drm_bo_busy(struct drm_buffer_object * bo) { - drm_fence_object_t *fence = bo->fence; + struct drm_fence_object *fence = bo->fence; BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED); if (fence) { @@ -990,7 +990,7 @@ static int drm_bo_busy(drm_buffer_object_t * bo) return 0; } -static int drm_bo_read_cached(drm_buffer_object_t * bo) +static int drm_bo_read_cached(struct drm_buffer_object * bo) { int ret = 0; @@ -1004,7 +1004,7 @@ static int drm_bo_read_cached(drm_buffer_object_t * bo) * Wait until a buffer is unmapped. */ -static int drm_bo_wait_unmapped(drm_buffer_object_t * bo, int no_wait) +static int drm_bo_wait_unmapped(struct drm_buffer_object * bo, int no_wait) { int ret = 0; @@ -1020,7 +1020,7 @@ static int drm_bo_wait_unmapped(drm_buffer_object_t * bo, int no_wait) return ret; } -static int drm_bo_check_unfenced(drm_buffer_object_t * bo) +static int drm_bo_check_unfenced(struct drm_buffer_object * bo) { int ret; @@ -1042,7 +1042,7 @@ static int drm_bo_check_unfenced(drm_buffer_object_t * bo) * the buffer "unfenced" after validating, but before fencing. */ -static int drm_bo_wait_unfenced(drm_buffer_object_t * bo, int no_wait, +static int drm_bo_wait_unfenced(struct drm_buffer_object * bo, int no_wait, int eagain_if_wait) { int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED); @@ -1075,7 +1075,7 @@ static int drm_bo_wait_unfenced(drm_buffer_object_t * bo, int no_wait, * Bo locked. 
*/ -static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo, +static void drm_bo_fill_rep_arg(struct drm_buffer_object * bo, struct drm_bo_info_rep *rep) { rep->handle = bo->base.hash.key; @@ -1106,7 +1106,7 @@ static int drm_buffer_object_map(struct drm_file * priv, uint32_t handle, uint32_t map_flags, unsigned hint, struct drm_bo_info_rep *rep) { - drm_buffer_object_t *bo; + struct drm_buffer_object *bo; struct drm_device *dev = priv->head->dev; int ret = 0; int no_wait = hint & DRM_BO_HINT_DONT_BLOCK; @@ -1186,7 +1186,7 @@ static int drm_buffer_object_map(struct drm_file * priv, uint32_t handle, static int drm_buffer_object_unmap(struct drm_file * priv, uint32_t handle) { struct drm_device *dev = priv->head->dev; - drm_buffer_object_t *bo; + struct drm_buffer_object *bo; struct drm_ref_object *ro; int ret = 0; @@ -1219,8 +1219,8 @@ static void drm_buffer_user_object_unmap(struct drm_file * priv, struct drm_user_object * uo, drm_ref_t action) { - drm_buffer_object_t *bo = - drm_user_object_entry(uo, drm_buffer_object_t, base); + struct drm_buffer_object *bo = + drm_user_object_entry(uo, struct drm_buffer_object, base); /* * We DON'T want to take the bo->lock here, because we want to @@ -1238,13 +1238,13 @@ static void drm_buffer_user_object_unmap(struct drm_file * priv, * Note that new_mem_flags are NOT transferred to the bo->mem.mask. */ -int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, +int drm_bo_move_buffer(struct drm_buffer_object * bo, uint32_t new_mem_flags, int no_wait, int move_unfenced) { struct drm_device *dev = bo->dev; - drm_buffer_manager_t *bm = &dev->bm; + struct drm_buffer_manager *bm = &dev->bm; int ret = 0; - drm_bo_mem_reg_t mem; + struct drm_bo_mem_reg mem; /* * Flush outstanding fences. */ @@ -1300,7 +1300,7 @@ int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, return ret; } -static int drm_bo_mem_compat(drm_bo_mem_reg_t * mem) +static int drm_bo_mem_compat(struct drm_bo_mem_reg * mem) { uint32_t flag_diff = (mem->mask ^ mem->flags); @@ -1318,10 +1318,10 @@ static int drm_bo_mem_compat(drm_bo_mem_reg_t * mem) return 1; } -static int drm_bo_check_fake(struct drm_device * dev, drm_bo_mem_reg_t * mem) +static int drm_bo_check_fake(struct drm_device * dev, struct drm_bo_mem_reg * mem) { - drm_buffer_manager_t *bm = &dev->bm; - drm_mem_type_manager_t *man; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_mem_type_manager *man; uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio; const uint32_t *prios = dev->driver->bo_driver->mem_type_prio; uint32_t i; @@ -1360,13 +1360,13 @@ static int drm_bo_check_fake(struct drm_device * dev, drm_bo_mem_reg_t * mem) * bo locked. 
*/ -static int drm_buffer_object_validate(drm_buffer_object_t * bo, +static int drm_buffer_object_validate(struct drm_buffer_object * bo, uint32_t fence_class, int move_unfenced, int no_wait) { struct drm_device *dev = bo->dev; - drm_buffer_manager_t *bm = &dev->bm; - drm_bo_driver_t *driver = dev->driver->bo_driver; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_bo_driver *driver = dev->driver->bo_driver; uint32_t ftype; int ret; @@ -1496,7 +1496,7 @@ static int drm_bo_handle_validate(struct drm_file * priv, struct drm_bo_info_rep *rep) { struct drm_device *dev = priv->head->dev; - drm_buffer_object_t *bo; + struct drm_buffer_object *bo; int ret; int no_wait = hint & DRM_BO_HINT_DONT_BLOCK; @@ -1536,7 +1536,7 @@ static int drm_bo_handle_info(struct drm_file *priv, uint32_t handle, struct drm_bo_info_rep *rep) { struct drm_device *dev = priv->head->dev; - drm_buffer_object_t *bo; + struct drm_buffer_object *bo; mutex_lock(&dev->struct_mutex); bo = drm_lookup_buffer_object(priv, handle, 1); @@ -1559,7 +1559,7 @@ static int drm_bo_handle_wait(struct drm_file *priv, uint32_t handle, struct drm_bo_info_rep *rep) { struct drm_device *dev = priv->head->dev; - drm_buffer_object_t *bo; + struct drm_buffer_object *bo; int no_wait = hint & DRM_BO_HINT_DONT_BLOCK; int ret; @@ -1594,10 +1594,10 @@ int drm_buffer_object_create(struct drm_device *dev, uint32_t hint, uint32_t page_alignment, unsigned long buffer_start, - drm_buffer_object_t ** buf_obj) + struct drm_buffer_object ** buf_obj) { - drm_buffer_manager_t *bm = &dev->bm; - drm_buffer_object_t *bo; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_buffer_object *bo; int ret = 0; unsigned long num_pages; @@ -1672,7 +1672,7 @@ int drm_buffer_object_create(struct drm_device *dev, return ret; } -static int drm_bo_add_user_object(struct drm_file * priv, drm_buffer_object_t * bo, +static int drm_bo_add_user_object(struct drm_file * priv, struct drm_buffer_object * bo, int shareable) { struct drm_device *dev = priv->head->dev; @@ -1769,7 +1769,7 @@ int drm_bo_create_ioctl(DRM_IOCTL_ARGS) struct drm_bo_create_arg arg; struct drm_bo_create_req *req = &arg.d.req; struct drm_bo_info_rep *rep = &arg.d.rep; - drm_buffer_object_t *entry; + struct drm_buffer_object *entry; int ret = 0; if (!dev->bm.initialized) { @@ -1975,16 +1975,16 @@ int drm_bo_wait_idle_ioctl(DRM_IOCTL_ARGS) static void drm_bo_clean_unfenced(struct drm_device *dev) { - drm_buffer_manager_t *bm = &dev->bm; + struct drm_buffer_manager *bm = &dev->bm; struct list_head *head, *list; - drm_buffer_object_t *entry; + struct drm_buffer_object *entry; head = &bm->unfenced; list = head->next; while(list != head) { prefetch(list->next); - entry = list_entry(list, drm_buffer_object_t, lru); + entry = list_entry(list, struct drm_buffer_object, lru); atomic_inc(&entry->usage); mutex_unlock(&dev->struct_mutex); @@ -1999,7 +1999,7 @@ static void drm_bo_clean_unfenced(struct drm_device *dev) } } -static int drm_bo_leave_list(drm_buffer_object_t * bo, +static int drm_bo_leave_list(struct drm_buffer_object * bo, uint32_t mem_type, int free_pinned, int allow_errors) { @@ -2050,13 +2050,13 @@ static int drm_bo_leave_list(drm_buffer_object_t * bo, } -static drm_buffer_object_t *drm_bo_entry(struct list_head *list, +static struct drm_buffer_object *drm_bo_entry(struct list_head *list, int pinned_list) { if (pinned_list) - return list_entry(list, drm_buffer_object_t, pinned_lru); + return list_entry(list, struct drm_buffer_object, pinned_lru); else - return list_entry(list, drm_buffer_object_t, 
lru); + return list_entry(list, struct drm_buffer_object, lru); } /* @@ -2071,7 +2071,7 @@ static int drm_bo_force_list_clean(struct drm_device * dev, int pinned_list) { struct list_head *list, *next, *prev; - drm_buffer_object_t *entry, *nentry; + struct drm_buffer_object *entry, *nentry; int ret; int do_restart; @@ -2130,8 +2130,8 @@ restart: int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type) { - drm_buffer_manager_t *bm = &dev->bm; - drm_mem_type_manager_t *man = &bm->man[mem_type]; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_mem_type_manager *man = &bm->man[mem_type]; int ret = -EINVAL; if (mem_type >= DRM_BO_MEM_TYPES) { @@ -2173,8 +2173,8 @@ int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type) static int drm_bo_lock_mm(struct drm_device * dev, unsigned mem_type) { int ret; - drm_buffer_manager_t *bm = &dev->bm; - drm_mem_type_manager_t *man = &bm->man[mem_type]; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_mem_type_manager *man = &bm->man[mem_type]; if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) { DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type); @@ -2200,9 +2200,9 @@ int drm_bo_init_mm(struct drm_device * dev, unsigned type, unsigned long p_offset, unsigned long p_size) { - drm_buffer_manager_t *bm = &dev->bm; + struct drm_buffer_manager *bm = &dev->bm; int ret = -EINVAL; - drm_mem_type_manager_t *man; + struct drm_mem_type_manager *man; if (type >= DRM_BO_MEM_TYPES) { DRM_ERROR("Illegal memory type %d\n", type); @@ -2247,10 +2247,10 @@ EXPORT_SYMBOL(drm_bo_init_mm); int drm_bo_driver_finish(struct drm_device * dev) { - drm_buffer_manager_t *bm = &dev->bm; + struct drm_buffer_manager *bm = &dev->bm; int ret = 0; unsigned i = DRM_BO_MEM_TYPES; - drm_mem_type_manager_t *man; + struct drm_mem_type_manager *man; mutex_lock(&dev->bm.init_mutex); mutex_lock(&dev->struct_mutex); @@ -2298,8 +2298,8 @@ int drm_bo_driver_finish(struct drm_device * dev) int drm_bo_driver_init(struct drm_device * dev) { - drm_bo_driver_t *driver = dev->driver->bo_driver; - drm_buffer_manager_t *bm = &dev->bm; + struct drm_bo_driver *driver = dev->driver->bo_driver; + struct drm_buffer_manager *bm = &dev->bm; int ret = -EINVAL; mutex_lock(&dev->bm.init_mutex); @@ -2339,8 +2339,8 @@ int drm_mm_init_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; struct drm_mm_init_arg arg; - drm_buffer_manager_t *bm = &dev->bm; - drm_bo_driver_t *driver = dev->driver->bo_driver; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_bo_driver *driver = dev->driver->bo_driver; int ret; if (!driver) { @@ -2396,8 +2396,8 @@ int drm_mm_takedown_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; struct drm_mm_type_arg arg; - drm_buffer_manager_t *bm = &dev->bm; - drm_bo_driver_t *driver = dev->driver->bo_driver; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_bo_driver *driver = dev->driver->bo_driver; int ret; if (!driver) { @@ -2438,7 +2438,7 @@ int drm_mm_lock_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; struct drm_mm_type_arg arg; - drm_bo_driver_t *driver = dev->driver->bo_driver; + struct drm_bo_driver *driver = dev->driver->bo_driver; int ret; if (!driver) { @@ -2465,7 +2465,7 @@ int drm_mm_unlock_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; struct drm_mm_type_arg arg; - drm_bo_driver_t *driver = dev->driver->bo_driver; + struct drm_bo_driver *driver = dev->driver->bo_driver; int ret; if (!driver) { @@ -2492,10 +2492,10 @@ int drm_mm_unlock_ioctl(DRM_IOCTL_ARGS) * buffer object vm functions. 
*/ -int drm_mem_reg_is_pci(struct drm_device * dev, drm_bo_mem_reg_t * mem) +int drm_mem_reg_is_pci(struct drm_device * dev, struct drm_bo_mem_reg * mem) { - drm_buffer_manager_t *bm = &dev->bm; - drm_mem_type_manager_t *man = &bm->man[mem->mem_type]; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_mem_type_manager *man = &bm->man[mem->mem_type]; if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) { if (mem->mem_type == DRM_BO_MEM_LOCAL) @@ -2526,13 +2526,13 @@ EXPORT_SYMBOL(drm_mem_reg_is_pci); * Otherwise returns zero. */ -int drm_bo_pci_offset(struct drm_device * dev, - drm_bo_mem_reg_t * mem, +int drm_bo_pci_offset(struct drm_device *dev, + struct drm_bo_mem_reg *mem, unsigned long *bus_base, unsigned long *bus_offset, unsigned long *bus_size) { - drm_buffer_manager_t *bm = &dev->bm; - drm_mem_type_manager_t *man = &bm->man[mem->mem_type]; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_mem_type_manager *man = &bm->man[mem->mem_type]; *bus_size = 0; if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)) @@ -2555,7 +2555,7 @@ int drm_bo_pci_offset(struct drm_device * dev, * Call bo->mutex locked. */ -void drm_bo_unmap_virtual(drm_buffer_object_t * bo) +void drm_bo_unmap_virtual(struct drm_buffer_object * bo) { struct drm_device *dev = bo->dev; loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT; @@ -2567,7 +2567,7 @@ void drm_bo_unmap_virtual(drm_buffer_object_t * bo) unmap_mapping_range(dev->dev_mapping, offset, holelen, 1); } -static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo) +static void drm_bo_takedown_vm_locked(struct drm_buffer_object * bo) { struct drm_map_list *list = &bo->map_list; drm_local_map_t *map; @@ -2593,7 +2593,7 @@ static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo) drm_bo_usage_deref_locked(&bo); } -static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo) +static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo) { struct drm_map_list *list = &bo->map_list; drm_local_map_t *map; diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c index 1e0d26ce..5e21173c 100644 --- a/linux-core/drm_bo_move.c +++ b/linux-core/drm_bo_move.c @@ -35,9 +35,9 @@ * have not been requested to free also pinned regions. */ -static void drm_bo_free_old_node(drm_buffer_object_t * bo) +static void drm_bo_free_old_node(struct drm_buffer_object * bo) { - drm_bo_mem_reg_t *old_mem = &bo->mem; + struct drm_bo_mem_reg *old_mem = &bo->mem; if (old_mem->mm_node && (old_mem->mm_node != bo->pinned_node)) { mutex_lock(&bo->dev->struct_mutex); @@ -48,11 +48,11 @@ static void drm_bo_free_old_node(drm_buffer_object_t * bo) old_mem->mm_node = NULL; } -int drm_bo_move_ttm(drm_buffer_object_t * bo, - int evict, int no_wait, drm_bo_mem_reg_t * new_mem) +int drm_bo_move_ttm(struct drm_buffer_object * bo, + int evict, int no_wait, struct drm_bo_mem_reg * new_mem) { - drm_ttm_t *ttm = bo->ttm; - drm_bo_mem_reg_t *old_mem = &bo->mem; + struct drm_ttm *ttm = bo->ttm; + struct drm_bo_mem_reg *old_mem = &bo->mem; uint32_t save_flags = old_mem->flags; uint32_t save_mask = old_mem->mask; int ret; @@ -102,11 +102,11 @@ EXPORT_SYMBOL(drm_bo_move_ttm); * Call bo->mutex locked. 
*/ -int drm_mem_reg_ioremap(struct drm_device * dev, drm_bo_mem_reg_t * mem, +int drm_mem_reg_ioremap(struct drm_device * dev, struct drm_bo_mem_reg * mem, void **virtual) { - drm_buffer_manager_t *bm = &dev->bm; - drm_mem_type_manager_t *man = &bm->man[mem->mem_type]; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_mem_type_manager *man = &bm->man[mem->mem_type]; unsigned long bus_offset; unsigned long bus_size; unsigned long bus_base; @@ -137,11 +137,11 @@ int drm_mem_reg_ioremap(struct drm_device * dev, drm_bo_mem_reg_t * mem, * Call bo->mutex locked. */ -void drm_mem_reg_iounmap(struct drm_device * dev, drm_bo_mem_reg_t * mem, +void drm_mem_reg_iounmap(struct drm_device * dev, struct drm_bo_mem_reg * mem, void *virtual) { - drm_buffer_manager_t *bm; - drm_mem_type_manager_t *man; + struct drm_buffer_manager *bm; + struct drm_mem_type_manager *man; bm = &dev->bm; man = &bm->man[mem->mem_type]; @@ -164,7 +164,7 @@ static int drm_copy_io_page(void *dst, void *src, unsigned long page) return 0; } -static int drm_copy_io_ttm_page(drm_ttm_t * ttm, void *src, unsigned long page) +static int drm_copy_io_ttm_page(struct drm_ttm * ttm, void *src, unsigned long page) { struct page *d = drm_ttm_get_page(ttm, page); void *dst; @@ -182,7 +182,7 @@ static int drm_copy_io_ttm_page(drm_ttm_t * ttm, void *src, unsigned long page) return 0; } -static int drm_copy_ttm_io_page(drm_ttm_t * ttm, void *dst, unsigned long page) +static int drm_copy_ttm_io_page(struct drm_ttm * ttm, void *dst, unsigned long page) { struct page *s = drm_ttm_get_page(ttm, page); void *src; @@ -200,14 +200,14 @@ static int drm_copy_ttm_io_page(drm_ttm_t * ttm, void *dst, unsigned long page) return 0; } -int drm_bo_move_memcpy(drm_buffer_object_t * bo, - int evict, int no_wait, drm_bo_mem_reg_t * new_mem) +int drm_bo_move_memcpy(struct drm_buffer_object * bo, + int evict, int no_wait, struct drm_bo_mem_reg * new_mem) { struct drm_device *dev = bo->dev; - drm_mem_type_manager_t *man = &dev->bm.man[new_mem->mem_type]; - drm_ttm_t *ttm = bo->ttm; - drm_bo_mem_reg_t *old_mem = &bo->mem; - drm_bo_mem_reg_t old_copy = *old_mem; + struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type]; + struct drm_ttm *ttm = bo->ttm; + struct drm_bo_mem_reg *old_mem = &bo->mem; + struct drm_bo_mem_reg old_copy = *old_mem; void *old_iomap; void *new_iomap; int ret; @@ -281,12 +281,12 @@ EXPORT_SYMBOL(drm_bo_move_memcpy); * object. Call bo->mutex locked. */ -int drm_buffer_object_transfer(drm_buffer_object_t * bo, - drm_buffer_object_t ** new_obj) +int drm_buffer_object_transfer(struct drm_buffer_object * bo, + struct drm_buffer_object ** new_obj) { - drm_buffer_object_t *fbo; + struct drm_buffer_object *fbo; struct drm_device *dev = bo->dev; - drm_buffer_manager_t *bm = &dev->bm; + struct drm_buffer_manager *bm = &dev->bm; fbo = drm_ctl_calloc(1, sizeof(*fbo), DRM_MEM_BUFOBJ); if (!fbo) @@ -323,20 +323,20 @@ int drm_buffer_object_transfer(drm_buffer_object_t * bo, * We cannot restart until it has finished. 
*/ -int drm_bo_move_accel_cleanup(drm_buffer_object_t * bo, +int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo, int evict, int no_wait, uint32_t fence_class, uint32_t fence_type, - uint32_t fence_flags, drm_bo_mem_reg_t * new_mem) + uint32_t fence_flags, struct drm_bo_mem_reg * new_mem) { struct drm_device *dev = bo->dev; - drm_mem_type_manager_t *man = &dev->bm.man[new_mem->mem_type]; - drm_bo_mem_reg_t *old_mem = &bo->mem; + struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type]; + struct drm_bo_mem_reg *old_mem = &bo->mem; int ret; uint32_t save_flags = old_mem->flags; uint32_t save_mask = old_mem->mask; - drm_buffer_object_t *old_obj; + struct drm_buffer_object *old_obj; if (bo->fence) drm_fence_usage_deref_unlocked(&bo->fence); diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c index 5d1d62fa..38ca497f 100644 --- a/linux-core/drm_compat.c +++ b/linux-core/drm_compat.c @@ -201,7 +201,7 @@ static struct page *drm_bo_vm_fault(struct vm_area_struct *vma, struct fault_data *data) { unsigned long address = data->address; - drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data; + struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data; unsigned long page_offset; struct page *page = NULL; drm_ttm_t *ttm; @@ -351,7 +351,7 @@ struct page *drm_bo_vm_nopage(struct vm_area_struct *vma, unsigned long address, int *type) { - drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data; + struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data; unsigned long page_offset; struct page *page; drm_ttm_t *ttm; @@ -395,7 +395,7 @@ out_unlock: int drm_bo_map_bound(struct vm_area_struct *vma) { - drm_buffer_object_t *bo = (drm_buffer_object_t *)vma->vm_private_data; + struct drm_buffer_object *bo = (struct drm_buffer_object *)vma->vm_private_data; int ret = 0; unsigned long bus_base; unsigned long bus_offset; @@ -418,7 +418,7 @@ int drm_bo_map_bound(struct vm_area_struct *vma) } -int drm_bo_add_vma(drm_buffer_object_t * bo, struct vm_area_struct *vma) +int drm_bo_add_vma(struct drm_buffer_object * bo, struct vm_area_struct *vma) { p_mm_entry_t *entry, *n_entry; vma_entry_t *v_entry; @@ -454,7 +454,7 @@ int drm_bo_add_vma(drm_buffer_object_t * bo, struct vm_area_struct *vma) return 0; } -void drm_bo_delete_vma(drm_buffer_object_t * bo, struct vm_area_struct *vma) +void drm_bo_delete_vma(struct drm_buffer_object * bo, struct vm_area_struct *vma) { p_mm_entry_t *entry, *n; vma_entry_t *v_entry, *v_n; @@ -486,7 +486,7 @@ void drm_bo_delete_vma(drm_buffer_object_t * bo, struct vm_area_struct *vma) -int drm_bo_lock_kmm(drm_buffer_object_t * bo) +int drm_bo_lock_kmm(struct drm_buffer_object * bo) { p_mm_entry_t *entry; int lock_ok = 1; @@ -518,7 +518,7 @@ int drm_bo_lock_kmm(drm_buffer_object_t * bo) return -EAGAIN; } -void drm_bo_unlock_kmm(drm_buffer_object_t * bo) +void drm_bo_unlock_kmm(struct drm_buffer_object * bo) { p_mm_entry_t *entry; @@ -529,7 +529,7 @@ void drm_bo_unlock_kmm(drm_buffer_object_t * bo) } } -int drm_bo_remap_bound(drm_buffer_object_t *bo) +int drm_bo_remap_bound(struct drm_buffer_object *bo) { vma_entry_t *v_entry; int ret = 0; @@ -545,7 +545,7 @@ int drm_bo_remap_bound(drm_buffer_object_t *bo) return ret; } -void drm_bo_finish_unmap(drm_buffer_object_t *bo) +void drm_bo_finish_unmap(struct drm_buffer_object *bo) { vma_entry_t *v_entry; diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c index f925621a..9b2fa405 100644 --- a/linux-core/drm_fence.c +++ 
b/linux-core/drm_fence.c @@ -40,11 +40,11 @@ void drm_fence_handler(struct drm_device * dev, uint32_t class, int wake = 0; uint32_t diff; uint32_t relevant; - drm_fence_manager_t *fm = &dev->fm; - drm_fence_class_manager_t *fc = &fm->class[class]; - drm_fence_driver_t *driver = dev->driver->fence_driver; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_class_manager *fc = &fm->class[class]; + struct drm_fence_driver *driver = dev->driver->fence_driver; struct list_head *head; - drm_fence_object_t *fence, *next; + struct drm_fence_object *fence, *next; int found = 0; int is_exe = (type & DRM_FENCE_TYPE_EXE); int ge_last_exe; @@ -116,7 +116,7 @@ EXPORT_SYMBOL(drm_fence_handler); static void drm_fence_unring(struct drm_device * dev, struct list_head *ring) { - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; unsigned long flags; write_lock_irqsave(&fm->lock, flags); @@ -124,11 +124,11 @@ static void drm_fence_unring(struct drm_device * dev, struct list_head *ring) write_unlock_irqrestore(&fm->lock, flags); } -void drm_fence_usage_deref_locked(drm_fence_object_t ** fence) +void drm_fence_usage_deref_locked(struct drm_fence_object ** fence) { struct drm_fence_object *tmp_fence = *fence; struct drm_device *dev = tmp_fence->dev; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; DRM_ASSERT_LOCKED(&dev->struct_mutex); *fence = NULL; @@ -142,11 +142,11 @@ void drm_fence_usage_deref_locked(drm_fence_object_t ** fence) } } -void drm_fence_usage_deref_unlocked(drm_fence_object_t ** fence) +void drm_fence_usage_deref_unlocked(struct drm_fence_object ** fence) { struct drm_fence_object *tmp_fence = *fence; struct drm_device *dev = tmp_fence->dev; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; *fence = NULL; if (atomic_dec_and_test(&tmp_fence->usage)) { @@ -182,20 +182,20 @@ void drm_fence_reference_unlocked(struct drm_fence_object **dst, static void drm_fence_object_destroy(struct drm_file *priv, struct drm_user_object * base) { - drm_fence_object_t *fence = - drm_user_object_entry(base, drm_fence_object_t, base); + struct drm_fence_object *fence = + drm_user_object_entry(base, struct drm_fence_object, base); drm_fence_usage_deref_locked(&fence); } -int drm_fence_object_signaled(drm_fence_object_t * fence, +int drm_fence_object_signaled(struct drm_fence_object * fence, uint32_t mask, int poke_flush) { unsigned long flags; int signaled; struct drm_device *dev = fence->dev; - drm_fence_manager_t *fm = &dev->fm; - drm_fence_driver_t *driver = dev->driver->fence_driver; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_driver *driver = dev->driver->fence_driver; if (poke_flush) driver->poke_flush(dev, fence->class); @@ -207,8 +207,8 @@ int drm_fence_object_signaled(drm_fence_object_t * fence, return signaled; } -static void drm_fence_flush_exe(drm_fence_class_manager_t * fc, - drm_fence_driver_t * driver, uint32_t sequence) +static void drm_fence_flush_exe(struct drm_fence_class_manager * fc, + struct drm_fence_driver * driver, uint32_t sequence) { uint32_t diff; @@ -224,13 +224,13 @@ static void drm_fence_flush_exe(drm_fence_class_manager_t * fc, } } -int drm_fence_object_flush(drm_fence_object_t * fence, +int drm_fence_object_flush(struct drm_fence_object * fence, uint32_t type) { struct drm_device *dev = fence->dev; - drm_fence_manager_t *fm = &dev->fm; - drm_fence_class_manager_t *fc = &fm->class[fence->class]; - drm_fence_driver_t *driver = dev->driver->fence_driver; + struct 
drm_fence_manager *fm = &dev->fm; + struct drm_fence_class_manager *fc = &fm->class[fence->class]; + struct drm_fence_driver *driver = dev->driver->fence_driver; unsigned long flags; if (type & ~fence->type) { @@ -264,12 +264,12 @@ int drm_fence_object_flush(drm_fence_object_t * fence, void drm_fence_flush_old(struct drm_device * dev, uint32_t class, uint32_t sequence) { - drm_fence_manager_t *fm = &dev->fm; - drm_fence_class_manager_t *fc = &fm->class[class]; - drm_fence_driver_t *driver = dev->driver->fence_driver; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_class_manager *fc = &fm->class[class]; + struct drm_fence_driver *driver = dev->driver->fence_driver; uint32_t old_sequence; unsigned long flags; - drm_fence_object_t *fence; + struct drm_fence_object *fence; uint32_t diff; write_lock_irqsave(&fm->lock, flags); @@ -290,7 +290,7 @@ void drm_fence_flush_old(struct drm_device * dev, uint32_t class, uint32_t seque mutex_unlock(&dev->struct_mutex); return; } - fence = drm_fence_reference_locked(list_entry(fc->ring.next, drm_fence_object_t, ring)); + fence = drm_fence_reference_locked(list_entry(fc->ring.next, struct drm_fence_object, ring)); mutex_unlock(&dev->struct_mutex); diff = (old_sequence - fence->sequence) & driver->sequence_mask; read_unlock_irqrestore(&fm->lock, flags); @@ -302,13 +302,13 @@ void drm_fence_flush_old(struct drm_device * dev, uint32_t class, uint32_t seque EXPORT_SYMBOL(drm_fence_flush_old); -static int drm_fence_lazy_wait(drm_fence_object_t *fence, +static int drm_fence_lazy_wait(struct drm_fence_object *fence, int ignore_signals, uint32_t mask) { struct drm_device *dev = fence->dev; - drm_fence_manager_t *fm = &dev->fm; - drm_fence_class_manager_t *fc = &fm->class[fence->class]; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_class_manager *fc = &fm->class[fence->class]; int signaled; unsigned long _end = jiffies + 3*DRM_HZ; int ret = 0; @@ -336,11 +336,11 @@ static int drm_fence_lazy_wait(drm_fence_object_t *fence, return 0; } -int drm_fence_object_wait(drm_fence_object_t * fence, +int drm_fence_object_wait(struct drm_fence_object * fence, int lazy, int ignore_signals, uint32_t mask) { struct drm_device *dev = fence->dev; - drm_fence_driver_t *driver = dev->driver->fence_driver; + struct drm_fence_driver *driver = dev->driver->fence_driver; int ret = 0; unsigned long _end; int signaled; @@ -403,13 +403,13 @@ int drm_fence_object_wait(drm_fence_object_t * fence, return 0; } -int drm_fence_object_emit(drm_fence_object_t * fence, +int drm_fence_object_emit(struct drm_fence_object * fence, uint32_t fence_flags, uint32_t class, uint32_t type) { struct drm_device *dev = fence->dev; - drm_fence_manager_t *fm = &dev->fm; - drm_fence_driver_t *driver = dev->driver->fence_driver; - drm_fence_class_manager_t *fc = &fm->class[fence->class]; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_driver *driver = dev->driver->fence_driver; + struct drm_fence_class_manager *fc = &fm->class[fence->class]; unsigned long flags; uint32_t sequence; uint32_t native_type; @@ -438,11 +438,11 @@ int drm_fence_object_emit(drm_fence_object_t * fence, static int drm_fence_object_init(struct drm_device * dev, uint32_t class, uint32_t type, uint32_t fence_flags, - drm_fence_object_t * fence) + struct drm_fence_object * fence) { int ret = 0; unsigned long flags; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; mutex_lock(&dev->struct_mutex); atomic_set(&fence->usage, 1); @@ -471,7 +471,7 @@ static int 
drm_fence_object_init(struct drm_device * dev, uint32_t class, return ret; } -int drm_fence_add_user_object(struct drm_file * priv, drm_fence_object_t * fence, +int drm_fence_add_user_object(struct drm_file * priv, struct drm_fence_object * fence, int shareable) { struct drm_device *dev = priv->head->dev; @@ -492,11 +492,11 @@ out: EXPORT_SYMBOL(drm_fence_add_user_object); int drm_fence_object_create(struct drm_device * dev, uint32_t class, uint32_t type, - unsigned flags, drm_fence_object_t ** c_fence) + unsigned flags, struct drm_fence_object ** c_fence) { - drm_fence_object_t *fence; + struct drm_fence_object *fence; int ret; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; fence = drm_ctl_calloc(1, sizeof(*fence), DRM_MEM_FENCE); if (!fence) @@ -516,9 +516,9 @@ EXPORT_SYMBOL(drm_fence_object_create); void drm_fence_manager_init(struct drm_device * dev) { - drm_fence_manager_t *fm = &dev->fm; - drm_fence_class_manager_t *class; - drm_fence_driver_t *fed = dev->driver->fence_driver; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_class_manager *class; + struct drm_fence_driver *fed = dev->driver->fence_driver; int i; rwlock_init(&fm->lock); @@ -548,11 +548,11 @@ void drm_fence_manager_takedown(struct drm_device * dev) { } -drm_fence_object_t *drm_lookup_fence_object(struct drm_file * priv, uint32_t handle) +struct drm_fence_object *drm_lookup_fence_object(struct drm_file * priv, uint32_t handle) { struct drm_device *dev = priv->head->dev; struct drm_user_object *uo; - drm_fence_object_t *fence; + struct drm_fence_object *fence; mutex_lock(&dev->struct_mutex); uo = drm_lookup_user_object(priv, handle); @@ -560,7 +560,7 @@ drm_fence_object_t *drm_lookup_fence_object(struct drm_file * priv, uint32_t han mutex_unlock(&dev->struct_mutex); return NULL; } - fence = drm_fence_reference_locked(drm_user_object_entry(uo, drm_fence_object_t, base)); + fence = drm_fence_reference_locked(drm_user_object_entry(uo, struct drm_fence_object, base)); mutex_unlock(&dev->struct_mutex); return fence; } @@ -569,9 +569,9 @@ int drm_fence_create_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; int ret; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; struct drm_fence_arg arg; - drm_fence_object_t *fence; + struct drm_fence_object *fence; unsigned long flags; ret = 0; @@ -617,7 +617,7 @@ int drm_fence_destroy_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; int ret; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; struct drm_fence_arg arg; struct drm_user_object *uo; ret = 0; @@ -645,9 +645,9 @@ int drm_fence_reference_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; int ret; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; struct drm_fence_arg arg; - drm_fence_object_t *fence; + struct drm_fence_object *fence; struct drm_user_object *uo; unsigned long flags; ret = 0; @@ -679,7 +679,7 @@ int drm_fence_unreference_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; int ret; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; struct drm_fence_arg arg; ret = 0; @@ -696,9 +696,9 @@ int drm_fence_signaled_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; int ret; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; struct drm_fence_arg arg; - drm_fence_object_t *fence; + struct drm_fence_object *fence; unsigned long flags; ret = 0; @@ -728,9 +728,9 @@ int drm_fence_flush_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; int ret; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = 
&dev->fm; struct drm_fence_arg arg; - drm_fence_object_t *fence; + struct drm_fence_object *fence; unsigned long flags; ret = 0; @@ -762,9 +762,9 @@ int drm_fence_wait_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; int ret; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; struct drm_fence_arg arg; - drm_fence_object_t *fence; + struct drm_fence_object *fence; unsigned long flags; ret = 0; @@ -798,9 +798,9 @@ int drm_fence_emit_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; int ret; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; struct drm_fence_arg arg; - drm_fence_object_t *fence; + struct drm_fence_object *fence; unsigned long flags; ret = 0; @@ -833,9 +833,9 @@ int drm_fence_buffers_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; int ret; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; struct drm_fence_arg arg; - drm_fence_object_t *fence; + struct drm_fence_object *fence; unsigned long flags; ret = 0; diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index c4428a7b..441c19f2 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -139,7 +139,7 @@ extern int drm_user_object_unref(struct drm_file * priv, uint32_t user_token, * Fence objects. (drm_fence.c) */ -typedef struct drm_fence_object { +struct drm_fence_object { struct drm_user_object base; struct drm_device *dev; atomic_t usage; @@ -156,29 +156,29 @@ typedef struct drm_fence_object { uint32_t sequence; uint32_t flush_mask; uint32_t submitted_flush; -} drm_fence_object_t; +}; #define _DRM_FENCE_CLASSES 8 #define _DRM_FENCE_TYPE_EXE 0x00 -typedef struct drm_fence_class_manager { +struct drm_fence_class_manager { struct list_head ring; uint32_t pending_flush; wait_queue_head_t fence_queue; int pending_exe_flush; uint32_t last_exe_flush; uint32_t exe_flush_sequence; -} drm_fence_class_manager_t; +}; -typedef struct drm_fence_manager { +struct drm_fence_manager { int initialized; rwlock_t lock; - drm_fence_class_manager_t class[_DRM_FENCE_CLASSES]; + struct drm_fence_class_manager class[_DRM_FENCE_CLASSES]; uint32_t num_classes; atomic_t count; -} drm_fence_manager_t; +}; -typedef struct drm_fence_driver { +struct drm_fence_driver { uint32_t num_classes; uint32_t wrap_diff; uint32_t flush_diff; @@ -189,7 +189,7 @@ typedef struct drm_fence_driver { int (*emit) (struct drm_device * dev, uint32_t class, uint32_t flags, uint32_t * breadcrumb, uint32_t * native_type); void (*poke_flush) (struct drm_device * dev, uint32_t class); -} drm_fence_driver_t; +}; extern void drm_fence_handler(struct drm_device *dev, uint32_t class, uint32_t sequence, uint32_t type); @@ -197,21 +197,21 @@ extern void drm_fence_manager_init(struct drm_device *dev); extern void drm_fence_manager_takedown(struct drm_device *dev); extern void drm_fence_flush_old(struct drm_device *dev, uint32_t class, uint32_t sequence); -extern int drm_fence_object_flush(drm_fence_object_t * fence, uint32_t type); -extern int drm_fence_object_signaled(drm_fence_object_t * fence, +extern int drm_fence_object_flush(struct drm_fence_object * fence, uint32_t type); +extern int drm_fence_object_signaled(struct drm_fence_object * fence, uint32_t type, int flush); -extern void drm_fence_usage_deref_locked(drm_fence_object_t ** fence); -extern void drm_fence_usage_deref_unlocked(drm_fence_object_t ** fence); +extern void drm_fence_usage_deref_locked(struct drm_fence_object ** fence); +extern void drm_fence_usage_deref_unlocked(struct drm_fence_object ** fence); extern struct drm_fence_object 
*drm_fence_reference_locked(struct drm_fence_object *src); extern void drm_fence_reference_unlocked(struct drm_fence_object **dst, struct drm_fence_object *src); -extern int drm_fence_object_wait(drm_fence_object_t * fence, +extern int drm_fence_object_wait(struct drm_fence_object * fence, int lazy, int ignore_signals, uint32_t mask); extern int drm_fence_object_create(struct drm_device *dev, uint32_t type, uint32_t fence_flags, uint32_t class, - drm_fence_object_t ** c_fence); + struct drm_fence_object ** c_fence); extern int drm_fence_add_user_object(struct drm_file * priv, - drm_fence_object_t * fence, int shareable); + struct drm_fence_object * fence, int shareable); extern int drm_fence_create_ioctl(DRM_IOCTL_ARGS); extern int drm_fence_destroy_ioctl(DRM_IOCTL_ARGS); @@ -243,7 +243,7 @@ extern int drm_fence_buffers_ioctl(DRM_IOCTL_ARGS); #define DRM_BE_FLAG_BOUND_CACHED 0x00000002 struct drm_ttm_backend; -typedef struct drm_ttm_backend_func { +struct drm_ttm_backend_func { int (*needs_ub_cache_adjust) (struct drm_ttm_backend * backend); int (*populate) (struct drm_ttm_backend * backend, unsigned long num_pages, struct page ** pages); @@ -252,16 +252,16 @@ typedef struct drm_ttm_backend_func { unsigned long offset, int cached); int (*unbind) (struct drm_ttm_backend * backend); void (*destroy) (struct drm_ttm_backend * backend); -} drm_ttm_backend_func_t; +}; -typedef struct drm_ttm_backend { +struct drm_ttm_backend { uint32_t flags; int mem_type; - drm_ttm_backend_func_t *func; -} drm_ttm_backend_t; + struct drm_ttm_backend_func *func; +}; -typedef struct drm_ttm { +struct drm_ttm { struct page **pages; uint32_t page_flags; unsigned long num_pages; @@ -270,7 +270,7 @@ typedef struct drm_ttm { struct drm_device *dev; int destroy; uint32_t mapping_offset; - drm_ttm_backend_t *be; + struct drm_ttm_backend *be; enum { ttm_bound, ttm_evicted, @@ -278,14 +278,14 @@ typedef struct drm_ttm { ttm_unpopulated, } state; -} drm_ttm_t; +}; -extern drm_ttm_t *drm_ttm_init(struct drm_device *dev, unsigned long size); -extern int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset); -extern void drm_ttm_unbind(drm_ttm_t * ttm); -extern void drm_ttm_evict(drm_ttm_t * ttm); -extern void drm_ttm_fixup_caching(drm_ttm_t * ttm); -extern struct page *drm_ttm_get_page(drm_ttm_t * ttm, int index); +extern struct drm_ttm *drm_ttm_init(struct drm_device *dev, unsigned long size); +extern int drm_bind_ttm(struct drm_ttm * ttm, int cached, unsigned long aper_offset); +extern void drm_ttm_unbind(struct drm_ttm * ttm); +extern void drm_ttm_evict(struct drm_ttm * ttm); +extern void drm_ttm_fixup_caching(struct drm_ttm * ttm); +extern struct page *drm_ttm_get_page(struct drm_ttm * ttm, int index); /* * Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do this, @@ -293,7 +293,7 @@ extern struct page *drm_ttm_get_page(drm_ttm_t * ttm, int index); * when the last vma exits. */ -extern int drm_destroy_ttm(drm_ttm_t * ttm); +extern int drm_destroy_ttm(struct drm_ttm * ttm); #define DRM_FLAG_MASKED(_old, _new, _mask) {\ (_old) ^= (((_old) ^ (_new)) & (_mask)); \ @@ -316,7 +316,7 @@ extern int drm_destroy_ttm(drm_ttm_t * ttm); * Buffer objects. 
(drm_bo.c, drm_bo_move.c) */ -typedef struct drm_bo_mem_reg { +struct drm_bo_mem_reg { struct drm_mm_node *mm_node; unsigned long size; unsigned long num_pages; @@ -324,9 +324,9 @@ typedef struct drm_bo_mem_reg { uint32_t mem_type; uint64_t flags; uint64_t mask; -} drm_bo_mem_reg_t; +}; -typedef struct drm_buffer_object { +struct drm_buffer_object { struct drm_device *dev; struct drm_user_object base; @@ -340,14 +340,14 @@ typedef struct drm_buffer_object { enum drm_bo_type type; unsigned long offset; atomic_t mapped; - drm_bo_mem_reg_t mem; + struct drm_bo_mem_reg mem; struct list_head lru; struct list_head ddestroy; uint32_t fence_type; uint32_t fence_class; - drm_fence_object_t *fence; + struct drm_fence_object *fence; uint32_t priv_flags; wait_queue_head_t event_queue; struct mutex mutex; @@ -359,7 +359,7 @@ typedef struct drm_buffer_object { /* For vm */ - drm_ttm_t *ttm; + struct drm_ttm *ttm; struct drm_map_list map_list; uint32_t memory_type; unsigned long bus_offset; @@ -372,12 +372,12 @@ typedef struct drm_buffer_object { struct list_head p_mm_list; #endif -} drm_buffer_object_t; +}; #define _DRM_BO_FLAG_UNFENCED 0x00000001 #define _DRM_BO_FLAG_EVICTED 0x00000002 -typedef struct drm_mem_type_manager { +struct drm_mem_type_manager { int has_type; int use_type; struct drm_mm manager; @@ -388,7 +388,7 @@ typedef struct drm_mem_type_manager { unsigned long io_offset; unsigned long io_size; void *io_addr; -} drm_mem_type_manager_t; +}; #define _DRM_FLAG_MEMTYPE_FIXED 0x00000001 /* Fixed (on-card) PCI memory */ #define _DRM_FLAG_MEMTYPE_MAPPABLE 0x00000002 /* Memory mappable */ @@ -398,13 +398,13 @@ typedef struct drm_mem_type_manager { #define _DRM_FLAG_MEMTYPE_CMA 0x00000010 /* Can't map aperture */ #define _DRM_FLAG_MEMTYPE_CSELECT 0x00000020 /* Select caching */ -typedef struct drm_buffer_manager { +struct drm_buffer_manager { struct mutex init_mutex; struct mutex evict_mutex; int nice_mode; int initialized; struct drm_file *last_to_validate; - drm_mem_type_manager_t man[DRM_BO_MEM_TYPES]; + struct drm_mem_type_manager man[DRM_BO_MEM_TYPES]; struct list_head unfenced; struct list_head ddestroy; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) @@ -415,23 +415,23 @@ typedef struct drm_buffer_manager { uint32_t fence_type; unsigned long cur_pages; atomic_t count; -} drm_buffer_manager_t; +}; -typedef struct drm_bo_driver { +struct drm_bo_driver { const uint32_t *mem_type_prio; const uint32_t *mem_busy_prio; uint32_t num_mem_type_prio; uint32_t num_mem_busy_prio; - drm_ttm_backend_t *(*create_ttm_backend_entry) + struct drm_ttm_backend *(*create_ttm_backend_entry) (struct drm_device * dev); int (*fence_type) (struct drm_buffer_object *bo, uint32_t * type); int (*invalidate_caches) (struct drm_device * dev, uint64_t flags); int (*init_mem_type) (struct drm_device * dev, uint32_t type, - drm_mem_type_manager_t * man); + struct drm_mem_type_manager * man); uint32_t(*evict_mask) (struct drm_buffer_object *bo); int (*move) (struct drm_buffer_object * bo, int evict, int no_wait, struct drm_bo_mem_reg * new_mem); -} drm_bo_driver_t; +}; /* * buffer objects (drm_bo.c) @@ -455,24 +455,24 @@ extern int drm_mm_unlock_ioctl(DRM_IOCTL_ARGS); extern int drm_bo_driver_finish(struct drm_device *dev); extern int drm_bo_driver_init(struct drm_device *dev); extern int drm_bo_pci_offset(struct drm_device *dev, - drm_bo_mem_reg_t * mem, + struct drm_bo_mem_reg * mem, unsigned long *bus_base, unsigned long *bus_offset, unsigned long *bus_size); -extern int drm_mem_reg_is_pci(struct drm_device *dev, 
drm_bo_mem_reg_t * mem); +extern int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg * mem); -extern void drm_bo_usage_deref_locked(drm_buffer_object_t ** bo); +extern void drm_bo_usage_deref_locked(struct drm_buffer_object ** bo); extern int drm_fence_buffer_objects(struct drm_file * priv, struct list_head *list, uint32_t fence_flags, - drm_fence_object_t * fence, - drm_fence_object_t ** used_fence); -extern void drm_bo_add_to_lru(drm_buffer_object_t * bo); -extern int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals, + struct drm_fence_object * fence, + struct drm_fence_object ** used_fence); +extern void drm_bo_add_to_lru(struct drm_buffer_object * bo); +extern int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals, int no_wait); -extern int drm_bo_mem_space(drm_buffer_object_t * bo, - drm_bo_mem_reg_t * mem, int no_wait); -extern int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, +extern int drm_bo_mem_space(struct drm_buffer_object * bo, + struct drm_bo_mem_reg * mem, int no_wait); +extern int drm_bo_move_buffer(struct drm_buffer_object * bo, uint32_t new_mem_flags, int no_wait, int move_unfenced); /* @@ -480,18 +480,18 @@ extern int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, * drm_bo_move.c */ -extern int drm_bo_move_ttm(drm_buffer_object_t * bo, - int evict, int no_wait, drm_bo_mem_reg_t * new_mem); -extern int drm_bo_move_memcpy(drm_buffer_object_t * bo, +extern int drm_bo_move_ttm(struct drm_buffer_object * bo, + int evict, int no_wait, struct drm_bo_mem_reg * new_mem); +extern int drm_bo_move_memcpy(struct drm_buffer_object * bo, int evict, - int no_wait, drm_bo_mem_reg_t * new_mem); -extern int drm_bo_move_accel_cleanup(drm_buffer_object_t * bo, + int no_wait, struct drm_bo_mem_reg * new_mem); +extern int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo, int evict, int no_wait, uint32_t fence_class, uint32_t fence_type, uint32_t fence_flags, - drm_bo_mem_reg_t * new_mem); + struct drm_bo_mem_reg * new_mem); #ifdef CONFIG_DEBUG_MUTEXES #define DRM_ASSERT_LOCKED(_mutex) \ diff --git a/linux-core/drm_proc.c b/linux-core/drm_proc.c index f33bd93d..3f9cb028 100644 --- a/linux-core/drm_proc.c +++ b/linux-core/drm_proc.c @@ -434,8 +434,8 @@ static int drm__objects_info(char *buf, char **start, off_t offset, int request, { struct drm_device *dev = (struct drm_device *) data; int len = 0; - drm_buffer_manager_t *bm = &dev->bm; - drm_fence_manager_t *fm = &dev->fm; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_fence_manager *fm = &dev->fm; drm_u64_t used_mem; drm_u64_t low_mem; drm_u64_t high_mem; diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c index 31503c9c..60c64cba 100644 --- a/linux-core/drm_ttm.c +++ b/linux-core/drm_ttm.c @@ -45,7 +45,7 @@ static void drm_ttm_cache_flush(void) * Use kmalloc if possible. Otherwise fall back to vmalloc. */ -static void ttm_alloc_pages(drm_ttm_t * ttm) +static void ttm_alloc_pages(struct drm_ttm * ttm) { unsigned long size = ttm->num_pages * sizeof(*ttm->pages); ttm->pages = NULL; @@ -66,7 +66,7 @@ static void ttm_alloc_pages(drm_ttm_t * ttm) } } -static void ttm_free_pages(drm_ttm_t * ttm) +static void ttm_free_pages(struct drm_ttm * ttm) { unsigned long size = ttm->num_pages * sizeof(*ttm->pages); @@ -105,7 +105,7 @@ static struct page *drm_ttm_alloc_page(void) * for range of pages in a ttm. 
*/ -static int drm_set_caching(drm_ttm_t * ttm, int noncached) +static int drm_set_caching(struct drm_ttm * ttm, int noncached) { int i; struct page **cur_page; @@ -142,12 +142,12 @@ static int drm_set_caching(drm_ttm_t * ttm, int noncached) * Free all resources associated with a ttm. */ -int drm_destroy_ttm(drm_ttm_t * ttm) +int drm_destroy_ttm(struct drm_ttm * ttm) { int i; struct page **cur_page; - drm_ttm_backend_t *be; + struct drm_ttm_backend *be; if (!ttm) return 0; @@ -159,7 +159,7 @@ int drm_destroy_ttm(drm_ttm_t * ttm) } if (ttm->pages) { - drm_buffer_manager_t *bm = &ttm->dev->bm; + struct drm_buffer_manager *bm = &ttm->dev->bm; if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED) drm_set_caching(ttm, 0); @@ -191,10 +191,10 @@ int drm_destroy_ttm(drm_ttm_t * ttm) return 0; } -struct page *drm_ttm_get_page(drm_ttm_t * ttm, int index) +struct page *drm_ttm_get_page(struct drm_ttm * ttm, int index) { struct page *p; - drm_buffer_manager_t *bm = &ttm->dev->bm; + struct drm_buffer_manager *bm = &ttm->dev->bm; p = ttm->pages[index]; if (!p) { @@ -207,11 +207,11 @@ struct page *drm_ttm_get_page(drm_ttm_t * ttm, int index) return p; } -static int drm_ttm_populate(drm_ttm_t * ttm) +static int drm_ttm_populate(struct drm_ttm * ttm) { struct page *page; unsigned long i; - drm_ttm_backend_t *be; + struct drm_ttm_backend *be; if (ttm->state != ttm_unpopulated) return 0; @@ -231,10 +231,10 @@ static int drm_ttm_populate(drm_ttm_t * ttm) * Initialize a ttm. */ -drm_ttm_t *drm_ttm_init(struct drm_device * dev, unsigned long size) +struct drm_ttm *drm_ttm_init(struct drm_device * dev, unsigned long size) { - drm_bo_driver_t *bo_driver = dev->driver->bo_driver; - drm_ttm_t *ttm; + struct drm_bo_driver *bo_driver = dev->driver->bo_driver; + struct drm_ttm *ttm; if (!bo_driver) return NULL; @@ -275,9 +275,9 @@ drm_ttm_t *drm_ttm_init(struct drm_device * dev, unsigned long size) * Unbind a ttm region from the aperture. 
*/ -void drm_ttm_evict(drm_ttm_t * ttm) +void drm_ttm_evict(struct drm_ttm * ttm) { - drm_ttm_backend_t *be = ttm->be; + struct drm_ttm_backend *be = ttm->be; int ret; if (ttm->state == ttm_bound) { @@ -288,11 +288,11 @@ void drm_ttm_evict(drm_ttm_t * ttm) ttm->state = ttm_evicted; } -void drm_ttm_fixup_caching(drm_ttm_t * ttm) +void drm_ttm_fixup_caching(struct drm_ttm * ttm) { if (ttm->state == ttm_evicted) { - drm_ttm_backend_t *be = ttm->be; + struct drm_ttm_backend *be = ttm->be; if (be->func->needs_ub_cache_adjust(be)) { drm_set_caching(ttm, 0); } @@ -300,7 +300,7 @@ void drm_ttm_fixup_caching(drm_ttm_t * ttm) } } -void drm_ttm_unbind(drm_ttm_t * ttm) +void drm_ttm_unbind(struct drm_ttm * ttm) { if (ttm->state == ttm_bound) drm_ttm_evict(ttm); @@ -308,11 +308,11 @@ void drm_ttm_unbind(drm_ttm_t * ttm) drm_ttm_fixup_caching(ttm); } -int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset) +int drm_bind_ttm(struct drm_ttm * ttm, int cached, unsigned long aper_offset) { int ret = 0; - drm_ttm_backend_t *be; + struct drm_ttm_backend *be; if (!ttm) return -EINVAL; diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c index de2fba1a..265a59d8 100644 --- a/linux-core/drm_vm.c +++ b/linux-core/drm_vm.c @@ -713,10 +713,10 @@ EXPORT_SYMBOL(drm_mmap); static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma, unsigned long address) { - drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data; + struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data; unsigned long page_offset; struct page *page = NULL; - drm_ttm_t *ttm; + struct drm_ttm *ttm; struct drm_device *dev; unsigned long pfn; int err; @@ -766,7 +766,7 @@ static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma, page_offset = (address - vma->vm_start) >> PAGE_SHIFT; if (bus_size) { - drm_mem_type_manager_t *man = &dev->bm.man[bo->mem.mem_type]; + struct drm_mem_type_manager *man = &dev->bm.man[bo->mem.mem_type]; pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset; vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma); @@ -798,7 +798,7 @@ out_unlock: static void drm_bo_vm_open_locked(struct vm_area_struct *vma) { - drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data; + struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data; drm_vm_open_locked(vma); atomic_inc(&bo->usage); @@ -815,7 +815,7 @@ static void drm_bo_vm_open_locked(struct vm_area_struct *vma) static void drm_bo_vm_open(struct vm_area_struct *vma) { - drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data; + struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data; struct drm_device *dev = bo->dev; mutex_lock(&dev->struct_mutex); @@ -831,7 +831,7 @@ static void drm_bo_vm_open(struct vm_area_struct *vma) static void drm_bo_vm_close(struct vm_area_struct *vma) { - drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data; + struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data; struct drm_device *dev = bo->dev; drm_vm_close(vma); diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c index 6aeccfcb..bf500cc6 100644 --- a/linux-core/i915_buffer.c +++ b/linux-core/i915_buffer.c @@ -33,12 +33,12 @@ #include "i915_drm.h" #include "i915_drv.h" -drm_ttm_backend_t *i915_create_ttm_backend_entry(struct drm_device * dev) +struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device * dev) { return drm_agp_init_ttm(dev); } -int i915_fence_types(drm_buffer_object_t 
*bo, uint32_t * type) +int i915_fence_types(struct drm_buffer_object *bo, uint32_t * type) { if (bo->mem.flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE)) *type = 3; @@ -64,7 +64,7 @@ int i915_invalidate_caches(struct drm_device * dev, uint64_t flags) } int i915_init_mem_type(struct drm_device * dev, uint32_t type, - drm_mem_type_manager_t * man) + struct drm_mem_type_manager * man) { switch (type) { case DRM_BO_MEM_LOCAL: @@ -105,7 +105,7 @@ int i915_init_mem_type(struct drm_device * dev, uint32_t type, return 0; } -uint32_t i915_evict_mask(drm_buffer_object_t *bo) +uint32_t i915_evict_mask(struct drm_buffer_object *bo) { switch (bo->mem.mem_type) { case DRM_BO_MEM_LOCAL: @@ -150,10 +150,10 @@ static void i915_emit_copy_blit(struct drm_device * dev, return; } -static int i915_move_blit(drm_buffer_object_t * bo, - int evict, int no_wait, drm_bo_mem_reg_t * new_mem) +static int i915_move_blit(struct drm_buffer_object * bo, + int evict, int no_wait, struct drm_bo_mem_reg * new_mem) { - drm_bo_mem_reg_t *old_mem = &bo->mem; + struct drm_bo_mem_reg *old_mem = &bo->mem; int dir = 0; if ((old_mem->mem_type == new_mem->mem_type) && @@ -180,11 +180,11 @@ static int i915_move_blit(drm_buffer_object_t * bo, * then blit and subsequently move out again. */ -static int i915_move_flip(drm_buffer_object_t * bo, - int evict, int no_wait, drm_bo_mem_reg_t * new_mem) +static int i915_move_flip(struct drm_buffer_object * bo, + int evict, int no_wait, struct drm_bo_mem_reg * new_mem) { struct drm_device *dev = bo->dev; - drm_bo_mem_reg_t tmp_mem; + struct drm_bo_mem_reg tmp_mem; int ret; tmp_mem = *new_mem; @@ -216,10 +216,10 @@ out_cleanup: return ret; } -int i915_move(drm_buffer_object_t * bo, - int evict, int no_wait, drm_bo_mem_reg_t * new_mem) +int i915_move(struct drm_buffer_object * bo, + int evict, int no_wait, struct drm_bo_mem_reg * new_mem) { - drm_bo_mem_reg_t *old_mem = &bo->mem; + struct drm_bo_mem_reg *old_mem = &bo->mem; if (old_mem->mem_type == DRM_BO_MEM_LOCAL) { return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); diff --git a/linux-core/i915_drv.c b/linux-core/i915_drv.c index 49437066..e337e1d2 100644 --- a/linux-core/i915_drv.c +++ b/linux-core/i915_drv.c @@ -39,7 +39,7 @@ static struct pci_device_id pciidlist[] = { }; #ifdef I915_HAVE_FENCE -static drm_fence_driver_t i915_fence_driver = { +static struct drm_fence_driver i915_fence_driver = { .num_classes = 1, .wrap_diff = (1U << (BREADCRUMB_BITS - 1)), .flush_diff = (1U << (BREADCRUMB_BITS - 2)), @@ -55,7 +55,7 @@ static drm_fence_driver_t i915_fence_driver = { static uint32_t i915_mem_prios[] = {DRM_BO_MEM_PRIV0, DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL}; static uint32_t i915_busy_prios[] = {DRM_BO_MEM_TT, DRM_BO_MEM_PRIV0, DRM_BO_MEM_LOCAL}; -static drm_bo_driver_t i915_bo_driver = { +static struct drm_bo_driver i915_bo_driver = { .mem_type_prio = i915_mem_prios, .mem_busy_prio = i915_busy_prios, .num_mem_type_prio = sizeof(i915_mem_prios)/sizeof(uint32_t), diff --git a/linux-core/i915_fence.c b/linux-core/i915_fence.c index a71e5dac..6f0de2ca 100644 --- a/linux-core/i915_fence.c +++ b/linux-core/i915_fence.c @@ -41,9 +41,9 @@ static void i915_perform_flush(struct drm_device * dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - drm_fence_manager_t *fm = &dev->fm; - drm_fence_class_manager_t *fc = &fm->class[0]; - drm_fence_driver_t *driver = dev->driver->fence_driver; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_class_manager *fc = &fm->class[0]; + struct drm_fence_driver *driver = 
dev->driver->fence_driver; uint32_t flush_flags = 0; uint32_t flush_sequence = 0; uint32_t i_status; @@ -111,7 +111,7 @@ static void i915_perform_flush(struct drm_device * dev) void i915_poke_flush(struct drm_device * dev, uint32_t class) { - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; unsigned long flags; write_lock_irqsave(&fm->lock, flags); @@ -137,7 +137,7 @@ int i915_fence_emit_sequence(struct drm_device * dev, uint32_t class, uint32_t f void i915_fence_handler(struct drm_device * dev) { - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; write_lock(&fm->lock); i915_perform_flush(dev); diff --git a/linux-core/via_buffer.c b/linux-core/via_buffer.c index e452611d..0461b3c7 100644 --- a/linux-core/via_buffer.c +++ b/linux-core/via_buffer.c @@ -32,12 +32,12 @@ #include "via_drm.h" #include "via_drv.h" -drm_ttm_backend_t *via_create_ttm_backend_entry(struct drm_device * dev) +struct drm_ttm_backend *via_create_ttm_backend_entry(struct drm_device * dev) { return drm_agp_init_ttm(dev); } -int via_fence_types(drm_buffer_object_t *bo, uint32_t * type) +int via_fence_types(struct drm_buffer_object *bo, uint32_t * type) { *type = 3; return 0; @@ -82,7 +82,7 @@ static int via_vram_info(struct drm_device *dev, } int via_init_mem_type(struct drm_device * dev, uint32_t type, - drm_mem_type_manager_t * man) + struct drm_mem_type_manager * man) { switch (type) { case DRM_BO_MEM_LOCAL: @@ -143,7 +143,7 @@ int via_init_mem_type(struct drm_device * dev, uint32_t type, return 0; } -uint32_t via_evict_mask(drm_buffer_object_t *bo) +uint32_t via_evict_mask(struct drm_buffer_object *bo) { switch (bo->mem.mem_type) { case DRM_BO_MEM_LOCAL: diff --git a/linux-core/via_fence.c b/linux-core/via_fence.c index ce4366d2..a8db3d12 100644 --- a/linux-core/via_fence.c +++ b/linux-core/via_fence.c @@ -42,7 +42,7 @@ static uint32_t via_perform_flush(struct drm_device *dev, uint32_t class) { drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; - drm_fence_class_manager_t *fc = &dev->fm.class[class]; + struct drm_fence_class_manager *fc = &dev->fm.class[class]; uint32_t pending_flush_types = 0; uint32_t signaled_flush_types = 0; uint32_t status; @@ -155,7 +155,7 @@ int via_fence_emit_sequence(struct drm_device * dev, uint32_t class, uint32_t fl void via_poke_flush(struct drm_device * dev, uint32_t class) { drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; unsigned long flags; uint32_t pending_flush; @@ -202,9 +202,9 @@ void via_fence_timer(unsigned long data) { struct drm_device *dev = (struct drm_device *) data; drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; uint32_t pending_flush; - drm_fence_class_manager_t *fc = &dev->fm.class[0]; + struct drm_fence_class_manager *fc = &dev->fm.class[0]; if (!dev_priv) return; diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h index 1ed37c63..ee2b474b 100644 --- a/shared-core/i915_drv.h +++ b/shared-core/i915_drv.h @@ -197,14 +197,14 @@ extern int i915_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t f #ifdef I915_HAVE_BUFFER /* i915_buffer.c */ -extern drm_ttm_backend_t *i915_create_ttm_backend_entry(struct drm_device *dev); -extern int i915_fence_types(drm_buffer_object_t *bo, uint32_t *type); +extern struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device 
*dev); +extern int i915_fence_types(struct drm_buffer_object *bo, uint32_t *type); extern int i915_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags); extern int i915_init_mem_type(struct drm_device *dev, uint32_t type, - drm_mem_type_manager_t *man); -extern uint32_t i915_evict_mask(drm_buffer_object_t *bo); -extern int i915_move(drm_buffer_object_t *bo, int evict, - int no_wait, drm_bo_mem_reg_t *new_mem); + struct drm_mem_type_manager *man); +extern uint32_t i915_evict_mask(struct drm_buffer_object *bo); +extern int i915_move(struct drm_buffer_object *bo, int evict, + int no_wait, struct drm_bo_mem_reg *new_mem); #endif diff --git a/shared-core/via_drv.c b/shared-core/via_drv.c index 0a478fef..9f099555 100644 --- a/shared-core/via_drv.c +++ b/shared-core/via_drv.c @@ -40,7 +40,7 @@ static struct pci_device_id pciidlist[] = { #ifdef VIA_HAVE_FENCE -static drm_fence_driver_t via_fence_driver = { +static struct drm_fence_driver via_fence_driver = { .num_classes = 1, .wrap_diff = (1 << 30), .flush_diff = (1 << 20), @@ -65,7 +65,7 @@ static uint32_t via_mem_prios[] = {DRM_BO_MEM_PRIV0, DRM_BO_MEM_VRAM, DRM_BO_MEM static uint32_t via_busy_prios[] = {DRM_BO_MEM_TT, DRM_BO_MEM_PRIV0, DRM_BO_MEM_VRAM, DRM_BO_MEM_LOCAL}; -static drm_bo_driver_t via_bo_driver = { +static struct drm_bo_driver via_bo_driver = { .mem_type_prio = via_mem_prios, .mem_busy_prio = via_busy_prios, .num_mem_type_prio = ARRAY_SIZE(via_mem_prios), diff --git a/shared-core/via_drv.h b/shared-core/via_drv.h index 9ffc7a51..05935c81 100644 --- a/shared-core/via_drv.h +++ b/shared-core/via_drv.h @@ -204,14 +204,14 @@ extern int via_fence_has_irq(struct drm_device * dev, uint32_t class, #endif #ifdef VIA_HAVE_BUFFER -extern drm_ttm_backend_t *via_create_ttm_backend_entry(struct drm_device *dev); -extern int via_fence_types(drm_buffer_object_t *bo, uint32_t *type); +extern struct drm_ttm_backend *via_create_ttm_backend_entry(struct drm_device *dev); +extern int via_fence_types(struct drm_buffer_object *bo, uint32_t *type); extern int via_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags); extern int via_init_mem_type(struct drm_device *dev, uint32_t type, - drm_mem_type_manager_t *man); -extern uint32_t via_evict_mask(drm_buffer_object_t *bo); -extern int via_move(drm_buffer_object_t *bo, int evict, - int no_wait, drm_bo_mem_reg_t *new_mem); + struct drm_mem_type_manager *man); +extern uint32_t via_evict_mask(struct drm_buffer_object *bo); +extern int via_move(struct drm_buffer_object *bo, int evict, + int no_wait, struct drm_bo_mem_reg *new_mem); #endif #endif From 24311d5d82b61a4729b15355088dd9c2898d1089 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Mon, 16 Jul 2007 13:42:11 +1000 Subject: [PATCH 119/437] drm: remove drm_buf_t --- linux-core/drmP.h | 6 +++--- linux-core/drm_bufs.c | 16 ++++++++-------- linux-core/drm_dma.c | 2 +- linux-core/i810_dma.c | 26 +++++++++++++------------- linux-core/i810_drv.h | 2 +- shared-core/mach64_dma.c | 10 +++++----- shared-core/mach64_drv.h | 10 +++++----- shared-core/mach64_state.c | 4 ++-- shared-core/mga_dma.c | 8 ++++---- shared-core/mga_drv.h | 4 ++-- shared-core/mga_state.c | 8 ++++---- shared-core/r128_drv.h | 2 +- shared-core/r300_cmdbuf.c | 4 ++-- shared-core/radeon_cp.c | 12 ++++++------ shared-core/radeon_drv.h | 4 ++-- shared-core/savage_bci.c | 10 +++++----- shared-core/savage_drv.h | 4 ++-- shared-core/savage_state.c | 8 ++++---- 18 files changed, 70 insertions(+), 70 deletions(-) diff --git a/linux-core/drmP.h b/linux-core/drmP.h index 
142a04a1..9a79b0df 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -311,7 +311,7 @@ struct drm_vma_entry { /** * DMA buffer. */ -typedef struct drm_buf { +struct drm_buf { int idx; /**< Index into master buflist */ int total; /**< Buffer size */ int order; /**< log-base-2(total) */ @@ -337,7 +337,7 @@ typedef struct drm_buf { int dev_priv_size; /**< Size of buffer private storage */ void *dev_private; /**< Per-buffer private storage */ -} drm_buf_t; +}; /** bufs is one longer than it has to be */ struct drm_waitlist { @@ -1051,7 +1051,7 @@ extern struct drm_map_list *drm_find_matching_map(struct drm_device *dev, /* DMA support (drm_dma.h) */ extern int drm_dma_setup(struct drm_device *dev); extern void drm_dma_takedown(struct drm_device *dev); -extern void drm_free_buffer(struct drm_device *dev, drm_buf_t * buf); +extern void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf); extern void drm_core_reclaim_buffers(struct drm_device *dev, struct file *filp); /* IRQ support (drm_irq.h) */ diff --git a/linux-core/drm_bufs.c b/linux-core/drm_bufs.c index f766597b..c1e23b5c 100644 --- a/linux-core/drm_bufs.c +++ b/linux-core/drm_bufs.c @@ -574,7 +574,7 @@ int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request) int total; int byte_count; int i, valid; - drm_buf_t **temp_buflist; + struct drm_buf **temp_buflist; if (!dma) return -EINVAL; @@ -738,14 +738,14 @@ int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request) int page_order; struct drm_buf_entry *entry; drm_dma_handle_t *dmah; - drm_buf_t *buf; + struct drm_buf *buf; int alignment; unsigned long offset; int i; int byte_count; int page_count; unsigned long *temp_pagelist; - drm_buf_t **temp_buflist; + struct drm_buf **temp_buflist; if (!drm_core_check_feature(dev, DRIVER_PCI_DMA)) return -EINVAL; @@ -958,7 +958,7 @@ static int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc * request) { struct drm_device_dma *dma = dev->dma; struct drm_buf_entry *entry; - drm_buf_t *buf; + struct drm_buf *buf; unsigned long offset; unsigned long agp_offset; int count; @@ -969,7 +969,7 @@ static int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc * request) int total; int byte_count; int i; - drm_buf_t **temp_buflist; + struct drm_buf **temp_buflist; if (!drm_core_check_feature(dev, DRIVER_SG)) return -EINVAL; @@ -1120,7 +1120,7 @@ int drm_addbufs_fb(struct drm_device *dev, struct drm_buf_desc *request) { struct drm_device_dma *dma = dev->dma; struct drm_buf_entry *entry; - drm_buf_t *buf; + struct drm_buf *buf; unsigned long offset; unsigned long agp_offset; int count; @@ -1131,7 +1131,7 @@ int drm_addbufs_fb(struct drm_device *dev, struct drm_buf_desc *request) int total; int byte_count; int i; - drm_buf_t **temp_buflist; + struct drm_buf **temp_buflist; if (!drm_core_check_feature(dev, DRIVER_FB_DMA)) return -EINVAL; @@ -1492,7 +1492,7 @@ int drm_freebufs(struct inode *inode, struct file *filp, struct drm_buf_free request; int i; int idx; - drm_buf_t *buf; + struct drm_buf *buf; if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) return -EINVAL; diff --git a/linux-core/drm_dma.c b/linux-core/drm_dma.c index 6990f8d4..d2a88d52 100644 --- a/linux-core/drm_dma.c +++ b/linux-core/drm_dma.c @@ -129,7 +129,7 @@ void drm_dma_takedown(struct drm_device * dev) * * Resets the fields of \p buf. 
*/ -void drm_free_buffer(struct drm_device * dev, drm_buf_t * buf) +void drm_free_buffer(struct drm_device * dev, struct drm_buf * buf) { if (!buf) return; diff --git a/linux-core/i810_dma.c b/linux-core/i810_dma.c index 31dc1c86..1e6d8cd3 100644 --- a/linux-core/i810_dma.c +++ b/linux-core/i810_dma.c @@ -64,7 +64,7 @@ static inline void i810_print_status_page(struct drm_device * dev) } } -static drm_buf_t *i810_freelist_get(struct drm_device * dev) +static struct drm_buf *i810_freelist_get(struct drm_device * dev) { struct drm_device_dma *dma = dev->dma; int i; @@ -73,7 +73,7 @@ static drm_buf_t *i810_freelist_get(struct drm_device * dev) /* Linear search might not be the best solution */ for (i = 0; i < dma->buf_count; i++) { - drm_buf_t *buf = dma->buflist[i]; + struct drm_buf *buf = dma->buflist[i]; drm_i810_buf_priv_t *buf_priv = buf->dev_private; /* In use is already a pointer */ used = cmpxchg(buf_priv->in_use, I810_BUF_FREE, @@ -89,7 +89,7 @@ static drm_buf_t *i810_freelist_get(struct drm_device * dev) * yet, the hardware updates in use for us once its on the ring buffer. */ -static int i810_freelist_put(struct drm_device * dev, drm_buf_t * buf) +static int i810_freelist_put(struct drm_device * dev, struct drm_buf * buf) { drm_i810_buf_priv_t *buf_priv = buf->dev_private; int used; @@ -109,7 +109,7 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma) struct drm_file *priv = filp->private_data; struct drm_device *dev; drm_i810_private_t *dev_priv; - drm_buf_t *buf; + struct drm_buf *buf; drm_i810_buf_priv_t *buf_priv; lock_kernel(); @@ -139,7 +139,7 @@ static const struct file_operations i810_buffer_fops = { .fasync = drm_fasync, }; -static int i810_map_buffer(drm_buf_t * buf, struct file *filp) +static int i810_map_buffer(struct drm_buf * buf, struct file *filp) { struct drm_file *priv = filp->private_data; struct drm_device *dev = priv->head->dev; @@ -171,7 +171,7 @@ static int i810_map_buffer(drm_buf_t * buf, struct file *filp) return retcode; } -static int i810_unmap_buffer(drm_buf_t * buf) +static int i810_unmap_buffer(struct drm_buf * buf) { drm_i810_buf_priv_t *buf_priv = buf->dev_private; int retcode = 0; @@ -194,7 +194,7 @@ static int i810_unmap_buffer(drm_buf_t * buf) static int i810_dma_get_buffer(struct drm_device * dev, drm_i810_dma_t * d, struct file *filp) { - drm_buf_t *buf; + struct drm_buf *buf; drm_i810_buf_priv_t *buf_priv; int retcode = 0; @@ -252,7 +252,7 @@ static int i810_dma_cleanup(struct drm_device * dev) dev->dev_private = NULL; for (i = 0; i < dma->buf_count; i++) { - drm_buf_t *buf = dma->buflist[i]; + struct drm_buf *buf = dma->buflist[i]; drm_i810_buf_priv_t *buf_priv = buf->dev_private; if (buf_priv->kernel_virtual && buf->total) @@ -320,7 +320,7 @@ static int i810_freelist_init(struct drm_device * dev, drm_i810_private_t * dev_ } for (i = 0; i < dma->buf_count; i++) { - drm_buf_t *buf = dma->buflist[i]; + struct drm_buf *buf = dma->buflist[i]; drm_i810_buf_priv_t *buf_priv = buf->dev_private; buf_priv->in_use = hw_status++; @@ -807,7 +807,7 @@ static void i810_dma_dispatch_swap(struct drm_device * dev) } static void i810_dma_dispatch_vertex(struct drm_device * dev, - drm_buf_t * buf, int discard, int used) + struct drm_buf * buf, int discard, int used) { drm_i810_private_t *dev_priv = dev->dev_private; drm_i810_buf_priv_t *buf_priv = buf->dev_private; @@ -971,7 +971,7 @@ static int i810_flush_queue(struct drm_device * dev) i810_wait_ring(dev, dev_priv->ring.Size - 8); for (i = 0; i < dma->buf_count; i++) { - drm_buf_t *buf 
= dma->buflist[i]; + struct drm_buf *buf = dma->buflist[i]; drm_i810_buf_priv_t *buf_priv = buf->dev_private; int used = cmpxchg(buf_priv->in_use, I810_BUF_HARDWARE, @@ -1002,7 +1002,7 @@ static void i810_reclaim_buffers(struct drm_device *dev, struct file *filp) i810_flush_queue(dev); for (i = 0; i < dma->buf_count; i++) { - drm_buf_t *buf = dma->buflist[i]; + struct drm_buf *buf = dma->buflist[i]; drm_i810_buf_priv_t *buf_priv = buf->dev_private; if (buf->filp == filp && buf_priv) { @@ -1161,7 +1161,7 @@ static int i810_docopy(struct inode *inode, struct file *filp, unsigned int cmd, return 0; } -static void i810_dma_dispatch_mc(struct drm_device * dev, drm_buf_t * buf, int used, +static void i810_dma_dispatch_mc(struct drm_device * dev, struct drm_buf * buf, int used, unsigned int last_render) { drm_i810_private_t *dev_priv = dev->dev_private; diff --git a/linux-core/i810_drv.h b/linux-core/i810_drv.h index 06eac774..3627d774 100644 --- a/linux-core/i810_drv.h +++ b/linux-core/i810_drv.h @@ -88,7 +88,7 @@ typedef struct drm_i810_private { dma_addr_t dma_status_page; - drm_buf_t *mmap_buffer; + struct drm_buf *mmap_buffer; u32 front_di1, back_di1, zi1; diff --git a/shared-core/mach64_dma.c b/shared-core/mach64_dma.c index c787260a..d833475f 100644 --- a/shared-core/mach64_dma.c +++ b/shared-core/mach64_dma.c @@ -418,7 +418,7 @@ void mach64_dump_engine_info(drm_mach64_private_t * dev_priv) * pointed by the ring head. */ static void mach64_dump_buf_info(drm_mach64_private_t * dev_priv, - drm_buf_t * buf) + struct drm_buf * buf) { u32 addr = GETBUFADDR(buf); u32 used = buf->used >> 2; @@ -522,7 +522,7 @@ void mach64_dump_ring_info(drm_mach64_private_t * dev_priv) list_for_each(ptr, &dev_priv->pending) { drm_mach64_freelist_t *entry = list_entry(ptr, drm_mach64_freelist_t, list); - drm_buf_t *buf = entry->buf; + struct drm_buf *buf = entry->buf; u32 buf_addr = GETBUFADDR(buf); @@ -974,7 +974,7 @@ int mach64_do_dispatch_pseudo_dma(drm_mach64_private_t * dev_priv) volatile u32 *ring_read; struct list_head *ptr; drm_mach64_freelist_t *entry; - drm_buf_t *buf = NULL; + struct drm_buf *buf = NULL; u32 *buf_ptr; u32 used, reg, target; int fifo, count, found, ret, no_idle_wait; @@ -1381,7 +1381,7 @@ static int mach64_do_reclaim_completed(drm_mach64_private_t * dev_priv) return 1; } -drm_buf_t *mach64_freelist_get(drm_mach64_private_t * dev_priv) +struct drm_buf *mach64_freelist_get(drm_mach64_private_t * dev_priv) { drm_mach64_descriptor_ring_t *ring = &dev_priv->ring; drm_mach64_freelist_t *entry; @@ -1427,7 +1427,7 @@ drm_buf_t *mach64_freelist_get(drm_mach64_private_t * dev_priv) return entry->buf; } -int mach64_freelist_put(drm_mach64_private_t * dev_priv, drm_buf_t * copy_buf) +int mach64_freelist_put(drm_mach64_private_t * dev_priv, struct drm_buf * copy_buf) { struct list_head *ptr; drm_mach64_freelist_t *entry; diff --git a/shared-core/mach64_drv.h b/shared-core/mach64_drv.h index 31b8247a..a1b36751 100644 --- a/shared-core/mach64_drv.h +++ b/shared-core/mach64_drv.h @@ -55,7 +55,7 @@ typedef struct drm_mach64_freelist { struct list_head list; /* List pointers for free_list, placeholders, or pending list */ - drm_buf_t *buf; /* Pointer to the buffer */ + struct drm_buf *buf; /* Pointer to the buffer */ int discard; /* This flag is set when we're done (re)using a buffer */ u32 ring_ofs; /* dword offset in ring of last descriptor for this buffer */ } drm_mach64_freelist_t; @@ -121,9 +121,9 @@ extern void mach64_driver_lastclose(struct drm_device * dev); extern int mach64_init_freelist(struct 
drm_device * dev); extern void mach64_destroy_freelist(struct drm_device * dev); -extern drm_buf_t *mach64_freelist_get(drm_mach64_private_t * dev_priv); +extern struct drm_buf *mach64_freelist_get(drm_mach64_private_t * dev_priv); extern int mach64_freelist_put(drm_mach64_private_t * dev_priv, - drm_buf_t * copy_buf); + struct drm_buf * copy_buf); extern int mach64_do_wait_for_fifo(drm_mach64_private_t * dev_priv, int entries); @@ -798,7 +798,7 @@ do { \ #define DMALOCALS \ drm_mach64_freelist_t *_entry = NULL; \ - drm_buf_t *_buf = NULL; \ + struct drm_buf *_buf = NULL; \ u32 *_buf_wptr; int _outcount #define GETBUFPTR( __buf ) \ @@ -813,7 +813,7 @@ do { \ static __inline__ int mach64_find_pending_buf_entry(drm_mach64_private_t * dev_priv, drm_mach64_freelist_t ** - entry, drm_buf_t * buf) + entry, struct drm_buf * buf) { struct list_head *ptr; #if MACH64_EXTRA_CHECKING diff --git a/shared-core/mach64_state.c b/shared-core/mach64_state.c index 4e8291af..95ad1ec3 100644 --- a/shared-core/mach64_state.c +++ b/shared-core/mach64_state.c @@ -550,7 +550,7 @@ static int mach64_dma_dispatch_vertex(DRMFILE filp, struct drm_device * dev, { drm_mach64_private_t *dev_priv = dev->dev_private; drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_buf_t *copy_buf; + struct drm_buf *copy_buf; void *buf = vertex->buf; unsigned long used = vertex->used; int ret = 0; @@ -646,7 +646,7 @@ static int mach64_dma_dispatch_blit(DRMFILE filp, struct drm_device * dev, drm_mach64_private_t *dev_priv = dev->dev_private; int dword_shift, dwords; unsigned long used; - drm_buf_t *copy_buf; + struct drm_buf *copy_buf; int verify_ret = 0; DMALOCALS; diff --git a/shared-core/mga_dma.c b/shared-core/mga_dma.c index 0a3c2729..9bed3b34 100644 --- a/shared-core/mga_dma.c +++ b/shared-core/mga_dma.c @@ -314,7 +314,7 @@ static void mga_freelist_cleanup(struct drm_device * dev) static void mga_freelist_reset(struct drm_device * dev) { drm_device_dma_t *dma = dev->dma; - drm_buf_t *buf; + struct drm_buf *buf; drm_mga_buf_priv_t *buf_priv; int i; @@ -326,7 +326,7 @@ static void mga_freelist_reset(struct drm_device * dev) } #endif -static drm_buf_t *mga_freelist_get(struct drm_device * dev) +static struct drm_buf *mga_freelist_get(struct drm_device * dev) { drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_freelist_t *next; @@ -359,7 +359,7 @@ static drm_buf_t *mga_freelist_get(struct drm_device * dev) return NULL; } -int mga_freelist_put(struct drm_device * dev, drm_buf_t * buf) +int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf) { drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_buf_priv_t *buf_priv = buf->dev_private; @@ -1091,7 +1091,7 @@ int mga_dma_reset(DRM_IOCTL_ARGS) static int mga_dma_get_buffers(DRMFILE filp, struct drm_device * dev, struct drm_dma * d) { - drm_buf_t *buf; + struct drm_buf *buf; int i; for (i = d->granted_count; i < d->request_count; i++) { diff --git a/shared-core/mga_drv.h b/shared-core/mga_drv.h index e1fdf403..10096a95 100644 --- a/shared-core/mga_drv.h +++ b/shared-core/mga_drv.h @@ -65,7 +65,7 @@ typedef struct drm_mga_freelist { struct drm_mga_freelist *next; struct drm_mga_freelist *prev; drm_mga_age_t age; - drm_buf_t *buf; + struct drm_buf *buf; } drm_mga_freelist_t; typedef struct { @@ -168,7 +168,7 @@ extern void mga_do_dma_flush(drm_mga_private_t * dev_priv); extern void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv); extern void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv); -extern int mga_freelist_put(struct drm_device * dev, drm_buf_t * 
buf); +extern int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf); /* mga_warp.c */ extern unsigned int mga_warp_microcode_size(const drm_mga_private_t * dev_priv); diff --git a/shared-core/mga_state.c b/shared-core/mga_state.c index 72db0ced..6d93c9e4 100644 --- a/shared-core/mga_state.c +++ b/shared-core/mga_state.c @@ -651,7 +651,7 @@ static void mga_dma_dispatch_swap(struct drm_device * dev) DRM_DEBUG("%s... done.\n", __FUNCTION__); } -static void mga_dma_dispatch_vertex(struct drm_device * dev, drm_buf_t * buf) +static void mga_dma_dispatch_vertex(struct drm_device * dev, struct drm_buf * buf) { drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_buf_priv_t *buf_priv = buf->dev_private; @@ -698,7 +698,7 @@ static void mga_dma_dispatch_vertex(struct drm_device * dev, drm_buf_t * buf) FLUSH_DMA(); } -static void mga_dma_dispatch_indices(struct drm_device * dev, drm_buf_t * buf, +static void mga_dma_dispatch_indices(struct drm_device * dev, struct drm_buf * buf, unsigned int start, unsigned int end) { drm_mga_private_t *dev_priv = dev->dev_private; @@ -747,7 +747,7 @@ static void mga_dma_dispatch_indices(struct drm_device * dev, drm_buf_t * buf, /* This copies a 64 byte aligned agp region to the frambuffer with a * standard blit, the ioctl needs to do checking. */ -static void mga_dma_dispatch_iload(struct drm_device * dev, drm_buf_t * buf, +static void mga_dma_dispatch_iload(struct drm_device * dev, struct drm_buf * buf, unsigned int dstorg, unsigned int length) { drm_mga_private_t *dev_priv = dev->dev_private; @@ -998,7 +998,7 @@ static int mga_dma_iload(DRM_IOCTL_ARGS) DRM_DEVICE; struct drm_device_dma *dma = dev->dma; drm_mga_private_t *dev_priv = dev->dev_private; - drm_buf_t *buf; + struct drm_buf *buf; drm_mga_buf_priv_t *buf_priv; drm_mga_iload_t iload; DRM_DEBUG("\n"); diff --git a/shared-core/r128_drv.h b/shared-core/r128_drv.h index 0791713a..c9abd67b 100644 --- a/shared-core/r128_drv.h +++ b/shared-core/r128_drv.h @@ -57,7 +57,7 @@ typedef struct drm_r128_freelist { unsigned int age; - drm_buf_t *buf; + struct drm_buf *buf; struct drm_r128_freelist *next; struct drm_r128_freelist *prev; } drm_r128_freelist_t; diff --git a/shared-core/r300_cmdbuf.c b/shared-core/r300_cmdbuf.c index 08015ecf..9cf352ae 100644 --- a/shared-core/r300_cmdbuf.c +++ b/shared-core/r300_cmdbuf.c @@ -706,7 +706,7 @@ static __inline__ void r300_pacify(drm_radeon_private_t *dev_priv) * The actual age emit is done by r300_do_cp_cmdbuf, which is why you must * be careful about how this function is called. */ -static void r300_discard_buffer(struct drm_device * dev, drm_buf_t * buf) +static void r300_discard_buffer(struct drm_device * dev, struct drm_buf * buf) { drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_buf_priv_t *buf_priv = buf->dev_private; @@ -785,7 +785,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev, { drm_radeon_private_t *dev_priv = dev->dev_private; struct drm_device_dma *dma = dev->dma; - drm_buf_t *buf = NULL; + struct drm_buf *buf = NULL; int emit_dispatch_age = 0; int ret = 0; diff --git a/shared-core/radeon_cp.c b/shared-core/radeon_cp.c index cef47ca0..40a20e6c 100644 --- a/shared-core/radeon_cp.c +++ b/shared-core/radeon_cp.c @@ -2066,12 +2066,12 @@ int radeon_fullscreen(DRM_IOCTL_ARGS) * they can't get the lock. 
*/ -drm_buf_t *radeon_freelist_get(struct drm_device * dev) +struct drm_buf *radeon_freelist_get(struct drm_device * dev) { struct drm_device_dma *dma = dev->dma; drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_buf_priv_t *buf_priv; - drm_buf_t *buf; + struct drm_buf *buf; int i, t; int start; @@ -2106,12 +2106,12 @@ drm_buf_t *radeon_freelist_get(struct drm_device * dev) } #if 0 -drm_buf_t *radeon_freelist_get(struct drm_device * dev) +struct drm_buf *radeon_freelist_get(struct drm_device * dev) { struct drm_device_dma *dma = dev->dma; drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_buf_priv_t *buf_priv; - drm_buf_t *buf; + struct drm_buf *buf; int i, t; int start; u32 done_age = DRM_READ32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1)); @@ -2148,7 +2148,7 @@ void radeon_freelist_reset(struct drm_device * dev) dev_priv->last_buf = 0; for (i = 0; i < dma->buf_count; i++) { - drm_buf_t *buf = dma->buflist[i]; + struct drm_buf *buf = dma->buflist[i]; drm_radeon_buf_priv_t *buf_priv = buf->dev_private; buf_priv->age = 0; } @@ -2194,7 +2194,7 @@ static int radeon_cp_get_buffers(DRMFILE filp, struct drm_device * dev, struct drm_dma * d) { int i; - drm_buf_t *buf; + struct drm_buf *buf; for (i = d->granted_count; i < d->request_count; i++) { buf = radeon_freelist_get(dev); diff --git a/shared-core/radeon_drv.h b/shared-core/radeon_drv.h index dfa811c2..2dca1e70 100644 --- a/shared-core/radeon_drv.h +++ b/shared-core/radeon_drv.h @@ -156,7 +156,7 @@ enum radeon_chip_flags { typedef struct drm_radeon_freelist { unsigned int age; - drm_buf_t *buf; + struct drm_buf *buf; struct drm_radeon_freelist *next; struct drm_radeon_freelist *prev; } drm_radeon_freelist_t; @@ -346,7 +346,7 @@ extern int radeon_fullscreen(DRM_IOCTL_ARGS); extern int radeon_cp_buffers(DRM_IOCTL_ARGS); extern void radeon_freelist_reset(struct drm_device * dev); -extern drm_buf_t *radeon_freelist_get(struct drm_device * dev); +extern struct drm_buf *radeon_freelist_get(struct drm_device * dev); extern int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n); diff --git a/shared-core/savage_bci.c b/shared-core/savage_bci.c index 5a41b238..a3fd8994 100644 --- a/shared-core/savage_bci.c +++ b/shared-core/savage_bci.c @@ -207,7 +207,7 @@ static int savage_freelist_init(struct drm_device *dev) { drm_savage_private_t *dev_priv = dev->dev_private; struct drm_device_dma *dma = dev->dma; - drm_buf_t *buf; + struct drm_buf *buf; drm_savage_buf_priv_t *entry; int i; DRM_DEBUG("count=%d\n", dma->buf_count); @@ -236,7 +236,7 @@ static int savage_freelist_init(struct drm_device *dev) return 0; } -static drm_buf_t *savage_freelist_get(struct drm_device *dev) +static struct drm_buf *savage_freelist_get(struct drm_device *dev) { drm_savage_private_t *dev_priv = dev->dev_private; drm_savage_buf_priv_t *tail = dev_priv->tail.prev; @@ -269,7 +269,7 @@ static drm_buf_t *savage_freelist_get(struct drm_device *dev) return NULL; } -void savage_freelist_put(struct drm_device *dev, drm_buf_t *buf) +void savage_freelist_put(struct drm_device *dev, struct drm_buf *buf) { drm_savage_private_t *dev_priv = dev->dev_private; drm_savage_buf_priv_t *entry = buf->dev_private, *prev, *next; @@ -1008,7 +1008,7 @@ static int savage_bci_event_wait(DRM_IOCTL_ARGS) static int savage_bci_get_buffers(DRMFILE filp, struct drm_device *dev, struct drm_dma *d) { - drm_buf_t *buf; + struct drm_buf *buf; int i; for (i = d->granted_count; i < d->request_count; i++) { @@ -1084,7 +1084,7 @@ void savage_reclaim_buffers(struct drm_device *dev, DRMFILE 
filp) /*i830_flush_queue(dev);*/ for (i = 0; i < dma->buf_count; i++) { - drm_buf_t *buf = dma->buflist[i]; + struct drm_buf *buf = dma->buflist[i]; drm_savage_buf_priv_t *buf_priv = buf->dev_private; if (buf->filp == filp && buf_priv && diff --git a/shared-core/savage_drv.h b/shared-core/savage_drv.h index 39c2c751..e9e2231f 100644 --- a/shared-core/savage_drv.h +++ b/shared-core/savage_drv.h @@ -58,7 +58,7 @@ typedef struct drm_savage_buf_priv { struct drm_savage_buf_priv *next; struct drm_savage_buf_priv *prev; drm_savage_age_t age; - drm_buf_t *buf; + struct drm_buf *buf; } drm_savage_buf_priv_t; typedef struct drm_savage_dma_page { @@ -203,7 +203,7 @@ extern int savage_bci_buffers(DRM_IOCTL_ARGS); /* BCI functions */ extern uint16_t savage_bci_emit_event(drm_savage_private_t *dev_priv, unsigned int flags); -extern void savage_freelist_put(struct drm_device *dev, drm_buf_t *buf); +extern void savage_freelist_put(struct drm_device *dev, struct drm_buf *buf); extern void savage_dma_reset(drm_savage_private_t *dev_priv); extern void savage_dma_wait(drm_savage_private_t *dev_priv, unsigned int page); extern uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv, diff --git a/shared-core/savage_state.c b/shared-core/savage_state.c index 93d2081b..290796ee 100644 --- a/shared-core/savage_state.c +++ b/shared-core/savage_state.c @@ -275,7 +275,7 @@ static int savage_dispatch_state(drm_savage_private_t *dev_priv, static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv, const drm_savage_cmd_header_t *cmd_header, - const drm_buf_t *dmabuf) + const struct drm_buf *dmabuf) { unsigned char reorder = 0; unsigned int prim = cmd_header->prim.prim; @@ -534,7 +534,7 @@ static int savage_dispatch_vb_prim(drm_savage_private_t *dev_priv, static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv, const drm_savage_cmd_header_t *cmd_header, const uint16_t *idx, - const drm_buf_t *dmabuf) + const struct drm_buf *dmabuf) { unsigned char reorder = 0; unsigned int prim = cmd_header->idx.prim; @@ -891,7 +891,7 @@ static int savage_dispatch_swap(drm_savage_private_t *dev_priv, static int savage_dispatch_draw(drm_savage_private_t *dev_priv, const drm_savage_cmd_header_t *start, const drm_savage_cmd_header_t *end, - const drm_buf_t *dmabuf, + const struct drm_buf *dmabuf, const unsigned int *vtxbuf, unsigned int vb_size, unsigned int vb_stride, unsigned int nbox, @@ -957,7 +957,7 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS) DRM_DEVICE; drm_savage_private_t *dev_priv = dev->dev_private; struct drm_device_dma *dma = dev->dma; - drm_buf_t *dmabuf; + struct drm_buf *dmabuf; drm_savage_cmdbuf_t cmdbuf; drm_savage_cmd_header_t *kcmd_addr = NULL; drm_savage_cmd_header_t *first_draw_cmd; From 191c062933bb7a6f9dabf3fd639321e1dac88c50 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Mon, 16 Jul 2007 13:45:39 +1000 Subject: [PATCH 120/437] drm: remove drm_ref_t --- linux-core/drmP.h | 4 ++-- linux-core/drm_bo.c | 2 +- linux-core/drm_object.c | 8 ++++---- linux-core/drm_objects.h | 10 +++++----- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/linux-core/drmP.h b/linux-core/drmP.h index 9a79b0df..19e9d627 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -387,11 +387,11 @@ struct drm_buf_entry { */ #define DRM_FILE_HASH_ORDER 8 -typedef enum{ +enum drm_ref_type { _DRM_REF_USE=0, _DRM_REF_TYPE1, _DRM_NO_REF_TYPES -} drm_ref_t; +}; /** File private data */ diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index a81dfbde..681d37fe 100644 --- a/linux-core/drm_bo.c +++ 
b/linux-core/drm_bo.c @@ -1217,7 +1217,7 @@ static int drm_buffer_object_unmap(struct drm_file * priv, uint32_t handle) static void drm_buffer_user_object_unmap(struct drm_file * priv, struct drm_user_object * uo, - drm_ref_t action) + enum drm_ref_type action) { struct drm_buffer_object *bo = drm_user_object_entry(uo, struct drm_buffer_object, base); diff --git a/linux-core/drm_object.c b/linux-core/drm_object.c index 00627725..3d866333 100644 --- a/linux-core/drm_object.c +++ b/linux-core/drm_object.c @@ -106,7 +106,7 @@ int drm_remove_user_object(struct drm_file * priv, struct drm_user_object * item } static int drm_object_ref_action(struct drm_file * priv, struct drm_user_object * ro, - drm_ref_t action) + enum drm_ref_type action) { int ret = 0; @@ -125,7 +125,7 @@ static int drm_object_ref_action(struct drm_file * priv, struct drm_user_object } int drm_add_ref_object(struct drm_file * priv, struct drm_user_object * referenced_object, - drm_ref_t ref_action) + enum drm_ref_type ref_action) { int ret = 0; struct drm_ref_object *item; @@ -183,7 +183,7 @@ int drm_add_ref_object(struct drm_file * priv, struct drm_user_object * referenc struct drm_ref_object *drm_lookup_ref_object(struct drm_file * priv, struct drm_user_object * referenced_object, - drm_ref_t ref_action) + enum drm_ref_type ref_action) { struct drm_hash_item *hash; int ret; @@ -219,7 +219,7 @@ void drm_remove_ref_object(struct drm_file * priv, struct drm_ref_object * item) int ret; struct drm_user_object *user_object = (struct drm_user_object *) item->hash.key; struct drm_open_hash *ht = &priv->refd_object_hash[item->unref_action]; - drm_ref_t unref_action; + enum drm_ref_type unref_action; DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex); unref_action = item->unref_action; diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index 441c19f2..f792dc84 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -64,9 +64,9 @@ struct drm_user_object { struct drm_file *owner; void (*ref_struct_locked) (struct drm_file * priv, struct drm_user_object * obj, - drm_ref_t ref_action); + enum drm_ref_type ref_action); void (*unref) (struct drm_file * priv, struct drm_user_object * obj, - drm_ref_t unref_action); + enum drm_ref_type unref_action); void (*remove) (struct drm_file * priv, struct drm_user_object * obj); }; @@ -81,7 +81,7 @@ struct drm_ref_object { struct drm_hash_item hash; struct list_head list; atomic_t refcount; - drm_ref_t unref_action; + enum drm_ref_type unref_action; }; /** @@ -112,7 +112,7 @@ extern int drm_remove_user_object(struct drm_file * priv, struct drm_user_object extern int drm_add_ref_object(struct drm_file * priv, struct drm_user_object * referenced_object, - drm_ref_t ref_action); + enum drm_ref_type ref_action); /* * Must be called with the struct_mutex held. @@ -120,7 +120,7 @@ extern int drm_add_ref_object(struct drm_file * priv, struct drm_ref_object *drm_lookup_ref_object(struct drm_file * priv, struct drm_user_object * referenced_object, - drm_ref_t ref_action); + enum drm_ref_type ref_action); /* * Must be called with the struct_mutex held. * If "item" has been obtained by a call to drm_lookup_ref_object. 
You may not From 535e3dec8c61474be55588d2b5dc87b0301435f8 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Mon, 16 Jul 2007 13:46:37 +1000 Subject: [PATCH 121/437] drm: remove internal sman typedef --- linux-core/drm_sman.c | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/linux-core/drm_sman.c b/linux-core/drm_sman.c index ece80bed..118e82ae 100644 --- a/linux-core/drm_sman.c +++ b/linux-core/drm_sman.c @@ -38,11 +38,11 @@ #include "drm_sman.h" -typedef struct drm_owner_item { +struct drm_owner_item { struct drm_hash_item owner_hash; struct list_head sman_list; struct list_head mem_blocks; -} drm_owner_item_t; +}; void drm_sman_takedown(struct drm_sman * sman) { @@ -163,16 +163,16 @@ drm_sman_set_manager(struct drm_sman * sman, unsigned int manager, } EXPORT_SYMBOL(drm_sman_set_manager); -static drm_owner_item_t *drm_sman_get_owner_item(struct drm_sman * sman, +static struct drm_owner_item *drm_sman_get_owner_item(struct drm_sman * sman, unsigned long owner) { int ret; struct drm_hash_item *owner_hash_item; - drm_owner_item_t *owner_item; + struct drm_owner_item *owner_item; ret = drm_ht_find_item(&sman->owner_hash_tab, owner, &owner_hash_item); if (!ret) { - return drm_hash_entry(owner_hash_item, drm_owner_item_t, + return drm_hash_entry(owner_hash_item, struct drm_owner_item, owner_hash); } @@ -200,7 +200,7 @@ struct drm_memblock_item *drm_sman_alloc(struct drm_sman *sman, unsigned int man { void *tmp; struct drm_sman_mm *sman_mm; - drm_owner_item_t *owner_item; + struct drm_owner_item *owner_item; struct drm_memblock_item *memblock; BUG_ON(manager >= sman->num_managers); @@ -272,7 +272,7 @@ int drm_sman_free_key(struct drm_sman *sman, unsigned int key) EXPORT_SYMBOL(drm_sman_free_key); static void drm_sman_remove_owner(struct drm_sman *sman, - drm_owner_item_t *owner_item) + struct drm_owner_item *owner_item) { list_del(&owner_item->sman_list); drm_ht_remove_item(&sman->owner_hash_tab, &owner_item->owner_hash); @@ -283,13 +283,13 @@ int drm_sman_owner_clean(struct drm_sman *sman, unsigned long owner) { struct drm_hash_item *hash_item; - drm_owner_item_t *owner_item; + struct drm_owner_item *owner_item; if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) { return -1; } - owner_item = drm_hash_entry(hash_item, drm_owner_item_t, owner_hash); + owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash); if (owner_item->mem_blocks.next == &owner_item->mem_blocks) { drm_sman_remove_owner(sman, owner_item); return -1; @@ -301,7 +301,7 @@ int drm_sman_owner_clean(struct drm_sman *sman, unsigned long owner) EXPORT_SYMBOL(drm_sman_owner_clean); static void drm_sman_do_owner_cleanup(struct drm_sman *sman, - drm_owner_item_t *owner_item) + struct drm_owner_item *owner_item) { struct drm_memblock_item *entry, *next; @@ -316,14 +316,14 @@ void drm_sman_owner_cleanup(struct drm_sman *sman, unsigned long owner) { struct drm_hash_item *hash_item; - drm_owner_item_t *owner_item; + struct drm_owner_item *owner_item; if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) { return; } - owner_item = drm_hash_entry(hash_item, drm_owner_item_t, owner_hash); + owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash); drm_sman_do_owner_cleanup(sman, owner_item); } @@ -331,7 +331,7 @@ EXPORT_SYMBOL(drm_sman_owner_cleanup); void drm_sman_cleanup(struct drm_sman *sman) { - drm_owner_item_t *entry, *next; + struct drm_owner_item *entry, *next; unsigned int i; struct drm_sman_mm *sman_mm; From 
0accdc1f69885c6145b6224d26ccd72002f2a72e Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Mon, 16 Jul 2007 13:50:04 +1000 Subject: [PATCH 122/437] drm: fixup compat wrappers --- linux-core/drm_ioc32.c | 82 +++++++++++++++++++++--------------------- 1 file changed, 41 insertions(+), 41 deletions(-) diff --git a/linux-core/drm_ioc32.c b/linux-core/drm_ioc32.c index bbab3ea2..b1162785 100644 --- a/linux-core/drm_ioc32.c +++ b/linux-core/drm_ioc32.c @@ -82,7 +82,7 @@ static int compat_drm_version(struct file *file, unsigned int cmd, unsigned long arg) { drm_version32_t v32; - drm_version_t __user *version; + struct drm_version __user *version; int err; if (copy_from_user(&v32, (void __user *)arg, sizeof(v32))) @@ -129,7 +129,7 @@ static int compat_drm_getunique(struct file *file, unsigned int cmd, unsigned long arg) { drm_unique32_t uq32; - drm_unique_t __user *u; + struct drm_unique __user *u; int err; if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32))) @@ -159,7 +159,7 @@ static int compat_drm_setunique(struct file *file, unsigned int cmd, unsigned long arg) { drm_unique32_t uq32; - drm_unique_t __user *u; + struct drm_unique __user *u; if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32))) return -EFAULT; @@ -179,8 +179,8 @@ static int compat_drm_setunique(struct file *file, unsigned int cmd, typedef struct drm_map32 { u32 offset; /**< Requested physical address (0 for SAREA)*/ u32 size; /**< Requested physical size (bytes) */ - drm_map_type_t type; /**< Type of memory to map */ - drm_map_flags_t flags; /**< Flags */ + enum drm_map_type type; /**< Type of memory to map */ + enum drm_map_flags flags; /**< Flags */ u32 handle; /**< User-space: "Handle" to pass to mmap() */ int mtrr; /**< MTRR slot used */ } drm_map32_t; @@ -190,7 +190,7 @@ static int compat_drm_getmap(struct file *file, unsigned int cmd, { drm_map32_t __user *argp = (void __user *)arg; drm_map32_t m32; - drm_map_t __user *map; + struct drm_map __user *map; int idx, err; void *handle; @@ -228,7 +228,7 @@ static int compat_drm_addmap(struct file *file, unsigned int cmd, { drm_map32_t __user *argp = (void __user *)arg; drm_map32_t m32; - drm_map_t __user *map; + struct drm_map __user *map; int err; void *handle; @@ -270,7 +270,7 @@ static int compat_drm_rmmap(struct file *file, unsigned int cmd, unsigned long arg) { drm_map32_t __user *argp = (void __user *)arg; - drm_map_t __user *map; + struct drm_map __user *map; u32 handle; if (get_user(handle, &argp->handle)) @@ -300,7 +300,7 @@ static int compat_drm_getclient(struct file *file, unsigned int cmd, { drm_client32_t c32; drm_client32_t __user *argp = (void __user *)arg; - drm_client_t __user *client; + struct drm_client __user *client; int idx, err; if (get_user(idx, &argp->idx)) @@ -333,7 +333,7 @@ typedef struct drm_stats32 { u32 count; struct { u32 value; - drm_stat_type_t type; + enum drm_stat_type type; } data[15]; } drm_stats32_t; @@ -342,7 +342,7 @@ static int compat_drm_getstats(struct file *file, unsigned int cmd, { drm_stats32_t s32; drm_stats32_t __user *argp = (void __user *)arg; - drm_stats_t __user *stats; + struct drm_stats __user *stats; int i, err; stats = compat_alloc_user_space(sizeof(*stats)); @@ -379,7 +379,7 @@ static int compat_drm_addbufs(struct file *file, unsigned int cmd, unsigned long arg) { drm_buf_desc32_t __user *argp = (void __user *)arg; - drm_buf_desc_t __user *buf; + struct drm_buf_desc __user *buf; int err; unsigned long agp_start; @@ -411,7 +411,7 @@ static int compat_drm_markbufs(struct file *file, unsigned int cmd, { 
drm_buf_desc32_t b32; drm_buf_desc32_t __user *argp = (void __user *)arg; - drm_buf_desc_t __user *buf; + struct drm_buf_desc __user *buf; if (copy_from_user(&b32, argp, sizeof(b32))) return -EFAULT; @@ -440,8 +440,8 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd, drm_buf_info32_t req32; drm_buf_info32_t __user *argp = (void __user *)arg; drm_buf_desc32_t __user *to; - drm_buf_info_t __user *request; - drm_buf_desc_t __user *list; + struct drm_buf_info __user *request; + struct drm_buf_desc __user *list; size_t nbytes; int i, err; int count, actual; @@ -457,11 +457,11 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd, && !access_ok(VERIFY_WRITE, to, count * sizeof(drm_buf_desc32_t))) return -EFAULT; - nbytes = sizeof(*request) + count * sizeof(drm_buf_desc_t); + nbytes = sizeof(*request) + count * sizeof(struct drm_buf_desc); request = compat_alloc_user_space(nbytes); if (!access_ok(VERIFY_WRITE, request, nbytes)) return -EFAULT; - list = (drm_buf_desc_t *) (request + 1); + list = (struct drm_buf_desc *) (request + 1); if (__put_user(count, &request->count) || __put_user(list, &request->list)) @@ -477,7 +477,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd, if (count >= actual) for (i = 0; i < actual; ++i) if (__copy_in_user(&to[i], &list[i], - offsetof(drm_buf_desc_t, flags))) + offsetof(struct drm_buf_desc, flags))) return -EFAULT; if (__put_user(actual, &argp->count)) @@ -505,8 +505,8 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd, drm_buf_map32_t __user *argp = (void __user *)arg; drm_buf_map32_t req32; drm_buf_pub32_t __user *list32; - drm_buf_map_t __user *request; - drm_buf_pub_t __user *list; + struct drm_buf_map __user *request; + struct drm_buf_pub __user *list; int i, err; int count, actual; size_t nbytes; @@ -519,11 +519,11 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd, if (count < 0) return -EINVAL; - nbytes = sizeof(*request) + count * sizeof(drm_buf_pub_t); + nbytes = sizeof(*request) + count * sizeof(struct drm_buf_pub); request = compat_alloc_user_space(nbytes); if (!access_ok(VERIFY_WRITE, request, nbytes)) return -EFAULT; - list = (drm_buf_pub_t *) (request + 1); + list = (struct drm_buf_pub *) (request + 1); if (__put_user(count, &request->count) || __put_user(list, &request->list)) @@ -539,7 +539,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd, if (count >= actual) for (i = 0; i < actual; ++i) if (__copy_in_user(&list32[i], &list[i], - offsetof(drm_buf_pub_t, address)) + offsetof(struct drm_buf_pub, address)) || __get_user(addr, &list[i].address) || __put_user((unsigned long)addr, &list32[i].address)) @@ -562,7 +562,7 @@ static int compat_drm_freebufs(struct file *file, unsigned int cmd, unsigned long arg) { drm_buf_free32_t req32; - drm_buf_free_t __user *request; + struct drm_buf_free __user *request; drm_buf_free32_t __user *argp = (void __user *)arg; if (copy_from_user(&req32, argp, sizeof(req32))) @@ -589,7 +589,7 @@ static int compat_drm_setsareactx(struct file *file, unsigned int cmd, unsigned long arg) { drm_ctx_priv_map32_t req32; - drm_ctx_priv_map_t __user *request; + struct drm_ctx_priv_map __user *request; drm_ctx_priv_map32_t __user *argp = (void __user *)arg; if (copy_from_user(&req32, argp, sizeof(req32))) @@ -610,7 +610,7 @@ static int compat_drm_setsareactx(struct file *file, unsigned int cmd, static int compat_drm_getsareactx(struct file *file, unsigned int cmd, unsigned long arg) { - drm_ctx_priv_map_t __user 
*request; + struct drm_ctx_priv_map __user *request; drm_ctx_priv_map32_t __user *argp = (void __user *)arg; int err; unsigned int ctx_id; @@ -648,7 +648,7 @@ static int compat_drm_resctx(struct file *file, unsigned int cmd, { drm_ctx_res32_t __user *argp = (void __user *)arg; drm_ctx_res32_t res32; - drm_ctx_res_t __user *res; + struct drm_ctx_res __user *res; int err; if (copy_from_user(&res32, argp, sizeof(res32))) @@ -658,7 +658,7 @@ static int compat_drm_resctx(struct file *file, unsigned int cmd, if (!access_ok(VERIFY_WRITE, res, sizeof(*res))) return -EFAULT; if (__put_user(res32.count, &res->count) - || __put_user((drm_ctx_t __user *)(unsigned long)res32.contexts, + || __put_user((struct drm_ctx __user *) (unsigned long)res32.contexts, &res->contexts)) return -EFAULT; @@ -679,7 +679,7 @@ typedef struct drm_dma32 { int send_count; /**< Number of buffers to send */ u32 send_indices; /**< List of handles to buffers */ u32 send_sizes; /**< Lengths of data to send */ - drm_dma_flags_t flags; /**< Flags */ + enum drm_dma_flags flags; /**< Flags */ int request_count; /**< Number of buffers requested */ int request_size; /**< Desired size for buffers */ u32 request_indices; /**< Buffer information */ @@ -692,7 +692,7 @@ static int compat_drm_dma(struct file *file, unsigned int cmd, { drm_dma32_t d32; drm_dma32_t __user *argp = (void __user *)arg; - drm_dma_t __user *d; + struct drm_dma __user *d; int err; if (copy_from_user(&d32, argp, sizeof(d32))) @@ -740,7 +740,7 @@ static int compat_drm_agp_enable(struct file *file, unsigned int cmd, { drm_agp_mode32_t __user *argp = (void __user *)arg; drm_agp_mode32_t m32; - drm_agp_mode_t __user *mode; + struct drm_agp_mode __user *mode; if (get_user(m32.mode, &argp->mode)) return -EFAULT; @@ -772,7 +772,7 @@ static int compat_drm_agp_info(struct file *file, unsigned int cmd, { drm_agp_info32_t __user *argp = (void __user *)arg; drm_agp_info32_t i32; - drm_agp_info_t __user *info; + struct drm_agp_info __user *info; int err; info = compat_alloc_user_space(sizeof(*info)); @@ -813,7 +813,7 @@ static int compat_drm_agp_alloc(struct file *file, unsigned int cmd, { drm_agp_buffer32_t __user *argp = (void __user *)arg; drm_agp_buffer32_t req32; - drm_agp_buffer_t __user *request; + struct drm_agp_buffer __user *request; int err; if (copy_from_user(&req32, argp, sizeof(req32))) @@ -845,7 +845,7 @@ static int compat_drm_agp_free(struct file *file, unsigned int cmd, unsigned long arg) { drm_agp_buffer32_t __user *argp = (void __user *)arg; - drm_agp_buffer_t __user *request; + struct drm_agp_buffer __user *request; u32 handle; request = compat_alloc_user_space(sizeof(*request)); @@ -868,7 +868,7 @@ static int compat_drm_agp_bind(struct file *file, unsigned int cmd, { drm_agp_binding32_t __user *argp = (void __user *)arg; drm_agp_binding32_t req32; - drm_agp_binding_t __user *request; + struct drm_agp_binding __user *request; if (copy_from_user(&req32, argp, sizeof(req32))) return -EFAULT; @@ -887,7 +887,7 @@ static int compat_drm_agp_unbind(struct file *file, unsigned int cmd, unsigned long arg) { drm_agp_binding32_t __user *argp = (void __user *)arg; - drm_agp_binding_t __user *request; + struct drm_agp_binding __user *request; u32 handle; request = compat_alloc_user_space(sizeof(*request)); @@ -910,7 +910,7 @@ static int compat_drm_sg_alloc(struct file *file, unsigned int cmd, unsigned long arg) { drm_scatter_gather32_t __user *argp = (void __user *)arg; - drm_scatter_gather_t __user *request; + struct drm_scatter_gather __user *request; int err; unsigned 
long x; @@ -938,7 +938,7 @@ static int compat_drm_sg_free(struct file *file, unsigned int cmd, unsigned long arg) { drm_scatter_gather32_t __user *argp = (void __user *)arg; - drm_scatter_gather_t __user *request; + struct drm_scatter_gather __user *request; unsigned long x; request = compat_alloc_user_space(sizeof(*request)); @@ -953,13 +953,13 @@ static int compat_drm_sg_free(struct file *file, unsigned int cmd, } struct drm_wait_vblank_request32 { - drm_vblank_seq_type_t type; + enum drm_vblank_seq_type type; unsigned int sequence; u32 signal; }; struct drm_wait_vblank_reply32 { - drm_vblank_seq_type_t type; + enum drm_vblank_seq_type type; unsigned int sequence; s32 tval_sec; s32 tval_usec; @@ -975,7 +975,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd, { drm_wait_vblank32_t __user *argp = (void __user *)arg; drm_wait_vblank32_t req32; - drm_wait_vblank_t __user *request; + union drm_wait_vblank __user *request; int err; if (copy_from_user(&req32, argp, sizeof(req32))) From 23631fca09a9769d2391ebdec1f186cf33bf984e Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Mon, 16 Jul 2007 13:52:21 +1000 Subject: [PATCH 123/437] drm: fixup old kernel compat code --- linux-core/drm_compat.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c index 38ca497f..9a6da7e9 100644 --- a/linux-core/drm_compat.c +++ b/linux-core/drm_compat.c @@ -204,8 +204,8 @@ static struct page *drm_bo_vm_fault(struct vm_area_struct *vma, struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data; unsigned long page_offset; struct page *page = NULL; - drm_ttm_t *ttm; - drm_device_t *dev; + struct drm_ttm *ttm; + struct drm_device *dev; unsigned long pfn; int err; unsigned long bus_base; @@ -262,7 +262,7 @@ static struct page *drm_bo_vm_fault(struct vm_area_struct *vma, page_offset = (address - vma->vm_start) >> PAGE_SHIFT; if (bus_size) { - drm_mem_type_manager_t *man = &dev->bm.man[bo->mem.mem_type]; + struct drm_mem_type_manager *man = &dev->bm.man[bo->mem.mem_type]; pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset; vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma); @@ -354,8 +354,8 @@ struct page *drm_bo_vm_nopage(struct vm_area_struct *vma, struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data; unsigned long page_offset; struct page *page; - drm_ttm_t *ttm; - drm_device_t *dev; + struct drm_ttm *ttm; + struct drm_device *dev; mutex_lock(&bo->mutex); @@ -406,7 +406,7 @@ int drm_bo_map_bound(struct vm_area_struct *vma) BUG_ON(ret); if (bus_size) { - drm_mem_type_manager_t *man = &bo->dev->bm.man[bo->mem.mem_type]; + struct drm_mem_type_manager *man = &bo->dev->bm.man[bo->mem.mem_type]; unsigned long pfn = (bus_base + bus_offset) >> PAGE_SHIFT; pgprot_t pgprot = drm_io_prot(man->drm_bus_maptype, vma); ret = io_remap_pfn_range(vma, vma->vm_start, pfn, From 3f04fe7890fe7728e7df37a6b65ad328a46699bf Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Mon, 16 Jul 2007 01:53:06 -0700 Subject: [PATCH 124/437] Fix FreeBSD build. 
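The compat_drm_* and drm_compat.c hunks above are part of the tree-wide move away from the drm_*_t typedefs toward explicit struct/enum/union tags. As a minimal sketch of the pattern only (the type name below is invented purely for illustration and does not appear in any patch):

/* old style: the structure hides behind a typedef */
typedef struct drm_example { unsigned int handle; } drm_example_t;

/* new style: no typedef, callers spell out the tag */
struct drm_example { unsigned int handle; };
void drm_example_use(struct drm_example *ex);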
--- bsd-core/drmP.h | 1 + bsd-core/drm_scatter.c | 3 ++- shared-core/i915_dma.c | 14 +++++++++----- shared-core/i915_drv.h | 14 +++++++++----- 4 files changed, 21 insertions(+), 11 deletions(-) diff --git a/bsd-core/drmP.h b/bsd-core/drmP.h index 6e05b58f..b2ecd4d4 100644 --- a/bsd-core/drmP.h +++ b/bsd-core/drmP.h @@ -780,6 +780,7 @@ struct drm_device { int last_context; /* Last current context */ int vbl_queue; /* vbl wait channel */ atomic_t vbl_received; + atomic_t vbl_received2; #ifdef __FreeBSD__ struct sigio *buf_sigio; /* Processes waiting for SIGIO */ diff --git a/bsd-core/drm_scatter.c b/bsd-core/drm_scatter.c index 46222f18..99eae408 100644 --- a/bsd-core/drm_scatter.c +++ b/bsd-core/drm_scatter.c @@ -44,6 +44,7 @@ int drm_sg_alloc(drm_device_t * dev, drm_scatter_gather_t * request) { drm_sg_mem_t *entry; unsigned long pages; + int i; if ( dev->sg ) return EINVAL; @@ -52,7 +53,7 @@ int drm_sg_alloc(drm_device_t * dev, drm_scatter_gather_t * request) if ( !entry ) return ENOMEM; - pages = round_page(request.size) / PAGE_SIZE; + pages = round_page(request->size) / PAGE_SIZE; DRM_DEBUG( "sg size=%ld pages=%ld\n", request->size, pages ); entry->pages = pages; diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c index dbc5f959..535a061a 100644 --- a/shared-core/i915_dma.c +++ b/shared-core/i915_dma.c @@ -855,12 +855,14 @@ static int mmio_table_size = sizeof(mmio_table)/sizeof(drm_i915_mmio_entry_t); static int i915_mmio(DRM_IOCTL_ARGS) { - char buf[32]; + uint32_t buf[8]; DRM_DEVICE; drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_mmio_entry_t *e; drm_i915_mmio_t mmio; void __iomem *base; + int i; + if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); return DRM_ERR(EINVAL); @@ -878,7 +880,8 @@ static int i915_mmio(DRM_IOCTL_ARGS) case I915_MMIO_READ: if (!(e->flag & I915_MMIO_MAY_READ)) return DRM_ERR(EINVAL); - memcpy_fromio(buf, base, e->size); + for (i = 0; i < e->size / 4; i++) + buf[i] = I915_READ(e->offset + i * 4); if (DRM_COPY_TO_USER(mmio.data, buf, e->size)) { DRM_ERROR("DRM_COPY_TO_USER failed\n"); return DRM_ERR(EFAULT); @@ -892,7 +895,8 @@ static int i915_mmio(DRM_IOCTL_ARGS) DRM_ERROR("DRM_COPY_TO_USER failed\n"); return DRM_ERR(EFAULT); } - memcpy_toio(base, buf, e->size); + for (i = 0; i < e->size / 4; i++) + I915_WRITE(e->offset + i * 4, buf[i]); break; } return 0; @@ -910,11 +914,11 @@ static int i915_set_status_page(DRM_IOCTL_ARGS) } DRM_COPY_FROM_USER_IOCTL(hws, (drm_i915_hws_addr_t __user *) data, sizeof(hws)); - printk(KERN_DEBUG "set status page addr 0x%08x\n", (u32)hws.addr); + DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws.addr); dev_priv->status_gfx_addr = hws.addr & (0x1ffff<<12); - dev_priv->hws_map.offset = dev->agp->agp_info.aper_base + hws.addr; + dev_priv->hws_map.offset = dev->agp->base + hws.addr; dev_priv->hws_map.size = 4*1024; dev_priv->hws_map.type = 0; dev_priv->hws_map.flags = 0; diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h index e0432996..60b32b0d 100644 --- a/shared-core/i915_drv.h +++ b/shared-core/i915_drv.h @@ -39,6 +39,11 @@ #define DRIVER_DESC "Intel Graphics" #define DRIVER_DATE "20070209" +#if defined(__linux__) +#define I915_HAVE_FENCE +#define I915_HAVE_BUFFER +#endif + /* Interface history: * * 1.1: Original. 
@@ -52,13 +57,12 @@ * 1.9: Usable page flipping and triple buffering */ #define DRIVER_MAJOR 1 +#if defined(I915_HAVE_FENCE) && defined(I915_HAVE_BUFFER) #define DRIVER_MINOR 9 -#define DRIVER_PATCHLEVEL 0 - -#if defined(__linux__) -#define I915_HAVE_FENCE -#define I915_HAVE_BUFFER +#else +#define DRIVER_MINOR 6 #endif +#define DRIVER_PATCHLEVEL 0 typedef struct _drm_i915_ring_buffer { int tail_mask; From 70a8a60a3e81c18f9c6485102cb226c340c3cd73 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 16 Jul 2007 10:56:43 -0700 Subject: [PATCH 125/437] Correct errors in the usage of pci_map_page. With these changes the driver no longer instantly hard-locks a 6600LE on a PowerPC G5. I haven't tested any 3D apps yet. --- shared-core/nouveau_object.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/shared-core/nouveau_object.c b/shared-core/nouveau_object.c index 146c4f1c..ea0edb08 100644 --- a/shared-core/nouveau_object.c +++ b/shared-core/nouveau_object.c @@ -672,10 +672,10 @@ nouveau_gpuobj_dma_new(struct drm_device *dev, int channel, int class, pci_map_page(dev->pdev, dev->sg->pagelist[idx], 0, - DMA_31BIT_MASK, + PAGE_SIZE, DMA_BIDIRECTIONAL); - if (dev->sg->busaddr[idx] == 0) { + if (dma_mapping_error(dev->sg->busaddr[idx])) { return DRM_ERR(ENOMEM); } } From ec67c2def9af16bf9252d6742aec815b817f135a Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Sun, 15 Jul 2007 17:18:15 +1000 Subject: [PATCH 126/437] nouveau: G8x PCIEGART Actually a NV04-NV50 ttm backend for both PCI and PCIEGART, but PCIGART support for G8X using the current mm has been hacked on top of it. --- linux-core/Makefile.kernel | 1 + linux-core/nouveau_sgdma.c | 318 +++++++++++++++++++++++++++++++++ shared-core/nouveau_drv.h | 35 +++- shared-core/nouveau_fifo.c | 31 ++-- shared-core/nouveau_mem.c | 193 +++++++++++--------- shared-core/nouveau_notifier.c | 3 +- shared-core/nouveau_object.c | 136 ++++++++++---- shared-core/nouveau_state.c | 9 +- shared-core/nv50_graph.c | 2 +- 9 files changed, 591 insertions(+), 137 deletions(-) create mode 100644 linux-core/nouveau_sgdma.c diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel index be2641c8..5aa589cd 100644 --- a/linux-core/Makefile.kernel +++ b/linux-core/Makefile.kernel @@ -22,6 +22,7 @@ i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \ i915_buffer.o nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \ nouveau_object.o nouveau_irq.o nouveau_notifier.o \ + nouveau_sgdma.o \ nv04_timer.o \ nv04_mc.o nv40_mc.o nv50_mc.o \ nv04_fb.o nv10_fb.o nv40_fb.o \ diff --git a/linux-core/nouveau_sgdma.c b/linux-core/nouveau_sgdma.c new file mode 100644 index 00000000..a65317cd --- /dev/null +++ b/linux-core/nouveau_sgdma.c @@ -0,0 +1,318 @@ +#include "drmP.h" +#include "nouveau_drv.h" + +#define NV_CTXDMA_PAGE_SHIFT 12 +#define NV_CTXDMA_PAGE_SIZE (1 << NV_CTXDMA_PAGE_SHIFT) +#define NV_CTXDMA_PAGE_MASK (NV_CTXDMA_PAGE_SIZE - 1) + +struct nouveau_sgdma_be { + struct drm_ttm_backend backend; + struct drm_device *dev; + + int pages; + int pages_populated; + dma_addr_t *pagelist; + int is_bound; + + unsigned int pte_start; +}; + +static int +nouveau_sgdma_needs_ub_cache_adjust(struct drm_ttm_backend *be) +{ + return ((be->flags & DRM_BE_FLAG_BOUND_CACHED) ? 
0 : 1); +} + +static int +nouveau_sgdma_populate(struct drm_ttm_backend *be, unsigned long num_pages, + struct page **pages) +{ + struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; + int p, d, o; + + DRM_DEBUG("num_pages = %ld\n", num_pages); + + if (nvbe->pagelist) + return DRM_ERR(EINVAL); + nvbe->pages = (num_pages << PAGE_SHIFT) >> NV_CTXDMA_PAGE_SHIFT; + nvbe->pagelist = drm_alloc(nvbe->pages*sizeof(dma_addr_t), + DRM_MEM_PAGES); + + nvbe->pages_populated = d = 0; + for (p = 0; p < num_pages; p++) { + for (o = 0; o < PAGE_SIZE; o += NV_CTXDMA_PAGE_SIZE) { + nvbe->pagelist[d] = pci_map_page(nvbe->dev->pdev, + pages[p], o, + NV_CTXDMA_PAGE_SIZE, + PCI_DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(nvbe->pagelist[d])) { + be->func->clear(be); + DRM_ERROR("pci_map_page failed\n"); + return DRM_ERR(EINVAL); + } + nvbe->pages_populated = ++d; + } + } + + return 0; +} + +static void +nouveau_sgdma_clear(struct drm_ttm_backend *be) +{ + struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; + int d; + + DRM_DEBUG("\n"); + + if (nvbe && nvbe->pagelist) { + if (nvbe->is_bound) + be->func->unbind(be); + + for (d = 0; d < nvbe->pages_populated; d--) { + pci_unmap_page(nvbe->dev->pdev, nvbe->pagelist[d], + NV_CTXDMA_PAGE_SIZE, + PCI_DMA_BIDIRECTIONAL); + } + drm_free(nvbe->pagelist, nvbe->pages*sizeof(dma_addr_t), + DRM_MEM_PAGES); + } +} + +static int +nouveau_sgdma_bind(struct drm_ttm_backend *be, unsigned long pg_start, + int cached) +{ + struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; + struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private; + struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma; + uint64_t offset = (pg_start << PAGE_SHIFT); + uint32_t i; + + DRM_DEBUG("pg=0x%lx (0x%llx), cached=%d\n", pg_start, offset, cached); + + if (offset & NV_CTXDMA_PAGE_MASK) + return DRM_ERR(EINVAL); + nvbe->pte_start = (offset >> NV_CTXDMA_PAGE_SHIFT); + if (dev_priv->card_type < NV_50) + nvbe->pte_start += 2; /* skip ctxdma header */ + + for (i = nvbe->pte_start; i < nvbe->pte_start + nvbe->pages; i++) { + uint64_t pteval = nvbe->pagelist[i - nvbe->pte_start]; + + if (pteval & NV_CTXDMA_PAGE_MASK) { + DRM_ERROR("Bad pteval 0x%llx\n", pteval); + return DRM_ERR(EINVAL); + } + + if (dev_priv->card_type < NV_50) { + INSTANCE_WR(gpuobj, i, pteval | 3); + } else { + INSTANCE_WR(gpuobj, (i<<1)+0, pteval | 0x21); + INSTANCE_WR(gpuobj, (i<<1)+1, 0x00000000); + } + } + + nvbe->is_bound = 1; + return 0; +} + +static int +nouveau_sgdma_unbind(struct drm_ttm_backend *be) +{ + struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; + struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private; + + DRM_DEBUG("\n"); + + if (nvbe->is_bound) { + struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma; + unsigned int pte; + + pte = nvbe->pte_start; + while (pte < (nvbe->pte_start + nvbe->pages)) { + uint64_t pteval = dev_priv->gart_info.sg_dummy_bus; + + if (dev_priv->card_type < NV_50) { + INSTANCE_WR(gpuobj, pte, pteval | 3); + } else { + INSTANCE_WR(gpuobj, (pte<<1)+0, 0x00000010); + INSTANCE_WR(gpuobj, (pte<<1)+1, 0x00000004); + } + + pte++; + } + + nvbe->is_bound = 0; + } + + return 0; +} + +static void +nouveau_sgdma_destroy(struct drm_ttm_backend *be) +{ + DRM_DEBUG("\n"); + if (be) { + struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; + if (nvbe) { + if (nvbe->pagelist) + be->func->clear(be); + drm_ctl_free(nvbe, sizeof(*nvbe), DRM_MEM_TTM); + } + } +} + +static struct drm_ttm_backend_func nouveau_sgdma_backend = { + 
.needs_ub_cache_adjust = nouveau_sgdma_needs_ub_cache_adjust, + .populate = nouveau_sgdma_populate, + .clear = nouveau_sgdma_clear, + .bind = nouveau_sgdma_bind, + .unbind = nouveau_sgdma_unbind, + .destroy = nouveau_sgdma_destroy +}; + +struct drm_ttm_backend * +nouveau_sgdma_init_ttm(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_sgdma_be *nvbe; + + if (!dev_priv->gart_info.sg_ctxdma) + return NULL; + + nvbe = drm_ctl_calloc(1, sizeof(*nvbe), DRM_MEM_TTM); + if (!nvbe) + return NULL; + + nvbe->dev = dev; + + nvbe->backend.func = &nouveau_sgdma_backend; + nvbe->backend.mem_type = DRM_BO_MEM_TT; + + return &nvbe->backend; +} + +int +nouveau_sgdma_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *gpuobj = NULL; + uint32_t aper_size, obj_size; + int i, ret; + + if (dev_priv->card_type < NV_50) { + aper_size = (64 * 1024 * 1024); + obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4; + obj_size += 8; /* ctxdma header */ + } else { + /* 1 entire VM page table */ + aper_size = (512 * 1024 * 1024); + obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8; + } + + if ((ret = nouveau_gpuobj_new(dev, -1, obj_size, 16, + NVOBJ_FLAG_ALLOW_NO_REFS | + NVOBJ_FLAG_ZERO_ALLOC | + NVOBJ_FLAG_ZERO_FREE, &gpuobj))) { + DRM_ERROR("Error creating sgdma object: %d\n", ret); + return ret; + } + + if (dev_priv->card_type < NV_50) { + dev_priv->gart_info.sg_dummy_page = + alloc_page(GFP_KERNEL|__GFP_DMA32); + SetPageLocked(dev_priv->gart_info.sg_dummy_page); + dev_priv->gart_info.sg_dummy_bus = + pci_map_page(dev->pdev, + dev_priv->gart_info.sg_dummy_page, 0, + PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); + + /* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and + * confirmed to work on c51. Perhaps means NV_DMA_TARGET_PCIE + * on those cards? */ + INSTANCE_WR(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY | + (1 << 12) /* PT present */ | + (0 << 13) /* PT *not* linear */ | + (NV_DMA_ACCESS_RW << 14) | + (NV_DMA_TARGET_PCI << 16)); + INSTANCE_WR(gpuobj, 1, aper_size - 1); + for (i=2; i<2+(aper_size>>12); i++) { + INSTANCE_WR(gpuobj, i, + dev_priv->gart_info.sg_dummy_bus | 3); + } + } else { + for (i=0; igart_info.type = NOUVEAU_GART_SGDMA; + dev_priv->gart_info.aper_base = 0; + dev_priv->gart_info.aper_size = aper_size; + dev_priv->gart_info.sg_ctxdma = gpuobj; + return 0; +} + +void +nouveau_sgdma_takedown(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + if (dev_priv->gart_info.sg_dummy_page) { + pci_unmap_page(dev->pdev, dev_priv->gart_info.sg_dummy_bus, + NV_CTXDMA_PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); + unlock_page(dev_priv->gart_info.sg_dummy_page); + __free_page(dev_priv->gart_info.sg_dummy_page); + dev_priv->gart_info.sg_dummy_page = NULL; + dev_priv->gart_info.sg_dummy_bus = 0; + } + + nouveau_gpuobj_del(dev, &dev_priv->gart_info.sg_ctxdma); +} + +int +nouveau_sgdma_nottm_hack_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct drm_ttm_backend *be; + struct drm_scatter_gather sgreq; + int ret; + + dev_priv->gart_info.sg_be = nouveau_sgdma_init_ttm(dev); + if (!dev_priv->gart_info.sg_be) + return DRM_ERR(ENOMEM); + be = dev_priv->gart_info.sg_be; + + /* Hack the aperture size down to the amount of system memory + * we're going to bind into it. 
+ */ + if (dev_priv->gart_info.aper_size > 32*1024*1024) + dev_priv->gart_info.aper_size = 32*1024*1024; + + sgreq.size = dev_priv->gart_info.aper_size; + if ((ret = drm_sg_alloc(dev, &sgreq))) { + DRM_ERROR("drm_sg_alloc failed: %d\n", ret); + return ret; + } + dev_priv->gart_info.sg_handle = sgreq.handle; + + if ((ret = be->func->populate(be, dev->sg->pages, dev->sg->pagelist))) { + DRM_ERROR("failed populate: %d\n", ret); + return ret; + } + + if ((ret = be->func->bind(be, 0, 0))) { + DRM_ERROR("failed bind: %d\n", ret); + return ret; + } + + return 0; +} + +void +nouveau_sgdma_nottm_hack_takedown(struct drm_device *dev) +{ +} + diff --git a/shared-core/nouveau_drv.h b/shared-core/nouveau_drv.h index 4fa979e6..f68304c9 100644 --- a/shared-core/nouveau_drv.h +++ b/shared-core/nouveau_drv.h @@ -118,6 +118,10 @@ struct nouveau_fifo struct nouveau_gpuobj_ref *ramin_grctx; uint32_t pgraph_ctx [340]; /* XXX dynamic alloc ? */ + /* NV50 VM */ + struct nouveau_gpuobj *vm_pd; + struct nouveau_gpuobj_ref *vm_gart_pt; + /* Objects */ struct nouveau_gpuobj_ref *ramin; /* Private instmem */ struct mem_block *ramin_heap; /* Private PRAMIN heap */ @@ -220,8 +224,24 @@ struct drm_nouveau_private { /* base physical adresses */ uint64_t fb_phys; uint64_t fb_available_size; - uint64_t agp_phys; - uint64_t agp_available_size; + + struct { + enum { + NOUVEAU_GART_NONE = 0, + NOUVEAU_GART_AGP, + NOUVEAU_GART_SGDMA + } type; + uint64_t aper_base; + uint64_t aper_size; + + struct nouveau_gpuobj *sg_ctxdma; + struct page *sg_dummy_page; + dma_addr_t sg_dummy_bus; + + /* nottm hack */ + struct drm_ttm_backend *sg_be; + unsigned long sg_handle; + } gart_info; /* the mtrr covering the FB */ int fb_mtrr; @@ -307,6 +327,10 @@ extern int nouveau_gpuobj_dma_new(struct drm_device *, int channel, int class, uint64_t offset, uint64_t size, int access, int target, struct nouveau_gpuobj **); +extern int nouveau_gpuobj_gart_dma_new(struct drm_device *, int channel, + uint64_t offset, uint64_t size, + int access, struct nouveau_gpuobj **, + uint32_t *o_ret); extern int nouveau_gpuobj_gr_new(struct drm_device *, int channel, int class, struct nouveau_gpuobj **); extern int nouveau_ioctl_grobj_alloc(DRM_IOCTL_ARGS); @@ -317,6 +341,13 @@ extern void nouveau_irq_preinstall(struct drm_device*); extern void nouveau_irq_postinstall(struct drm_device*); extern void nouveau_irq_uninstall(struct drm_device*); +/* nouveau_sgdma.c */ +extern int nouveau_sgdma_init(struct drm_device *); +extern void nouveau_sgdma_takedown(struct drm_device *); +extern struct drm_ttm_backend *nouveau_sgdma_init_ttm(struct drm_device *); +extern int nouveau_sgdma_nottm_hack_init(struct drm_device *); +extern void nouveau_sgdma_nottm_hack_takedown(struct drm_device *); + /* nv04_fb.c */ extern int nv04_fb_init(struct drm_device *dev); extern void nv04_fb_takedown(struct drm_device *dev); diff --git a/shared-core/nouveau_fifo.c b/shared-core/nouveau_fifo.c index 56c25a6e..230c8298 100644 --- a/shared-core/nouveau_fifo.c +++ b/shared-core/nouveau_fifo.c @@ -211,24 +211,27 @@ nouveau_fifo_cmdbuf_alloc(struct drm_device *dev, int channel) } if (cb->flags & NOUVEAU_MEM_AGP) { - DRM_DEBUG("Creating CB in AGP memory\n"); + ret = nouveau_gpuobj_gart_dma_new(dev, channel, + cb->start, cb->size, + NV_DMA_ACCESS_RO, + &pushbuf, + &chan->pushbuf_base); + } else + if (cb->flags & NOUVEAU_MEM_PCI) { ret = nouveau_gpuobj_dma_new(dev, channel, - NV_CLASS_DMA_IN_MEMORY, - cb->start, cb->size, - NV_DMA_ACCESS_RO, NV_DMA_TARGET_AGP, &pushbuf); - } else if ( cb->flags & 
NOUVEAU_MEM_PCI) { - DRM_DEBUG("Creating CB in PCI memory\n"); - ret = nouveau_gpuobj_dma_new(dev, channel, - NV_CLASS_DMA_IN_MEMORY, - cb->start, - cb->size, - NV_DMA_ACCESS_RO, NV_DMA_TARGET_PCI_NONLINEAR, &pushbuf); + NV_CLASS_DMA_IN_MEMORY, + cb->start, cb->size, + NV_DMA_ACCESS_RO, + NV_DMA_TARGET_PCI_NONLINEAR, + &pushbuf); + chan->pushbuf_base = 0; } else if (dev_priv->card_type != NV_04) { ret = nouveau_gpuobj_dma_new (dev, channel, NV_CLASS_DMA_IN_MEMORY, cb->start, cb->size, NV_DMA_ACCESS_RO, NV_DMA_TARGET_VIDMEM, &pushbuf); + chan->pushbuf_base = 0; } else { /* NV04 cmdbuf hack, from original ddx.. not sure of it's * exact reason for existing :) PCI access to cmdbuf in @@ -239,6 +242,7 @@ nouveau_fifo_cmdbuf_alloc(struct drm_device *dev, int channel) cb->start + drm_get_resource_start(dev, 1), cb->size, NV_DMA_ACCESS_RO, NV_DMA_TARGET_PCI, &pushbuf); + chan->pushbuf_base = 0; } if (ret) { @@ -250,11 +254,12 @@ nouveau_fifo_cmdbuf_alloc(struct drm_device *dev, int channel) if ((ret = nouveau_gpuobj_ref_add(dev, channel, 0, pushbuf, &chan->pushbuf))) { DRM_ERROR("Error referencing push buffer ctxdma: %d\n", ret); + if (pushbuf != dev_priv->gart_info.sg_ctxdma) + nouveau_gpuobj_del(dev, &pushbuf); return ret; } - dev_priv->fifos[channel]->pushbuf_base = 0; - dev_priv->fifos[channel]->pushbuf_mem = cb; + chan->pushbuf_mem = cb; return 0; } diff --git a/shared-core/nouveau_mem.c b/shared-core/nouveau_mem.c index e5906867..7a923e17 100644 --- a/shared-core/nouveau_mem.c +++ b/shared-core/nouveau_mem.c @@ -209,12 +209,11 @@ void nouveau_mem_takedown(struct mem_block **heap) void nouveau_mem_close(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; + nouveau_mem_takedown(&dev_priv->agp_heap); nouveau_mem_takedown(&dev_priv->fb_heap); - if ( dev_priv->pci_heap ) - { + if (dev_priv->pci_heap) nouveau_mem_takedown(&dev_priv->pci_heap); - } } /* returns the amount of FB ram in bytes */ @@ -282,93 +281,68 @@ uint64_t nouveau_mem_fb_amount(struct drm_device *dev) return 0; } +static int +nouveau_mem_init_agp(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct drm_agp_info info; + struct drm_agp_mode mode; + struct drm_agp_buffer agp_req; + struct drm_agp_binding bind_req; + int ret; + ret = drm_agp_acquire(dev); + if (ret) { + DRM_ERROR("Unable to acquire AGP: %d\n", ret); + return ret; + } + + ret = drm_agp_info(dev, &info); + if (ret) { + DRM_ERROR("Unable to get AGP info: %d\n", ret); + return ret; + } + + /* see agp.h for the AGPSTAT_* modes available */ + mode.mode = info.mode; + ret = drm_agp_enable(dev, mode); + if (ret) { + DRM_ERROR("Unable to enable AGP: %d\n", ret); + return ret; + } + + agp_req.size = info.aperture_size; + agp_req.type = 0; + ret = drm_agp_alloc(dev, &agp_req); + if (ret) { + DRM_ERROR("Unable to alloc AGP: %d\n", ret); + return ret; + } + + bind_req.handle = agp_req.handle; + bind_req.offset = 0; + ret = drm_agp_bind(dev, &bind_req); + if (ret) { + DRM_ERROR("Unable to bind AGP: %d\n", ret); + return ret; + } + + dev_priv->gart_info.type = NOUVEAU_GART_AGP; + dev_priv->gart_info.aper_base = info.aperture_base; + dev_priv->gart_info.aper_size = info.aperture_size; + return 0; +} int nouveau_mem_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; uint32_t fb_size; - struct drm_scatter_gather sgreq; - dev_priv->agp_phys=0; - dev_priv->fb_phys=0; - sgreq . 
size = 4 << 20; //4MB of PCI scatter-gather zone + int ret = 0; - /* init AGP */ - dev_priv->agp_heap=NULL; - if (drm_device_is_agp(dev)) - { - int err; - struct drm_agp_info info; - struct drm_agp_mode mode; - struct drm_agp_buffer agp_req; - struct drm_agp_binding bind_req; + dev_priv->agp_heap = dev_priv->pci_heap = dev_priv->fb_heap = NULL; + dev_priv->fb_phys = 0; + dev_priv->gart_info.type = NOUVEAU_GART_NONE; - err = drm_agp_acquire(dev); - if (err) { - DRM_ERROR("Unable to acquire AGP: %d\n", err); - goto no_agp; - } - - err = drm_agp_info(dev, &info); - if (err) { - DRM_ERROR("Unable to get AGP info: %d\n", err); - goto no_agp; - } - - /* see agp.h for the AGPSTAT_* modes available */ - mode.mode = info.mode; - err = drm_agp_enable(dev, mode); - if (err) { - DRM_ERROR("Unable to enable AGP: %d\n", err); - goto no_agp; - } - - agp_req.size = info.aperture_size; - agp_req.type = 0; - err = drm_agp_alloc(dev, &agp_req); - if (err) { - DRM_ERROR("Unable to alloc AGP: %d\n", err); - goto no_agp; - } - - bind_req.handle = agp_req.handle; - bind_req.offset = 0; - err = drm_agp_bind(dev, &bind_req); - if (err) { - DRM_ERROR("Unable to bind AGP: %d\n", err); - goto no_agp; - } - - if (nouveau_mem_init_heap(&dev_priv->agp_heap, - 0, info.aperture_size)) - goto no_agp; - - dev_priv->agp_phys = info.aperture_base; - dev_priv->agp_available_size = info.aperture_size; - goto have_agp; - } - -no_agp: - - if ( dev_priv->card_type >= NV_50 ) goto no_pci; - - dev_priv->pci_heap = NULL; - DRM_DEBUG("Allocating sg memory for PCI DMA\n"); - if ( drm_sg_alloc(dev, &sgreq) ) - { - DRM_ERROR("Unable to allocate 4MB of scatter-gather pages for PCI DMA!"); - goto no_pci; - } - - if ( nouveau_mem_init_heap(&dev_priv->pci_heap, 0, - dev->sg->pages * PAGE_SIZE)) - { - DRM_ERROR("Unable to initialize pci_heap!"); - goto no_pci; - } - -no_pci: -have_agp: /* setup a mtrr over the FB */ dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1), nouveau_mem_fb_amount(dev), @@ -399,6 +373,54 @@ have_agp: dev_priv->fb_nomap_heap=NULL; } + /* Init AGP / NV50 PCIEGART */ + if (drm_device_is_agp(dev) && dev->agp) { + if ((ret = nouveau_mem_init_agp(dev))) + DRM_ERROR("Error initialising AGP: %d\n", ret); + } + + /*Note: this is *not* just NV50 code, but only used on NV50 for now */ + if (dev_priv->gart_info.type == NOUVEAU_GART_NONE && + dev_priv->card_type >= NV_50) { + ret = nouveau_sgdma_init(dev); + if (!ret) { + ret = nouveau_sgdma_nottm_hack_init(dev); + if (ret) + nouveau_sgdma_takedown(dev); + } + + if (ret) + DRM_ERROR("Error initialising SG DMA: %d\n", ret); + } + + if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) { + if (nouveau_mem_init_heap(&dev_priv->agp_heap, + 0, dev_priv->gart_info.aper_size)) { + if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) { + nouveau_sgdma_nottm_hack_takedown(dev); + nouveau_sgdma_takedown(dev); + } + } + } + + /* NV04-NV40 PCIEGART */ + if (!dev_priv->agp_heap && dev_priv->card_type < NV_50) { + struct drm_scatter_gather sgreq; + + DRM_DEBUG("Allocating sg memory for PCI DMA\n"); + sgreq.size = 4 << 20; //4MB of PCI scatter-gather zone + + if (drm_sg_alloc(dev, &sgreq)) { + DRM_ERROR("Unable to allocate 4MB of scatter-gather" + " pages for PCI DMA!"); + } else { + if (nouveau_mem_init_heap(&dev_priv->pci_heap, 0, + dev->sg->pages * PAGE_SIZE)) { + DRM_ERROR("Unable to initialize pci_heap!"); + } + } + } + return 0; } @@ -473,9 +495,14 @@ alloc_ok: int ret = 0; block->flags|=NOUVEAU_MEM_MAPPED; - if (type == NOUVEAU_MEM_AGP) + if (type == NOUVEAU_MEM_AGP) { + if 
(dev_priv->gart_info.type != NOUVEAU_GART_SGDMA) ret = drm_addmap(dev, block->start, block->size, _DRM_AGP, 0, &block->map); + else + ret = drm_addmap(dev, block->start, block->size, + _DRM_SCATTER_GATHER, 0, &block->map); + } else if (type == NOUVEAU_MEM_FB) ret = drm_addmap(dev, block->start + dev_priv->fb_phys, block->size, _DRM_FRAME_BUFFER, diff --git a/shared-core/nouveau_notifier.c b/shared-core/nouveau_notifier.c index 36dba654..238e3c8b 100644 --- a/shared-core/nouveau_notifier.c +++ b/shared-core/nouveau_notifier.c @@ -37,7 +37,8 @@ nouveau_notifier_init_channel(struct drm_device *dev, int channel, DRMFILE filp) int flags, ret; /*TODO: PCI notifier blocks */ - if (dev_priv->agp_heap) + if (dev_priv->agp_heap && + dev_priv->gart_info.type != NOUVEAU_GART_SGDMA) flags = NOUVEAU_MEM_AGP | NOUVEAU_MEM_FB_ACCEPTABLE; else flags = NOUVEAU_MEM_FB; diff --git a/shared-core/nouveau_object.c b/shared-core/nouveau_object.c index ea0edb08..f0025d7a 100644 --- a/shared-core/nouveau_object.c +++ b/shared-core/nouveau_object.c @@ -596,7 +596,7 @@ nouveau_gpuobj_dma_new(struct drm_device *dev, int channel, int class, switch (target) { case NV_DMA_TARGET_AGP: - offset += dev_priv->agp_phys; + offset += dev_priv->gart_info.aper_base; break; case NV_DMA_TARGET_PCI_NONLINEAR: /*assume the "offset" is a virtual memory address*/ @@ -689,10 +689,20 @@ nouveau_gpuobj_dma_new(struct drm_device *dev, int channel, int class, } } } else { - INSTANCE_WR(*gpuobj, 0, 0x00190000 | class); + uint32_t flags0, flags5; + + if (target == NV_DMA_TARGET_VIDMEM) { + flags0 = 0x00190000; + flags5 = 0x00010000; + } else { + flags0 = 0x7fc00000; + flags5 = 0x00080000; + } + + INSTANCE_WR(*gpuobj, 0, flags0 | class); INSTANCE_WR(*gpuobj, 1, offset + size - 1); INSTANCE_WR(*gpuobj, 2, offset); - INSTANCE_WR(*gpuobj, 5, 0x00010000); + INSTANCE_WR(*gpuobj, 5, flags5); } (*gpuobj)->engine = NVOBJ_ENGINE_SW; @@ -700,6 +710,42 @@ nouveau_gpuobj_dma_new(struct drm_device *dev, int channel, int class, return 0; } +int +nouveau_gpuobj_gart_dma_new(struct drm_device *dev, int channel, + uint64_t offset, uint64_t size, int access, + struct nouveau_gpuobj **gpuobj, + uint32_t *o_ret) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + int ret; + + if (dev_priv->gart_info.type == NOUVEAU_GART_AGP || + (dev_priv->card_type >= NV_50 && + dev_priv->gart_info.type == NOUVEAU_GART_SGDMA)) { + ret = nouveau_gpuobj_dma_new(dev, channel, + NV_CLASS_DMA_IN_MEMORY, + offset, size, access, + NV_DMA_TARGET_AGP, gpuobj); + if (o_ret) + *o_ret = 0; + } else + if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) { + *gpuobj = dev_priv->gart_info.sg_ctxdma; + if (offset & ~0xffffffffULL) { + DRM_ERROR("obj offset exceeds 32-bits\n"); + return DRM_ERR(EINVAL); + } + if (o_ret) + *o_ret = (uint32_t)offset; + ret = (*gpuobj != NULL) ? 0 : DRM_ERR(EINVAL); + } else { + DRM_ERROR("Invalid GART type %d\n", dev_priv->gart_info.type); + return DRM_ERR(EINVAL); + } + + return ret; +} + /* Context objects in the instance RAM have the following structure. * On NV40 they are 32 byte long, on NV30 and smaller 16 bytes. 
@@ -857,7 +903,7 @@ nouveau_gpuobj_channel_init(struct drm_device *dev, int channel, struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo *chan = dev_priv->fifos[channel]; struct nouveau_gpuobj *vram = NULL, *tt = NULL; - int ret; + int ret, i; DRM_DEBUG("ch%d vram=0x%08x tt=0x%08x\n", channel, vram_h, tt_h); @@ -870,6 +916,29 @@ nouveau_gpuobj_channel_init(struct drm_device *dev, int channel, return ret; } + /* NV50 VM, point offset 0-512MiB at shared PCIEGART table */ + if (dev_priv->card_type >= NV_50) { + uint32_t vm_offset; + + vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 0x1400 : 0x200; + vm_offset += chan->ramin->gpuobj->im_pramin->start; + if ((ret = nouveau_gpuobj_new_fake(dev, vm_offset, 0x4000, + 0, &chan->vm_pd, NULL))) + return ret; + for (i=0; i<0x4000; i+=8) { + INSTANCE_WR(chan->vm_pd, (i+0)/4, 0x00000000); + INSTANCE_WR(chan->vm_pd, (i+4)/4, 0xdeadcafe); + } + + if ((ret = nouveau_gpuobj_ref_add(dev, -1, 0, + dev_priv->gart_info.sg_ctxdma, + &chan->vm_gart_pt))) + return ret; + INSTANCE_WR(chan->vm_pd, (0+0)/4, + chan->vm_gart_pt->instance | 0x03); + INSTANCE_WR(chan->vm_pd, (0+4)/4, 0x00000000); + } + /* RAMHT */ if (dev_priv->card_type < NV_50) { ret = nouveau_gpuobj_ref_add(dev, -1, 0, dev_priv->ramht, @@ -899,40 +968,34 @@ nouveau_gpuobj_channel_init(struct drm_device *dev, int channel, return ret; } - if (dev_priv->agp_heap) { - /* AGPGART ctxdma */ - if ((ret = nouveau_gpuobj_dma_new(dev, channel, NV_CLASS_DMA_IN_MEMORY, - 0, dev_priv->agp_available_size, - NV_DMA_ACCESS_RW, - NV_DMA_TARGET_AGP, &tt))) { - DRM_ERROR("Error creating AGP TT ctxdma: %d\n", DRM_ERR(ENOMEM)); - return DRM_ERR(ENOMEM); - } - - ret = nouveau_gpuobj_ref_add(dev, channel, tt_h, tt, NULL); - if (ret) { - DRM_ERROR("Error referencing AGP TT ctxdma: %d\n", ret); - return ret; - } + /* TT memory ctxdma */ + if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) { + ret = nouveau_gpuobj_gart_dma_new(dev, channel, 0, + dev_priv->gart_info.aper_size, + NV_DMA_ACCESS_RW, &tt, NULL); + } else + if (dev_priv->pci_heap) { + ret = nouveau_gpuobj_dma_new(dev, channel, + NV_CLASS_DMA_IN_MEMORY, + 0, dev->sg->pages * PAGE_SIZE, + NV_DMA_ACCESS_RW, + NV_DMA_TARGET_PCI_NONLINEAR, &tt); + } else { + DRM_ERROR("Invalid GART type %d\n", dev_priv->gart_info.type); + ret = DRM_ERR(EINVAL); } - else if ( dev_priv->pci_heap) { - if (dev_priv -> card_type >= NV_50 ) return 0; /*no PCIGART for NV50*/ - /*PCI*/ - if((ret = nouveau_gpuobj_dma_new(dev, channel, NV_CLASS_DMA_IN_MEMORY, - 0, dev->sg->pages * PAGE_SIZE, - NV_DMA_ACCESS_RW, - NV_DMA_TARGET_PCI_NONLINEAR, &tt))) { - DRM_ERROR("Error creating PCI TT ctxdma: %d\n", DRM_ERR(ENOMEM)); - return 0; //this is noncritical - } - - ret = nouveau_gpuobj_ref_add(dev, channel, tt_h, tt, NULL); - if (ret) { - DRM_ERROR("Error referencing PCI TT ctxdma: %d\n", ret); - return ret; - } + if (ret) { + DRM_ERROR("Error creating TT ctxdma: %d\n", ret); + return ret; } + + ret = nouveau_gpuobj_ref_add(dev, channel, tt_h, tt, NULL); + if (ret) { + DRM_ERROR("Error referencing TT ctxdma: %d\n", ret); + return ret; + } + return 0; } @@ -951,6 +1014,9 @@ nouveau_gpuobj_channel_takedown(struct drm_device *dev, int channel) } nouveau_gpuobj_ref_del(dev, &chan->ramht); + nouveau_gpuobj_del(dev, &chan->vm_pd); + nouveau_gpuobj_ref_del(dev, &chan->vm_gart_pt); + if (chan->ramin_heap) nouveau_mem_takedown(&chan->ramin_heap); if (chan->ramin) diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c index 69e9c221..4e3b39dd 100644 --- 
a/shared-core/nouveau_state.c +++ b/shared-core/nouveau_state.c @@ -332,7 +332,12 @@ static void nouveau_card_takedown(struct drm_device *dev) engine->fb.takedown(dev); engine->timer.takedown(dev); engine->mc.takedown(dev); + + nouveau_sgdma_nottm_hack_takedown(dev); + nouveau_sgdma_takedown(dev); + nouveau_gpuobj_takedown(dev); + nouveau_mem_close(dev); engine->instmem.takedown(dev); @@ -442,7 +447,7 @@ int nouveau_ioctl_getparam(DRM_IOCTL_ARGS) getparam.value=dev_priv->fb_phys; break; case NOUVEAU_GETPARAM_AGP_PHYSICAL: - getparam.value=dev_priv->agp_phys; + getparam.value=dev_priv->gart_info.aper_base; break; case NOUVEAU_GETPARAM_PCI_PHYSICAL: if ( dev -> sg ) @@ -457,7 +462,7 @@ int nouveau_ioctl_getparam(DRM_IOCTL_ARGS) getparam.value=dev_priv->fb_available_size; break; case NOUVEAU_GETPARAM_AGP_SIZE: - getparam.value=dev_priv->agp_available_size; + getparam.value=dev_priv->gart_info.aper_size; break; default: DRM_ERROR("unknown parameter %lld\n", getparam.param); diff --git a/shared-core/nv50_graph.c b/shared-core/nv50_graph.c index 54fe498b..6a04c158 100644 --- a/shared-core/nv50_graph.c +++ b/shared-core/nv50_graph.c @@ -271,7 +271,7 @@ nv50_graph_load_context(struct drm_device *dev, int channel) struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo *chan = dev_priv->fifos[channel]; uint32_t inst = ((chan->ramin->instance >> 12) | (1<<31)); - int ret; + int ret; (void)ret; DRM_DEBUG("ch%d\n", channel); From 4575d5b8f18fef8cd19e7884bf8dab5e8f71ec9e Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 16 Jul 2007 20:56:11 -0700 Subject: [PATCH 127/437] Massive log message clean up in xgi_submit_cmdlist. --- linux-core/xgi_cmdlist.c | 97 ++++++---------------------------------- 1 file changed, 13 insertions(+), 84 deletions(-) diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index 7be0ac48..61373469 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -64,131 +64,61 @@ int xgi_cmdlist_initialize(struct xgi_info * info, size_t size) void xgi_submit_cmdlist(struct xgi_info * info, struct xgi_cmd_info * pCmdInfo) { - unsigned int beginPort; - /** XGI_INFO("Jong-xgi_submit_cmdlist-Begin \n"); **/ + const unsigned int beginPort = getCurBatchBeginPort(pCmdInfo); - /* Jong 05/25/2006 */ - /* return; */ - - beginPort = getCurBatchBeginPort(pCmdInfo); - XGI_INFO("Jong-xgi_submit_cmdlist-After getCurBatchBeginPort() \n"); - - /* Jong 05/25/2006 */ - /* return; */ + XGI_INFO("After getCurBatchBeginPort()\n"); if (s_cmdring._lastBatchStartAddr == 0) { - unsigned int portOffset; + const unsigned int portOffset = BASE_3D_ENG + beginPort; /* Jong 06/13/2006; remove marked for system hang test */ /* xgi_waitfor_pci_idle(info); */ - /* Jong 06132006; BASE_3D_ENG=0x2800 */ - /* beginPort: 2D: 0x30 */ - portOffset = BASE_3D_ENG + beginPort; - // Enable PCI Trigger Mode - XGI_INFO("Jong-xgi_submit_cmdlist-Enable PCI Trigger Mode \n"); + XGI_INFO("Enable PCI Trigger Mode \n"); - /* Jong 05/25/2006 */ - /* return; */ - - /* Jong 06/13/2006; M2REG_AUTO_LINK_SETTING_ADDRESS=0x10 */ - XGI_INFO("Jong-M2REG_AUTO_LINK_SETTING_ADDRESS=0x%lx \n", - M2REG_AUTO_LINK_SETTING_ADDRESS); - XGI_INFO("Jong-M2REG_CLEAR_COUNTERS_MASK=0x%lx \n", - M2REG_CLEAR_COUNTERS_MASK); - XGI_INFO - ("Jong-(M2REG_AUTO_LINK_SETTING_ADDRESS << 22)=0x%lx \n", - (M2REG_AUTO_LINK_SETTING_ADDRESS << 22)); - XGI_INFO("Jong-M2REG_PCI_TRIGGER_MODE_MASK=0x%lx \n\n", - M2REG_PCI_TRIGGER_MODE_MASK); /* Jong 06/14/2006; 0x400001a */ - XGI_INFO - ("Jong-(M2REG_AUTO_LINK_SETTING_ADDRESS << 
22)|M2REG_CLEAR_COUNTERS_MASK|0x08|M2REG_PCI_TRIGGER_MODE_MASK=0x%lx \n", - (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | - M2REG_CLEAR_COUNTERS_MASK | 0x08 | - M2REG_PCI_TRIGGER_MODE_MASK); dwWriteReg(BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS, (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | M2REG_CLEAR_COUNTERS_MASK | 0x08 | M2REG_PCI_TRIGGER_MODE_MASK); - /* Jong 05/25/2006 */ - XGI_INFO("Jong-xgi_submit_cmdlist-After dwWriteReg() \n"); - /* return; *//* OK */ - /* Jong 06/14/2006; 0x400000a */ - XGI_INFO - ("Jong-(M2REG_AUTO_LINK_SETTING_ADDRESS << 22)|0x08|M2REG_PCI_TRIGGER_MODE_MASK=0x%lx \n", - (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | 0x08 | - M2REG_PCI_TRIGGER_MODE_MASK); dwWriteReg(BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS, (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | 0x08 | M2REG_PCI_TRIGGER_MODE_MASK); // Send PCI begin command - XGI_INFO("Jong-xgi_submit_cmdlist-Send PCI begin command \n"); - /* return; */ + XGI_INFO("Send PCI begin command \n"); - XGI_INFO("Jong-xgi_submit_cmdlist-portOffset=%d \n", - portOffset); - XGI_INFO("Jong-xgi_submit_cmdlist-beginPort=%d \n", beginPort); + XGI_INFO("portOffset=%d, beginPort=%d\n", + portOffset, beginPort); /* beginPort = 48; */ /* 0xc100000 */ dwWriteReg(portOffset, (beginPort << 22) + (BEGIN_VALID_MASK) + pCmdInfo->_curDebugID); - XGI_INFO("Jong-(beginPort<<22)=0x%lx \n", (beginPort << 22)); - XGI_INFO("Jong-(BEGIN_VALID_MASK)=0x%lx \n", BEGIN_VALID_MASK); - XGI_INFO("Jong- pCmdInfo->_curDebugID=0x%lx \n", - pCmdInfo->_curDebugID); - XGI_INFO - ("Jong- (beginPort<<22) + (BEGIN_VALID_MASK) + pCmdInfo->_curDebugID=0x%lx \n", - (beginPort << 22) + (BEGIN_VALID_MASK) + - pCmdInfo->_curDebugID); - XGI_INFO - ("Jong-xgi_submit_cmdlist-Send PCI begin command- After \n"); - /* return; *//* OK */ + + XGI_INFO("Send PCI begin command- After\n"); /* 0x80000024 */ dwWriteReg(portOffset + 4, BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize); - XGI_INFO("Jong- BEGIN_LINK_ENABLE_MASK=0x%lx \n", - BEGIN_LINK_ENABLE_MASK); - XGI_INFO("Jong- pCmdInfo->_firstSize=0x%lx \n", - pCmdInfo->_firstSize); - XGI_INFO - ("Jong- BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize=0x%lx \n", - BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize); - XGI_INFO("Jong-xgi_submit_cmdlist-dwWriteReg-1 \n"); /* 0x1010000 */ dwWriteReg(portOffset + 8, (pCmdInfo->_firstBeginAddr >> 4)); - XGI_INFO("Jong- pCmdInfo->_firstBeginAddr=0x%lx \n", - pCmdInfo->_firstBeginAddr); - XGI_INFO("Jong- (pCmdInfo->_firstBeginAddr >> 4)=0x%lx \n", - (pCmdInfo->_firstBeginAddr >> 4)); - XGI_INFO("Jong-xgi_submit_cmdlist-dwWriteReg-2 \n"); - - /* Jong 06/13/2006 */ - xgi_dump_register(info); /* Jong 06/12/2006; system hang; marked for test */ dwWriteReg(portOffset + 12, 0); - XGI_INFO("Jong-xgi_submit_cmdlist-dwWriteReg-3 \n"); /* Jong 06/13/2006; remove marked for system hang test */ /* xgi_waitfor_pci_idle(info); */ } else { u32 *lastBatchVirtAddr; - XGI_INFO - ("Jong-xgi_submit_cmdlist-s_cmdring._lastBatchStartAddr != 0 \n"); - - /* Jong 05/25/2006 */ - /* return; */ + XGI_INFO("s_cmdring._lastBatchStartAddr != 0\n"); if (pCmdInfo->_firstBeginType == BTYPE_3D) { addFlush2D(info); @@ -215,14 +145,13 @@ void xgi_submit_cmdlist(struct xgi_info * info, struct xgi_cmd_info * pCmdInfo) /* Jong 06/12/2006; system hang; marked for test */ triggerHWCommandList(info, pCmdInfo->_beginCount); + } else { + XGI_ERROR("lastBatchVirtAddr is NULL\n"); } - - XGI_INFO - ("Jong-xgi_submit_cmdlist-s_cmdring._lastBatchStartAddr != 0 - End\n"); } s_cmdring._lastBatchStartAddr = pCmdInfo->_lastBeginAddr; - 
XGI_INFO("Jong-xgi_submit_cmdlist-End \n"); + XGI_INFO("End\n"); } /* From 658ff2daf3d2a080da2d859f522a627aef841637 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 16 Jul 2007 20:58:43 -0700 Subject: [PATCH 128/437] Eliminate several useless ioctls and associated cruft. The ioctlss XGI_ESC_DEVICE_INFO, XGI_ESC_MEM_COLLECT, XGI_ESC_PCIE_CHECK, XGI_ESC_GET_SCREEN_INFO, XGI_ESC_PUT_SCREEN_INFO, XGI_ESC_MMIO_INFO, and XGI_ESC_SAREA_INFO, are completely unnecessary. The will be doubly useless when the driver is converted to the DRM infrastructure. --- linux-core/xgi_drv.c | 28 ------------------ linux-core/xgi_drv.h | 3 -- linux-core/xgi_misc.c | 66 ------------------------------------------- linux-core/xgi_misc.h | 5 ---- linux-core/xgi_pcie.c | 29 ------------------- shared-core/xgi_drm.h | 60 ++++++++++++--------------------------- 6 files changed, 18 insertions(+), 173 deletions(-) diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index b3425c75..bd39dfdc 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -877,10 +877,6 @@ int xgi_kern_ioctl(struct inode *inode, struct file *filp, arg_size); switch (_IOC_NR(cmd)) { - case XGI_ESC_DEVICE_INFO: - XGI_INFO("Jong-xgi_ioctl_get_device_info \n"); - xgi_get_device_info(info, (struct xgi_chip_info *)arg_copy); - break; case XGI_ESC_POST_VBIOS: XGI_INFO("Jong-xgi_ioctl_post_vbios \n"); break; @@ -892,10 +888,6 @@ int xgi_kern_ioctl(struct inode *inode, struct file *filp, XGI_INFO("Jong-xgi_ioctl_fb_free \n"); xgi_fb_free(info, *(unsigned long *)arg_copy); break; - case XGI_ESC_MEM_COLLECT: - XGI_INFO("Jong-xgi_ioctl_mem_collect \n"); - xgi_mem_collect(info, (unsigned int *)arg_copy); - break; case XGI_ESC_PCIE_ALLOC: XGI_INFO("Jong-xgi_ioctl_pcie_alloc \n"); xgi_pcie_alloc(info, alloc, 0); @@ -905,30 +897,10 @@ int xgi_kern_ioctl(struct inode *inode, struct file *filp, *((unsigned long *)arg_copy)); xgi_pcie_free(info, *((unsigned long *)arg_copy)); break; - case XGI_ESC_PCIE_CHECK: - XGI_INFO("Jong-xgi_pcie_heap_check \n"); - xgi_pcie_heap_check(); - break; - case XGI_ESC_GET_SCREEN_INFO: - XGI_INFO("Jong-xgi_get_screen_info \n"); - xgi_get_screen_info(info, (struct xgi_screen_info *)arg_copy); - break; - case XGI_ESC_PUT_SCREEN_INFO: - XGI_INFO("Jong-xgi_put_screen_info \n"); - xgi_put_screen_info(info, (struct xgi_screen_info *)arg_copy); - break; - case XGI_ESC_MMIO_INFO: - XGI_INFO("Jong-xgi_ioctl_get_mmio_info \n"); - xgi_get_mmio_info(info, (struct xgi_mmio_info *)arg_copy); - break; case XGI_ESC_GE_RESET: XGI_INFO("Jong-xgi_ioctl_ge_reset \n"); xgi_ge_reset(info); break; - case XGI_ESC_SAREA_INFO: - XGI_INFO("Jong-xgi_ioctl_sarea_info \n"); - xgi_sarea_info(info, (struct xgi_sarea_info *)arg_copy); - break; case XGI_ESC_DUMP_REGISTER: XGI_INFO("Jong-xgi_ioctl_dump_register \n"); xgi_dump_register(info); diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 983ed0a9..382bb7a6 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -110,8 +110,6 @@ struct xgi_info { struct xgi_aperture mmio; struct xgi_aperture fb; struct xgi_aperture pcie; - struct xgi_screen_info scrn_info; - struct xgi_sarea_info sarea_info; /* look up table parameters */ u32 *lut_base; @@ -207,7 +205,6 @@ extern void xgi_pcie_heap_cleanup(struct xgi_info * info); extern void xgi_pcie_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, pid_t pid); extern void xgi_pcie_free(struct xgi_info * info, unsigned long offset); -extern void xgi_pcie_heap_check(void); extern struct xgi_pcie_block *xgi_find_pcie_block(struct xgi_info 
* info, unsigned long address); extern void *xgi_find_pcie_virt(struct xgi_info * info, unsigned long address); diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index 2d310a2f..bb2813ca 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -31,78 +31,12 @@ #include "xgi_regs.h" #include "xgi_pcie.h" -void xgi_get_device_info(struct xgi_info * info, struct xgi_chip_info * req) -{ - req->device_id = info->dev->device; - req->device_name[0] = 'x'; - req->device_name[1] = 'g'; - req->device_name[2] = '4'; - req->device_name[3] = '7'; - req->vendor_id = info->dev->vendor; - req->curr_display_mode = 0; - req->fb_size = info->fb.size; - req->sarea_bus_addr = info->sarea_info.bus_addr; - req->sarea_size = info->sarea_info.size; -} - -void xgi_get_mmio_info(struct xgi_info * info, struct xgi_mmio_info * req) -{ - req->mmio_base = info->mmio.base; - req->size = info->mmio.size; -} - -void xgi_put_screen_info(struct xgi_info * info, struct xgi_screen_info * req) -{ - info->scrn_info.scrn_start = req->scrn_start; - info->scrn_info.scrn_xres = req->scrn_xres; - info->scrn_info.scrn_yres = req->scrn_yres; - info->scrn_info.scrn_bpp = req->scrn_bpp; - info->scrn_info.scrn_pitch = req->scrn_pitch; - - XGI_INFO("info->scrn_info.scrn_start: 0x%lx" - "info->scrn_info.scrn_xres: 0x%lx" - "info->scrn_info.scrn_yres: 0x%lx" - "info->scrn_info.scrn_bpp: 0x%lx" - "info->scrn_info.scrn_pitch: 0x%lx\n", - info->scrn_info.scrn_start, - info->scrn_info.scrn_xres, - info->scrn_info.scrn_yres, - info->scrn_info.scrn_bpp, info->scrn_info.scrn_pitch); -} - -void xgi_get_screen_info(struct xgi_info * info, struct xgi_screen_info * req) -{ - req->scrn_start = info->scrn_info.scrn_start; - req->scrn_xres = info->scrn_info.scrn_xres; - req->scrn_yres = info->scrn_info.scrn_yres; - req->scrn_bpp = info->scrn_info.scrn_bpp; - req->scrn_pitch = info->scrn_info.scrn_pitch; - - XGI_INFO("req->scrn_start: 0x%lx" - "req->scrn_xres: 0x%lx" - "req->scrn_yres: 0x%lx" - "req->scrn_bpp: 0x%lx" - "req->scrn_pitch: 0x%lx\n", - req->scrn_start, - req->scrn_xres, - req->scrn_yres, req->scrn_bpp, req->scrn_pitch); -} - void xgi_ge_reset(struct xgi_info * info) { xgi_disable_ge(info); xgi_enable_ge(info); } -void xgi_sarea_info(struct xgi_info * info, struct xgi_sarea_info * req) -{ - info->sarea_info.bus_addr = req->bus_addr; - info->sarea_info.size = req->size; - XGI_INFO("info->sarea_info.bus_addr: 0x%lx" - "info->sarea_info.size: 0x%lx\n", - info->sarea_info.bus_addr, info->sarea_info.size); -} - /* * irq functions */ diff --git a/linux-core/xgi_misc.h b/linux-core/xgi_misc.h index 85cfbf2b..9c0591b2 100644 --- a/linux-core/xgi_misc.h +++ b/linux-core/xgi_misc.h @@ -30,12 +30,7 @@ #define _XGI_MISC_H_ extern void xgi_dump_register(struct xgi_info * info); -extern void xgi_get_device_info(struct xgi_info * info, struct xgi_chip_info * req); -extern void xgi_get_mmio_info(struct xgi_info * info, struct xgi_mmio_info * req); -extern void xgi_get_screen_info(struct xgi_info * info, struct xgi_screen_info * req); -extern void xgi_put_screen_info(struct xgi_info * info, struct xgi_screen_info * req); extern void xgi_ge_reset(struct xgi_info * info); -extern void xgi_sarea_info(struct xgi_info * info, struct xgi_sarea_info * req); extern void xgi_restore_registers(struct xgi_info * info); extern bool xgi_ge_irq_handler(struct xgi_info * info); diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index 70459b2c..0d641ab8 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -344,35 +344,6 @@ int 
xgi_pcie_heap_init(struct xgi_info * info) return 0; } -void xgi_pcie_heap_check(void) -{ -#ifdef XGI_DEBUG - struct xgi_pcie_block *block; - unsigned int ownerIndex; - static const char *const ownerStr[6] = - { "2D", "3D", "3D_CMD", "3D_SCR", "3D_TEX", "ELSE" }; - - if (!xgi_pcie_heap) { - return; - } - - XGI_INFO("pcie freemax = 0x%lx\n", xgi_pcie_heap->max_freesize); - list_for_each_entry(block, &xgi_pcie_heap->used_list, list) { - if (block->owner == PCIE_2D) - ownerIndex = 0; - else if (block->owner > PCIE_3D_TEXTURE - || block->owner < PCIE_2D - || block->owner < PCIE_3D) - ownerIndex = 5; - else - ownerIndex = block->owner - PCIE_3D + 1; - - XGI_INFO("Allocated by %s, block offset: 0x%lx, size: 0x%lx \n", - ownerStr[ownerIndex], block->offset, block->size); - } -#endif -} - void xgi_pcie_heap_cleanup(struct xgi_info * info) { struct list_head *free_list; diff --git a/shared-core/xgi_drm.h b/shared-core/xgi_drm.h index 0abf390a..bc39cbf7 100644 --- a/shared-core/xgi_drm.h +++ b/shared-core/xgi_drm.h @@ -31,15 +31,17 @@ #include #include -struct xgi_chip_info { +struct drm_xgi_sarea { __u16 device_id; __u16 vendor_id; char device_name[32]; - unsigned int curr_display_mode; //Singe, DualView(Contained), MHS - unsigned int fb_size; - unsigned long sarea_bus_addr; - unsigned int sarea_size; + + unsigned int scrn_start; + unsigned int scrn_xres; + unsigned int scrn_yres; + unsigned int scrn_bpp; + unsigned int scrn_pitch; }; enum xgi_mem_location { @@ -65,14 +67,6 @@ struct xgi_mem_alloc { unsigned long bus_addr; }; -struct xgi_screen_info { - unsigned int scrn_start; - unsigned int scrn_xres; - unsigned int scrn_yres; - unsigned int scrn_bpp; - unsigned int scrn_pitch; -}; - struct xgi_sarea_info { unsigned long bus_addr; unsigned int size; @@ -114,53 +108,35 @@ struct xgi_mmio_info { #define XGI_IOCTL_MAGIC 'x' /* use 'x' as magic number */ #define XGI_IOCTL_BASE 0 -#define XGI_ESC_DEVICE_INFO (XGI_IOCTL_BASE + 0) -#define XGI_ESC_POST_VBIOS (XGI_IOCTL_BASE + 1) +#define XGI_ESC_POST_VBIOS (XGI_IOCTL_BASE + 0) -#define XGI_ESC_FB_INIT (XGI_IOCTL_BASE + 2) -#define XGI_ESC_FB_ALLOC (XGI_IOCTL_BASE + 3) -#define XGI_ESC_FB_FREE (XGI_IOCTL_BASE + 4) -#define XGI_ESC_PCIE_INIT (XGI_IOCTL_BASE + 5) -#define XGI_ESC_PCIE_ALLOC (XGI_IOCTL_BASE + 6) -#define XGI_ESC_PCIE_FREE (XGI_IOCTL_BASE + 7) -#define XGI_ESC_SUBMIT_CMDLIST (XGI_IOCTL_BASE + 8) -#define XGI_ESC_PUT_SCREEN_INFO (XGI_IOCTL_BASE + 9) -#define XGI_ESC_GET_SCREEN_INFO (XGI_IOCTL_BASE + 10) -#define XGI_ESC_GE_RESET (XGI_IOCTL_BASE + 11) -#define XGI_ESC_SAREA_INFO (XGI_IOCTL_BASE + 12) -#define XGI_ESC_DUMP_REGISTER (XGI_IOCTL_BASE + 13) -#define XGI_ESC_DEBUG_INFO (XGI_IOCTL_BASE + 14) -#define XGI_ESC_TEST_RWINKERNEL (XGI_IOCTL_BASE + 16) -#define XGI_ESC_STATE_CHANGE (XGI_IOCTL_BASE + 17) -#define XGI_ESC_MMIO_INFO (XGI_IOCTL_BASE + 18) -#define XGI_ESC_PCIE_CHECK (XGI_IOCTL_BASE + 19) -#define XGI_ESC_MEM_COLLECT (XGI_IOCTL_BASE + 20) +#define XGI_ESC_FB_ALLOC (XGI_IOCTL_BASE + 1) +#define XGI_ESC_FB_FREE (XGI_IOCTL_BASE + 2) +#define XGI_ESC_PCIE_ALLOC (XGI_IOCTL_BASE + 3) +#define XGI_ESC_PCIE_FREE (XGI_IOCTL_BASE + 4) +#define XGI_ESC_SUBMIT_CMDLIST (XGI_IOCTL_BASE + 5) +#define XGI_ESC_GE_RESET (XGI_IOCTL_BASE + 6) +#define XGI_ESC_DUMP_REGISTER (XGI_IOCTL_BASE + 7) +#define XGI_ESC_DEBUG_INFO (XGI_IOCTL_BASE + 8) +#define XGI_ESC_TEST_RWINKERNEL (XGI_IOCTL_BASE + 9) +#define XGI_ESC_STATE_CHANGE (XGI_IOCTL_BASE + 10) -#define XGI_IOCTL_DEVICE_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_DEVICE_INFO, struct xgi_chip_info) #define 
XGI_IOCTL_POST_VBIOS _IO(XGI_IOCTL_MAGIC, XGI_ESC_POST_VBIOS) -#define XGI_IOCTL_FB_INIT _IO(XGI_IOCTL_MAGIC, XGI_ESC_FB_INIT) #define XGI_IOCTL_FB_ALLOC _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_FB_ALLOC, struct xgi_mem_alloc) #define XGI_IOCTL_FB_FREE _IOW(XGI_IOCTL_MAGIC, XGI_ESC_FB_FREE, unsigned long) -#define XGI_IOCTL_PCIE_INIT _IO(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_INIT) #define XGI_IOCTL_PCIE_ALLOC _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_ALLOC, struct xgi_mem_alloc) #define XGI_IOCTL_PCIE_FREE _IOW(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_FREE, unsigned long) -#define XGI_IOCTL_PUT_SCREEN_INFO _IOW(XGI_IOCTL_MAGIC, XGI_ESC_PUT_SCREEN_INFO, struct xgi_screen_info) -#define XGI_IOCTL_GET_SCREEN_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_GET_SCREEN_INFO, struct xgi_screen_info) - #define XGI_IOCTL_GE_RESET _IO(XGI_IOCTL_MAGIC, XGI_ESC_GE_RESET) -#define XGI_IOCTL_SAREA_INFO _IOW(XGI_IOCTL_MAGIC, XGI_ESC_SAREA_INFO, struct xgi_sarea_info) #define XGI_IOCTL_DUMP_REGISTER _IO(XGI_IOCTL_MAGIC, XGI_ESC_DUMP_REGISTER) #define XGI_IOCTL_DEBUG_INFO _IO(XGI_IOCTL_MAGIC, XGI_ESC_DEBUG_INFO) -#define XGI_IOCTL_MMIO_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_MMIO_INFO, struct xgi_mmio_info) #define XGI_IOCTL_SUBMIT_CMDLIST _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_SUBMIT_CMDLIST, struct xgi_cmd_info) #define XGI_IOCTL_TEST_RWINKERNEL _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_TEST_RWINKERNEL, unsigned long) #define XGI_IOCTL_STATE_CHANGE _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_STATE_CHANGE, struct xgi_state_info) -#define XGI_IOCTL_PCIE_CHECK _IO(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_CHECK) #define XGI_IOCTL_MAXNR 30 /* From 875dd1e53852d231b60eb82bfed33c016f92f3b8 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 17 Jul 2007 14:06:05 +1000 Subject: [PATCH 129/437] nouveau: Destroy PGRAPH context table on PGRAPH takedown --- shared-core/nv20_graph.c | 3 +++ shared-core/nv30_graph.c | 3 +++ 2 files changed, 6 insertions(+) diff --git a/shared-core/nv20_graph.c b/shared-core/nv20_graph.c index e6aa1e2a..8af3bd12 100644 --- a/shared-core/nv20_graph.c +++ b/shared-core/nv20_graph.c @@ -242,5 +242,8 @@ int nv20_graph_init(struct drm_device *dev) { void nv20_graph_takedown(struct drm_device *dev) { + struct drm_nouveau_private *dev_priv = dev->dev_private; + + nouveau_gpuobj_ref_del(dev, &dev_priv->ctx_table); } diff --git a/shared-core/nv30_graph.c b/shared-core/nv30_graph.c index 23e0f7f0..d7138772 100644 --- a/shared-core/nv30_graph.c +++ b/shared-core/nv30_graph.c @@ -281,5 +281,8 @@ int nv30_graph_init(struct drm_device *dev) void nv30_graph_takedown(struct drm_device *dev) { + struct drm_nouveau_private *dev_priv = dev->dev_private; + + nouveau_gpuobj_ref_del(dev, &dev_priv->ctx_table); } From 2b6ea465134e72fa6aa96df5e40fbc91b561ef00 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 16 Jul 2007 21:11:22 -0700 Subject: [PATCH 130/437] Eliminate unnecessary structures and defines. 
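The PGRAPH takedown fix above pairs the context-table reference held by dev_priv with a matching release when the engine is torn down. A minimal sketch of that pairing, using only the ref_add/ref_del helpers visible elsewhere in this series; the init-side call and the ctx_obj name are illustrative assumptions, not the actual nv20/nv30 init code:

/* init: publish the context table object through a reference (illustrative) */
if ((ret = nouveau_gpuobj_ref_add(dev, -1, 0, ctx_obj, &dev_priv->ctx_table)))
        return ret;

/* takedown: drop the reference so the object can actually be freed */
nouveau_gpuobj_ref_del(dev, &dev_priv->ctx_table);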
--- shared-core/xgi_drm.h | 20 -------------------- 1 file changed, 20 deletions(-) diff --git a/shared-core/xgi_drm.h b/shared-core/xgi_drm.h index bc39cbf7..67118884 100644 --- a/shared-core/xgi_drm.h +++ b/shared-core/xgi_drm.h @@ -67,11 +67,6 @@ struct xgi_mem_alloc { unsigned long bus_addr; }; -struct xgi_sarea_info { - unsigned long bus_addr; - unsigned int size; -}; - enum xgi_batch_type { BTYPE_2D = 0, BTYPE_3D = 1, @@ -95,11 +90,6 @@ struct xgi_state_info { unsigned int _toState; }; -struct xgi_mmio_info { - unsigned long mmio_base; - unsigned int size; -}; - /* * Ioctl definitions @@ -139,14 +129,4 @@ struct xgi_mmio_info { #define XGI_IOCTL_MAXNR 30 -/* - * flags - */ -#define XGI_FLAG_OPEN 0x0001 -#define XGI_FLAG_NEEDS_POSTING 0x0002 -#define XGI_FLAG_WAS_POSTED 0x0004 -#define XGI_FLAG_CONTROL 0x0010 -#define XGI_FLAG_MAP_REGS_EARLY 0x0200 - - #endif /* _XGI_DRM_H_ */ From 5b08ab258f3e541334d2b64d38e15e1431080199 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 16 Jul 2007 21:12:30 -0700 Subject: [PATCH 131/437] Clean ups (primarilly log messages) in xgi_test_rwinkernel. --- linux-core/xgi_pcie.c | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index 0d641ab8..cfc9febc 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -919,20 +919,21 @@ void *xgi_find_pcie_virt(struct xgi_info * info, unsigned long address) */ void xgi_test_rwinkernel(struct xgi_info * info, unsigned long address) { - unsigned long *virtaddr = 0; + u32 *virtaddr = 0; + + XGI_INFO("input GE HW addr is 0x%x\n", address); + if (address == 0) { - XGI_INFO("[Jong-kd] input GE HW addr is 0x00000000\n"); return; } - virtaddr = (unsigned long *)xgi_find_pcie_virt(info, address); + virtaddr = (u32 *)xgi_find_pcie_virt(info, address); + + XGI_INFO("convert to CPU virt addr 0x%p\n", virtaddr); - XGI_INFO("[Jong-kd] input GE HW addr is 0x%lx\n", address); - XGI_INFO("[Jong-kd] convert to CPU virt addr 0x%px\n", virtaddr); - XGI_INFO("[Jong-kd] origin [virtaddr] = 0x%lx\n", *virtaddr); if (virtaddr != NULL) { + XGI_INFO("original [virtaddr] = 0x%x\n", *virtaddr); *virtaddr = 0x00f00fff; + XGI_INFO("modified [virtaddr] = 0x%x\n", *virtaddr); } - - XGI_INFO("[Jong-kd] modified [virtaddr] = 0x%lx\n", *virtaddr); } From bcba7ba981a88e27ad4d7e8ebcdbed7097cf1488 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 16 Jul 2007 21:15:58 -0700 Subject: [PATCH 132/437] Log message clean up in WriteRegDWord. Remove unused inline functions. 
--- linux-core/xgi_regs.h | 22 ++-------------------- 1 file changed, 2 insertions(+), 20 deletions(-) diff --git a/linux-core/xgi_regs.h b/linux-core/xgi_regs.h index 0e54e7d8..bc3e2a1e 100644 --- a/linux-core/xgi_regs.h +++ b/linux-core/xgi_regs.h @@ -153,16 +153,10 @@ static inline void writeAttr(struct xgi_info * info, u8 index, u8 value) */ static inline void WriteRegDWord(struct xgi_info * info, u32 addr, u32 data) { - /* Jong 05/25/2006 */ - XGI_INFO("Jong-WriteRegDWord()-Begin \n"); - XGI_INFO("Jong-WriteRegDWord()-info->mmio.vbase=0x%lx \n", - info->mmio.vbase); - XGI_INFO("Jong-WriteRegDWord()-addr=0x%lx \n", addr); - XGI_INFO("Jong-WriteRegDWord()-data=0x%lx \n", data); - /* return; */ + XGI_INFO("mmio vbase = 0x%p, addr = 0x%x, data = 0x%x\n", + info->mmio->vbase, addr, data); *(volatile u32 *)(info->mmio.vbase + addr) = (data); - XGI_INFO("Jong-WriteRegDWord()-End \n"); } static inline void WriteRegWord(struct xgi_info * info, u32 addr, u16 data) @@ -262,18 +256,6 @@ extern void DisableProtect(); #define wReadReg(addr) ReadRegWord(info, addr) #define bReadReg(addr) ReadRegByte(info, addr) -static inline void xgi_protect_all(struct xgi_info * info) -{ - OUTB(0x3C4, 0x11); - OUTB(0x3C5, 0x92); -} - -static inline void xgi_unprotect_all(struct xgi_info * info) -{ - OUTB(0x3C4, 0x11); - OUTB(0x3C5, 0x92); -} - static inline void xgi_enable_mmio(struct xgi_info * info) { u8 protect = 0; From 7f98815d0027b1d4bd07b08e540106d5e994bcc5 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 16 Jul 2007 22:15:01 -0700 Subject: [PATCH 133/437] Make drm_sg_free callable in-kernel. --- linux-core/drmP.h | 5 +++-- linux-core/drm_scatter.c | 29 +++++++++++++++++++---------- 2 files changed, 22 insertions(+), 12 deletions(-) diff --git a/linux-core/drmP.h b/linux-core/drmP.h index 2bbc6200..ebb530bc 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -1135,8 +1135,9 @@ extern void drm_sg_cleanup(drm_sg_mem_t * entry); extern int drm_sg_alloc_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); extern int drm_sg_alloc(drm_device_t *dev, drm_scatter_gather_t * request); -extern int drm_sg_free(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); +extern int drm_sg_free(struct drm_device *dev, unsigned long handle); +extern int drm_sg_free_ioctl(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); /* ATI PCIGART support (ati_pcigart.h) */ extern int drm_ati_pcigart_init(drm_device_t * dev, drm_ati_pcigart_info *gart_info); diff --git a/linux-core/drm_scatter.c b/linux-core/drm_scatter.c index c0d6db24..5581dc0b 100644 --- a/linux-core/drm_scatter.c +++ b/linux-core/drm_scatter.c @@ -203,6 +203,7 @@ int drm_sg_alloc_ioctl(struct inode *inode, struct file *filp, if (copy_to_user(argp, &request, sizeof(request))) { drm_sg_cleanup(priv->head->dev->sg); + priv->head->dev->sg = NULL; return -EFAULT; } @@ -211,26 +212,18 @@ int drm_sg_alloc_ioctl(struct inode *inode, struct file *filp, } -int drm_sg_free(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_sg_free(struct drm_device *dev, unsigned long handle) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_scatter_gather_t request; drm_sg_mem_t *entry; if (!drm_core_check_feature(dev, DRIVER_SG)) return -EINVAL; - if (copy_from_user(&request, - (drm_scatter_gather_t __user *) arg, - sizeof(request))) - return -EFAULT; entry = dev->sg; dev->sg = NULL; - if (!entry || entry->handle != 
request.handle) + if (!entry || entry->handle != handle) return -EINVAL; DRM_DEBUG("sg free virtual = %p\n", entry->virtual); @@ -239,3 +232,19 @@ int drm_sg_free(struct inode *inode, struct file *filp, return 0; } + +EXPORT_SYMBOL(drm_sg_free); + +int drm_sg_free_ioctl(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_scatter_gather __user *argp = (void __user *)arg; + struct drm_scatter_gather request; + + if (copy_from_user(&request, argp, sizeof(request))) + return -EFAULT; + + return drm_sg_free(dev, request.handle); +} From 8d60bf2f199d57ec45feaab836b31832b9bbabb9 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 16 Jul 2007 22:15:41 -0700 Subject: [PATCH 134/437] Add XP5 and XP10 PCI IDs. --- shared-core/drm_pciids.txt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/shared-core/drm_pciids.txt b/shared-core/drm_pciids.txt index 126974d0..4504cf77 100644 --- a/shared-core/drm_pciids.txt +++ b/shared-core/drm_pciids.txt @@ -720,3 +720,6 @@ 0x12d2 0x002c NV_04 "VTNT2" 0x12d2 0x00a0 NV_04 "ITNT2" +[xgi] +0x18ca 0x2200 0 "XP5" +0x18ca 0x0047 0 "XP10 / XG47" From bff698d0edef90272247dfb90e454f7b98fd82dd Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 17 Jul 2007 09:59:26 +1000 Subject: [PATCH 135/437] drm_context: fix braino --- linux-core/drm_context.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/linux-core/drm_context.c b/linux-core/drm_context.c index 95d28898..a0b1a7ec 100644 --- a/linux-core/drm_context.c +++ b/linux-core/drm_context.c @@ -302,7 +302,7 @@ int drm_resctx(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { struct drm_ctx_res res; - struct drm_ctx __user *argp = (void __user *)arg; + struct drm_ctx_res __user *argp = (void __user *)arg; struct drm_ctx ctx; int i; From 6ad1df217647d112a21c2e004d4e3d74c7bb0e0e Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Wed, 18 Jul 2007 09:42:06 +1000 Subject: [PATCH 136/437] drm: remove drm_u64_t, replace with uint64_t everwhere This might break something, stdint.h inclusion in drm.h maybe required but I'm not sure yet what platforms have it what ones don't. 
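One hedged way to keep drm.h self-contained while that is sorted out is to pull the fixed-width types in explicitly per build environment; roughly (a sketch, not something this patch adds -- the exact guards are an assumption):

    /* Sketch: make uint64_t visible to both kernel and user builds. */
    #if defined(__linux__) && defined(__KERNEL__)
    #include <linux/types.h>       /* kernel fixed-width types */
    #else
    #include <stdint.h>            /* C99 uint64_t for user space */
    #endif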
--- libdrm/xf86drm.c | 9 ++++----- libdrm/xf86drm.h | 1 + libdrm/xf86drmHash.c | 1 - libdrm/xf86drmRandom.c | 1 - libdrm/xf86drmSL.c | 1 - libdrm/xf86mm.h | 12 ++++++------ linux-core/drmP.h | 8 ++++---- linux-core/drm_bo.c | 2 +- linux-core/drm_memory.c | 12 ++++++------ linux-core/drm_proc.c | 6 +++--- shared-core/drm.h | 39 ++++++++++++++++----------------------- 11 files changed, 41 insertions(+), 51 deletions(-) diff --git a/libdrm/xf86drm.c b/libdrm/xf86drm.c index 52a6d92f..8cee4fbc 100644 --- a/libdrm/xf86drm.c +++ b/libdrm/xf86drm.c @@ -49,7 +49,6 @@ #include #include #include -#include "drm.h" /* Not all systems have MAP_FAILED defined */ #ifndef MAP_FAILED @@ -2698,7 +2697,7 @@ static void drmBOCopyReply(const struct drm_bo_info_rep *rep, drmBO *buf) int drmBOCreate(int fd, unsigned long start, unsigned long size, unsigned pageAlignment, void *user_buffer, drm_bo_type_t type, - drm_u64_t mask, + uint64_t mask, unsigned hint, drmBO *buf) { struct drm_bo_create_arg arg; @@ -2879,7 +2878,7 @@ int drmBOUnmap(int fd, drmBO *buf) } int drmBOValidate(int fd, drmBO *buf, - drm_u64_t flags, drm_u64_t mask, + uint64_t flags, uint64_t mask, unsigned hint) { struct drm_bo_op_arg arg; @@ -3057,7 +3056,7 @@ int drmBOValidateList(int fd, drmBOList *list) struct drm_bo_op_arg *arg, *first; struct drm_bo_op_req *req; struct drm_bo_arg_rep *rep; - drm_u64_t *prevNext = NULL; + uint64_t *prevNext = NULL; drmBO *buf; int ret; @@ -3121,7 +3120,7 @@ int drmBOFenceList(int fd, drmBOList *list, unsigned fenceHandle) struct drm_bo_op_arg *arg, *first; struct drm_bo_op_req *req; struct drm_bo_arg_rep *rep; - drm_u64_t *prevNext = NULL; + uint64_t *prevNext = NULL; drmBO *buf; unsigned fence_flags; int ret; diff --git a/libdrm/xf86drm.h b/libdrm/xf86drm.h index d4260cc9..230f54ce 100644 --- a/libdrm/xf86drm.h +++ b/libdrm/xf86drm.h @@ -36,6 +36,7 @@ #include #include +#include #include /* Defaults, if nothing set in xf86config */ diff --git a/libdrm/xf86drmHash.c b/libdrm/xf86drmHash.c index d1ade063..82cbc2a5 100644 --- a/libdrm/xf86drmHash.c +++ b/libdrm/xf86drmHash.c @@ -74,7 +74,6 @@ #define HASH_MAIN 0 #if !HASH_MAIN -# include "drm.h" # include "xf86drm.h" #endif diff --git a/libdrm/xf86drmRandom.c b/libdrm/xf86drmRandom.c index 61ffb078..ecab9e2d 100644 --- a/libdrm/xf86drmRandom.c +++ b/libdrm/xf86drmRandom.c @@ -77,7 +77,6 @@ #define RANDOM_MAIN 0 #if !RANDOM_MAIN -# include "drm.h" # include "xf86drm.h" #endif diff --git a/libdrm/xf86drmSL.c b/libdrm/xf86drmSL.c index ce60648d..58aefac7 100644 --- a/libdrm/xf86drmSL.c +++ b/libdrm/xf86drmSL.c @@ -44,7 +44,6 @@ #define SL_MAIN 0 #if !SL_MAIN -# include "drm.h" # include "xf86drm.h" #else # include diff --git a/libdrm/xf86mm.h b/libdrm/xf86mm.h index 61978bc9..d1e0b28f 100644 --- a/libdrm/xf86mm.h +++ b/libdrm/xf86mm.h @@ -107,9 +107,9 @@ typedef struct _drmBO { drm_bo_type_t type; unsigned handle; - drm_u64_t mapHandle; - drm_u64_t flags; - drm_u64_t mask; + uint64_t mapHandle; + uint64_t flags; + uint64_t mask; unsigned mapFlags; unsigned long size; unsigned long offset; @@ -180,7 +180,7 @@ extern int drmBOCreateList(int numTarget, drmBOList *list); extern int drmBOCreate(int fd, unsigned long start, unsigned long size, unsigned pageAlignment,void *user_buffer, - drm_bo_type_t type, drm_u64_t mask, + drm_bo_type_t type, uint64_t mask, unsigned hint, drmBO *buf); extern int drmBODestroy(int fd, drmBO *buf); extern int drmBOReference(int fd, unsigned handle, drmBO *buf); @@ -188,8 +188,8 @@ extern int drmBOUnReference(int fd, drmBO *buf); extern 
int drmBOMap(int fd, drmBO *buf, unsigned mapFlags, unsigned mapHint, void **address); extern int drmBOUnmap(int fd, drmBO *buf); -extern int drmBOValidate(int fd, drmBO *buf, drm_u64_t flags, - drm_u64_t mask, unsigned hint); +extern int drmBOValidate(int fd, drmBO *buf, uint64_t flags, + uint64_t mask, unsigned hint); extern int drmBOFence(int fd, drmBO *buf, unsigned flags, unsigned fenceHandle); extern int drmBOInfo(int fd, drmBO *buf); diff --git a/linux-core/drmP.h b/linux-core/drmP.h index 19e9d627..c5dfe6bf 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -551,7 +551,7 @@ struct drm_map_list { struct list_head head; /**< list head */ struct drm_hash_item hash; struct drm_map *map; /**< mapping */ - drm_u64_t user_token; + uint64_t user_token; struct drm_mm_node *file_offset_node; }; @@ -931,9 +931,9 @@ extern int drm_unbind_agp(DRM_AGP_MEM * handle); extern void drm_free_memctl(size_t size); extern int drm_alloc_memctl(size_t size); -extern void drm_query_memctl(drm_u64_t *cur_used, - drm_u64_t *low_threshold, - drm_u64_t *high_threshold); +extern void drm_query_memctl(uint64_t *cur_used, + uint64_t *low_threshold, + uint64_t *high_threshold); extern void drm_init_memctl(size_t low_threshold, size_t high_threshold, size_t unit_size); diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 681d37fe..374be04e 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -2629,7 +2629,7 @@ static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo) return -ENOMEM; } - list->user_token = ((drm_u64_t) list->hash.key) << PAGE_SHIFT; + list->user_token = ((uint64_t) list->hash.key) << PAGE_SHIFT; return 0; } diff --git a/linux-core/drm_memory.c b/linux-core/drm_memory.c index 454c33e8..f68a3a3e 100644 --- a/linux-core/drm_memory.c +++ b/linux-core/drm_memory.c @@ -38,9 +38,9 @@ static struct { spinlock_t lock; - drm_u64_t cur_used; - drm_u64_t low_threshold; - drm_u64_t high_threshold; + uint64_t cur_used; + uint64_t low_threshold; + uint64_t high_threshold; } drm_memctl = { .lock = SPIN_LOCK_UNLOCKED }; @@ -82,9 +82,9 @@ void drm_free_memctl(size_t size) } EXPORT_SYMBOL(drm_free_memctl); -void drm_query_memctl(drm_u64_t *cur_used, - drm_u64_t *low_threshold, - drm_u64_t *high_threshold) +void drm_query_memctl(uint64_t *cur_used, + uint64_t *low_threshold, + uint64_t *high_threshold) { spin_lock(&drm_memctl.lock); *cur_used = drm_memctl.cur_used; diff --git a/linux-core/drm_proc.c b/linux-core/drm_proc.c index 3f9cb028..08bf99d6 100644 --- a/linux-core/drm_proc.c +++ b/linux-core/drm_proc.c @@ -436,9 +436,9 @@ static int drm__objects_info(char *buf, char **start, off_t offset, int request, int len = 0; struct drm_buffer_manager *bm = &dev->bm; struct drm_fence_manager *fm = &dev->fm; - drm_u64_t used_mem; - drm_u64_t low_mem; - drm_u64_t high_mem; + uint64_t used_mem; + uint64_t low_mem; + uint64_t high_mem; if (offset > DRM_PROC_LIMIT) { diff --git a/shared-core/drm.h b/shared-core/drm.h index 3ab63d5d..de8967ad 100644 --- a/shared-core/drm.h +++ b/shared-core/drm.h @@ -127,16 +127,9 @@ #define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT)) #if defined(__linux__) -#if defined(__KERNEL__) -typedef __u64 drm_u64_t; -#else -typedef unsigned long long drm_u64_t; -#endif - typedef unsigned int drm_handle_t; #else #include -typedef u_int64_t drm_u64_t; typedef unsigned long drm_handle_t; /**< To mapped regions */ #endif typedef unsigned int drm_context_t; /**< GLXContext handle */ @@ -677,7 +670,7 @@ struct drm_fence_arg { unsigned int flags; unsigned 
int signaled; unsigned int pad64; - drm_u64_t expand_pad[3]; /*Future expansion */ + uint64_t expand_pad[3]; /*Future expansion */ }; /* Buffer permissions, referring to how the GPU uses the buffers. @@ -792,8 +785,8 @@ enum drm_bo_type { }; struct drm_bo_info_req { - drm_u64_t mask; - drm_u64_t flags; + uint64_t mask; + uint64_t flags; unsigned int handle; unsigned int hint; unsigned int fence_class; @@ -801,9 +794,9 @@ struct drm_bo_info_req { }; struct drm_bo_create_req { - drm_u64_t mask; - drm_u64_t size; - drm_u64_t buffer_start; + uint64_t mask; + uint64_t size; + uint64_t buffer_start; unsigned int hint; unsigned int page_alignment; enum drm_bo_type type; @@ -827,12 +820,12 @@ struct drm_bo_op_req { #define DRM_BO_REP_BUSY 0x00000001 struct drm_bo_info_rep { - drm_u64_t flags; - drm_u64_t mask; - drm_u64_t size; - drm_u64_t offset; - drm_u64_t arg_handle; - drm_u64_t buffer_start; + uint64_t flags; + uint64_t mask; + uint64_t size; + uint64_t offset; + uint64_t arg_handle; + uint64_t buffer_start; unsigned int handle; unsigned int fence_flags; unsigned int rep_flags; @@ -841,7 +834,7 @@ struct drm_bo_info_rep { unsigned int hw_tile_stride; unsigned int tile_info; unsigned int pad64; - drm_u64_t expand_pad[4]; /*Future expansion */ + uint64_t expand_pad[4]; /*Future expansion */ }; struct drm_bo_arg_rep { @@ -876,7 +869,7 @@ struct drm_bo_map_wait_idle_arg { }; struct drm_bo_op_arg { - drm_u64_t next; + uint64_t next; union { struct drm_bo_op_req req; struct drm_bo_arg_rep rep; @@ -905,8 +898,8 @@ struct drm_mm_init_arg { unsigned int major; unsigned int minor; unsigned int mem_type; - drm_u64_t p_offset; - drm_u64_t p_size; + uint64_t p_offset; + uint64_t p_size; }; /** From 3a71e87742ce8686c2b3c85ebbc8fb7a72b4f6e0 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Wed, 18 Jul 2007 09:46:16 +1000 Subject: [PATCH 137/437] drm: idr stuff is upstream for 2.6.23 --- linux-core/drm_compat.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/linux-core/drm_compat.h b/linux-core/drm_compat.h index 024059ac..0b00ba47 100644 --- a/linux-core/drm_compat.h +++ b/linux-core/drm_compat.h @@ -306,8 +306,10 @@ extern int drm_bo_map_bound(struct vm_area_struct *vma); #endif -/* fixme when functions are upstreamed */ +/* fixme when functions are upstreamed - upstreamed for 2.6.23 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) #define DRM_IDR_COMPAT_FN +#endif #ifdef DRM_IDR_COMPAT_FN int idr_for_each(struct idr *idp, int (*fn)(int id, void *p, void *data), void *data); From a64b5d8d3763639fbb4098500ad5c86fb8590aa7 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Wed, 18 Jul 2007 15:49:45 +1000 Subject: [PATCH 138/437] fix some missing whitespace/tab --- linux-core/drm_fops.c | 2 +- shared-core/drm.h | 12 ++++++------ shared-core/i915_dma.c | 2 +- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/linux-core/drm_fops.c b/linux-core/drm_fops.c index 98e581fe..d542d4e3 100644 --- a/linux-core/drm_fops.c +++ b/linux-core/drm_fops.c @@ -335,7 +335,7 @@ EXPORT_SYMBOL(drm_fasync); static void drm_object_release(struct file *filp) { - struct drm_file *priv = filp->private_data; + struct drm_file *priv = filp->private_data; struct list_head *head; struct drm_user_object *user_object; struct drm_ref_object *ref_object; diff --git a/shared-core/drm.h b/shared-core/drm.h index de8967ad..db913b1f 100644 --- a/shared-core/drm.h +++ b/shared-core/drm.h @@ -786,15 +786,15 @@ enum drm_bo_type { struct drm_bo_info_req { uint64_t mask; - uint64_t flags; - unsigned int handle; + 
uint64_t flags; + unsigned int handle; unsigned int hint; unsigned int fence_class; unsigned int pad64; }; struct drm_bo_create_req { - uint64_t mask; + uint64_t mask; uint64_t size; uint64_t buffer_start; unsigned int hint; @@ -820,8 +820,8 @@ struct drm_bo_op_req { #define DRM_BO_REP_BUSY 0x00000001 struct drm_bo_info_rep { - uint64_t flags; - uint64_t mask; + uint64_t flags; + uint64_t mask; uint64_t size; uint64_t offset; uint64_t arg_handle; @@ -831,7 +831,7 @@ struct drm_bo_info_rep { unsigned int rep_flags; unsigned int page_alignment; unsigned int desired_tile_stride; - unsigned int hw_tile_stride; + unsigned int hw_tile_stride; unsigned int tile_info; unsigned int pad64; uint64_t expand_pad[4]; /*Future expansion */ diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c index fbad27c0..c3a41bd5 100644 --- a/shared-core/i915_dma.c +++ b/shared-core/i915_dma.c @@ -874,7 +874,7 @@ static int i915_mmio(DRM_IOCTL_ARGS) e = &mmio_table[mmio.reg]; base = (u8 *) dev_priv->mmio_map->handle + e->offset; - switch (mmio.read_write) { + switch (mmio.read_write) { case I915_MMIO_READ: if (!(e->flag & I915_MMIO_MAY_READ)) return DRM_ERR(EINVAL); From 1ff858fe3a6b632c879a9f99a67227db7df70b62 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Wed, 18 Jul 2007 10:40:03 +0200 Subject: [PATCH 139/437] Fix via dmablit when blit queue is full. Fix by Simon Farnsworth, Bugzilla Bug #11542 http://bugs.freedesktop.org/show_bug.cgi?id=11542 --- linux-core/via_dmablit.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/linux-core/via_dmablit.c b/linux-core/via_dmablit.c index 5108c867..6422609c 100644 --- a/linux-core/via_dmablit.c +++ b/linux-core/via_dmablit.c @@ -568,7 +568,7 @@ via_init_dmablit(struct drm_device *dev) blitq->head = 0; blitq->cur = 0; blitq->serviced = 0; - blitq->num_free = VIA_NUM_BLIT_SLOTS; + blitq->num_free = VIA_NUM_BLIT_SLOTS - 1; blitq->num_outstanding = 0; blitq->is_active = 0; blitq->aborting = 0; From 14ecf8d6c2ccecbe9841ad4a7eb3b301685f2351 Mon Sep 17 00:00:00 2001 From: Pekka Paalanen Date: Fri, 6 Jul 2007 12:47:53 +0300 Subject: [PATCH 140/437] nouveau: Replace 0x00400104 and 0x00400108 with names. NV03_PGRAPH_NSTATUS and NV03_PGRAPH_NSOURCE. The prefix NV03 is chosen because nv10reg.h had no versioned prefix, and the code using these registers does not check card_type. 
--- shared-core/nouveau_irq.c | 15 ++++++++------- shared-core/nouveau_reg.h | 2 ++ 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/shared-core/nouveau_irq.c b/shared-core/nouveau_irq.c index 84319219..ffc4bac6 100644 --- a/shared-core/nouveau_irq.c +++ b/shared-core/nouveau_irq.c @@ -266,7 +266,8 @@ nouveau_graph_dump_trap_info(struct drm_device *dev) } DRM_ERROR("NV: nSource: 0x%08x, nStatus: 0x%08x\n", - NV_READ(0x400108), NV_READ(0x400104)); + NV_READ(NV03_PGRAPH_NSOURCE), + NV_READ(NV03_PGRAPH_NSTATUS)); DRM_ERROR("NV: Channel %d/%d (class 0x%04x) -" "Method 0x%04x, Data 0x%08x\n", channel, subc, class, method, data @@ -286,8 +287,8 @@ static void nouveau_pgraph_irq_handler(struct drm_device *dev) uint32_t nsource, nstatus, instance, notify; DRM_DEBUG("NV: PGRAPH notify interrupt\n"); - nstatus = NV_READ(0x00400104); - nsource = NV_READ(0x00400108); + nstatus = NV_READ(NV03_PGRAPH_NSTATUS); + nsource = NV_READ(NV03_PGRAPH_NSOURCE); DRM_DEBUG("nsource:0x%08x\tnstatus:0x%08x\n", nsource, nstatus); /* if this wasn't NOTIFICATION_PENDING, dump extra trap info */ @@ -308,8 +309,8 @@ static void nouveau_pgraph_irq_handler(struct drm_device *dev) uint32_t nsource, nstatus, instance, notify; DRM_DEBUG("NV: PGRAPH buffer notify interrupt\n"); - nstatus = NV_READ(0x00400104); - nsource = NV_READ(0x00400108); + nstatus = NV_READ(NV03_PGRAPH_NSTATUS); + nsource = NV_READ(NV03_PGRAPH_NSOURCE); DRM_DEBUG("nsource:0x%08x\tnstatus:0x%08x\n", nsource, nstatus); instance = NV_READ(0x00400158); @@ -332,8 +333,8 @@ static void nouveau_pgraph_irq_handler(struct drm_device *dev) DRM_ERROR("NV: PGRAPH error interrupt\n"); - nstatus = NV_READ(0x00400104); - nsource = NV_READ(0x00400108); + nstatus = NV_READ(NV03_PGRAPH_NSTATUS); + nsource = NV_READ(NV03_PGRAPH_NSOURCE); DRM_ERROR("nsource:0x%08x\tnstatus:0x%08x\n", nsource, nstatus); instance = NV_READ(0x00400158); diff --git a/shared-core/nouveau_reg.h b/shared-core/nouveau_reg.h index a66d2d34..bcdb1a9f 100644 --- a/shared-core/nouveau_reg.h +++ b/shared-core/nouveau_reg.h @@ -110,6 +110,8 @@ #define NV04_PGRAPH_DEBUG_3 0x0040008c #define NV10_PGRAPH_DEBUG_4 0x00400090 #define NV03_PGRAPH_INTR 0x00400100 +#define NV03_PGRAPH_NSTATUS 0x00400104 +#define NV03_PGRAPH_NSOURCE 0x00400108 #define NV03_PGRAPH_INTR_EN 0x00400140 #define NV40_PGRAPH_INTR_EN 0x0040013C # define NV_PGRAPH_INTR_NOTIFY (1<< 0) From 0c77f5abeadcbb89643740889cc865ba0ae66538 Mon Sep 17 00:00:00 2001 From: Pekka Paalanen Date: Fri, 6 Jul 2007 13:57:31 +0300 Subject: [PATCH 141/437] nouveau: Add bitfield names for NSOURCE and NSTATUS. Name strings and pretty-printing in nouveau_graph_dump_trap_info(). 
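The printer itself is a simple walk over the new name tables; roughly (a sketch of the shape of the helper -- the exact body may differ from what lands below):

    static void
    nouveau_print_bitfield_names(uint32_t value,
                                 const struct nouveau_bitfield_names *namelist,
                                 const int namelist_len)
    {
        int i;

        for (i = 0; i < namelist_len; ++i) {
            uint32_t mask = namelist[i].mask;

            if (value & mask) {
                printk(" %s", namelist[i].name);
                value &= ~mask;
            }
        }
        /* Bits with no entry in the table are dumped raw. */
        if (value)
            printk(" (unknown bits 0x%08x)", value);
    }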
--- shared-core/nouveau_irq.c | 71 ++++++++++++++++++++++++++++++++++++--- shared-core/nouveau_reg.h | 23 +++++++++++++ 2 files changed, 90 insertions(+), 4 deletions(-) diff --git a/shared-core/nouveau_irq.c b/shared-core/nouveau_irq.c index ffc4bac6..f7baf89e 100644 --- a/shared-core/nouveau_irq.c +++ b/shared-core/nouveau_irq.c @@ -246,6 +246,61 @@ static void nouveau_nv04_context_switch(struct drm_device *dev) } #endif + +struct nouveau_bitfield_names +{ + uint32_t mask; + const char * name; +}; + +static struct nouveau_bitfield_names nouveau_nstatus_names[] = +{ + { NV03_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" }, + { NV03_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" }, + { NV03_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" }, + { NV03_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" } +}; + +static struct nouveau_bitfield_names nouveau_nsource_names[] = +{ + { NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" }, + { NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" }, + { NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" }, + { NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION, "RANGE_EXCEPTION" }, + { NV03_PGRAPH_NSOURCE_LIMIT_COLOR, "LIMIT_COLOR" }, + { NV03_PGRAPH_NSOURCE_LIMIT_ZETA, "LIMIT_ZETA" }, + { NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD, "ILLEGAL_MTHD" }, + { NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION, "DMA_R_PROTECTION" }, + { NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION, "DMA_W_PROTECTION" }, + { NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION, "FORMAT_EXCEPTION" }, + { NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION, "PATCH_EXCEPTION" }, + { NV03_PGRAPH_NSOURCE_STATE_INVALID, "STATE_INVALID" }, + { NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY, "DOUBLE_NOTIFY" }, + { NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE, "NOTIFY_IN_USE" }, + { NV03_PGRAPH_NSOURCE_METHOD_CNT, "METHOD_CNT" }, + { NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION, "BFR_NOTIFICATION" }, + { NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" }, + { NV03_PGRAPH_NSOURCE_DMA_WIDTH_A, "DMA_WIDTH_A" }, + { NV03_PGRAPH_NSOURCE_DMA_WIDTH_B, "DMA_WIDTH_B" }, +}; + +static void +nouveau_print_bitfield_names(uint32_t value, + const struct nouveau_bitfield_names *namelist, + const int namelist_len) +{ + int i; + for(i=0; i> 20) & 0x1F; subc = (address >> 16) & 0x7; method = address & 0x1FFC; data = NV_READ(0x400708); + nsource = NV_READ(NV03_PGRAPH_NSOURCE); + nstatus = NV_READ(NV03_PGRAPH_NSTATUS); if (dev_priv->card_type < NV_50) { class = NV_READ(0x400160 + subc*4) & 0xFFFF; } else { class = NV_READ(0x400814); } - DRM_ERROR("NV: nSource: 0x%08x, nStatus: 0x%08x\n", - NV_READ(NV03_PGRAPH_NSOURCE), - NV_READ(NV03_PGRAPH_NSTATUS)); - DRM_ERROR("NV: Channel %d/%d (class 0x%04x) -" + DRM_ERROR("nSource:"); + nouveau_print_bitfield_names(nsource, nouveau_nsource_names, + ARRAY_SIZE(nouveau_nsource_names)); + printk(", nStatus:"); + nouveau_print_bitfield_names(nstatus, nouveau_nstatus_names, + ARRAY_SIZE(nouveau_nstatus_names)); + printk("\n"); + + DRM_ERROR("NV: Channel %d/%d (class 0x%04x) - " "Method 0x%04x, Data 0x%08x\n", channel, subc, class, method, data ); diff --git a/shared-core/nouveau_reg.h b/shared-core/nouveau_reg.h index bcdb1a9f..47d54b2a 100644 --- a/shared-core/nouveau_reg.h +++ b/shared-core/nouveau_reg.h @@ -111,7 +111,30 @@ #define NV10_PGRAPH_DEBUG_4 0x00400090 #define NV03_PGRAPH_INTR 0x00400100 #define NV03_PGRAPH_NSTATUS 0x00400104 +# define NV03_PGRAPH_NSTATUS_STATE_IN_USE (1<<23) +# define NV03_PGRAPH_NSTATUS_INVALID_STATE (1<<24) +# define NV03_PGRAPH_NSTATUS_BAD_ARGUMENT (1<<25) +# define NV03_PGRAPH_NSTATUS_PROTECTION_FAULT (1<<26) #define NV03_PGRAPH_NSOURCE 
0x00400108 +# define NV03_PGRAPH_NSOURCE_NOTIFICATION (1<< 0) +# define NV03_PGRAPH_NSOURCE_DATA_ERROR (1<< 1) +# define NV03_PGRAPH_NSOURCE_PROTECTION_ERROR (1<< 2) +# define NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION (1<< 3) +# define NV03_PGRAPH_NSOURCE_LIMIT_COLOR (1<< 4) +# define NV03_PGRAPH_NSOURCE_LIMIT_ZETA (1<< 5) +# define NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD (1<< 6) +# define NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION (1<< 7) +# define NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION (1<< 8) +# define NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION (1<< 9) +# define NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION (1<<10) +# define NV03_PGRAPH_NSOURCE_STATE_INVALID (1<<11) +# define NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY (1<<12) +# define NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE (1<<13) +# define NV03_PGRAPH_NSOURCE_METHOD_CNT (1<<14) +# define NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION (1<<15) +# define NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION (1<<16) +# define NV03_PGRAPH_NSOURCE_DMA_WIDTH_A (1<<17) +# define NV03_PGRAPH_NSOURCE_DMA_WIDTH_B (1<<18) #define NV03_PGRAPH_INTR_EN 0x00400140 #define NV40_PGRAPH_INTR_EN 0x0040013C # define NV_PGRAPH_INTR_NOTIFY (1<< 0) From 696bee093f6f75dbb48699ff32bbebe2d3a1e307 Mon Sep 17 00:00:00 2001 From: Pekka Paalanen Date: Fri, 6 Jul 2007 19:34:15 +0300 Subject: [PATCH 142/437] nouveau: Add read() method to Engine.timer. This is not called from anywhere, yet. --- shared-core/nouveau_drv.h | 2 ++ shared-core/nouveau_state.c | 8 ++++++++ shared-core/nv04_timer.c | 21 +++++++++++++++++++++ 3 files changed, 31 insertions(+) diff --git a/shared-core/nouveau_drv.h b/shared-core/nouveau_drv.h index f68304c9..9e11f9b7 100644 --- a/shared-core/nouveau_drv.h +++ b/shared-core/nouveau_drv.h @@ -157,6 +157,7 @@ struct nouveau_engine_func { struct { int (*init)(struct drm_device *dev); + uint64_t (*read)(struct drm_device *dev); void (*takedown)(struct drm_device *dev); } timer; @@ -469,6 +470,7 @@ extern void nv50_mc_takedown(struct drm_device *dev); /* nv04_timer.c */ extern int nv04_timer_init(struct drm_device *dev); +extern uint64_t nv04_timer_read(struct drm_device *dev); extern void nv04_timer_takedown(struct drm_device *dev); extern long nouveau_compat_ioctl(struct file *filp, unsigned int cmd, diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c index 4e3b39dd..5b67aea1 100644 --- a/shared-core/nouveau_state.c +++ b/shared-core/nouveau_state.c @@ -88,6 +88,8 @@ static int nouveau_init_card_mappings(struct drm_device *dev) static int nouveau_stub_init(struct drm_device *dev) { return 0; } static void nouveau_stub_takedown(struct drm_device *dev) {} +static uint64_t nouveau_stub_timer_read(struct drm_device *dev) { return 0; } + static int nouveau_init_engine_ptrs(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; @@ -104,6 +106,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->mc.init = nv04_mc_init; engine->mc.takedown = nv04_mc_takedown; engine->timer.init = nv04_timer_init; + engine->timer.read = nv04_timer_read; engine->timer.takedown = nv04_timer_takedown; engine->fb.init = nv04_fb_init; engine->fb.takedown = nv04_fb_takedown; @@ -130,6 +133,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->mc.init = nv04_mc_init; engine->mc.takedown = nv04_mc_takedown; engine->timer.init = nv04_timer_init; + engine->timer.read = nv04_timer_read; engine->timer.takedown = nv04_timer_takedown; engine->fb.init = nv10_fb_init; engine->fb.takedown = nv10_fb_takedown; @@ -156,6 +160,7 @@ static int nouveau_init_engine_ptrs(struct 
drm_device *dev) engine->mc.init = nv04_mc_init; engine->mc.takedown = nv04_mc_takedown; engine->timer.init = nv04_timer_init; + engine->timer.read = nv04_timer_read; engine->timer.takedown = nv04_timer_takedown; engine->fb.init = nv10_fb_init; engine->fb.takedown = nv10_fb_takedown; @@ -182,6 +187,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->mc.init = nv04_mc_init; engine->mc.takedown = nv04_mc_takedown; engine->timer.init = nv04_timer_init; + engine->timer.read = nv04_timer_read; engine->timer.takedown = nv04_timer_takedown; engine->fb.init = nv10_fb_init; engine->fb.takedown = nv10_fb_takedown; @@ -208,6 +214,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->mc.init = nv40_mc_init; engine->mc.takedown = nv40_mc_takedown; engine->timer.init = nv04_timer_init; + engine->timer.read = nv04_timer_read; engine->timer.takedown = nv04_timer_takedown; engine->fb.init = nv40_fb_init; engine->fb.takedown = nv40_fb_takedown; @@ -235,6 +242,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->mc.init = nv50_mc_init; engine->mc.takedown = nv50_mc_takedown; engine->timer.init = nouveau_stub_init; + engine->timer.read = nouveau_stub_timer_read; engine->timer.takedown = nouveau_stub_takedown; engine->fb.init = nouveau_stub_init; engine->fb.takedown = nouveau_stub_takedown; diff --git a/shared-core/nv04_timer.c b/shared-core/nv04_timer.c index efe78da7..08a27f4f 100644 --- a/shared-core/nv04_timer.c +++ b/shared-core/nv04_timer.c @@ -17,6 +17,27 @@ nv04_timer_init(struct drm_device *dev) return 0; } +uint64_t +nv04_timer_read(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t low; + /* From kmmio dumps on nv28 this looks like how the blob does this. + * It reads the high dword twice, before and after. + * The only explanation seems to be that the 64-bit timer counter + * advances between high and low dword reads and may corrupt the + * result. Not confirmed. + */ + uint32_t high2 = NV_READ(NV04_PTIMER_TIME_1); + uint32_t high1; + do { + high1 = high2; + low = NV_READ(NV04_PTIMER_TIME_0); + high2 = NV_READ(NV04_PTIMER_TIME_1); + } while(high1 != high2); + return (((uint64_t)high2) << 32) | (uint64_t)low; +} + void nv04_timer_takedown(struct drm_device *dev) { From af4cfa624a005f7105db89f6f076c41adbe44bd3 Mon Sep 17 00:00:00 2001 From: Pekka Paalanen Date: Fri, 6 Jul 2007 20:33:32 +0300 Subject: [PATCH 143/437] nouveau: Make nouveau_wait_for_idle() read PTIMER. Following my nv28 kmmio dumps, nouveau_wait_for_idle() is modified to read PTIMER and NV03_PMC_ENABLE. Also a timeout based on PTIMER value is added, so wait_for_idle() cannot stall indefinitely (unless PTIMER is halted). The timeout was selected as 1 giga-ticks, which for me is 1s. 
--- shared-core/nouveau_state.c | 35 +++++++++++++++++++++++++---------- 1 file changed, 25 insertions(+), 10 deletions(-) diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c index 5b67aea1..a26ecea3 100644 --- a/shared-core/nouveau_state.c +++ b/shared-core/nouveau_state.c @@ -522,16 +522,31 @@ int nouveau_ioctl_setparam(DRM_IOCTL_ARGS) void nouveau_wait_for_idle(struct drm_device *dev) { struct drm_nouveau_private *dev_priv=dev->dev_private; - switch(dev_priv->card_type) - { - case NV_03: - while(NV_READ(NV03_PGRAPH_STATUS)); - break; - case NV_50: - break; - default: - while(NV_READ(NV04_PGRAPH_STATUS)); - break; + switch(dev_priv->card_type) { + case NV_03: + while (NV_READ(NV03_PGRAPH_STATUS)); + break; + case NV_50: + break; + default: { + /* This stuff is more or less a copy of what is seen + * in nv28 kmmio dump. + */ + uint64_t started = dev_priv->Engine.timer.read(dev); + uint64_t stopped = started; + uint32_t status; + do { + uint32_t pmc_e = NV_READ(NV03_PMC_ENABLE); + status = NV_READ(NV04_PGRAPH_STATUS); + if (!status) + break; + stopped = dev_priv->Engine.timer.read(dev); + /* It'll never wrap anyway... */ + } while (stopped - started < 1000000000ULL); + if (status) + DRM_ERROR("timed out with status 0x%08x\n", + status); + } } } From 33a50412c21229610dbb75dee83f145e2f1ec128 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Wed, 18 Jul 2007 14:22:40 -0700 Subject: [PATCH 144/437] Add dry-coded DRM drawable private information storage for FreeBSD. With this, all modules build again. --- bsd-core/drmP.h | 25 ++++++++- bsd-core/drm_dma.c | 4 +- bsd-core/drm_drawable.c | 119 ++++++++++++++++++++++++++++++++++++++-- bsd-core/drm_drv.c | 12 +++- bsd-core/drm_irq.c | 51 ++++++++++++++++- bsd-core/drm_scatter.c | 1 + linux-core/drmP.h | 14 +++++ shared-core/i915_drv.h | 4 +- shared-core/i915_irq.c | 70 +++++++++++------------ 9 files changed, 248 insertions(+), 52 deletions(-) diff --git a/bsd-core/drmP.h b/bsd-core/drmP.h index b2ecd4d4..fd8f4a27 100644 --- a/bsd-core/drmP.h +++ b/bsd-core/drmP.h @@ -59,6 +59,8 @@ typedef struct drm_file drm_file_t; #include #include #include +#include +#include #include #include #include @@ -152,6 +154,7 @@ typedef struct drm_file drm_file_t; #define DRM_MEM_CTXBITMAP 17 #define DRM_MEM_STUB 18 #define DRM_MEM_SGLISTS 19 +#define DRM_MEM_DRAWABLE 20 #define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8) @@ -184,10 +187,15 @@ MALLOC_DECLARE(M_DRM); #define DRM_CURPROC curthread #define DRM_STRUCTPROC struct thread #define DRM_SPINTYPE struct mtx -#define DRM_SPININIT(l,name) mtx_init(&l, name, NULL, MTX_DEF) -#define DRM_SPINUNINIT(l) mtx_destroy(&l) +#define DRM_SPININIT(l,name) mtx_init(l, name, NULL, MTX_DEF) +#define DRM_SPINUNINIT(l) mtx_destroy(l) #define DRM_SPINLOCK(l) mtx_lock(l) -#define DRM_SPINUNLOCK(u) mtx_unlock(u); +#define DRM_SPINUNLOCK(u) mtx_unlock(u) +#define DRM_SPINLOCK_IRQSAVE(l, irqflags) do { \ + mtx_lock(l); \ + (void)irqflags; \ +} while (0) +#define DRM_SPINUNLOCK_IRQRESTORE(u, irqflags) mtx_unlock(u) #define DRM_SPINLOCK_ASSERT(l) mtx_assert(l, MA_OWNED) #define DRM_CURRENTPID curthread->td_proc->p_pid #define DRM_LOCK() mtx_lock(&dev->dev_lock) @@ -732,6 +740,8 @@ struct drm_device { struct mtx irq_lock; /* protects irq condition checks */ struct mtx dev_lock; /* protects everything else */ #endif + DRM_SPINTYPE drw_lock; + /* Usage Counters */ int open_count; /* Outstanding files open */ int buf_use; /* Buffers in use -- cannot alloc */ @@ -797,6 +807,13 @@ struct drm_device { void *dev_private; unsigned int 
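The DRM_SPIN* wrappers introduced here are expected to collapse onto the native lock primitives on each OS; on Linux that mapping is roughly the following (a sketch under that assumption -- see the drmP.h hunk below for what is actually added):

    /* Sketch: Linux mapping of the portability wrappers; callers always
     * pass a pointer to the lock, as on the BSD side. */
    #define DRM_SPINTYPE                        spinlock_t
    #define DRM_SPININIT(l, name)               spin_lock_init(l)
    #define DRM_SPINUNINIT(l)                   do {} while (0)
    #define DRM_SPINLOCK(l)                     spin_lock(l)
    #define DRM_SPINUNLOCK(l)                   spin_unlock(l)
    #define DRM_SPINLOCK_IRQSAVE(l, flags)      spin_lock_irqsave(l, flags)
    #define DRM_SPINUNLOCK_IRQRESTORE(l, flags) spin_unlock_irqrestore(l, flags)
    #define DRM_SPINLOCK_ASSERT(l)              do {} while (0)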
agp_buffer_token; drm_local_map_t *agp_buffer_map; + + struct unrhdr *drw_unrhdr; + /* RB tree of drawable infos */ + RB_HEAD(drawable_tree, bsd_drm_drawable_info) drw_head; + + struct task locked_task; + void (*locked_task_call)(drm_device_t *dev); }; extern int drm_debug_flag; @@ -959,6 +976,8 @@ int drm_getsareactx(DRM_IOCTL_ARGS); /* Drawable IOCTL support (drm_drawable.c) */ int drm_adddraw(DRM_IOCTL_ARGS); int drm_rmdraw(DRM_IOCTL_ARGS); +int drm_update_draw(DRM_IOCTL_ARGS); +struct drm_drawable_info *drm_get_drawable_info(drm_device_t *dev, int handle); /* Authentication IOCTL support (drm_auth.c) */ int drm_getmagic(DRM_IOCTL_ARGS); diff --git a/bsd-core/drm_dma.c b/bsd-core/drm_dma.c index 67b3fe2d..086a9fa2 100644 --- a/bsd-core/drm_dma.c +++ b/bsd-core/drm_dma.c @@ -40,7 +40,7 @@ int drm_dma_setup(drm_device_t *dev) if (dev->dma == NULL) return DRM_ERR(ENOMEM); - DRM_SPININIT(dev->dma_lock, "drmdma"); + DRM_SPININIT(&dev->dma_lock, "drmdma"); return 0; } @@ -80,7 +80,7 @@ void drm_dma_takedown(drm_device_t *dev) free(dma->pagelist, M_DRM); free(dev->dma, M_DRM); dev->dma = NULL; - DRM_SPINUNINIT(dev->dma_lock); + DRM_SPINUNINIT(&dev->dma_lock); } diff --git a/bsd-core/drm_drawable.c b/bsd-core/drm_drawable.c index 379e0aa7..bc3ad571 100644 --- a/bsd-core/drm_drawable.c +++ b/bsd-core/drm_drawable.c @@ -33,19 +33,130 @@ #include "drmP.h" +struct bsd_drm_drawable_info { + struct drm_drawable_info info; + int handle; + RB_ENTRY(bsd_drm_drawable_info) tree; +}; + +static int +drm_drawable_compare(struct bsd_drm_drawable_info *a, + struct bsd_drm_drawable_info *b) +{ + if (a->handle > b->handle) + return 1; + if (a->handle > b->handle) + return -1; + return 0; +} + +RB_GENERATE_STATIC(drawable_tree, bsd_drm_drawable_info, tree, + drm_drawable_compare); + +struct drm_drawable_info * +drm_get_drawable_info(drm_device_t *dev, int handle) +{ + struct bsd_drm_drawable_info find, *result; + + find.handle = handle; + result = RB_FIND(drawable_tree, &dev->drw_head, &find); + + return &result->info; +} + +static struct drm_drawable_info * +drm_drawable_info_alloc(drm_device_t *dev, int handle) +{ + struct bsd_drm_drawable_info *info; + + info = drm_calloc(1, sizeof(struct bsd_drm_drawable_info), + DRM_MEM_DRAWABLE); + if (info == NULL) + return NULL; + + info->handle = handle; + RB_INSERT(drawable_tree, &dev->drw_head, info); + + return &info->info; +} + +static void +drm_drawable_info_free(drm_device_t *dev, struct drm_drawable_info *info) +{ + RB_REMOVE(drawable_tree, &dev->drw_head, + (struct bsd_drm_drawable_info *)info); + drm_free(info, sizeof(struct bsd_drm_drawable_info), DRM_MEM_DRAWABLE); +} + int drm_adddraw(DRM_IOCTL_ARGS) { + DRM_DEVICE; drm_draw_t draw; - draw.handle = 0; /* NOOP */ + draw.handle = alloc_unr(dev->drw_unrhdr); DRM_DEBUG("%d\n", draw.handle); - - DRM_COPY_TO_USER_IOCTL( (drm_draw_t *)data, draw, sizeof(draw) ); + + DRM_COPY_TO_USER_IOCTL((drm_draw_t *)data, draw, sizeof(draw)); return 0; } int drm_rmdraw(DRM_IOCTL_ARGS) { - return 0; /* NOOP */ + DRM_DEVICE; + drm_draw_t *draw = (drm_draw_t *)data; + struct drm_drawable_info *info; + + free_unr(dev->drw_unrhdr, draw->handle); + + info = drm_get_drawable_info(dev, draw->handle); + if (info != NULL) { + drm_drawable_info_free(dev, info); + } + + return 0; +} + +int drm_update_draw(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + struct drm_drawable_info *info; + struct drm_update_draw *update = (struct drm_update_draw *)data; + + info = drm_get_drawable_info(dev, update->handle); + if (info == NULL) { + info = 
drm_drawable_info_alloc(dev, update->handle); + if (info == NULL) + return ENOMEM; + } + + switch (update->type) { + case DRM_DRAWABLE_CLIPRECTS: + DRM_SPINLOCK(&dev->drw_lock); + if (update->num != info->num_rects) { + drm_free(info->rects, + sizeof(*info->rects) * info->num_rects, + DRM_MEM_DRAWABLE); + info->rects = NULL; + info->num_rects = 0; + } + if (update->num == 0) { + DRM_SPINUNLOCK(&dev->drw_lock); + return 0; + } + if (info->rects == NULL) { + info->rects = drm_alloc(sizeof(*info->rects) * + update->num, DRM_MEM_DRAWABLE); + if (info->rects == NULL) + return ENOMEM; + info->num_rects = update->num; + } + /* For some reason the pointer arg is unsigned long long. */ + copyin((void *)(intptr_t)update->data, info->rects, + sizeof(*info->rects) * info->num_rects); + DRM_SPINUNLOCK(&dev->drw_lock); + return 0; + default: + return EINVAL; + } } diff --git a/bsd-core/drm_drv.c b/bsd-core/drm_drv.c index 069774c1..4be4cd30 100644 --- a/bsd-core/drm_drv.c +++ b/bsd-core/drm_drv.c @@ -31,6 +31,7 @@ * */ +#include #include "drmP.h" #include "drm.h" #include "drm_sarea.h" @@ -121,6 +122,7 @@ static drm_ioctl_desc_t drm_ioctls[256] = { [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = { drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY }, [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = { drm_wait_vblank, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW)] = { drm_update_draw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY }, }; #ifdef __FreeBSD__ @@ -556,7 +558,13 @@ static int drm_load(drm_device_t *dev) DRM_ERROR("Cannot allocate memory for context bitmap.\n"); goto error; } - + + dev->drw_unrhdr = new_unrhdr(1, INT_MAX, NULL); + if (dev->drw_unrhdr == NULL) { + DRM_ERROR("Couldn't allocate drawable number allocator\n"); + goto error; + } + DRM_INFO("Initialized %s %d.%d.%d %s\n", dev->driver.name, dev->driver.major, @@ -628,6 +636,8 @@ static void drm_unload(drm_device_t *dev) if (dev->driver.unload != NULL) dev->driver.unload(dev); + delete_unrhdr(dev->drw_unrhdr); + drm_mem_uninit(); #if defined(__FreeBSD__) && __FreeBSD_version >= 500000 mtx_destroy(&dev->dev_lock); diff --git a/bsd-core/drm_irq.c b/bsd-core/drm_irq.c index f7da5ed7..2a69e014 100644 --- a/bsd-core/drm_irq.c +++ b/bsd-core/drm_irq.c @@ -31,6 +31,8 @@ #include "drmP.h" #include "drm.h" +static void drm_locked_task(void *context, int pending __unused); + int drm_irq_by_busid(DRM_IOCTL_ARGS) { DRM_DEVICE; @@ -87,7 +89,7 @@ int drm_irq_install(drm_device_t *dev) dev->context_flag = 0; - DRM_SPININIT(dev->irq_lock, "DRM IRQ lock"); + DRM_SPININIT(&dev->irq_lock, "DRM IRQ lock"); /* Before installing handler */ dev->driver.irq_preinstall(dev); @@ -131,6 +133,7 @@ int drm_irq_install(drm_device_t *dev) dev->driver.irq_postinstall(dev); DRM_UNLOCK(); + TASK_INIT(&dev->locked_task, 0, drm_locked_task, dev); return 0; err: DRM_LOCK(); @@ -142,7 +145,7 @@ err: dev->irqrid = 0; } #endif - DRM_SPINUNINIT(dev->irq_lock); + DRM_SPINUNINIT(&dev->irq_lock); DRM_UNLOCK(); return retcode; } @@ -174,7 +177,7 @@ int drm_irq_uninstall(drm_device_t *dev) #elif defined(__NetBSD__) || defined(__OpenBSD__) pci_intr_disestablish(&dev->pa.pa_pc, dev->irqh); #endif - DRM_SPINUNINIT(dev->irq_lock); + DRM_SPINUNINIT(&dev->irq_lock); return 0; } @@ -291,3 +294,45 @@ void drm_vbl_send_signals( drm_device_t *dev ) } } #endif + +static void drm_locked_task(void *context, int pending __unused) +{ + drm_device_t *dev = context; + + DRM_LOCK(); + for (;;) { + int ret; + + if (drm_lock_take(&dev->lock.hw_lock->lock, + DRM_KERNEL_CONTEXT)) + { + dev->lock.filp = (void *)(uintptr_t)DRM_CURRENTPID; + 
dev->lock.lock_time = jiffies; + atomic_inc(&dev->counts[_DRM_STAT_LOCKS]); + break; /* Got lock */ + } + + /* Contention */ +#if defined(__FreeBSD__) && __FreeBSD_version > 500000 + ret = msleep((void *)&dev->lock.lock_queue, &dev->dev_lock, + PZERO | PCATCH, "drmlk2", 0); +#else + ret = tsleep((void *)&dev->lock.lock_queue, PZERO | PCATCH, + "drmlk2", 0); +#endif + if (ret != 0) + return; + } + DRM_UNLOCK(); + + dev->locked_task_call(dev); + + drm_lock_free(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT); +} + +void +drm_locked_tasklet(drm_device_t *dev, void (*tasklet)(drm_device_t *dev)) +{ + dev->locked_task_call = tasklet; + taskqueue_enqueue(taskqueue_swi, &dev->locked_task); +} diff --git a/bsd-core/drm_scatter.c b/bsd-core/drm_scatter.c index 99eae408..3c0be4a0 100644 --- a/bsd-core/drm_scatter.c +++ b/bsd-core/drm_scatter.c @@ -90,6 +90,7 @@ int drm_sg_alloc(drm_device_t * dev, drm_scatter_gather_t * request) dev->sg = entry; DRM_UNLOCK(); + return 0; } int drm_sg_alloc_ioctl(DRM_IOCTL_ARGS) diff --git a/linux-core/drmP.h b/linux-core/drmP.h index 2bbc6200..3b2176c9 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -1264,5 +1264,19 @@ static inline void drm_ctl_free(void *pt, size_t size, int area) /*@}*/ +/** Type for the OS's non-sleepable mutex lock */ +#define DRM_SPINTYPE spinlock_t +/** + * Initialize the lock for use. name is an optional string describing the + * lock + */ +#define DRM_SPININIT(l,name) spin_lock_init(l); +#define DRM_SPINUNINIT(l) +#define DRM_SPINLOCK(l) spin_lock(l); +#define DRM_SPINUNLOCK(u) spin_unlock(l); +#define DRM_SPINLOCK_IRQSAVE(l, flags) spin_lock_irqflags(l, _flags); +#define DRM_SPINUNLOCK_IRQRESTORE(u, flags) spin_unlock_irqrestore(l, _flags); +#define DRM_SPINLOCK_ASSERT(l) do {} while (0) + #endif /* __KERNEL__ */ #endif diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h index 60b32b0d..e9447f2b 100644 --- a/shared-core/i915_drv.h +++ b/shared-core/i915_drv.h @@ -118,7 +118,7 @@ typedef struct drm_i915_private { struct mem_block *agp_heap; unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds; int vblank_pipe; - spinlock_t user_irq_lock; + DRM_SPINTYPE user_irq_lock; int user_irq_refcount; int fence_irq_on; uint32_t irq_enable_reg; @@ -133,7 +133,7 @@ typedef struct drm_i915_private { #ifdef I915_HAVE_BUFFER void *agp_iomap; #endif - spinlock_t swaps_lock; + DRM_SPINTYPE swaps_lock; drm_i915_vbl_swap_t vbl_swaps; unsigned int swaps_pending; } drm_i915_private_t; diff --git a/shared-core/i915_irq.c b/shared-core/i915_irq.c index 2f6a6b95..40724fae 100644 --- a/shared-core/i915_irq.c +++ b/shared-core/i915_irq.c @@ -50,6 +50,8 @@ i915_dispatch_vsync_flip(drm_device_t *dev, drm_drawable_info_t *drw, int pipe) u16 x1, y1, x2, y2; int pf_pipes = 1 << pipe; + DRM_SPINLOCK_ASSERT(&dev->drw_lock); + /* If the window is visible on the other pipe, we have to flip on that * pipe as well. */ @@ -89,7 +91,6 @@ i915_dispatch_vsync_flip(drm_device_t *dev, drm_drawable_info_t *drw, int pipe) static void i915_vblank_tasklet(drm_device_t *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - unsigned long irqflags; struct list_head *list, *tmp, hits, *hit; int nhits, nrects, slice[2], upper[2], lower[2], i, num_pages; unsigned counter[2] = { atomic_read(&dev->vbl_received), @@ -111,7 +112,12 @@ static void i915_vblank_tasklet(drm_device_t *dev) nhits = nrects = 0; - spin_lock_irqsave(&dev_priv->swaps_lock, irqflags); + /* No irqsave/restore necessary. 
This tasklet may be run in an + * interrupt context or normal context, but we don't have to worry + * about getting interrupted by something acquiring the lock, because + * we are the interrupt context thing that acquires the lock. + */ + DRM_SPINLOCK(&dev_priv->swaps_lock); /* Find buffer swaps scheduled for this vertical blank */ list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) { @@ -124,15 +130,15 @@ static void i915_vblank_tasklet(drm_device_t *dev) list_del(list); dev_priv->swaps_pending--; - spin_unlock(&dev_priv->swaps_lock); - spin_lock(&dev->drw_lock); + DRM_SPINUNLOCK(&dev_priv->swaps_lock); + DRM_SPINLOCK(&dev->drw_lock); drw = drm_get_drawable_info(dev, vbl_swap->drw_id); if (!drw) { - spin_unlock(&dev->drw_lock); + DRM_SPINUNLOCK(&dev->drw_lock); drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER); - spin_lock(&dev_priv->swaps_lock); + DRM_SPINLOCK(&dev_priv->swaps_lock); continue; } @@ -149,7 +155,7 @@ static void i915_vblank_tasklet(drm_device_t *dev) } } - spin_unlock(&dev->drw_lock); + DRM_SPINUNLOCK(&dev->drw_lock); /* List of hits was empty, or we reached the end of it */ if (hit == &hits) @@ -157,16 +163,15 @@ static void i915_vblank_tasklet(drm_device_t *dev) nhits++; - spin_lock(&dev_priv->swaps_lock); + DRM_SPINLOCK(&dev_priv->swaps_lock); } + DRM_SPINUNLOCK(&dev->drw_lock); + if (nhits == 0) { - spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags); return; } - spin_unlock(&dev_priv->swaps_lock); - i915_kernel_lost_context(dev); upper[0] = upper[1] = 0; @@ -180,7 +185,7 @@ static void i915_vblank_tasklet(drm_device_t *dev) offsets[2] = sarea_priv->third_offset; num_pages = sarea_priv->third_handle ? 3 : 2; - spin_lock(&dev->drw_lock); + DRM_SPINLOCK(&dev->drw_lock); /* Emit blits for buffer swaps, partitioning both outputs into as many * slices as there are buffer swaps scheduled in order to avoid tearing @@ -262,7 +267,7 @@ static void i915_vblank_tasklet(drm_device_t *dev) } } - spin_unlock_irqrestore(&dev->drw_lock, irqflags); + DRM_SPINUNLOCK(&dev->drw_lock); list_for_each_safe(hit, tmp, &hits) { drm_i915_vbl_swap_t *swap_hit = @@ -362,23 +367,23 @@ int i915_emit_irq(drm_device_t * dev) void i915_user_irq_on(drm_i915_private_t *dev_priv) { - spin_lock(&dev_priv->user_irq_lock); + DRM_SPINLOCK(&dev_priv->user_irq_lock); if (dev_priv->irq_enabled && (++dev_priv->user_irq_refcount == 1)){ dev_priv->irq_enable_reg |= USER_INT_FLAG; I915_WRITE16(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg); } - spin_unlock(&dev_priv->user_irq_lock); + DRM_SPINUNLOCK(&dev_priv->user_irq_lock); } void i915_user_irq_off(drm_i915_private_t *dev_priv) { - spin_lock(&dev_priv->user_irq_lock); + DRM_SPINLOCK(&dev_priv->user_irq_lock); if (dev_priv->irq_enabled && (--dev_priv->user_irq_refcount == 0)) { // dev_priv->irq_enable_reg &= ~USER_INT_FLAG; // I915_WRITE16(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg); } - spin_unlock(&dev_priv->user_irq_lock); + DRM_SPINUNLOCK(&dev_priv->user_irq_lock); } @@ -597,16 +602,6 @@ int i915_vblank_swap(DRM_IOCTL_ARGS) return DRM_ERR(EINVAL); } - spin_lock_irqsave(&dev->drw_lock, irqflags); - - if (!drm_get_drawable_info(dev, swap.drawable)) { - spin_unlock_irqrestore(&dev->drw_lock, irqflags); - DRM_DEBUG("Invalid drawable ID %d\n", swap.drawable); - return DRM_ERR(EINVAL); - } - - spin_unlock_irqrestore(&dev->drw_lock, irqflags); - curseq = atomic_read(pipe ? 
&dev->vbl_received2 : &dev->vbl_received); if (seqtype == _DRM_VBLANK_RELATIVE) @@ -629,12 +624,13 @@ int i915_vblank_swap(DRM_IOCTL_ARGS) LOCK_TEST_WITH_RETURN(dev, filp); - spin_lock_irqsave(&dev->drw_lock, irqflags); + DRM_SPINLOCK_IRQSAVE(&dev->drw_lock, irqflags); drw = drm_get_drawable_info(dev, swap.drawable); if (!drw) { - spin_unlock_irqrestore(&dev->drw_lock, irqflags); + DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock, + irqflags); DRM_DEBUG("Invalid drawable ID %d\n", swap.drawable); return DRM_ERR(EINVAL); @@ -642,13 +638,13 @@ int i915_vblank_swap(DRM_IOCTL_ARGS) i915_dispatch_vsync_flip(dev, drw, pipe); - spin_unlock_irqrestore(&dev->drw_lock, irqflags); + DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock, irqflags); return 0; } } - spin_lock_irqsave(&dev_priv->swaps_lock, irqflags); + DRM_SPINLOCK_IRQSAVE(&dev_priv->swaps_lock, irqflags); list_for_each(list, &dev_priv->vbl_swaps.head) { vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head); @@ -657,13 +653,13 @@ int i915_vblank_swap(DRM_IOCTL_ARGS) vbl_swap->pipe == pipe && vbl_swap->sequence == swap.sequence) { vbl_swap->flip = (swap.seqtype & _DRM_VBLANK_FLIP); - spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags); + DRM_SPINUNLOCK_IRQRESTORE(&dev_priv->swaps_lock, irqflags); DRM_DEBUG("Already scheduled\n"); return 0; } } - spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags); + DRM_SPINUNLOCK_IRQRESTORE(&dev_priv->swaps_lock, irqflags); if (dev_priv->swaps_pending >= 100) { DRM_DEBUG("Too many swaps queued\n"); @@ -687,12 +683,12 @@ int i915_vblank_swap(DRM_IOCTL_ARGS) if (vbl_swap->flip) swap.sequence++; - spin_lock_irqsave(&dev_priv->swaps_lock, irqflags); + DRM_SPINLOCK_IRQSAVE(&dev_priv->swaps_lock, irqflags); list_add_tail((struct list_head *)vbl_swap, &dev_priv->vbl_swaps.head); dev_priv->swaps_pending++; - spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags); + DRM_SPINUNLOCK_IRQRESTORE(&dev_priv->swaps_lock, irqflags); DRM_COPY_TO_USER_IOCTL((drm_i915_vblank_swap_t __user *) data, swap, sizeof(swap)); @@ -715,11 +711,11 @@ void i915_driver_irq_postinstall(drm_device_t * dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - spin_lock_init(&dev_priv->swaps_lock); + DRM_SPININIT(&dev_priv->swaps_lock, "swap"); INIT_LIST_HEAD(&dev_priv->vbl_swaps.head); dev_priv->swaps_pending = 0; - spin_lock_init(&dev_priv->user_irq_lock); + DRM_SPININIT(&dev_priv->user_irq_lock, "userirq"); dev_priv->user_irq_refcount = 0; i915_enable_interrupt(dev); From 0c95d489abd19efd2ba017e78a4b28cea0854e77 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 19 Jul 2007 16:42:58 +1000 Subject: [PATCH 145/437] nouveau/nv50: get non-default push buffer sizes working. --- shared-core/nv50_fifo.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/shared-core/nv50_fifo.c b/shared-core/nv50_fifo.c index feab24c4..4933bbf3 100644 --- a/shared-core/nv50_fifo.c +++ b/shared-core/nv50_fifo.c @@ -263,7 +263,7 @@ nv50_fifo_create_context(struct drm_device *dev, int channel) INSTANCE_WR(ramfc, 0x54/4, 0x000f0000); INSTANCE_WR(ramfc, 0x7c/4, 0x30000001); INSTANCE_WR(ramfc, 0x78/4, 0x00000000); - INSTANCE_WR(ramfc, 0x4c/4, 0x00007fff); + INSTANCE_WR(ramfc, 0x4c/4, chan->pushbuf_mem->size - 1); if (!IS_G80) { INSTANCE_WR(chan->ramin->gpuobj, 0, channel); From 51de9ec5e38426b13a1da0f78f3a0894dcb3e495 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Thu, 19 Jul 2007 03:36:57 -0700 Subject: [PATCH 146/437] Add current BSD stuff to .gitignore. 
--- .gitignore | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 0fb0f49f..b1b33839 100644 --- a/.gitignore +++ b/.gitignore @@ -38,18 +38,26 @@ bsd-core/savage_drv.h bsd-core/savage_state.c bsd-core/sis_drm.h bsd-core/sis_drv.h +bsd-core/sis_ds.c +bsd-core/sis_ds.h +bsd-core/sis_mm.c bsd-core/tdfx_drv.h bsd-core/via_3d_reg.h bsd-core/via_dma.c bsd-core/via_drm.h bsd-core/via_drv.c bsd-core/via_drv.h +bsd-core/via_ds.c +bsd-core/via_ds.h bsd-core/via_irq.c bsd-core/via_map.c +bsd-core/via_mm.c +bsd-core/via_mm.h bsd-core/via_verifier.c bsd-core/via_verifier.h bsd-core/via_video.c -*~ +bsd-core/*/@ +bsd-core/*/machine *.flags *.ko *.ko.cmd @@ -75,6 +83,7 @@ config.log config.status config.sub configure +configure.lineno cscope.* depcomp device_if.h From d7cf298e540c631795868c52b044c7249bf45902 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Thu, 19 Jul 2007 04:59:59 -0700 Subject: [PATCH 147/437] Add some trivial regression tests, one of which fails. --- .gitignore | 5 ++++ Makefile.am | 2 +- configure.ac | 7 +++++- tests/Makefile | 27 --------------------- tests/Makefile.am | 24 +++++++++++++++++++ tests/drmtest.c | 45 ++++++++++++++++++++++++++++++++++ tests/drmtest.h | 36 ++++++++++++++++++++++++++++ tests/getclient.c | 60 ++++++++++++++++++++++++++++++++++++++++++++++ tests/getversion.c | 47 ++++++++++++++++++++++++++++++++++++ tests/openclose.c | 37 ++++++++++++++++++++++++++++ 10 files changed, 261 insertions(+), 29 deletions(-) delete mode 100644 tests/Makefile create mode 100644 tests/Makefile.am create mode 100644 tests/drmtest.c create mode 100644 tests/drmtest.h create mode 100644 tests/getclient.c create mode 100644 tests/getversion.c create mode 100644 tests/openclose.c diff --git a/.gitignore b/.gitignore index b1b33839..9b4eaa89 100644 --- a/.gitignore +++ b/.gitignore @@ -109,3 +109,8 @@ sis.kld stamp-h1 tdfx.kld via.kld +tests/dristat +tests/drmstat +tests/getclient +tests/getversion +tests/openclose diff --git a/Makefile.am b/Makefile.am index 8c5dc702..5b1ae60a 100644 --- a/Makefile.am +++ b/Makefile.am @@ -22,7 +22,7 @@ # here too, but let's just do libdrm for now AUTOMAKE_OPTIONS = foreign -SUBDIRS = libdrm shared-core +SUBDIRS = libdrm shared-core tests pkgconfigdir = @pkgconfigdir@ pkgconfig_DATA = libdrm.pc diff --git a/configure.ac b/configure.ac index 94c47bd1..78203343 100644 --- a/configure.ac +++ b/configure.ac @@ -35,4 +35,9 @@ AC_SYS_LARGEFILE pkgconfigdir=${libdir}/pkgconfig AC_SUBST(pkgconfigdir) -AC_OUTPUT([Makefile libdrm/Makefile shared-core/Makefile libdrm.pc]) +AC_OUTPUT([ + Makefile + libdrm/Makefile + shared-core/Makefile + tests/Makefile + libdrm.pc]) diff --git a/tests/Makefile b/tests/Makefile deleted file mode 100644 index b406e0ad..00000000 --- a/tests/Makefile +++ /dev/null @@ -1,27 +0,0 @@ - -# These definitions are for handling dependencies in the out of kernel build. - -PROGS = dristat drmstat - -CLEANFILES = *.o *.ko $(PROGS) .depend .*.flags .*.d - -# Build test utilities - -PRGCFLAGS = $(CFLAGS) -g -ansi -pedantic -DPOSIX_C_SOURCE=199309L \ - -D_POSIX_SOURCE -D_XOPEN_SOURCE -D_BSD_SOURCE -D_SVID_SOURCE \ - -I. 
-I../libdrm -I../shared-core - -DRMSTATLIBS = -L../libdrm -ldrm - - -programs: $(PROGS) - -dristat: dristat.c - $(CC) $(PRGCFLAGS) $< -o $@ - -drmstat: drmstat.c - $(CC) $(PRGCFLAGS) $< -o $@ $(DRMSTATLIBS) - -clean: - rm -f $(CLEANFILES) - diff --git a/tests/Makefile.am b/tests/Makefile.am new file mode 100644 index 00000000..815dae57 --- /dev/null +++ b/tests/Makefile.am @@ -0,0 +1,24 @@ +AM_CFLAGS = \ + -I $(top_srcdir)/shared-core + +noinst_PROGRAMS = \ + dristat \ + drmstat + +EXTRA_LTLIBRARIES = libdrmtest.la +libdrmtest_la_SOURCES = \ + drmtest.c \ + drmtest.h +libdrmtest_la_LIBADD = \ + $(top_builddir)/libdrm/libdrm.la + +LDADD = libdrmtest.la + +TESTS = openclose \ + getversion \ + getclient + +XFAIL_TESTS = getclient + +EXTRA_PROGRAMS = $(TESTS) +CLEANFILES = $(EXTRA_PROGRAMS) $(EXTRA_LTLIBRARIES) \ No newline at end of file diff --git a/tests/drmtest.c b/tests/drmtest.c new file mode 100644 index 00000000..3697078a --- /dev/null +++ b/tests/drmtest.c @@ -0,0 +1,45 @@ +/* + * Copyright © 2007 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Eric Anholt + * + */ + +#include +#include "drmtest.h" + +/** Open the first DRM device we can find, searching up to 16 device nodes */ +int drm_open_any(void) +{ + char name[20]; + int i, fd; + + for (i = 0; i < 16; i++) { + sprintf(name, "/dev/dri/card%d", i); + fd = open(name, O_RDWR); + if (fd != -1) + return fd; + } + abort(); +} + diff --git a/tests/drmtest.h b/tests/drmtest.h new file mode 100644 index 00000000..f623ff33 --- /dev/null +++ b/tests/drmtest.h @@ -0,0 +1,36 @@ +/* + * Copyright © 2007 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Eric Anholt + * + */ + +#include +#include +#include +#include +#include + +#include "xf86drm.h" + +int drm_open_any(void); diff --git a/tests/getclient.c b/tests/getclient.c new file mode 100644 index 00000000..349c16ec --- /dev/null +++ b/tests/getclient.c @@ -0,0 +1,60 @@ +/* + * Copyright © 2007 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Eric Anholt + * + */ + +#include +#include "drmtest.h" + +/** + * Checks DRM_IOCTL_GET_CLIENT. + */ +int main(int argc, char **argv) +{ + int fd, ret; + drm_client_t client; + + fd = drm_open_any(); + + /* Look for client index 0. This should exist whether we're operating + * on an otherwise unused drm device, or the X Server is running on + * the device. + */ + client.idx = 0; + ret = ioctl(fd, DRM_IOCTL_GET_CLIENT, &client); + assert(ret == 0); + + /* Look for some absurd client index and make sure it's invalid. + * The DRM drivers currently always return data, so the user has + * no real way to detect when the list has terminated. That's bad, + * and this test is XFAIL as a result. 
+ */ + client.idx = 0x7fffffff; + ret = ioctl(fd, DRM_IOCTL_GET_CLIENT, &client); + assert(ret == -1 && errno == EINVAL); + + close(fd); + return 0; +} diff --git a/tests/getversion.c b/tests/getversion.c new file mode 100644 index 00000000..3de90de6 --- /dev/null +++ b/tests/getversion.c @@ -0,0 +1,47 @@ +/* + * Copyright © 2007 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Eric Anholt + * + */ + +#include "drmtest.h" + +/** + * Checks DRM_IOCTL_GET_VERSION and libdrm's drmGetVersion() interface to it. + */ +int main(int argc, char **argv) +{ + int fd; + drmVersionPtr v; + + fd = drm_open_any(); + v = drmGetVersion(fd); + assert(strlen(v->name) != 0); + assert(strlen(v->date) != 0); + assert(strlen(v->desc) != 0); + assert(v->version_major >= 1); + drmFree(v); + close(fd); + return 0; +} diff --git a/tests/openclose.c b/tests/openclose.c new file mode 100644 index 00000000..946a4459 --- /dev/null +++ b/tests/openclose.c @@ -0,0 +1,37 @@ +/* + * Copyright © 2007 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + * Authors: + * Eric Anholt + * + */ + +#include "drmtest.h" + +int main(int argc, char **argv) +{ + int fd; + + fd = drm_open_any(); + close(fd); + return 0; +} From 50cb405f93da70054ede29e0c365f06352dc8fe5 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Thu, 19 Jul 2007 06:02:20 -0700 Subject: [PATCH 148/437] Fix the getclient test (Need this feature for future tests). --- bsd-core/drm_ioctl.c | 4 +--- tests/Makefile.am | 2 -- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/bsd-core/drm_ioctl.c b/bsd-core/drm_ioctl.c index e22faa83..b5b5cf58 100644 --- a/bsd-core/drm_ioctl.c +++ b/bsd-core/drm_ioctl.c @@ -213,9 +213,7 @@ int drm_getclient(DRM_IOCTL_ARGS) } DRM_UNLOCK(); - DRM_COPY_TO_USER_IOCTL( (drm_client_t *)data, client, sizeof(client) ); - - return 0; + return EINVAL; } int drm_getstats(DRM_IOCTL_ARGS) diff --git a/tests/Makefile.am b/tests/Makefile.am index 815dae57..949e2b00 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -18,7 +18,5 @@ TESTS = openclose \ getversion \ getclient -XFAIL_TESTS = getclient - EXTRA_PROGRAMS = $(TESTS) CLEANFILES = $(EXTRA_PROGRAMS) $(EXTRA_LTLIBRARIES) \ No newline at end of file From ecf3fbe599cd72c495acf339ae24f3a9e01fdb36 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Thu, 19 Jul 2007 06:17:04 -0700 Subject: [PATCH 149/437] Add a test for drawable add, remove, and update. --- .gitignore | 1 + tests/Makefile.am | 3 +- tests/drmtest.c | 38 ++++++++++++++ tests/drmtest.h | 1 + tests/updatedraw.c | 127 +++++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 169 insertions(+), 1 deletion(-) create mode 100644 tests/updatedraw.c diff --git a/.gitignore b/.gitignore index 9b4eaa89..7ab6ced0 100644 --- a/.gitignore +++ b/.gitignore @@ -114,3 +114,4 @@ tests/drmstat tests/getclient tests/getversion tests/openclose +tests/updatedraw diff --git a/tests/Makefile.am b/tests/Makefile.am index 949e2b00..3b97fb79 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -16,7 +16,8 @@ LDADD = libdrmtest.la TESTS = openclose \ getversion \ - getclient + getclient \ + updatedraw EXTRA_PROGRAMS = $(TESTS) CLEANFILES = $(EXTRA_PROGRAMS) $(EXTRA_LTLIBRARIES) \ No newline at end of file diff --git a/tests/drmtest.c b/tests/drmtest.c index 3697078a..cae99a0c 100644 --- a/tests/drmtest.c +++ b/tests/drmtest.c @@ -43,3 +43,41 @@ int drm_open_any(void) abort(); } + +/** + * Open the first DRM device we can find where we end up being the master. + */ +int drm_open_any_master(void) +{ + char name[20]; + int i, fd; + + for (i = 0; i < 16; i++) { + drm_client_t client; + int ret; + + sprintf(name, "/dev/dri/card%d", i); + fd = open(name, O_RDWR); + if (fd == -1) + continue; + + /* Check that we're the only opener and authed. 
*/ + client.idx = 0; + ret = ioctl(fd, DRM_IOCTL_GET_CLIENT, &client); + assert (ret == 0); + if (!client.auth) { + close(fd); + continue; + } + client.idx = 1; + ret = ioctl(fd, DRM_IOCTL_GET_CLIENT, &client); + if (ret != -1 || errno != EINVAL) { + close(fd); + continue; + } + return fd; + } + fprintf(stderr, "Couldn't find an un-controlled DRM device\n"); + abort(); +} + diff --git a/tests/drmtest.h b/tests/drmtest.h index f623ff33..afa0df4a 100644 --- a/tests/drmtest.h +++ b/tests/drmtest.h @@ -34,3 +34,4 @@ #include "xf86drm.h" int drm_open_any(void); +int drm_open_any_master(void); diff --git a/tests/updatedraw.c b/tests/updatedraw.c new file mode 100644 index 00000000..1186783a --- /dev/null +++ b/tests/updatedraw.c @@ -0,0 +1,127 @@ +/* + * Copyright © 2007 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Eric Anholt + * + */ + +#include "drmtest.h" + +static void +set_draw_cliprects_empty(int fd, int drawable) +{ + int ret; + struct drm_update_draw update; + + update.handle = drawable; + update.type = DRM_DRAWABLE_CLIPRECTS; + update.num = 0; + update.data = 0; + + ret = ioctl(fd, DRM_IOCTL_UPDATE_DRAW, &update); + assert(ret == 0); +} + +static void +set_draw_cliprects_empty_fail(int fd, int drawable) +{ + int ret; + struct drm_update_draw update; + + update.handle = drawable; + update.type = DRM_DRAWABLE_CLIPRECTS; + update.num = 0; + update.data = 0; + + ret = ioctl(fd, DRM_IOCTL_UPDATE_DRAW, &update); + assert(ret == -1 && errno == EINVAL); +} + +static void +set_draw_cliprects_2(int fd, int drawable) +{ + int ret; + struct drm_update_draw update; + drm_clip_rect_t rects[2]; + + rects[0].x1 = 0; + rects[0].y1 = 0; + rects[0].x2 = 10; + rects[0].y2 = 10; + + rects[1].x1 = 10; + rects[1].y1 = 10; + rects[1].x2 = 20; + rects[1].y2 = 20; + + update.handle = drawable; + update.type = DRM_DRAWABLE_CLIPRECTS; + update.num = 2; + update.data = (unsigned long long)(uintptr_t)&rects; + + ret = ioctl(fd, DRM_IOCTL_UPDATE_DRAW, &update); + assert(ret == 0); +} + +/** + * Tests drawable management: adding, removing, and updating the cliprects of + * drawables. + */ +int main(int argc, char **argv) +{ + drm_draw_t drawarg; + int fd, ret, drawable; + + fd = drm_open_any_master(); + + /* Create a drawable. 
+ * IOCTL_ADD_DRAW is RDWR, though it should really just be RD + */ + drawarg.handle = 0; + ret = ioctl(fd, DRM_IOCTL_ADD_DRAW, &drawarg); + assert(ret == 0); + drawable = drawarg.handle; + + /* Do a series of cliprect updates */ + set_draw_cliprects_empty(fd, drawable); + set_draw_cliprects_2(fd, drawable); + set_draw_cliprects_empty(fd, drawable); + + /* Remove our drawable */ + drawarg.handle = drawable; + ret = ioctl(fd, DRM_IOCTL_RM_DRAW, &drawarg); + assert(ret == 0); + drawable = drawarg.handle; + + /* Check that removing an unknown drawable returns error */ + drawarg.handle = 0x7fffffff; + ret = ioctl(fd, DRM_IOCTL_RM_DRAW, &drawarg); + assert(ret == -1 && errno == EINVAL); + drawable = drawarg.handle; + + /* Attempt to set cliprects on a nonexistent drawable */ + set_draw_cliprects_empty_fail(fd, drawable); + + close(fd); + return 0; +} From e544286eae71a6b150af4d86096895c14e42c36e Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Thu, 19 Jul 2007 06:17:58 -0700 Subject: [PATCH 150/437] FreeBSD: Fix the recently added drawable add/remove/update code. --- bsd-core/drm_drawable.c | 56 +++++++++++++++++------------------------ bsd-core/drm_drv.c | 1 + 2 files changed, 24 insertions(+), 33 deletions(-) diff --git a/bsd-core/drm_drawable.c b/bsd-core/drm_drawable.c index bc3ad571..ad25a6df 100644 --- a/bsd-core/drm_drawable.c +++ b/bsd-core/drm_drawable.c @@ -64,36 +64,23 @@ drm_get_drawable_info(drm_device_t *dev, int handle) return &result->info; } -static struct drm_drawable_info * -drm_drawable_info_alloc(drm_device_t *dev, int handle) +int drm_adddraw(DRM_IOCTL_ARGS) { + DRM_DEVICE; + drm_draw_t draw; struct bsd_drm_drawable_info *info; info = drm_calloc(1, sizeof(struct bsd_drm_drawable_info), DRM_MEM_DRAWABLE); if (info == NULL) - return NULL; + return ENOMEM; - info->handle = handle; + info->handle = alloc_unr(dev->drw_unrhdr); + DRM_SPINLOCK(&dev->drw_lock); RB_INSERT(drawable_tree, &dev->drw_head, info); + draw.handle = info->handle; + DRM_SPINUNLOCK(&dev->drw_lock); - return &info->info; -} - -static void -drm_drawable_info_free(drm_device_t *dev, struct drm_drawable_info *info) -{ - RB_REMOVE(drawable_tree, &dev->drw_head, - (struct bsd_drm_drawable_info *)info); - drm_free(info, sizeof(struct bsd_drm_drawable_info), DRM_MEM_DRAWABLE); -} - -int drm_adddraw(DRM_IOCTL_ARGS) -{ - DRM_DEVICE; - drm_draw_t draw; - - draw.handle = alloc_unr(dev->drw_unrhdr); DRM_DEBUG("%d\n", draw.handle); DRM_COPY_TO_USER_IOCTL((drm_draw_t *)data, draw, sizeof(draw)); @@ -107,14 +94,19 @@ int drm_rmdraw(DRM_IOCTL_ARGS) drm_draw_t *draw = (drm_draw_t *)data; struct drm_drawable_info *info; - free_unr(dev->drw_unrhdr, draw->handle); - + DRM_SPINLOCK(&dev->drw_lock); info = drm_get_drawable_info(dev, draw->handle); if (info != NULL) { - drm_drawable_info_free(dev, info); + RB_REMOVE(drawable_tree, &dev->drw_head, + (struct bsd_drm_drawable_info *)info); + DRM_SPINUNLOCK(&dev->drw_lock); + free_unr(dev->drw_unrhdr, draw->handle); + drm_free(info, sizeof(struct bsd_drm_drawable_info), + DRM_MEM_DRAWABLE); + } else { + DRM_SPINUNLOCK(&dev->drw_lock); + return EINVAL; } - - return 0; } int drm_update_draw(DRM_IOCTL_ARGS) @@ -122,13 +114,11 @@ int drm_update_draw(DRM_IOCTL_ARGS) DRM_DEVICE; struct drm_drawable_info *info; struct drm_update_draw *update = (struct drm_update_draw *)data; + int ret; info = drm_get_drawable_info(dev, update->handle); - if (info == NULL) { - info = drm_drawable_info_alloc(dev, update->handle); - if (info == NULL) - return ENOMEM; - } + if (info == NULL) + return EINVAL; 
switch (update->type) { case DRM_DRAWABLE_CLIPRECTS: @@ -152,10 +142,10 @@ int drm_update_draw(DRM_IOCTL_ARGS) info->num_rects = update->num; } /* For some reason the pointer arg is unsigned long long. */ - copyin((void *)(intptr_t)update->data, info->rects, + ret = copyin((void *)(intptr_t)update->data, info->rects, sizeof(*info->rects) * info->num_rects); DRM_SPINUNLOCK(&dev->drw_lock); - return 0; + return ret; default: return EINVAL; } diff --git a/bsd-core/drm_drv.c b/bsd-core/drm_drv.c index 4be4cd30..75866b1b 100644 --- a/bsd-core/drm_drv.c +++ b/bsd-core/drm_drv.c @@ -200,6 +200,7 @@ int drm_attach(device_t nbdev, drm_pci_id_list_t *idlist) "dri/card%d", unit); #if __FreeBSD_version >= 500000 mtx_init(&dev->dev_lock, "drm device", NULL, MTX_DEF); + mtx_init(&dev->drw_lock, "drmdrw", NULL, MTX_DEF); #endif id_entry = drm_find_description(pci_get_vendor(dev->device), From f4e1c1d05cfbd43ac429ab6dc78345ffa3599b7a Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Thu, 19 Jul 2007 06:46:13 -0700 Subject: [PATCH 151/437] FreeBSD warnings cleanup. --- bsd-core/drmP.h | 2 ++ bsd-core/drm_drawable.c | 1 + shared-core/radeon_irq.c | 5 +++-- 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/bsd-core/drmP.h b/bsd-core/drmP.h index fd8f4a27..ff8ad473 100644 --- a/bsd-core/drmP.h +++ b/bsd-core/drmP.h @@ -998,6 +998,8 @@ int drm_dma(DRM_IOCTL_ARGS); /* IRQ support (drm_irq.c) */ int drm_control(DRM_IOCTL_ARGS); int drm_wait_vblank(DRM_IOCTL_ARGS); +void drm_locked_tasklet(drm_device_t *dev, + void (*tasklet)(drm_device_t *dev)); /* AGP/GART support (drm_agpsupport.c) */ int drm_agp_acquire_ioctl(DRM_IOCTL_ARGS); diff --git a/bsd-core/drm_drawable.c b/bsd-core/drm_drawable.c index ad25a6df..b81d0a75 100644 --- a/bsd-core/drm_drawable.c +++ b/bsd-core/drm_drawable.c @@ -103,6 +103,7 @@ int drm_rmdraw(DRM_IOCTL_ARGS) free_unr(dev->drw_unrhdr, draw->handle); drm_free(info, sizeof(struct bsd_drm_drawable_info), DRM_MEM_DRAWABLE); + return 0; } else { DRM_SPINUNLOCK(&dev->drw_lock); return EINVAL; diff --git a/shared-core/radeon_irq.c b/shared-core/radeon_irq.c index ad8a0ac7..b973b968 100644 --- a/shared-core/radeon_irq.c +++ b/shared-core/radeon_irq.c @@ -144,8 +144,9 @@ static int radeon_wait_irq(struct drm_device * dev, int swi_nr) return ret; } -int radeon_driver_vblank_do_wait(struct drm_device * dev, unsigned int *sequence, - int crtc) +static int radeon_driver_vblank_do_wait(struct drm_device * dev, + unsigned int *sequence, + int crtc) { drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private; From 5ba94c2ab8be350fee495e5cfe94afb8f663956a Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Thu, 19 Jul 2007 10:29:18 -0700 Subject: [PATCH 152/437] Initial pass at converting driver to DRM infrastructure. 
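The XGI driver previously carried its own character-device plumbing: private /proc and devfs entries, hand-rolled open/release/mmap/poll file operations, and a single ioctl entry point that decoded the XGI_ESC_* escape numbers by hand. This pass moves the driver onto the DRM core instead: ioctls are dispatched through a drm_ioctl_desc_t table, logging switches from XGI_INFO/XGI_ERROR to DRM_INFO/DRM_ERROR, device setup moves into drm_driver load/preclose/unload hooks, and the interrupt bottom half is scheduled with drm_locked_tasklet().

As a rough illustration (not part of this patch), a userspace client of the new interface would reach the bootstrap ioctl through the generic DRM command path; only the gart_size field is taken from xgi_bootstrap() below, the rest of the struct layout in shared-core/xgi_drm.h is assumed here:

    #include <string.h>
    #include "xf86drm.h"
    #include "xgi_drm.h"	/* assumed to provide struct xgi_bootstrap and DRM_XGI_BOOTSTRAP */

    /* Ask the kernel to set up a 16 MB GART before submitting command lists. */
    static int xgi_userspace_bootstrap(int fd)
    {
    	struct xgi_bootstrap bs;

    	memset(&bs, 0, sizeof(bs));
    	bs.gart_size = 16;	/* interpreted in megabytes by the kernel */

    	return drmCommandWrite(fd, DRM_XGI_BOOTSTRAP, &bs, sizeof(bs));
    }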
--- linux-core/Makefile | 3 +- linux-core/xgi_cmdlist.c | 99 ++- linux-core/xgi_cmdlist.h | 7 +- linux-core/xgi_drv.c | 1544 +++++--------------------------------- linux-core/xgi_drv.h | 215 ++---- linux-core/xgi_fb.c | 515 ++++++------- linux-core/xgi_fb.h | 47 -- linux-core/xgi_linux.h | 490 ------------ linux-core/xgi_misc.c | 145 ++-- linux-core/xgi_misc.h | 2 - linux-core/xgi_pcie.c | 969 +++++------------------- linux-core/xgi_pcie.h | 68 -- linux-core/xgi_regs.h | 313 ++------ shared-core/xgi_drm.h | 64 +- 14 files changed, 910 insertions(+), 3571 deletions(-) delete mode 100644 linux-core/xgi_fb.h delete mode 100644 linux-core/xgi_linux.h delete mode 100644 linux-core/xgi_pcie.h diff --git a/linux-core/Makefile b/linux-core/Makefile index 2052459d..55e25253 100644 --- a/linux-core/Makefile +++ b/linux-core/Makefile @@ -91,8 +91,7 @@ MACH64HEADERS = mach64_drv.h mach64_drm.h $(DRMHEADERS) NVHEADERS = nv_drv.h $(DRMHEADERS) FFBHEADERS = ffb_drv.h $(DRMHEADERS) NOUVEAUHEADERS = nouveau_drv.h nouveau_drm.h nouveau_reg.h $(DRMHEADERS) -XGIHEADERS = xgi_cmdlist.h xgi_drv.h xgi_fb.h xgi_linux.h xgi_misc.h \ - xgi_pcie.h xgi_regs.h xgi_types.h +XGIHEADERS = xgi_cmdlist.h xgi_drv.h xgi_misc.h xgi_regs.h $(DRMHEADERS) PROGS = dristat drmstat diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index 61373469..d2018057 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -26,7 +26,6 @@ * DEALINGS IN THE SOFTWARE. ***************************************************************************/ -#include "xgi_linux.h" #include "xgi_drv.h" #include "xgi_regs.h" #include "xgi_misc.h" @@ -55,18 +54,19 @@ int xgi_cmdlist_initialize(struct xgi_info * info, size_t size) s_cmdring._cmdRingSize = mem_alloc.size; s_cmdring._cmdRingBuffer = mem_alloc.hw_addr; - s_cmdring._cmdRingBusAddr = mem_alloc.bus_addr; + s_cmdring._cmdRingAllocOffset = mem_alloc.offset; s_cmdring._lastBatchStartAddr = 0; s_cmdring._cmdRingOffset = 0; return 1; } -void xgi_submit_cmdlist(struct xgi_info * info, struct xgi_cmd_info * pCmdInfo) +static void xgi_submit_cmdlist(struct xgi_info * info, + struct xgi_cmd_info * pCmdInfo) { const unsigned int beginPort = getCurBatchBeginPort(pCmdInfo); - XGI_INFO("After getCurBatchBeginPort()\n"); + DRM_INFO("After getCurBatchBeginPort()\n"); if (s_cmdring._lastBatchStartAddr == 0) { const unsigned int portOffset = BASE_3D_ENG + beginPort; @@ -75,50 +75,53 @@ void xgi_submit_cmdlist(struct xgi_info * info, struct xgi_cmd_info * pCmdInfo) /* xgi_waitfor_pci_idle(info); */ // Enable PCI Trigger Mode - XGI_INFO("Enable PCI Trigger Mode \n"); + DRM_INFO("Enable PCI Trigger Mode \n"); /* Jong 06/14/2006; 0x400001a */ - dwWriteReg(BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS, + dwWriteReg(info->mmio_map, + BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS, (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | M2REG_CLEAR_COUNTERS_MASK | 0x08 | M2REG_PCI_TRIGGER_MODE_MASK); /* Jong 06/14/2006; 0x400000a */ - dwWriteReg(BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS, + dwWriteReg(info->mmio_map, + BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS, (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | 0x08 | M2REG_PCI_TRIGGER_MODE_MASK); // Send PCI begin command - XGI_INFO("Send PCI begin command \n"); + DRM_INFO("Send PCI begin command \n"); - XGI_INFO("portOffset=%d, beginPort=%d\n", + DRM_INFO("portOffset=%d, beginPort=%d\n", portOffset, beginPort); /* beginPort = 48; */ /* 0xc100000 */ - dwWriteReg(portOffset, + dwWriteReg(info->mmio_map, portOffset, (beginPort << 22) + (BEGIN_VALID_MASK) + 
pCmdInfo->_curDebugID); - XGI_INFO("Send PCI begin command- After\n"); + DRM_INFO("Send PCI begin command- After\n"); /* 0x80000024 */ - dwWriteReg(portOffset + 4, + dwWriteReg(info->mmio_map, portOffset + 4, BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize); /* 0x1010000 */ - dwWriteReg(portOffset + 8, (pCmdInfo->_firstBeginAddr >> 4)); + dwWriteReg(info->mmio_map, portOffset + 8, + (pCmdInfo->_firstBeginAddr >> 4)); /* Jong 06/12/2006; system hang; marked for test */ - dwWriteReg(portOffset + 12, 0); + dwWriteReg(info->mmio_map, portOffset + 12, 0); /* Jong 06/13/2006; remove marked for system hang test */ /* xgi_waitfor_pci_idle(info); */ } else { u32 *lastBatchVirtAddr; - XGI_INFO("s_cmdring._lastBatchStartAddr != 0\n"); + DRM_INFO("s_cmdring._lastBatchStartAddr != 0\n"); if (pCmdInfo->_firstBeginType == BTYPE_3D) { addFlush2D(info); @@ -146,21 +149,38 @@ void xgi_submit_cmdlist(struct xgi_info * info, struct xgi_cmd_info * pCmdInfo) /* Jong 06/12/2006; system hang; marked for test */ triggerHWCommandList(info, pCmdInfo->_beginCount); } else { - XGI_ERROR("lastBatchVirtAddr is NULL\n"); + DRM_ERROR("lastBatchVirtAddr is NULL\n"); } } s_cmdring._lastBatchStartAddr = pCmdInfo->_lastBeginAddr; - XGI_INFO("End\n"); + DRM_INFO("End\n"); } + +int xgi_submit_cmdlist_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + struct xgi_cmd_info cmd_list; + struct xgi_info *info = dev->dev_private; + + DRM_COPY_FROM_USER_IOCTL(cmd_list, + (struct xgi_cmd_info __user *) data, + sizeof(cmd_list)); + + xgi_submit_cmdlist(info, &cmd_list); + return 0; +} + + /* state: 0 - console 1 - graphic 2 - fb 3 - logout */ -void xgi_state_change(struct xgi_info * info, struct xgi_state_info * pStateInfo) +int xgi_state_change(struct xgi_info * info, unsigned int to, + unsigned int from) { #define STATE_CONSOLE 0 #define STATE_GRAPHIC 1 @@ -169,26 +189,40 @@ void xgi_state_change(struct xgi_info * info, struct xgi_state_info * pStateInfo #define STATE_REBOOT 4 #define STATE_SHUTDOWN 5 - if ((pStateInfo->_fromState == STATE_GRAPHIC) - && (pStateInfo->_toState == STATE_CONSOLE)) { - XGI_INFO("[kd] I see, now is to leaveVT\n"); + if ((from == STATE_GRAPHIC) && (to == STATE_CONSOLE)) { + DRM_INFO("[kd] I see, now is to leaveVT\n"); // stop to received batch - } else if ((pStateInfo->_fromState == STATE_CONSOLE) - && (pStateInfo->_toState == STATE_GRAPHIC)) { - XGI_INFO("[kd] I see, now is to enterVT\n"); + } else if ((from == STATE_CONSOLE) && (to == STATE_GRAPHIC)) { + DRM_INFO("[kd] I see, now is to enterVT\n"); xgi_cmdlist_reset(); - } else if ((pStateInfo->_fromState == STATE_GRAPHIC) - && ((pStateInfo->_toState == STATE_LOGOUT) - || (pStateInfo->_toState == STATE_REBOOT) - || (pStateInfo->_toState == STATE_SHUTDOWN))) { - XGI_INFO("[kd] I see, not is to exit from X\n"); + } else if ((from == STATE_GRAPHIC) + && ((to == STATE_LOGOUT) + || (to == STATE_REBOOT) + || (to == STATE_SHUTDOWN))) { + DRM_INFO("[kd] I see, not is to exit from X\n"); // stop to received batch } else { - XGI_ERROR("[kd] Should not happen\n"); + DRM_ERROR("[kd] Should not happen\n"); + return DRM_ERR(EINVAL); } + return 0; } + +int xgi_state_change_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + struct xgi_state_info state; + struct xgi_info *info = dev->dev_private; + + DRM_COPY_FROM_USER_IOCTL(state, (struct xgi_state_info __user *) data, + sizeof(state)); + + return xgi_state_change(info, state._toState, state._fromState); +} + + void xgi_cmdlist_reset(void) { s_cmdring._lastBatchStartAddr = 0; @@ -198,7 +232,7 @@ void xgi_cmdlist_reset(void) void 
xgi_cmdlist_cleanup(struct xgi_info * info) { if (s_cmdring._cmdRingBuffer != 0) { - xgi_pcie_free(info, s_cmdring._cmdRingBusAddr); + xgi_pcie_free(info, s_cmdring._cmdRingAllocOffset, NULL); s_cmdring._cmdRingBuffer = 0; s_cmdring._cmdRingOffset = 0; s_cmdring._cmdRingSize = 0; @@ -212,7 +246,8 @@ static void triggerHWCommandList(struct xgi_info * info, //Fix me, currently we just trigger one time while (triggerCounter--) { - dwWriteReg(BASE_3D_ENG + M2REG_PCI_TRIGGER_REGISTER_ADDRESS, + dwWriteReg(info->mmio_map, + BASE_3D_ENG + M2REG_PCI_TRIGGER_REGISTER_ADDRESS, 0x05000000 + (0x0ffff & s_triggerID++)); // xgi_waitfor_pci_idle(info); } diff --git a/linux-core/xgi_cmdlist.h b/linux-core/xgi_cmdlist.h index d2b95c0e..4bc56ec1 100644 --- a/linux-core/xgi_cmdlist.h +++ b/linux-core/xgi_cmdlist.h @@ -60,16 +60,15 @@ typedef enum { struct xgi_cmdring_info { unsigned int _cmdRingSize; u32 _cmdRingBuffer; - unsigned long _cmdRingBusAddr; + unsigned long _cmdRingAllocOffset; u32 _lastBatchStartAddr; u32 _cmdRingOffset; }; extern int xgi_cmdlist_initialize(struct xgi_info * info, size_t size); -extern void xgi_submit_cmdlist(struct xgi_info * info, struct xgi_cmd_info * pCmdInfo); - -extern void xgi_state_change(struct xgi_info * info, struct xgi_state_info * pStateInfo); +extern int xgi_state_change(struct xgi_info * info, unsigned int to, + unsigned int from); extern void xgi_cmdlist_cleanup(struct xgi_info * info); diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index bd39dfdc..3b9f4cb1 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -25,96 +25,119 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. ***************************************************************************/ -#include "xgi_linux.h" + +#include "drmP.h" +#include "drm.h" #include "xgi_drv.h" #include "xgi_regs.h" -#include "xgi_pcie.h" #include "xgi_misc.h" #include "xgi_cmdlist.h" -/* for debug */ -static int xgi_temp = 1; -/* - * global parameters - */ -static struct xgi_dev { - u16 vendor; - u16 device; - const char *name; -} xgidev_list[] = { - { - PCI_VENDOR_ID_XGI, PCI_DEVICE_ID_XP5, "XP5"}, { - PCI_VENDOR_ID_XGI, PCI_DEVICE_ID_XG47, "XG47"}, { - 0, 0, NULL} +#include "drm_pciids.h" + +static struct pci_device_id pciidlist[] = { + xgi_PCI_IDS }; -int xgi_major = XGI_DEV_MAJOR; /* xgi reserved major device number. 
*/ +static int xgi_bootstrap(DRM_IOCTL_ARGS); -static int xgi_num_devices = 0; +static drm_ioctl_desc_t xgi_ioctls[] = { + [DRM_IOCTL_NR(DRM_XGI_BOOTSTRAP)] = {xgi_bootstrap, DRM_AUTH}, -struct xgi_info xgi_devices[XGI_MAX_DEVICES]; + [DRM_IOCTL_NR(DRM_XGI_FB_ALLOC)] = {xgi_fb_alloc_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_XGI_FB_FREE)] = {xgi_fb_free_ioctl, DRM_AUTH}, -#if defined(XGI_PM_SUPPORT_APM) -static struct pm_dev *apm_xgi_dev[XGI_MAX_DEVICES] = { 0 }; -#endif + [DRM_IOCTL_NR(DRM_XGI_PCIE_ALLOC)] = {xgi_pcie_alloc_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_XGI_PCIE_FREE)] = {xgi_pcie_free_ioctl, DRM_AUTH}, -/* add one for the control device */ -struct xgi_info xgi_ctl_device; -wait_queue_head_t xgi_ctl_waitqueue; + [DRM_IOCTL_NR(DRM_XGI_GE_RESET)] = {xgi_ge_reset_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_XGI_DUMP_REGISTER)] = {xgi_dump_register_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_XGI_DEBUG_INFO)] = {xgi_restore_registers_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_XGI_SUBMIT_CMDLIST)] = {xgi_submit_cmdlist_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_XGI_TEST_RWINKERNEL)] = {xgi_test_rwinkernel_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_XGI_STATE_CHANGE)] = {xgi_state_change_ioctl, DRM_AUTH}, +}; -#ifdef CONFIG_PROC_FS -struct proc_dir_entry *proc_xgi; -#endif +static const int xgi_max_ioctl = DRM_ARRAY_SIZE(xgi_ioctls); -#ifdef CONFIG_DEVFS_FS -devfs_handle_t xgi_devfs_handles[XGI_MAX_DEVICES]; -#endif +static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); +static int xgi_driver_load(struct drm_device *dev, unsigned long flags); +static int xgi_driver_unload(struct drm_device *dev); +static void xgi_driver_preclose(struct drm_device * dev, DRMFILE filp); +static irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS); -struct list_head xgi_mempid_list; -/* xgi_ functions.. 
do not take a state device parameter */ -static int xgi_post_vbios(struct xgi_ioctl_post_vbios * info); -static void xgi_proc_create(void); -static void xgi_proc_remove_all(struct proc_dir_entry *); -static void xgi_proc_remove(void); +static struct drm_driver driver = { + .driver_features = + DRIVER_PCI_DMA | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | + DRIVER_IRQ_SHARED | DRIVER_SG, + .dev_priv_size = sizeof(struct xgi_info), + .load = xgi_driver_load, + .unload = xgi_driver_unload, + .preclose = xgi_driver_preclose, + .dma_quiescent = NULL, + .irq_preinstall = NULL, + .irq_postinstall = NULL, + .irq_uninstall = NULL, + .irq_handler = xgi_kern_isr, + .reclaim_buffers = drm_core_reclaim_buffers, + .get_map_ofs = drm_core_get_map_ofs, + .get_reg_ofs = drm_core_get_reg_ofs, + .ioctls = xgi_ioctls, + .dma_ioctl = NULL, -/* xgi_kern_ functions, interfaces used by linux kernel */ -int xgi_kern_probe(struct pci_dev *, const struct pci_device_id *); + .fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .ioctl = drm_ioctl, + .mmap = drm_mmap, + .poll = drm_poll, + .fasync = drm_fasync, + }, -unsigned int xgi_kern_poll(struct file *, poll_table *); -int xgi_kern_ioctl(struct inode *, struct file *, unsigned int, unsigned long); -int xgi_kern_mmap(struct file *, struct vm_area_struct *); -int xgi_kern_open(struct inode *, struct file *); -int xgi_kern_release(struct inode *inode, struct file *filp); + .pci_driver = { + .name = DRIVER_NAME, + .id_table = pciidlist, + .probe = probe, + .remove = __devexit_p(drm_cleanup_pci), + }, -void xgi_kern_vma_open(struct vm_area_struct *vma); -void xgi_kern_vma_release(struct vm_area_struct *vma); -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 1)) -struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma, - unsigned long address, int *type); -#else -struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma, - unsigned long address, int write_access); -#endif + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .date = DRIVER_DATE, + .major = DRIVER_MAJOR, + .minor = DRIVER_MINOR, + .patchlevel = DRIVER_PATCHLEVEL, -int xgi_kern_read_card_info(char *, char **, off_t off, int, int *, void *); -int xgi_kern_read_status(char *, char **, off_t off, int, int *, void *); -int xgi_kern_read_pcie_info(char *, char **, off_t off, int, int *, void *); -int xgi_kern_read_version(char *, char **, off_t off, int, int *, void *); +}; -int xgi_kern_ctl_open(struct inode *, struct file *); -int xgi_kern_ctl_close(struct inode *, struct file *); -unsigned int xgi_kern_ctl_poll(struct file *, poll_table *); +static int probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + return drm_get_dev(pdev, ent, &driver); +} -void xgi_kern_isr_bh(unsigned long); -irqreturn_t xgi_kern_isr(int, void *, struct pt_regs *); -static void xgi_lock_init(struct xgi_info * info); +static int __init xgi_init(void) +{ + driver.num_ioctls = xgi_max_ioctl; + return drm_init(&driver, pciidlist); +} -#if defined(XGI_PM_SUPPORT_ACPI) -int xgi_kern_acpi_standby(struct pci_dev *, u32); -int xgi_kern_acpi_resume(struct pci_dev *); -#endif +static void __exit xgi_exit(void) +{ + drm_exit(&driver); +} + +module_init(xgi_init); +module_exit(xgi_exit); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL and additional rights"); + + +void xgi_kern_isr_bh(struct drm_device *dev); /* * verify access to pci config space wasn't disabled behind our back @@ -129,1361 +152,206 @@ int xgi_kern_acpi_resume(struct pci_dev *); static inline void 
xgi_check_pci_config(struct xgi_info * info, int line) { - unsigned short cmd, flag = 0; + u16 cmd; + bool flag = 0; - // don't do this on the control device, only the actual devices - if (info->flags & XGI_FLAG_CONTROL) - return; - - pci_read_config_word(info->dev, PCI_COMMAND, &cmd); + pci_read_config_word(info->dev->pdev, PCI_COMMAND, &cmd); if (!(cmd & PCI_COMMAND_MASTER)) { - XGI_INFO("restoring bus mastering! (%d)\n", line); + DRM_INFO("restoring bus mastering! (%d)\n", line); cmd |= PCI_COMMAND_MASTER; flag = 1; } if (!(cmd & PCI_COMMAND_MEMORY)) { - XGI_INFO("restoring MEM access! (%d)\n", line); + DRM_INFO("restoring MEM access! (%d)\n", line); cmd |= PCI_COMMAND_MEMORY; flag = 1; } if (flag) - pci_write_config_word(info->dev, PCI_COMMAND, cmd); + pci_write_config_word(info->dev->pdev, PCI_COMMAND, cmd); } -/* - * struct pci_device_id { - * unsigned int vendor, device; // Vendor and device ID or PCI_ANY_ID - * unsigned int subvendor, subdevice; // Subsystem ID's or PCI_ANY_ID - * unsigned int class, class_mask; // (class,subclass,prog-if) triplet - * unsigned long driver_data; // Data private to the driver - * }; - */ -static struct pci_device_id xgi_dev_table[] = { - { - .vendor = PCI_VENDOR_ID_XGI, - .device = PCI_ANY_ID, - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - .class = (PCI_CLASS_DISPLAY_VGA << 8), - .class_mask = ~0, - }, - {} -}; - -/* - * #define MODULE_DEVICE_TABLE(type,name) \ - * MODULE_GENERIC_TABLE(type##_device,name) - */ -MODULE_DEVICE_TABLE(pci, xgi_dev_table); - -/* - * struct pci_driver { - * struct list_head node; - * char *name; - * const struct pci_device_id *id_table; // NULL if wants all devices - * int (*probe)(struct pci_dev *dev, const struct pci_device_id *id); // New device inserted - * void (*remove)(struct pci_dev *dev); // Device removed (NULL if not a hot-plug capable driver) - * int (*save_state)(struct pci_dev *dev, u32 state); // Save Device Context - * int (*suspend)(struct pci_dev *dev, u32 state); // Device suspended - * int (*resume)(struct pci_dev *dev); // Device woken up - * int (*enable_wake)(struct pci_dev *dev, u32 state, int enable); // Enable wake event - * }; - */ -static struct pci_driver xgi_pci_driver = { - .name = "xgi", - .id_table = xgi_dev_table, - .probe = xgi_kern_probe, -#if defined(XGI_SUPPORT_ACPI) - .suspend = xgi_kern_acpi_standby, - .resume = xgi_kern_acpi_resume, -#endif -}; - -/* - * find xgi devices and set initial state - */ -int xgi_kern_probe(struct pci_dev *dev, const struct pci_device_id *id_table) +int xgi_bootstrap(DRM_IOCTL_ARGS) { - struct xgi_info *info; + DRM_DEVICE; + struct xgi_info *info = dev->dev_private; + struct xgi_bootstrap bs; + int err; - if ((dev->vendor != PCI_VENDOR_ID_XGI) - || (dev->class != (PCI_CLASS_DISPLAY_VGA << 8))) { - return -1; + + DRM_COPY_FROM_USER_IOCTL(bs, (struct xgi_bootstrap __user *) data, + sizeof(bs)); + + if (info->bootstrap_done) { + return 0; } - if (xgi_num_devices == XGI_MAX_DEVICES) { - XGI_INFO("maximum device number (%d) reached!\n", - xgi_num_devices); - return -1; - } - - /* enable io, mem, and bus-mastering in pci config space */ - if (pci_enable_device(dev) != 0) { - XGI_INFO("pci_enable_device failed, aborting\n"); - return -1; - } - - XGI_INFO("maximum device number (%d) reached \n", xgi_num_devices); - - pci_set_master(dev); - - info = &xgi_devices[xgi_num_devices]; - info->dev = dev; - - xgi_lock_init(info); - - info->mmio.base = XGI_PCI_RESOURCE_START(dev, 1); - info->mmio.size = XGI_PCI_RESOURCE_SIZE(dev, 1); - - /* check IO region */ - if 
(!request_mem_region(info->mmio.base, info->mmio.size, "xgi")) { - XGI_ERROR("cannot reserve MMIO memory\n"); - goto error_disable_dev; - } - - XGI_INFO("info->mmio.base: 0x%lx \n", info->mmio.base); - XGI_INFO("info->mmio.size: 0x%lx \n", info->mmio.size); - - info->mmio.vbase = ioremap_nocache(info->mmio.base, info->mmio.size); - if (!info->mmio.vbase) { - release_mem_region(info->mmio.base, info->mmio.size); - XGI_ERROR("info->mmio.vbase failed\n"); - goto error_disable_dev; - } xgi_enable_mmio(info); - //xgi_enable_ge(info); + info->pcie.size = bs.gart_size * (1024 * 1024); - XGI_INFO("info->mmio.vbase: 0x%p \n", info->mmio.vbase); - - info->fb.base = XGI_PCI_RESOURCE_START(dev, 0); - info->fb.size = XGI_PCI_RESOURCE_SIZE(dev, 0); - - XGI_INFO("info->fb.base: 0x%lx \n", info->fb.base); - XGI_INFO("info->fb.size: 0x%lx \n", info->fb.size); - - info->fb.size = bIn3cf(0x54) * 8 * 1024 * 1024; - XGI_INFO("info->fb.size: 0x%lx \n", info->fb.size); - - /* check frame buffer region - if (!request_mem_region(info->fb.base, info->fb.size, "xgi")) - { - release_mem_region(info->mmio.base, info->mmio.size); - XGI_ERROR("cannot reserve frame buffer memory\n"); - goto error_disable_dev; - } - - info->fb.vbase = ioremap_nocache(info->fb.base, info->fb.size); - - if (!info->fb.vbase) - { - release_mem_region(info->mmio.base, info->mmio.size); - release_mem_region(info->fb.base, info->fb.size); - XGI_ERROR("info->fb.vbase failed\n"); - goto error_disable_dev; - } - */ - info->fb.vbase = NULL; - XGI_INFO("info->fb.vbase: 0x%p \n", info->fb.vbase); - - - /* check common error condition */ - if (info->dev->irq == 0) { - XGI_ERROR("Can't find an IRQ for your XGI card! \n"); - goto error_zero_dev; - } - XGI_INFO("info->irq: %lx \n", info->dev->irq); - - //xgi_enable_dvi_interrupt(info); - - /* sanity check the IO apertures */ - if ((info->mmio.base == 0) || (info->mmio.size == 0) - || (info->fb.base == 0) || (info->fb.size == 0)) { - XGI_ERROR("The IO regions for your XGI card are invalid.\n"); - - if ((info->mmio.base == 0) || (info->mmio.size == 0)) { - XGI_ERROR("mmio appears to be wrong: 0x%lx 0x%lx\n", - info->mmio.base, info->mmio.size); - } - - if ((info->fb.base == 0) || (info->fb.size == 0)) { - XGI_ERROR - ("frame buffer appears to be wrong: 0x%lx 0x%lx\n", - info->fb.base, info->fb.size); - } - - goto error_zero_dev; - } - //xgi_num_devices++; - - return 0; - - error_zero_dev: - release_mem_region(info->fb.base, info->fb.size); - release_mem_region(info->mmio.base, info->mmio.size); - - error_disable_dev: - pci_disable_device(dev); - return -1; - -} - -/* - * vma operations... - * this is only called when the vmas are duplicated. this - * appears to only happen when the process is cloned to create - * a new process, and not when the process is threaded. - * - * increment the usage count for the physical pages, so when - * this clone unmaps the mappings, the pages are not - * deallocated under the original process. 
- */ -struct vm_operations_struct xgi_vm_ops = { - .open = xgi_kern_vma_open, - .close = xgi_kern_vma_release, - .nopage = xgi_kern_vma_nopage, -}; - -void xgi_kern_vma_open(struct vm_area_struct *vma) -{ - XGI_INFO("VM: vma_open for 0x%lx - 0x%lx, offset 0x%lx\n", - vma->vm_start, vma->vm_end, XGI_VMA_OFFSET(vma)); - - if (XGI_VMA_PRIVATE(vma)) { - struct xgi_pcie_block *block = - (struct xgi_pcie_block *) XGI_VMA_PRIVATE(vma); - XGI_ATOMIC_INC(block->use_count); - } -} - -void xgi_kern_vma_release(struct vm_area_struct *vma) -{ - XGI_INFO("VM: vma_release for 0x%lx - 0x%lx, offset 0x%lx\n", - vma->vm_start, vma->vm_end, XGI_VMA_OFFSET(vma)); - - if (XGI_VMA_PRIVATE(vma)) { - struct xgi_pcie_block *block = - (struct xgi_pcie_block *) XGI_VMA_PRIVATE(vma); - XGI_ATOMIC_DEC(block->use_count); - - /* - * if use_count is down to 0, the kernel virtual mapping was freed - * but the underlying physical pages were not, we need to clear the - * bit and free the physical pages. - */ - if (XGI_ATOMIC_READ(block->use_count) == 0) { - // Need TO Finish - XGI_VMA_PRIVATE(vma) = NULL; - } - } -} - -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 1)) -struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma, - unsigned long address, int *type) -{ - struct xgi_pcie_block *block = (struct xgi_pcie_block *) XGI_VMA_PRIVATE(vma); - struct page *page = NOPAGE_SIGBUS; - unsigned long offset = 0; - unsigned long page_addr = 0; -/* - XGI_INFO("VM: mmap([0x%lx-0x%lx] off=0x%lx) address: 0x%lx \n", - vma->vm_start, - vma->vm_end, - XGI_VMA_OFFSET(vma), - address); -*/ - offset = (address - vma->vm_start) + XGI_VMA_OFFSET(vma); - - offset = offset - block->bus_addr; - - offset >>= PAGE_SHIFT; - - page_addr = block->page_table[offset].virt_addr; - - if (xgi_temp) { - XGI_INFO("block->bus_addr: 0x%lx block->hw_addr: 0x%lx" - "block->page_count: 0x%lx block->page_order: 0x%lx" - "block->page_table[0x%lx].virt_addr: 0x%lx\n", - block->bus_addr, block->hw_addr, - block->page_count, block->page_order, - offset, block->page_table[offset].virt_addr); - xgi_temp = 0; + /* Init the resource manager */ + err = xgi_pcie_heap_init(info); + if (err) { + DRM_ERROR("xgi_pcie_heap_init() failed\n"); + return err; } - if (!page_addr) - goto out; /* hole or end-of-file */ - page = virt_to_page(page_addr); - - /* got it, now increment the count */ - get_page(page); - out: - return page; - -} -#else -struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma, - unsigned long address, int write_access) -{ - struct xgi_pcie_block *block = (struct xgi_pcie_block *) XGI_VMA_PRIVATE(vma); - struct page *page = NOPAGE_SIGBUS; - unsigned long offset = 0; - unsigned long page_addr = 0; -/* - XGI_INFO("VM: mmap([0x%lx-0x%lx] off=0x%lx) address: 0x%lx \n", - vma->vm_start, - vma->vm_end, - XGI_VMA_OFFSET(vma), - address); -*/ - offset = (address - vma->vm_start) + XGI_VMA_OFFSET(vma); - - offset = offset - block->bus_addr; - - offset >>= PAGE_SHIFT; - - page_addr = block->page_table[offset].virt_addr; - - if (xgi_temp) { - XGI_INFO("block->bus_addr: 0x%lx block->hw_addr: 0x%lx" - "block->page_count: 0x%lx block->page_order: 0x%lx" - "block->page_table[0x%lx].virt_addr: 0x%lx\n", - block->bus_addr, block->hw_addr, - block->page_count, block->page_order, - offset, block->page_table[offset].virt_addr); - xgi_temp = 0; - } - - if (!page_addr) - goto out; /* hole or end-of-file */ - page = virt_to_page(page_addr); - - /* got it, now increment the count */ - get_page(page); - out: - return page; -} -#endif - -#if 0 -static struct file_operations 
xgi_fops = { - /* owner: THIS_MODULE, */ - poll:xgi_kern_poll, - ioctl:xgi_kern_ioctl, - mmap:xgi_kern_mmap, - open:xgi_kern_open, - release:xgi_kern_release, -}; -#endif - -static struct file_operations xgi_fops = { - .owner = THIS_MODULE, - .poll = xgi_kern_poll, - .ioctl = xgi_kern_ioctl, - .mmap = xgi_kern_mmap, - .open = xgi_kern_open, - .release = xgi_kern_release, -}; - -static struct xgi_file_private *xgi_alloc_file_private(void) -{ - struct xgi_file_private *fp; - - XGI_KMALLOC(fp, sizeof(struct xgi_file_private)); - if (!fp) - return NULL; - - memset(fp, 0, sizeof(struct xgi_file_private)); - - /* initialize this file's event queue */ - init_waitqueue_head(&fp->wait_queue); - - xgi_init_lock(fp->fp_lock); - - return fp; -} - -static void xgi_free_file_private(struct xgi_file_private * fp) -{ - if (fp == NULL) - return; - - XGI_KFREE(fp, sizeof(struct xgi_file_private)); -} - -int xgi_kern_open(struct inode *inode, struct file *filp) -{ - struct xgi_info *info = NULL; - int dev_num; - int result = 0, status; - - /* - * the type and num values are only valid if we are not using devfs. - * However, since we use them to retrieve the device pointer, we - * don't need them with devfs as filp->private_data is already - * initialized - */ - filp->private_data = xgi_alloc_file_private(); - if (filp->private_data == NULL) - return -ENOMEM; - - XGI_INFO("filp->private_data %p\n", filp->private_data); - /* - * for control device, just jump to its open routine - * after setting up the private data - */ - if (XGI_IS_CONTROL_DEVICE(inode)) - return xgi_kern_ctl_open(inode, filp); - - /* what device are we talking about? */ - dev_num = XGI_DEVICE_NUMBER(inode); - if (dev_num >= XGI_MAX_DEVICES) { - xgi_free_file_private(filp->private_data); - filp->private_data = NULL; - return -ENODEV; - } - - info = &xgi_devices[dev_num]; - - XGI_INFO("Jong-xgi_kern_open on device %d\n", dev_num); - - xgi_down(info->info_sem); - XGI_CHECK_PCI_CONFIG(info); - - XGI_INFO_FROM_FP(filp) = info; - - /* - * map the memory and allocate isr on first open - */ - - if (!(info->flags & XGI_FLAG_OPEN)) { - XGI_INFO("info->flags & XGI_FLAG_OPEN \n"); - - if (info->dev->device == 0) { - XGI_INFO("open of nonexistent device %d\n", dev_num); - result = -ENXIO; - goto failed; - } - - /* initialize struct irqaction */ - status = request_irq(info->dev->irq, xgi_kern_isr, - SA_INTERRUPT | SA_SHIRQ, "xgi", - (void *)info); - if (status != 0) { - if (info->dev->irq && (status == -EBUSY)) { - XGI_ERROR - ("Tried to get irq %d, but another driver", - (unsigned int)info->dev->irq); - XGI_ERROR("has it and is not sharing it.\n"); - } - XGI_ERROR("isr request failed 0x%x\n", status); - result = -EIO; - goto failed; - } - - /* - * #define DECLARE_TASKLET(name, func, data) \ - * struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data } - */ - info->tasklet.func = xgi_kern_isr_bh; - info->tasklet.data = (unsigned long)info; - tasklet_enable(&info->tasklet); - - /* Alloc 1M bytes for cmdbuffer which is flush2D batch array */ - xgi_cmdlist_initialize(info, 0x100000); - - info->flags |= XGI_FLAG_OPEN; - } - - XGI_ATOMIC_INC(info->use_count); - - failed: - xgi_up(info->info_sem); - - if ((result) && filp->private_data) { - xgi_free_file_private(filp->private_data); - filp->private_data = NULL; - } - - return result; -} - -int xgi_kern_release(struct inode *inode, struct file *filp) -{ - struct xgi_info *info = XGI_INFO_FROM_FP(filp); - - XGI_CHECK_PCI_CONFIG(info); - - /* - * for control device, just jump to its open routine - * 
after setting up the private data - */ - if (XGI_IS_CONTROL_DEVICE(inode)) - return xgi_kern_ctl_close(inode, filp); - - XGI_INFO("Jong-xgi_kern_release on device %d\n", - XGI_DEVICE_NUMBER(inode)); - - xgi_down(info->info_sem); - if (XGI_ATOMIC_DEC_AND_TEST(info->use_count)) { - - /* - * The usage count for this device has dropped to zero, it can be shut - * down safely; disable its interrupts. - */ - - /* - * Disable this device's tasklet to make sure that no bottom half will - * run with undefined device state. - */ - tasklet_disable(&info->tasklet); - - /* - * Free the IRQ, which may block until all pending interrupt processing - * has completed. - */ - free_irq(info->dev->irq, (void *)info); - - xgi_cmdlist_cleanup(info); - - /* leave INIT flag alone so we don't reinit every time */ - info->flags &= ~XGI_FLAG_OPEN; - } - - xgi_up(info->info_sem); - - if (FILE_PRIVATE(filp)) { - xgi_free_file_private(FILE_PRIVATE(filp)); - FILE_PRIVATE(filp) = NULL; - } + /* Alloc 1M bytes for cmdbuffer which is flush2D batch array */ + xgi_cmdlist_initialize(info, 0x100000); + info->bootstrap_done = 1; return 0; } -int xgi_kern_mmap(struct file *filp, struct vm_area_struct *vma) + +void xgi_driver_preclose(struct drm_device * dev, DRMFILE filp) { - //struct inode *inode = INODE_FROM_FP(filp); - struct xgi_info *info = XGI_INFO_FROM_FP(filp); - struct xgi_pcie_block *block; - int pages = 0; - unsigned long prot; + struct xgi_info * info = dev->dev_private; - XGI_INFO("Jong-VM: mmap([0x%lx-0x%lx] off=0x%lx)\n", - vma->vm_start, vma->vm_end, XGI_VMA_OFFSET(vma)); - - XGI_CHECK_PCI_CONFIG(info); - - if (XGI_MASK_OFFSET(vma->vm_start) - || XGI_MASK_OFFSET(vma->vm_end)) { - XGI_ERROR("VM: bad mmap range: %lx - %lx\n", - vma->vm_start, vma->vm_end); - return -ENXIO; - } - - pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; - - vma->vm_ops = &xgi_vm_ops; - - /* XGI IO(reg) space */ - if (IS_IO_OFFSET - (info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start)) { - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - if (XGI_REMAP_PAGE_RANGE(vma->vm_start, - XGI_VMA_OFFSET(vma), - vma->vm_end - vma->vm_start, - vma->vm_page_prot)) - return -EAGAIN; - - /* mark it as IO so that we don't dump it on core dump */ - vma->vm_flags |= VM_IO; - XGI_INFO("VM: mmap io space \n"); - } - /* XGI fb space */ - /* Jong 06/14/2006; moved behind PCIE or modify IS_FB_OFFSET */ - else if (IS_FB_OFFSET - (info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start)) { - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - if (XGI_REMAP_PAGE_RANGE(vma->vm_start, - XGI_VMA_OFFSET(vma), - vma->vm_end - vma->vm_start, - vma->vm_page_prot)) - return -EAGAIN; - - // mark it as IO so that we don't dump it on core dump - vma->vm_flags |= VM_IO; - XGI_INFO("VM: mmap fb space \n"); - } - /* PCIE allocator */ - /* XGI_VMA_OFFSET(vma) is offset based on pcie.base (HW address space) */ - else if (IS_PCIE_OFFSET - (info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start)) { - xgi_down(info->pcie_sem); - - block = xgi_find_pcie_block(info, XGI_VMA_OFFSET(vma)); - - if (block == NULL) { - XGI_ERROR("couldn't find pre-allocated PCIE memory!\n"); - xgi_up(info->pcie_sem); - return -EAGAIN; - } - - if (block->page_count != pages) { - XGI_ERROR - ("pre-allocated PCIE memory has wrong number of pages!\n"); - xgi_up(info->pcie_sem); - return -EAGAIN; - } - - vma->vm_private_data = block; - XGI_ATOMIC_INC(block->use_count); - xgi_up(info->pcie_sem); - - /* - * prevent the swapper from swapping it out - * mark the memory i/o so the buffers 
aren't - * dumped on core dumps */ - vma->vm_flags |= (VM_LOCKED | VM_IO); - - /* un-cached */ - prot = pgprot_val(vma->vm_page_prot); - /* - if (boot_cpu_data.x86 > 3) - prot |= _PAGE_PCD | _PAGE_PWT; - */ - vma->vm_page_prot = __pgprot(prot); - - XGI_INFO("VM: mmap pcie space \n"); - } -#if 0 - else if (IS_FB_OFFSET - (info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start)) { - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - if (XGI_REMAP_PAGE_RANGE(vma->vm_start, - XGI_VMA_OFFSET(vma), - vma->vm_end - vma->vm_start, - vma->vm_page_prot)) - return -EAGAIN; - - // mark it as IO so that we don't dump it on core dump - vma->vm_flags |= VM_IO; - XGI_INFO("VM: mmap fb space \n"); - } -#endif - else { - vma->vm_flags |= (VM_IO | VM_LOCKED); - XGI_ERROR("VM: mmap wrong range \n"); - } - - vma->vm_file = filp; - - return 0; + xgi_pcie_free_all(info, filp); + xgi_fb_free_all(info, filp); } -unsigned int xgi_kern_poll(struct file *filp, struct poll_table_struct *wait) -{ - struct xgi_file_private *fp; - struct xgi_info *info; - unsigned int mask = 0; - unsigned long eflags; - - info = XGI_INFO_FROM_FP(filp); - - if (info->device_number == XGI_CONTROL_DEVICE_NUMBER) - return xgi_kern_ctl_poll(filp, wait); - - fp = XGI_GET_FP(filp); - - if (!(filp->f_flags & O_NONBLOCK)) { - /* add us to the list */ - poll_wait(filp, &fp->wait_queue, wait); - } - - xgi_lock_irqsave(fp->fp_lock, eflags); - - /* wake the user on any event */ - if (fp->num_events) { - XGI_INFO("Hey, an event occured!\n"); - /* - * trigger the client, when they grab the event, - * we'll decrement the event count - */ - mask |= (POLLPRI | POLLIN); - } - xgi_unlock_irqsave(fp->fp_lock, eflags); - - return mask; -} - -int xgi_kern_ioctl(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) -{ - struct xgi_info *info; - struct xgi_mem_alloc *alloc = NULL; - - int status = 0; - void *arg_copy; - int arg_size; - int err = 0; - - info = XGI_INFO_FROM_FP(filp); - - XGI_INFO("Jong-ioctl(0x%x, 0x%x, 0x%lx, 0x%x)\n", _IOC_TYPE(cmd), - _IOC_NR(cmd), arg, _IOC_SIZE(cmd)); - /* - * extract the type and number bitfields, and don't decode - * wrong cmds: return ENOTTY (inappropriate ioctl) before access_ok() - */ - if (_IOC_TYPE(cmd) != XGI_IOCTL_MAGIC) - return -ENOTTY; - if (_IOC_NR(cmd) > XGI_IOCTL_MAXNR) - return -ENOTTY; - - /* - * the direction is a bitmask, and VERIFY_WRITE catches R/W - * transfers. `Type' is user-oriented, while - * access_ok is kernel-oriented, so the concept of "read" and - * "write" is reversed - */ - if (_IOC_DIR(cmd) & _IOC_READ) { - err = !access_ok(VERIFY_WRITE, (void *)arg, _IOC_SIZE(cmd)); - } else if (_IOC_DIR(cmd) & _IOC_WRITE) { - err = !access_ok(VERIFY_READ, (void *)arg, _IOC_SIZE(cmd)); - } - if (err) - return -EFAULT; - - XGI_CHECK_PCI_CONFIG(info); - - arg_size = _IOC_SIZE(cmd); - XGI_KMALLOC(arg_copy, arg_size); - if (arg_copy == NULL) { - XGI_ERROR("failed to allocate ioctl memory\n"); - return -ENOMEM; - } - - /* Jong 05/25/2006 */ - /* copy_from_user(arg_copy, (void *)arg, arg_size); */ - if (copy_from_user(arg_copy, (void *)arg, arg_size)) { - XGI_ERROR("failed to copyin ioctl data\n"); - XGI_INFO("Jong-copy_from_user-fail! \n"); - } else - XGI_INFO("Jong-copy_from_user-OK! 
\n"); - - alloc = (struct xgi_mem_alloc *) arg_copy; - XGI_INFO("Jong-succeeded in copy_from_user 0x%lx, 0x%x bytes.\n", arg, - arg_size); - - switch (_IOC_NR(cmd)) { - case XGI_ESC_POST_VBIOS: - XGI_INFO("Jong-xgi_ioctl_post_vbios \n"); - break; - case XGI_ESC_FB_ALLOC: - XGI_INFO("Jong-xgi_ioctl_fb_alloc \n"); - xgi_fb_alloc(info, alloc, 0); - break; - case XGI_ESC_FB_FREE: - XGI_INFO("Jong-xgi_ioctl_fb_free \n"); - xgi_fb_free(info, *(unsigned long *)arg_copy); - break; - case XGI_ESC_PCIE_ALLOC: - XGI_INFO("Jong-xgi_ioctl_pcie_alloc \n"); - xgi_pcie_alloc(info, alloc, 0); - break; - case XGI_ESC_PCIE_FREE: - XGI_INFO("Jong-xgi_ioctl_pcie_free: bus_addr = 0x%lx \n", - *((unsigned long *)arg_copy)); - xgi_pcie_free(info, *((unsigned long *)arg_copy)); - break; - case XGI_ESC_GE_RESET: - XGI_INFO("Jong-xgi_ioctl_ge_reset \n"); - xgi_ge_reset(info); - break; - case XGI_ESC_DUMP_REGISTER: - XGI_INFO("Jong-xgi_ioctl_dump_register \n"); - xgi_dump_register(info); - break; - case XGI_ESC_DEBUG_INFO: - XGI_INFO("Jong-xgi_ioctl_restore_registers \n"); - xgi_restore_registers(info); - break; - case XGI_ESC_SUBMIT_CMDLIST: - XGI_INFO("Jong-xgi_ioctl_submit_cmdlist \n"); - xgi_submit_cmdlist(info, (struct xgi_cmd_info *) arg_copy); - break; - case XGI_ESC_TEST_RWINKERNEL: - XGI_INFO("Jong-xgi_test_rwinkernel \n"); - xgi_test_rwinkernel(info, *(unsigned long *)arg_copy); - break; - case XGI_ESC_STATE_CHANGE: - XGI_INFO("Jong-xgi_state_change \n"); - xgi_state_change(info, (struct xgi_state_info *) arg_copy); - break; - default: - XGI_INFO("Jong-xgi_ioctl_default \n"); - status = -EINVAL; - break; - } - - if (copy_to_user((void *)arg, arg_copy, arg_size)) { - XGI_ERROR("failed to copyout ioctl data\n"); - XGI_INFO("Jong-copy_to_user-fail! \n"); - } else - XGI_INFO("Jong-copy_to_user-OK! 
\n"); - - XGI_KFREE(arg_copy, arg_size); - return status; -} - -/* - * xgi control driver operations defined here - */ -int xgi_kern_ctl_open(struct inode *inode, struct file *filp) -{ - struct xgi_info *info = &xgi_ctl_device; - - int rc = 0; - - XGI_INFO("Jong-xgi_kern_ctl_open\n"); - - xgi_down(info->info_sem); - info->device_number = XGI_CONTROL_DEVICE_NUMBER; - - /* save the xgi info in file->private_data */ - filp->private_data = info; - - if (XGI_ATOMIC_READ(info->use_count) == 0) { - init_waitqueue_head(&xgi_ctl_waitqueue); - } - - info->flags |= XGI_FLAG_OPEN + XGI_FLAG_CONTROL; - - XGI_ATOMIC_INC(info->use_count); - xgi_up(info->info_sem); - - return rc; -} - -int xgi_kern_ctl_close(struct inode *inode, struct file *filp) -{ - struct xgi_info *info = XGI_INFO_FROM_FP(filp); - - XGI_INFO("Jong-xgi_kern_ctl_close\n"); - - xgi_down(info->info_sem); - if (XGI_ATOMIC_DEC_AND_TEST(info->use_count)) { - info->flags = 0; - } - xgi_up(info->info_sem); - - if (FILE_PRIVATE(filp)) { - xgi_free_file_private(FILE_PRIVATE(filp)); - FILE_PRIVATE(filp) = NULL; - } - - return 0; -} - -unsigned int xgi_kern_ctl_poll(struct file *filp, poll_table * wait) -{ - //struct xgi_info *info = XGI_INFO_FROM_FP(filp);; - unsigned int ret = 0; - - if (!(filp->f_flags & O_NONBLOCK)) { - poll_wait(filp, &xgi_ctl_waitqueue, wait); - } - - return ret; -} - -/* - * xgi proc system - */ -static u8 xgi_find_pcie_capability(struct pci_dev *dev) -{ - u16 status; - u8 cap_ptr, cap_id; - - pci_read_config_word(dev, PCI_STATUS, &status); - status &= PCI_STATUS_CAP_LIST; - if (!status) - return 0; - - switch (dev->hdr_type) { - case PCI_HEADER_TYPE_NORMAL: - case PCI_HEADER_TYPE_BRIDGE: - pci_read_config_byte(dev, PCI_CAPABILITY_LIST, &cap_ptr); - break; - default: - return 0; - } - - do { - cap_ptr &= 0xFC; - pci_read_config_byte(dev, cap_ptr + PCI_CAP_LIST_ID, &cap_id); - pci_read_config_byte(dev, cap_ptr + PCI_CAP_LIST_NEXT, - &cap_ptr); - } while (cap_ptr && cap_id != 0xFF); - - return 0; -} - -int xgi_kern_read_card_info(char *page, char **start, off_t off, - int count, int *eof, void *data) -{ - struct pci_dev *dev; - char *type; - int len = 0; - - struct xgi_info *info; - info = (struct xgi_info *) data; - - dev = info->dev; - if (!dev) - return 0; - - type = xgi_find_pcie_capability(dev) ? 
"PCIE" : "PCI"; - len += sprintf(page + len, "Card Type: \t %s\n", type); - - XGI_PCI_DEV_PUT(dev); - return len; -} - -int xgi_kern_read_version(char *page, char **start, off_t off, - int count, int *eof, void *data) -{ - int len = 0; - - len += sprintf(page + len, "XGI version: %s\n", "1.0"); - len += sprintf(page + len, "GCC version: %s\n", "3.0"); - - return len; -} - -int xgi_kern_read_pcie_info(char *page, char **start, off_t off, - int count, int *eof, void *data) -{ - return 0; -} - -int xgi_kern_read_status(char *page, char **start, off_t off, - int count, int *eof, void *data) -{ - return 0; -} - -static void xgi_proc_create(void) -{ -#ifdef CONFIG_PROC_FS - - struct pci_dev *dev; - int i = 0; - char name[6]; - - struct proc_dir_entry *entry; - struct proc_dir_entry *proc_xgi_pcie, *proc_xgi_cards; - - struct xgi_info *info; - struct xgi_info *xgi_max_devices; - - /* world readable directory */ - int flags = S_IFDIR | S_IRUGO | S_IXUGO; - - proc_xgi = create_proc_entry("xgi", flags, proc_root_driver); - if (!proc_xgi) - goto failed; - - proc_xgi_cards = create_proc_entry("cards", flags, proc_xgi); - if (!proc_xgi_cards) - goto failed; - - proc_xgi_pcie = create_proc_entry("pcie", flags, proc_xgi); - if (!proc_xgi_pcie) - goto failed; - - /* - * Set the module owner to ensure that the reference - * count reflects accesses to the proc files. - */ - proc_xgi->owner = THIS_MODULE; - proc_xgi_cards->owner = THIS_MODULE; - proc_xgi_pcie->owner = THIS_MODULE; - - xgi_max_devices = xgi_devices + XGI_MAX_DEVICES; - for (info = xgi_devices; info < xgi_max_devices; info++) { - /* world readable file */ - flags = S_IFREG | S_IRUGO; - - dev = info->dev; - if (!dev) - break; - - sprintf(name, "%d", i++); - entry = create_proc_entry(name, flags, proc_xgi_cards); - if (!entry) { - XGI_PCI_DEV_PUT(dev); - goto failed; - } - - entry->data = info; - entry->read_proc = xgi_kern_read_card_info; - entry->owner = THIS_MODULE; - - if (xgi_find_pcie_capability(dev)) { - entry = - create_proc_entry("status", flags, proc_xgi_pcie); - if (!entry) { - XGI_PCI_DEV_PUT(dev); - goto failed; - } - - entry->data = info; - entry->read_proc = xgi_kern_read_status; - entry->owner = THIS_MODULE; - - entry = create_proc_entry("card", flags, proc_xgi_pcie); - if (!entry) { - XGI_PCI_DEV_PUT(dev); - goto failed; - } - - entry->data = info; - entry->read_proc = xgi_kern_read_pcie_info; - entry->owner = THIS_MODULE; - } - - XGI_PCI_DEV_PUT(dev); - } - - entry = create_proc_entry("version", flags, proc_xgi); - if (!entry) - goto failed; - - entry->read_proc = xgi_kern_read_version; - entry->owner = THIS_MODULE; - - entry = create_proc_entry("host-bridge", flags, proc_xgi_pcie); - if (!entry) - goto failed; - - entry->data = NULL; - entry->read_proc = xgi_kern_read_pcie_info; - entry->owner = THIS_MODULE; - - return; - - failed: - XGI_ERROR("failed to create /proc entries!\n"); - xgi_proc_remove_all(proc_xgi); -#endif -} - -#ifdef CONFIG_PROC_FS -static void xgi_proc_remove_all(struct proc_dir_entry *entry) -{ - while (entry) { - struct proc_dir_entry *next = entry->next; - if (entry->subdir) - xgi_proc_remove_all(entry->subdir); - remove_proc_entry(entry->name, entry->parent); - if (entry == proc_xgi) - break; - entry = next; - } -} -#endif - -static void xgi_proc_remove(void) -{ -#ifdef CONFIG_PROC_FS - xgi_proc_remove_all(proc_xgi); -#endif -} /* * driver receives an interrupt if someone waiting, then hand it off. 
*/ -irqreturn_t xgi_kern_isr(int irq, void *dev_id, struct pt_regs *regs) +irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS) { - struct xgi_info *info = (struct xgi_info *) dev_id; + struct drm_device *dev = (struct drm_device *) arg; +// struct xgi_info *info = dev->dev_private; u32 need_to_run_bottom_half = 0; - //XGI_INFO("xgi_kern_isr \n"); + //DRM_INFO("xgi_kern_isr \n"); //XGI_CHECK_PCI_CONFIG(info); //xgi_dvi_irq_handler(info); if (need_to_run_bottom_half) { - tasklet_schedule(&info->tasklet); + drm_locked_tasklet(dev, xgi_kern_isr_bh); } return IRQ_HANDLED; } -void xgi_kern_isr_bh(unsigned long data) +void xgi_kern_isr_bh(struct drm_device *dev) { - struct xgi_info *info = (struct xgi_info *) data; + struct xgi_info *info = dev->dev_private; - XGI_INFO("xgi_kern_isr_bh \n"); + DRM_INFO("xgi_kern_isr_bh \n"); //xgi_dvi_irq_handler(info); XGI_CHECK_PCI_CONFIG(info); } -static void xgi_lock_init(struct xgi_info * info) +int xgi_driver_load(struct drm_device *dev, unsigned long flags) { - if (info == NULL) - return; + struct xgi_info *info; + int err; - spin_lock_init(&info->info_lock); - sema_init(&info->info_sem, 1); + info = drm_alloc(sizeof(*info), DRM_MEM_DRIVER); + if (!info) + return DRM_ERR(ENOMEM); + + (void) memset(info, 0, sizeof(*info)); + dev->dev_private = info; + info->dev = dev; + sema_init(&info->fb_sem, 1); sema_init(&info->pcie_sem, 1); - XGI_ATOMIC_SET(info->use_count, 0); -} + info->mmio.base = drm_get_resource_start(dev, 1); + info->mmio.size = drm_get_resource_len(dev, 1); -static void xgi_dev_init(struct xgi_info * info) -{ - struct pci_dev *pdev = NULL; - struct xgi_dev *dev; - int found = 0; - u16 pci_cmd; + DRM_INFO("mmio base: 0x%lx, size: 0x%x\n", + (unsigned long) info->mmio.base, info->mmio.size); - XGI_INFO("Enter xgi_dev_init \n"); - //XGI_PCI_FOR_EACH_DEV(pdev) - { - for (dev = xgidev_list; dev->vendor; dev++) { - if ((dev->vendor == pdev->vendor) - && (dev->device == pdev->device)) { - u8 rev_id; - - XGI_INFO("dev->vendor = pdev->vendor= %x \n", - dev->vendor); - XGI_INFO("dev->device = pdev->device= %x \n", - dev->device); - - xgi_devices[found].dev = pdev; - - pci_read_config_byte(pdev, PCI_REVISION_ID, - rev_id); - - XGI_INFO("PCI_REVISION_ID= %x \n", rev_id); - - pci_read_config_word(pdev, PCI_COMMAND, - &pci_cmd); - - XGI_INFO("PCI_COMMAND = %x \n", pci_cmd); - - break; - } - } - } -} - -/* - * Export to Linux Kernel - */ - -static int __init xgi_init_module(void) -{ - struct xgi_info *info = &xgi_devices[xgi_num_devices]; - int i, result; - - XGI_INFO("Jong-xgi kernel driver %s initializing\n", XGI_DRV_VERSION); - //SET_MODULE_OWNER(&xgi_fops); - - memset(xgi_devices, 0, sizeof(xgi_devices)); - - if (pci_register_driver(&xgi_pci_driver) < 0) { - pci_unregister_driver(&xgi_pci_driver); - XGI_ERROR("no XGI graphics adapter found\n"); - return -ENODEV; + if ((info->mmio.base == 0) || (info->mmio.size == 0)) { + DRM_ERROR("mmio appears to be wrong: 0x%lx 0x%x\n", + (unsigned long) info->mmio.base, info->mmio.size); + return DRM_ERR(EINVAL); } - XGI_INFO("Jong-xgi_devices[%d].fb.base.: 0x%lx \n", xgi_num_devices, - xgi_devices[xgi_num_devices].fb.base); - XGI_INFO("Jong-xgi_devices[%d].fb.size.: 0x%lx \n", xgi_num_devices, - xgi_devices[xgi_num_devices].fb.size); -/* Jong 07/27/2006; test for ubuntu */ -/* -#ifdef CONFIG_DEVFS_FS - - XGI_INFO("Jong-Use devfs \n"); - do - { - xgi_devfs_handles[0] = XGI_DEVFS_REGISTER("xgi", 0); - if (xgi_devfs_handles[0] == NULL) - { - result = -ENOMEM; - XGI_ERROR("devfs register failed\n"); - goto failed; - } - } while(0); - 
#else *//* no devfs, do it the "classic" way */ - - XGI_INFO("Jong-Use non-devfs \n"); - /* - * Register your major, and accept a dynamic number. This is the - * first thing to do, in order to avoid releasing other module's - * fops in scull_cleanup_module() - */ - result = XGI_REGISTER_CHRDEV(xgi_major, "xgi", &xgi_fops); - if (result < 0) { - XGI_ERROR("register chrdev failed\n"); - pci_unregister_driver(&xgi_pci_driver); - return result; - } - if (xgi_major == 0) - xgi_major = result; /* dynamic */ - - /* #endif *//* CONFIG_DEVFS_FS */ - - XGI_INFO("Jong-major number %d\n", xgi_major); - - /* instantiate tasklets */ - for (i = 0; i < XGI_MAX_DEVICES; i++) { - /* - * We keep one tasklet per card to avoid latency issues with more - * than one device; no two instances of a single tasklet are ever - * executed concurrently. - */ - XGI_ATOMIC_SET(xgi_devices[i].tasklet.count, 1); + err = drm_addmap(dev, info->mmio.base, info->mmio.size, + _DRM_REGISTERS, _DRM_KERNEL | _DRM_READ_ONLY, + &info->mmio_map); + if (err) { + DRM_ERROR("Unable to map MMIO region: %d\n", err); + return err; } - /* init the xgi control device */ - { - struct xgi_info *info_ctl = &xgi_ctl_device; - xgi_lock_init(info_ctl); + xgi_enable_mmio(info); + //xgi_enable_ge(info); + + info->fb.base = drm_get_resource_start(dev, 0); + info->fb.size = drm_get_resource_len(dev, 0); + + DRM_INFO("fb base: 0x%lx, size: 0x%x\n", + (unsigned long) info->fb.base, info->fb.size); + + info->fb.size = IN3CFB(info->mmio_map, 0x54) * 8 * 1024 * 1024; + + DRM_INFO("fb base: 0x%lx, size: 0x%x (probed)\n", + (unsigned long) info->fb.base, info->fb.size); + + + if ((info->fb.base == 0) || (info->fb.size == 0)) { + DRM_ERROR("frame buffer appears to be wrong: 0x%lx 0x%x\n", + (unsigned long) info->fb.base, info->fb.size); + return DRM_ERR(EINVAL); } + + + xgi_mem_block_cache = kmem_cache_create("xgi_mem_block", + sizeof(struct xgi_mem_block), + 0, + SLAB_HWCACHE_ALIGN, + NULL, NULL); + if (xgi_mem_block_cache == NULL) { + return DRM_ERR(ENOMEM); + } + + /* Init the resource manager */ - INIT_LIST_HEAD(&xgi_mempid_list); - if (!xgi_fb_heap_init(info)) { - XGI_ERROR("xgi_fb_heap_init() failed\n"); - result = -EIO; - goto failed; + err = xgi_fb_heap_init(info); + if (err) { + DRM_ERROR("xgi_fb_heap_init() failed\n"); + return err; } - /* Init the resource manager */ - if (!xgi_pcie_heap_init(info)) { - XGI_ERROR("xgi_pcie_heap_init() failed\n"); - result = -EIO; - goto failed; - } - - /* create /proc/driver/xgi */ - xgi_proc_create(); - -#if defined(DEBUG) - inter_module_register("xgi_devices", THIS_MODULE, xgi_devices); -#endif - return 0; +} - failed: -#ifdef CONFIG_DEVFS_FS - XGI_DEVFS_REMOVE_CONTROL(); - XGI_DEVFS_REMOVE_DEVICE(xgi_num_devices); -#endif +int xgi_driver_unload(struct drm_device *dev) +{ + struct xgi_info * info = dev->dev_private; - if (XGI_UNREGISTER_CHRDEV(xgi_major, "xgi") < 0) - XGI_ERROR("unregister xgi chrdev failed\n"); - - for (i = 0; i < xgi_num_devices; i++) { - if (xgi_devices[i].dev) { - release_mem_region(xgi_devices[i].fb.base, - xgi_devices[i].fb.size); - release_mem_region(xgi_devices[i].mmio.base, - xgi_devices[i].mmio.size); - } + xgi_cmdlist_cleanup(info); + if (info->fb_map != NULL) { + drm_rmmap(info->dev, info->fb_map); } - pci_unregister_driver(&xgi_pci_driver); - return result; - - return 1; -} - -void __exit xgi_exit_module(void) -{ - int i; - -#ifdef CONFIG_DEVFS_FS - XGI_DEVFS_REMOVE_DEVICE(xgi_num_devices); -#endif - - if (XGI_UNREGISTER_CHRDEV(xgi_major, "xgi") < 0) - XGI_ERROR("unregister xgi chrdev 
failed\n"); - - XGI_INFO("Jong-unregister xgi chrdev scceeded\n"); - for (i = 0; i < XGI_MAX_DEVICES; i++) { - if (xgi_devices[i].dev) { - /* clean up the flush2D batch array */ - xgi_cmdlist_cleanup(&xgi_devices[i]); - - if (xgi_devices[i].fb.vbase != NULL) { - iounmap(xgi_devices[i].fb.vbase); - xgi_devices[i].fb.vbase = NULL; - } - if (xgi_devices[i].mmio.vbase != NULL) { - iounmap(xgi_devices[i].mmio.vbase); - xgi_devices[i].mmio.vbase = NULL; - } - //release_mem_region(xgi_devices[i].fb.base, xgi_devices[i].fb.size); - //XGI_INFO("release frame buffer mem region scceeded\n"); - - release_mem_region(xgi_devices[i].mmio.base, - xgi_devices[i].mmio.size); - XGI_INFO("release MMIO mem region scceeded\n"); - - xgi_fb_heap_cleanup(&xgi_devices[i]); - XGI_INFO("xgi_fb_heap_cleanup scceeded\n"); - - xgi_pcie_heap_cleanup(&xgi_devices[i]); - XGI_INFO("xgi_pcie_heap_cleanup scceeded\n"); - - XGI_PCI_DISABLE_DEVICE(xgi_devices[i].dev); - } + if (info->mmio_map != NULL) { + drm_rmmap(info->dev, info->mmio_map); } - pci_unregister_driver(&xgi_pci_driver); + xgi_mem_heap_cleanup(&info->fb_heap); + xgi_mem_heap_cleanup(&info->pcie_heap); + xgi_pcie_lut_cleanup(info); - /* remove /proc/driver/xgi */ - xgi_proc_remove(); + if (xgi_mem_block_cache) { + kmem_cache_destroy(xgi_mem_block_cache); + xgi_mem_block_cache = NULL; + } -#if defined(DEBUG) - inter_module_unregister("xgi_devices"); -#endif + return 0; } - -module_init(xgi_init_module); -module_exit(xgi_exit_module); - -#if defined(XGI_PM_SUPPORT_ACPI) -int xgi_acpi_event(struct pci_dev *dev, u32 state) -{ - return 1; -} - -int xgi_kern_acpi_standby(struct pci_dev *dev, u32 state) -{ - return 1; -} - -int xgi_kern_acpi_resume(struct pci_dev *dev) -{ - return 1; -} -#endif - -MODULE_AUTHOR("Andrea Zhang "); -MODULE_DESCRIPTION("xgi kernel driver for xgi cards"); -MODULE_LICENSE("GPL"); diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 382bb7a6..20965876 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -29,115 +29,69 @@ #ifndef _XGI_DRV_H_ #define _XGI_DRV_H_ +#include "drmP.h" +#include "drm.h" + +#define DRIVER_AUTHOR "Andrea Zhang " + +#define DRIVER_NAME "xgi" +#define DRIVER_DESC "XGI XP5 / XP10 / XG47" +#define DRIVER_DATE "20070710" + +#define DRIVER_MAJOR 0 +#define DRIVER_MINOR 8 +#define DRIVER_PATCHLEVEL 0 + #include "xgi_drm.h" -#define XGI_MAJOR_VERSION 0 -#define XGI_MINOR_VERSION 7 -#define XGI_PATCHLEVEL 5 - -#define XGI_DRV_VERSION "0.7.5" - -#ifndef XGI_DRV_NAME -#define XGI_DRV_NAME "xgi" -#endif - -/* - * xgi reserved major device number, Set this to 0 to - * request dynamic major number allocation. - */ -#ifndef XGI_DEV_MAJOR -#define XGI_DEV_MAJOR 0 -#endif - -#ifndef XGI_MAX_DEVICES -#define XGI_MAX_DEVICES 1 -#endif - -/* Jong 06/06/2006 */ -/* #define XGI_DEBUG */ - -#ifndef PCI_VENDOR_ID_XGI -/* -#define PCI_VENDOR_ID_XGI 0x1023 -*/ -#define PCI_VENDOR_ID_XGI 0x18CA - -#endif - -#ifndef PCI_DEVICE_ID_XP5 -#define PCI_DEVICE_ID_XP5 0x2200 -#endif - -#ifndef PCI_DEVICE_ID_XG47 -#define PCI_DEVICE_ID_XG47 0x0047 -#endif - -/* Macros to make printk easier */ -#define XGI_ERROR(fmt, arg...) \ - printk(KERN_ERR "[" XGI_DRV_NAME ":%s] *ERROR* " fmt, __FUNCTION__, ##arg) - -#define XGI_MEM_ERROR(area, fmt, arg...) \ - printk(KERN_ERR "[" XGI_DRV_NAME ":%s] *ERROR* " fmt, __FUNCTION__, ##arg) - -/* #define XGI_DEBUG */ - -#ifdef XGI_DEBUG -#define XGI_INFO(fmt, arg...) 
\ - printk(KERN_ALERT "[" XGI_DRV_NAME ":%s] " fmt, __FUNCTION__, ##arg) -/* printk(KERN_INFO "[" XGI_DRV_NAME ":%s] " fmt, __FUNCTION__, ##arg) */ -#else -#define XGI_INFO(fmt, arg...) do { } while (0) -#endif - -/* device name length; must be atleast 8 */ -#define XGI_DEVICE_NAME_LENGTH 40 - -/* need a fake device number for control device; just to flag it for msgs */ -#define XGI_CONTROL_DEVICE_NUMBER 100 - struct xgi_aperture { - unsigned long base; + dma_addr_t base; unsigned int size; - void *vbase; +}; + +struct xgi_mem_block { + struct list_head list; + unsigned long offset; + unsigned long size; + DRMFILE filp; + + unsigned int owner; +}; + +struct xgi_mem_heap { + struct list_head free_list; + struct list_head used_list; + struct list_head sort_list; + unsigned long max_freesize; + + bool initialized; }; struct xgi_info { - struct pci_dev *dev; - int flags; - int device_number; + struct drm_device *dev; + + bool bootstrap_done; /* physical characteristics */ struct xgi_aperture mmio; struct xgi_aperture fb; struct xgi_aperture pcie; + struct drm_map *mmio_map; + struct drm_map *pcie_map; + struct drm_map *fb_map; + /* look up table parameters */ - u32 *lut_base; + struct drm_dma_handle *lut_handle; unsigned int lutPageSize; - unsigned int lutPageOrder; - bool isLUTInLFB; - unsigned int sdfbPageSize; - u32 pcie_config; - u32 pcie_status; + struct xgi_mem_heap fb_heap; + struct xgi_mem_heap pcie_heap; - atomic_t use_count; - - /* keep track of any pending bottom halfes */ - struct tasklet_struct tasklet; - - spinlock_t info_lock; - - struct semaphore info_sem; struct semaphore fb_sem; struct semaphore pcie_sem; }; -struct xgi_ioctl_post_vbios { - unsigned int bus; - unsigned int slot; -}; - enum PcieOwner { PCIE_2D = 0, /* @@ -151,64 +105,47 @@ enum PcieOwner { PCIE_INVALID = 0x7fffffff }; -struct xgi_mem_pid { - struct list_head list; - enum xgi_mem_location location; - unsigned long bus_addr; - unsigned long pid; -}; - -/* - * flags - */ -#define XGI_FLAG_OPEN 0x0001 -#define XGI_FLAG_NEEDS_POSTING 0x0002 -#define XGI_FLAG_WAS_POSTED 0x0004 -#define XGI_FLAG_CONTROL 0x0010 -#define XGI_FLAG_MAP_REGS_EARLY 0x0200 - -/* mmap(2) offsets */ - -#define IS_IO_OFFSET(info, offset, length) \ - (((offset) >= (info)->mmio.base) \ - && (((offset) + (length)) <= (info)->mmio.base + (info)->mmio.size)) - -/* Jong 06/14/2006 */ -/* (info)->fb.base is a base address for physical (bus) address space */ -/* what's the definition of offest? 
on physical (bus) address space or HW address space */ -/* Jong 06/15/2006; use HW address space */ -#define IS_FB_OFFSET(info, offset, length) \ - (((offset) >= 0) \ - && (((offset) + (length)) <= (info)->fb.size)) -#if 0 -#define IS_FB_OFFSET(info, offset, length) \ - (((offset) >= (info)->fb.base) \ - && (((offset) + (length)) <= (info)->fb.base + (info)->fb.size)) -#endif - -#define IS_PCIE_OFFSET(info, offset, length) \ - (((offset) >= (info)->pcie.base) \ - && (((offset) + (length)) <= (info)->pcie.base + (info)->pcie.size)) +extern struct kmem_cache *xgi_mem_block_cache; +extern struct xgi_mem_block *xgi_mem_alloc(struct xgi_mem_heap * heap, + unsigned long size, enum PcieOwner owner); +extern int xgi_mem_free(struct xgi_mem_heap * heap, unsigned long offset, + DRMFILE filp); +extern int xgi_mem_heap_init(struct xgi_mem_heap * heap, unsigned int start, + unsigned int end); +extern void xgi_mem_heap_cleanup(struct xgi_mem_heap * heap); extern int xgi_fb_heap_init(struct xgi_info * info); -extern void xgi_fb_heap_cleanup(struct xgi_info * info); -extern void xgi_fb_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, - pid_t pid); -extern void xgi_fb_free(struct xgi_info * info, unsigned long offset); -extern void xgi_mem_collect(struct xgi_info * info, unsigned int *pcnt); +extern int xgi_fb_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, + DRMFILE filp); + +extern int xgi_fb_free(struct xgi_info * info, unsigned long offset, + DRMFILE filp); extern int xgi_pcie_heap_init(struct xgi_info * info); -extern void xgi_pcie_heap_cleanup(struct xgi_info * info); +extern void xgi_pcie_lut_cleanup(struct xgi_info * info); -extern void xgi_pcie_alloc(struct xgi_info * info, - struct xgi_mem_alloc * alloc, pid_t pid); -extern void xgi_pcie_free(struct xgi_info * info, unsigned long offset); -extern struct xgi_pcie_block *xgi_find_pcie_block(struct xgi_info * info, - unsigned long address); -extern void *xgi_find_pcie_virt(struct xgi_info * info, unsigned long address); +extern int xgi_pcie_alloc(struct xgi_info * info, + struct xgi_mem_alloc * alloc, DRMFILE filp); -extern void xgi_test_rwinkernel(struct xgi_info * info, unsigned long address); +extern int xgi_pcie_free(struct xgi_info * info, unsigned long offset, + DRMFILE filp); + +extern void *xgi_find_pcie_virt(struct xgi_info * info, u32 address); + +extern void xgi_pcie_free_all(struct xgi_info *, DRMFILE); +extern void xgi_fb_free_all(struct xgi_info *, DRMFILE); + +extern int xgi_fb_alloc_ioctl(DRM_IOCTL_ARGS); +extern int xgi_fb_free_ioctl(DRM_IOCTL_ARGS); +extern int xgi_pcie_alloc_ioctl(DRM_IOCTL_ARGS); +extern int xgi_pcie_free_ioctl(DRM_IOCTL_ARGS); +extern int xgi_ge_reset_ioctl(DRM_IOCTL_ARGS); +extern int xgi_dump_register_ioctl(DRM_IOCTL_ARGS); +extern int xgi_restore_registers_ioctl(DRM_IOCTL_ARGS); +extern int xgi_submit_cmdlist_ioctl(DRM_IOCTL_ARGS); +extern int xgi_test_rwinkernel_ioctl(DRM_IOCTL_ARGS); +extern int xgi_state_change_ioctl(DRM_IOCTL_ARGS); #endif diff --git a/linux-core/xgi_fb.c b/linux-core/xgi_fb.c index 7d390d4b..ce689847 100644 --- a/linux-core/xgi_fb.c +++ b/linux-core/xgi_fb.c @@ -26,343 +26,126 @@ * DEALINGS IN THE SOFTWARE. 
***************************************************************************/ -#include "xgi_linux.h" #include "xgi_drv.h" -#include "xgi_fb.h" #define XGI_FB_HEAP_START 0x1000000 -static struct xgi_mem_heap *xgi_fb_heap; -static struct kmem_cache *xgi_fb_cache_block = NULL; -extern struct list_head xgi_mempid_list; +struct kmem_cache *xgi_mem_block_cache = NULL; static struct xgi_mem_block *xgi_mem_new_node(void); -static struct xgi_mem_block *xgi_mem_alloc(struct xgi_info * info, unsigned long size); -static struct xgi_mem_block *xgi_mem_free(struct xgi_info * info, unsigned long offset); -void xgi_fb_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, - pid_t pid) -{ - struct xgi_mem_block *block; - struct xgi_mem_pid *mempid_block; - if (alloc->is_front) { - alloc->location = XGI_MEMLOC_LOCAL; - alloc->bus_addr = info->fb.base; - alloc->hw_addr = 0; - XGI_INFO - ("Video RAM allocation on front buffer successfully! \n"); - } else { - xgi_down(info->fb_sem); - block = xgi_mem_alloc(info, alloc->size); - xgi_up(info->fb_sem); - - if (block == NULL) { - alloc->location = XGI_MEMLOC_LOCAL; - alloc->size = 0; - alloc->bus_addr = 0; - alloc->hw_addr = 0; - XGI_ERROR("Video RAM allocation failed\n"); - } else { - XGI_INFO("Video RAM allocation succeeded: 0x%p\n", - (char *)block->offset); - alloc->location = XGI_MEMLOC_LOCAL; - alloc->size = block->size; - alloc->bus_addr = info->fb.base + block->offset; - alloc->hw_addr = block->offset; - - /* manage mempid */ - mempid_block = - kmalloc(sizeof(struct xgi_mem_pid), GFP_KERNEL); - mempid_block->location = XGI_MEMLOC_LOCAL; - mempid_block->bus_addr = alloc->bus_addr; - mempid_block->pid = pid; - - if (!mempid_block) - XGI_ERROR("mempid_block alloc failed\n"); - - XGI_INFO - ("Memory ProcessID add one fb block pid:%ld successfully! \n", - mempid_block->pid); - list_add(&mempid_block->list, &xgi_mempid_list); - } - } -} - -void xgi_fb_free(struct xgi_info * info, unsigned long bus_addr) -{ - struct xgi_mem_block *block; - unsigned long offset = bus_addr - info->fb.base; - struct xgi_mem_pid *mempid_block; - struct xgi_mem_pid *mempid_freeblock = NULL; - - if (offset < 0) { - XGI_INFO("free onscreen frame buffer successfully !\n"); - } else { - xgi_down(info->fb_sem); - block = xgi_mem_free(info, offset); - xgi_up(info->fb_sem); - - if (block == NULL) { - XGI_ERROR("xgi_mem_free() failed at base 0x%lx\n", - offset); - } - - /* manage mempid */ - list_for_each_entry(mempid_block, &xgi_mempid_list, list) { - if (mempid_block->location == XGI_MEMLOC_LOCAL - && mempid_block->bus_addr == bus_addr) { - mempid_freeblock = mempid_block; - break; - } - } - if (mempid_freeblock) { - list_del(&mempid_freeblock->list); - XGI_INFO - ("Memory ProcessID delete one fb block pid:%ld successfully! 
\n", - mempid_freeblock->pid); - kfree(mempid_freeblock); - } - } -} - -int xgi_fb_heap_init(struct xgi_info * info) +int xgi_mem_heap_init(struct xgi_mem_heap *heap, unsigned int start, + unsigned int end) { struct xgi_mem_block *block; - xgi_fb_heap = kmalloc(sizeof(struct xgi_mem_heap), GFP_KERNEL); - if (!xgi_fb_heap) { - XGI_ERROR("xgi_fb_heap alloc failed\n"); - return 0; - } + INIT_LIST_HEAD(&heap->free_list); + INIT_LIST_HEAD(&heap->used_list); + INIT_LIST_HEAD(&heap->sort_list); + heap->initialized = TRUE; - INIT_LIST_HEAD(&xgi_fb_heap->free_list); - INIT_LIST_HEAD(&xgi_fb_heap->used_list); - INIT_LIST_HEAD(&xgi_fb_heap->sort_list); - - xgi_fb_cache_block = - kmem_cache_create("xgi_fb_block", sizeof(struct xgi_mem_block), 0, - SLAB_HWCACHE_ALIGN, NULL, NULL); - - if (NULL == xgi_fb_cache_block) { - XGI_ERROR("Fail to creat xgi_fb_block\n"); - goto fail1; - } - - block = - (struct xgi_mem_block *) kmem_cache_alloc(xgi_fb_cache_block, - GFP_KERNEL); + block = kmem_cache_alloc(xgi_mem_block_cache, GFP_KERNEL); if (!block) { - XGI_ERROR("kmem_cache_alloc failed\n"); - goto fail2; + return DRM_ERR(ENOMEM); } - block->offset = XGI_FB_HEAP_START; - block->size = info->fb.size - XGI_FB_HEAP_START; + block->offset = start; + block->size = end - start; - list_add(&block->list, &xgi_fb_heap->free_list); + list_add(&block->list, &heap->free_list); - xgi_fb_heap->max_freesize = info->fb.size - XGI_FB_HEAP_START; + heap->max_freesize = end - start; - XGI_INFO("fb start offset: 0x%lx, memory size : 0x%lx\n", block->offset, - block->size); - XGI_INFO("xgi_fb_heap->max_freesize: 0x%lx \n", - xgi_fb_heap->max_freesize); - - return 1; - - fail2: - if (xgi_fb_cache_block) { - kmem_cache_destroy(xgi_fb_cache_block); - xgi_fb_cache_block = NULL; - } - fail1: - if (xgi_fb_heap) { - kfree(xgi_fb_heap); - xgi_fb_heap = NULL; - } return 0; } -void xgi_fb_heap_cleanup(struct xgi_info * info) + +void xgi_mem_heap_cleanup(struct xgi_mem_heap * heap) { struct list_head *free_list; struct xgi_mem_block *block; struct xgi_mem_block *next; int i; - if (xgi_fb_heap) { - free_list = &xgi_fb_heap->free_list; - for (i = 0; i < 3; i++, free_list++) { - list_for_each_entry_safe(block, next, free_list, list) { - XGI_INFO - ("No. %d block->offset: 0x%lx block->size: 0x%lx \n", - i, block->offset, block->size); - //XGI_INFO("No. %d free block: 0x%p \n", i, block); - kmem_cache_free(xgi_fb_cache_block, block); - block = NULL; - } + free_list = &heap->free_list; + for (i = 0; i < 3; i++, free_list++) { + list_for_each_entry_safe(block, next, free_list, list) { + DRM_INFO + ("No. 
%d block->offset: 0x%lx block->size: 0x%lx \n", + i, block->offset, block->size); + kmem_cache_free(xgi_mem_block_cache, block); + block = NULL; } - XGI_INFO("xgi_fb_heap: 0x%p \n", xgi_fb_heap); - kfree(xgi_fb_heap); - xgi_fb_heap = NULL; - } - - if (xgi_fb_cache_block) { - kmem_cache_destroy(xgi_fb_cache_block); - xgi_fb_cache_block = NULL; } + + heap->initialized = 0; } -static struct xgi_mem_block *xgi_mem_new_node(void) -{ - struct xgi_mem_block *block; - block = - (struct xgi_mem_block *) kmem_cache_alloc(xgi_fb_cache_block, - GFP_KERNEL); +struct xgi_mem_block *xgi_mem_new_node(void) +{ + struct xgi_mem_block *block = + kmem_cache_alloc(xgi_mem_block_cache, GFP_KERNEL); + if (!block) { - XGI_ERROR("kmem_cache_alloc failed\n"); + DRM_ERROR("kmem_cache_alloc failed\n"); return NULL; } + block->offset = 0; + block->size = 0; + block->owner = PCIE_INVALID; + block->filp = (DRMFILE) -1; + return block; } -#if 0 -static void xgi_mem_insert_node_after(struct xgi_mem_list * list, - struct xgi_mem_block * current, - struct xgi_mem_block * block); -static void xgi_mem_insert_node_before(struct xgi_mem_list * list, - struct xgi_mem_block * current, - struct xgi_mem_block * block); -static void xgi_mem_insert_node_head(struct xgi_mem_list * list, - struct xgi_mem_block * block); -static void xgi_mem_insert_node_tail(struct xgi_mem_list * list, - struct xgi_mem_block * block); -static void xgi_mem_delete_node(struct xgi_mem_list * list, struct xgi_mem_block * block); -/* - * insert node:block after node:current - */ -static void xgi_mem_insert_node_after(struct xgi_mem_list * list, - struct xgi_mem_block * current, - struct xgi_mem_block * block) -{ - block->prev = current; - block->next = current->next; - current->next = block; - if (current == list->tail) { - list->tail = block; - } else { - block->next->prev = block; - } -} - -/* - * insert node:block before node:current - */ -static void xgi_mem_insert_node_before(struct xgi_mem_list * list, - struct xgi_mem_block * current, - struct xgi_mem_block * block) -{ - block->prev = current->prev; - block->next = current; - current->prev = block; - if (current == list->head) { - list->head = block; - } else { - block->prev->next = block; - } -} -void xgi_mem_insert_node_head(struct xgi_mem_list * list, struct xgi_mem_block * block) -{ - block->next = list->head; - block->prev = NULL; - - if (NULL == list->head) { - list->tail = block; - } else { - list->head->prev = block; - } - list->head = block; -} - -static void xgi_mem_insert_node_tail(struct xgi_mem_list * list, - struct xgi_mem_block * block) -{ - block->next = NULL; - block->prev = list->tail; - if (NULL == list->tail) { - list->head = block; - } else { - list->tail->next = block; - } - list->tail = block; -} - -static void xgi_mem_delete_node(struct xgi_mem_list * list, struct xgi_mem_block * block) -{ - if (block == list->head) { - list->head = block->next; - } - if (block == list->tail) { - list->tail = block->prev; - } - - if (block->prev) { - block->prev->next = block->next; - } - if (block->next) { - block->next->prev = block->prev; - } - - block->next = block->prev = NULL; -} -#endif -static struct xgi_mem_block *xgi_mem_alloc(struct xgi_info * info, - unsigned long originalSize) +struct xgi_mem_block *xgi_mem_alloc(struct xgi_mem_heap * heap, + unsigned long originalSize, + enum PcieOwner owner) { struct xgi_mem_block *block, *free_block, *used_block; - unsigned long size = (originalSize + PAGE_SIZE - 1) & PAGE_MASK; - XGI_INFO("Original 0x%lx bytes requested, really 0x%lx 
allocated\n", + + DRM_INFO("Original 0x%lx bytes requested, really 0x%lx allocated\n", originalSize, size); if (size == 0) { - XGI_ERROR("size == 0\n"); + DRM_ERROR("size == 0\n"); return (NULL); } - XGI_INFO("max_freesize: 0x%lx \n", xgi_fb_heap->max_freesize); - if (size > xgi_fb_heap->max_freesize) { - XGI_ERROR + DRM_INFO("max_freesize: 0x%lx \n", heap->max_freesize); + if (size > heap->max_freesize) { + DRM_ERROR ("size: 0x%lx is bigger than frame buffer total free size: 0x%lx !\n", - size, xgi_fb_heap->max_freesize); + size, heap->max_freesize); return (NULL); } - list_for_each_entry(block, &xgi_fb_heap->free_list, list) { - XGI_INFO("free_list: 0x%px \n", free_list); + list_for_each_entry(block, &heap->free_list, list) { + DRM_INFO("block: 0x%px \n", block); if (size <= block->size) { break; } } - if (&block->list == &xgi_fb_heap->free_list) { - XGI_ERROR + if (&block->list == &heap->free_list) { + DRM_ERROR ("Can't allocate %ldk size from frame buffer memory !\n", size / 1024); return (NULL); } free_block = block; - XGI_INFO("alloc size: 0x%lx from offset: 0x%lx size: 0x%lx \n", + DRM_INFO("alloc size: 0x%lx from offset: 0x%lx size: 0x%lx \n", size, free_block->offset, free_block->size); if (size == free_block->size) { used_block = free_block; - XGI_INFO("size == free_block->size: free_block = 0x%p\n", + DRM_INFO("size == free_block->size: free_block = 0x%p\n", free_block); list_del(&free_block->list); } else { @@ -372,7 +155,7 @@ static struct xgi_mem_block *xgi_mem_alloc(struct xgi_info * info, return (NULL); if (used_block == free_block) { - XGI_ERROR("used_block == free_block = 0x%p\n", + DRM_ERROR("used_block == free_block = 0x%p\n", used_block); } @@ -383,14 +166,16 @@ static struct xgi_mem_block *xgi_mem_alloc(struct xgi_info * info, free_block->size -= size; } - xgi_fb_heap->max_freesize -= size; + heap->max_freesize -= size; - list_add(&used_block->list, &xgi_fb_heap->used_list); + list_add(&used_block->list, &heap->used_list); + used_block->owner = owner; return (used_block); } -static struct xgi_mem_block *xgi_mem_free(struct xgi_info * info, unsigned long offset) +int xgi_mem_free(struct xgi_mem_heap * heap, unsigned long offset, + DRMFILE filp) { struct xgi_mem_block *used_block = NULL, *block; struct xgi_mem_block *prev, *next; @@ -398,28 +183,32 @@ static struct xgi_mem_block *xgi_mem_free(struct xgi_info * info, unsigned long unsigned long upper; unsigned long lower; - list_for_each_entry(block, &xgi_fb_heap->used_list, list) { + list_for_each_entry(block, &heap->used_list, list) { if (block->offset == offset) { break; } } - if (&block->list == &xgi_fb_heap->used_list) { - XGI_ERROR("can't find block: 0x%lx to free!\n", offset); - return (NULL); + if (&block->list == &heap->used_list) { + DRM_ERROR("can't find block: 0x%lx to free!\n", offset); + return DRM_ERR(ENOENT); + } + + if (block->filp != filp) { + return DRM_ERR(EPERM); } used_block = block; - XGI_INFO("used_block: 0x%p, offset = 0x%lx, size = 0x%lx\n", + DRM_INFO("used_block: 0x%p, offset = 0x%lx, size = 0x%lx\n", used_block, used_block->offset, used_block->size); - xgi_fb_heap->max_freesize += used_block->size; + heap->max_freesize += used_block->size; prev = next = NULL; upper = used_block->offset + used_block->size; lower = used_block->offset; - list_for_each_entry(block, &xgi_fb_heap->free_list, list) { + list_for_each_entry(block, &heap->free_list, list) { if (block->offset == upper) { next = block; } else if ((block->offset + block->size) == lower) { @@ -427,41 +216,157 @@ static struct xgi_mem_block 
*xgi_mem_free(struct xgi_info * info, unsigned long } } - XGI_INFO("next = 0x%p, prev = 0x%p\n", next, prev); + DRM_INFO("next = 0x%p, prev = 0x%p\n", next, prev); list_del(&used_block->list); if (prev && next) { prev->size += (used_block->size + next->size); list_del(&next->list); - XGI_INFO("free node 0x%p\n", next); - kmem_cache_free(xgi_fb_cache_block, next); - kmem_cache_free(xgi_fb_cache_block, used_block); - - next = NULL; - used_block = NULL; - return (prev); + DRM_INFO("free node 0x%p\n", next); + kmem_cache_free(xgi_mem_block_cache, next); + kmem_cache_free(xgi_mem_block_cache, used_block); } - - if (prev) { + else if (prev) { prev->size += used_block->size; - XGI_INFO("free node 0x%p\n", used_block); - kmem_cache_free(xgi_fb_cache_block, used_block); - used_block = NULL; - return (prev); + DRM_INFO("free node 0x%p\n", used_block); + kmem_cache_free(xgi_mem_block_cache, used_block); } - - if (next) { + else if (next) { next->size += used_block->size; next->offset = used_block->offset; - XGI_INFO("free node 0x%p\n", used_block); - kmem_cache_free(xgi_fb_cache_block, used_block); - used_block = NULL; - return (next); + DRM_INFO("free node 0x%p\n", used_block); + kmem_cache_free(xgi_mem_block_cache, used_block); + } + else { + list_add(&used_block->list, &heap->free_list); + DRM_INFO("Recycled free node %p, offset = 0x%lx, size = 0x%lx\n", + used_block, used_block->offset, used_block->size); } - list_add(&used_block->list, &xgi_fb_heap->free_list); - XGI_INFO("Recycled free node %p, offset = 0x%lx, size = 0x%lx\n", - used_block, used_block->offset, used_block->size); - - return (used_block); + return 0; +} + + +int xgi_fb_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, + DRMFILE filp) +{ + struct xgi_mem_block *block; + + if (alloc->is_front) { + alloc->location = XGI_MEMLOC_LOCAL; + alloc->offset = 0; + alloc->hw_addr = 0; + DRM_INFO + ("Video RAM allocation on front buffer successfully! 
\n"); + } else { + down(&info->fb_sem); + block = xgi_mem_alloc(&info->fb_heap, alloc->size, PCIE_2D); + up(&info->fb_sem); + + if (block == NULL) { + alloc->location = XGI_MEMLOC_LOCAL; + alloc->size = 0; + DRM_ERROR("Video RAM allocation failed\n"); + return DRM_ERR(ENOMEM); + } else { + DRM_INFO("Video RAM allocation succeeded: 0x%p\n", + (char *)block->offset); + alloc->location = XGI_MEMLOC_LOCAL; + alloc->size = block->size; + alloc->offset = block->offset; + alloc->hw_addr = block->offset; + + block->filp = filp; + } + } + + return 0; +} + + +int xgi_fb_alloc_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + struct xgi_mem_alloc alloc; + struct xgi_info *info = dev->dev_private; + int err; + + DRM_COPY_FROM_USER_IOCTL(alloc, (struct xgi_mem_alloc __user *) data, + sizeof(alloc)); + + err = xgi_fb_alloc(info, & alloc, filp); + if (err) { + return err; + } + + DRM_COPY_TO_USER_IOCTL((struct xgi_mem_alloc __user *) data, + alloc, sizeof(alloc)); + + return 0; +} + + +int xgi_fb_free(struct xgi_info * info, unsigned long offset, DRMFILE filp) +{ + int err = 0; + + if (offset == 0) { + DRM_INFO("free onscreen frame buffer successfully !\n"); + } else { + down(&info->fb_sem); + err = xgi_mem_free(&info->fb_heap, offset, filp); + up(&info->fb_sem); + } + + return err; +} + + +int xgi_fb_free_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + struct xgi_info *info = dev->dev_private; + u32 offset; + + DRM_COPY_FROM_USER_IOCTL(offset, (unsigned long __user *) data, + sizeof(offset)); + + return xgi_fb_free(info, offset, filp); +} + + +int xgi_fb_heap_init(struct xgi_info * info) +{ + return xgi_mem_heap_init(&info->fb_heap, XGI_FB_HEAP_START, + info->fb.size); +} + +/** + * Free all blocks associated with a particular file handle. + */ +void xgi_fb_free_all(struct xgi_info * info, DRMFILE filp) +{ + if (!info->fb_heap.initialized) { + return; + } + + down(&info->fb_sem); + + do { + struct xgi_mem_block *block; + + list_for_each_entry(block, &info->fb_heap.used_list, list) { + if (block->filp == filp) { + break; + } + } + + if (&block->list == &info->fb_heap.used_list) { + break; + } + + (void) xgi_fb_free(info, block->offset, filp); + } while(1); + + up(&info->fb_sem); } diff --git a/linux-core/xgi_fb.h b/linux-core/xgi_fb.h deleted file mode 100644 index 363c8bc8..00000000 --- a/linux-core/xgi_fb.h +++ /dev/null @@ -1,47 +0,0 @@ - -/**************************************************************************** - * Copyright (C) 2003-2006 by XGI Technology, Taiwan. - * * - * All Rights Reserved. * - * * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation on the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NON-INFRINGEMENT. 
IN NO EVENT SHALL XGI AND/OR - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - ***************************************************************************/ - -#ifndef _XGI_FB_H_ -#define _XGI_FB_H_ - -struct xgi_mem_block { - struct list_head list; - unsigned long offset; - unsigned long size; - atomic_t use_count; -}; - -struct xgi_mem_heap { - struct list_head free_list; - struct list_head used_list; - struct list_head sort_list; - unsigned long max_freesize; - spinlock_t lock; -}; - -#endif diff --git a/linux-core/xgi_linux.h b/linux-core/xgi_linux.h deleted file mode 100644 index 99bf2d04..00000000 --- a/linux-core/xgi_linux.h +++ /dev/null @@ -1,490 +0,0 @@ - -/**************************************************************************** - * Copyright (C) 2003-2006 by XGI Technology, Taiwan. - * * - * All Rights Reserved. * - * * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation on the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - ***************************************************************************/ - -#ifndef _XGI_LINUX_H_ -#define _XGI_LINUX_H_ - -#ifndef LINUX_VERSION_CODE -#include -#endif - -#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) -# error "This driver does not support pre-2.6 kernels!" 
-#endif - -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10) -# define XGI_REMAP_PFN_RANGE_PRESENT -#else -# define XGI_REMAP_PAGE_RANGE_5 -#endif - -#if defined (CONFIG_SMP) && !defined (__SMP__) -#define __SMP__ -#endif - -#if defined (CONFIG_MODVERSIONS) && !defined (MODVERSIONS) -#define MODVERSIONS -#endif - -#include /* printk */ -#include - -#include /* module_init, module_exit */ -#include /* pic_t, size_t, __u32, etc */ -#include /* error codes */ -#include /* circular linked list */ -#include /* NULL, offsetof */ -#include /* wait queues */ - -#include /* kmalloc, kfree, etc */ -#include /* vmalloc, vfree, etc */ - -#include /* poll_wait */ -#include /* mdelay, udelay */ -#include /* rdtsc rdtscl */ - -#include /* suser(), capable() replacement - for_each_task, for_each_process */ -#ifdef for_each_process -#define XGI_SCAN_PROCESS(p) for_each_process(p) -#else -#define XGI_SCAN_PROCESS(p) for_each_task(p) -#endif - -#include /* module_param() */ -#include /* kernel_locked */ -#include /* flush_tlb(), flush_tlb_all() */ -#include /* page table entry lookup */ - -#include /* pci_find_class, etc */ -#include /* tasklets, interrupt helpers */ -#include - -#include /* cli, sli, save_flags */ -#include /* ioremap, virt_to_phys */ -#include /* access_ok */ -#include /* PAGE_OFFSET */ -#include /* pte bit definitions */ - -#include -#include -#include - -#ifdef CONFIG_PROC_FS -#include -#endif - -#ifdef CONFIG_DEVFS_FS -#include -#endif - -#ifdef CONFIG_KMOD -#include -#endif - -#ifdef CONFIG_PM -#include -#endif - -#ifdef CONFIG_MTRR -#include -#endif - -#ifdef CONFIG_KDB -#include -#include -#endif - -#if defined (CONFIG_AGP) || defined (CONFIG_AGP_MODULE) -#define AGPGART -#include -#include -#endif - -#ifndef MAX_ORDER -#define MAX_ORDER 11 -#endif - -#ifndef module_init -#define module_init(x) int init_module(void) { return x(); } -#define module_exit(x) void cleanup_module(void) { x(); } -#endif - -#ifndef minor -#define minor(x) MINOR(x) -#endif - -#ifndef IRQ_HANDLED -typedef void irqreturn_t; -#define IRQ_NONE -#define IRQ_HANDLED -#define IRQ_RETVAL(x) -#endif - -#if !defined (list_for_each) -#define list_for_each(pos, head) \ - for (pos = (head)->next, prefetch(pos->next); pos != (head); \ - pos = pos->next, prefetch(pos->next)) -#endif - -extern struct list_head pci_devices; /* list of all devices */ -#define XGI_PCI_FOR_EACH_DEV(dev) \ - for(dev = pci_dev_g(pci_devices.next); dev != pci_dev_g(&pci_devices); dev = pci_dev_g(dev->global_list.next)) - -/* - * the following macro causes problems when used in the same module - * as module_param(); undef it so we don't accidentally mix the two - */ -#undef MODULE_PARM - -#ifdef EXPORT_NO_SYMBOLS -EXPORT_NO_SYMBOLS; -#endif - -#define XGI_IS_SUSER() capable(CAP_SYS_ADMIN) -#define XGI_PCI_DEVICE_NAME(dev) ((dev)->pretty_name) -#define XGI_NUM_CPUS() num_online_cpus() -#define XGI_CLI() local_irq_disable() -#define XGI_SAVE_FLAGS(eflags) local_save_flags(eflags) -#define XGI_RESTORE_FLAGS(eflags) local_irq_restore(eflags) -#define XGI_MAY_SLEEP() (!in_interrupt() && !in_atomic()) -#define XGI_MODULE_PARAMETER(x) module_param(x, int, 0) - - -#define XGI_PCI_DISABLE_DEVICE(dev) pci_disable_device(dev) - -/* common defines */ -#define GET_MODULE_SYMBOL(mod,sym) (const void *) inter_module_get(sym) -#define PUT_MODULE_SYMBOL(sym) inter_module_put((char *) sym) - -#define XGI_GET_PAGE_STRUCT(phys_page) virt_to_page(__va(phys_page)) -#define XGI_VMA_OFFSET(vma) (((vma)->vm_pgoff) << PAGE_SHIFT) -#define XGI_VMA_PRIVATE(vma) 
((vma)->vm_private_data) - -#define XGI_DEVICE_NUMBER(x) minor((x)->i_rdev) -#define XGI_IS_CONTROL_DEVICE(x) (minor((x)->i_rdev) == 255) - -#define XGI_PCI_RESOURCE_START(dev, bar) ((dev)->resource[bar].start) -#define XGI_PCI_RESOURCE_SIZE(dev, bar) ((dev)->resource[bar].end - (dev)->resource[bar].start + 1) - -#define XGI_PCI_BUS_NUMBER(dev) (dev)->bus->number -#define XGI_PCI_SLOT_NUMBER(dev) PCI_SLOT((dev)->devfn) - -#define XGI_PCI_GET_CLASS_PRESENT -#ifdef XGI_PCI_GET_CLASS_PRESENT -#define XGI_PCI_DEV_PUT(dev) pci_dev_put(dev) -#define XGI_PCI_GET_DEVICE(vendor,device,from) pci_get_device(vendor,device,from) -#else -#define XGI_PCI_DEV_PUT(dev) -#define XGI_PCI_GET_DEVICE(vendor,device,from) pci_find_device(vendor,device,from) -#endif - -/* - * acpi support has been back-ported to the 2.4 kernel, but the 2.4 driver - * model is not sufficient for full acpi support. it may work in some cases, - * but not enough for us to officially support this configuration. - */ -#if defined(CONFIG_ACPI) -#define XGI_PM_SUPPORT_ACPI -#endif - -#if defined(CONFIG_APM) || defined(CONFIG_APM_MODULE) -#define XGI_PM_SUPPORT_APM -#endif - -#if defined(CONFIG_DEVFS_FS) -typedef void *devfs_handle_t; -#define XGI_DEVFS_REGISTER(_name, _minor) \ - ({ \ - devfs_handle_t __handle = NULL; \ - if (devfs_mk_cdev(MKDEV(XGI_DEV_MAJOR, _minor), \ - S_IFCHR | S_IRUGO | S_IWUGO, _name) == 0) \ - { \ - __handle = (void *) 1; /* XXX Fix me! (boolean) */ \ - } \ - __handle; \ - }) -/* -#define XGI_DEVFS_REMOVE_DEVICE(i) devfs_remove("xgi%d", i) -*/ -#define XGI_DEVFS_REMOVE_CONTROL() devfs_remove("xgi_ctl") -#define XGI_DEVFS_REMOVE_DEVICE(i) devfs_remove("xgi") -#endif /* defined(CONFIG_DEVFS_FS) */ - -#define XGI_REGISTER_CHRDEV(x...) register_chrdev(x) -#define XGI_UNREGISTER_CHRDEV(x...) unregister_chrdev(x) - -#if defined(XGI_REMAP_PFN_RANGE_PRESENT) -#define XGI_REMAP_PAGE_RANGE(from, offset, x...) \ - remap_pfn_range(vma, from, ((offset) >> PAGE_SHIFT), x) -#elif defined(XGI_REMAP_PAGE_RANGE_5) -#define XGI_REMAP_PAGE_RANGE(x...) remap_page_range(vma, x) -#elif defined(XGI_REMAP_PAGE_RANGE_4) -#define XGI_REMAP_PAGE_RANGE(x...) remap_page_range(x) -#else -#warning "xgi_configure.sh failed, assuming remap_page_range(5)!" -#define XGI_REMAP_PAGE_RANGE(x...) 
remap_page_range(vma, x) -#endif - -#if defined(pmd_offset_map) -#define XGI_PMD_OFFSET(addres, pg_dir, pg_mid_dir) \ - { \ - pg_mid_dir = pmd_offset_map(pg_dir, address); \ - } -#define XGI_PMD_UNMAP(pg_mid_dir) \ - { \ - pmd_unmap(pg_mid_dir); \ - } -#else -#define XGI_PMD_OFFSET(addres, pg_dir, pg_mid_dir) \ - { \ - pg_mid_dir = pmd_offset(pg_dir, address); \ - } -#define XGI_PMD_UNMAP(pg_mid_dir) -#endif - -#define XGI_PMD_PRESENT(pg_mid_dir) \ - ({ \ - if ((pg_mid_dir) && (pmd_none(*pg_mid_dir))) \ - { \ - XGI_PMD_UNMAP(pg_mid_dir); \ - pg_mid_dir = NULL; \ - } \ - pg_mid_dir != NULL; \ - }) - -#if defined(pte_offset_atomic) -#define XGI_PTE_OFFSET(addres, pg_mid_dir, pte) \ - { \ - pte = pte_offset_atomic(pg_mid_dir, address); \ - XGI_PMD_UNMAP(pg_mid_dir); \ - } -#define XGI_PTE_UNMAP(pte) \ - { \ - pte_kunmap(pte); \ - } -#elif defined(pte_offset) -#define XGI_PTE_OFFSET(addres, pg_mid_dir, pte) \ - { \ - pte = pte_offset(pg_mid_dir, address); \ - XGI_PMD_UNMAP(pg_mid_dir); \ - } -#define XGI_PTE_UNMAP(pte) -#else -#define XGI_PTE_OFFSET(addres, pg_mid_dir, pte) \ - { \ - pte = pte_offset_map(pg_mid_dir, address); \ - XGI_PMD_UNMAP(pg_mid_dir); \ - } -#define XGI_PTE_UNMAP(pte) \ - { \ - pte_unmap(pte); \ - } -#endif - -#define XGI_PTE_PRESENT(pte) \ - ({ \ - if (pte) \ - { \ - if (!pte_present(*pte)) \ - { \ - XGI_PTE_UNMAP(pte); pte = NULL; \ - } \ - } \ - pte != NULL; \ - }) - -#define XGI_PTE_VALUE(pte) \ - ({ \ - unsigned long __pte_value = pte_val(*pte); \ - XGI_PTE_UNMAP(pte); \ - __pte_value; \ - }) - -#define XGI_PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) / PAGE_SIZE) -#define XGI_MASK_OFFSET(addr) ((addr) & (PAGE_SIZE - 1)) - -#if !defined (pgprot_noncached) -static inline pgprot_t pgprot_noncached(pgprot_t old_prot) -{ - pgprot_t new_prot = old_prot; - if (boot_cpu_data.x86 > 3) - new_prot = __pgprot(pgprot_val(old_prot) | _PAGE_PCD); - return new_prot; -} -#endif - -#if defined(XGI_BUILD_XGI_PAT_SUPPORT) && !defined (pgprot_writecombined) -/* Added define for write combining page, only valid if pat enabled. */ -#define _PAGE_WRTCOMB _PAGE_PWT -#define __PAGE_KERNEL_WRTCOMB \ - (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_WRTCOMB | _PAGE_ACCESSED) -#define PAGE_KERNEL_WRTCOMB MAKE_GLOBAL(__PAGE_KERNEL_WRTCOMB) - -static inline pgprot_t pgprot_writecombined(pgprot_t old_prot) -{ - pgprot_t new_prot = old_prot; - if (boot_cpu_data.x86 > 3) { - pgprot_val(old_prot) &= ~(_PAGE_PCD | _PAGE_PWT); - new_prot = __pgprot(pgprot_val(old_prot) | _PAGE_WRTCOMB); - } - return new_prot; -} -#endif - -#if !defined(page_to_pfn) -#define page_to_pfn(page) ((page) - mem_map) -#endif - -#define XGI_VMALLOC(ptr, size) \ - { \ - (ptr) = vmalloc_32(size); \ - } - -#define XGI_VFREE(ptr, size) \ - { \ - vfree((void *) (ptr)); \ - } - -#define XGI_IOREMAP(ptr, physaddr, size) \ - { \ - (ptr) = ioremap(physaddr, size); \ - } - -#define XGI_IOREMAP_NOCACHE(ptr, physaddr, size) \ - { \ - (ptr) = ioremap_nocache(physaddr, size); \ - } - -#define XGI_IOUNMAP(ptr, size) \ - { \ - iounmap(ptr); \ - } - -/* - * only use this because GFP_KERNEL may sleep.. 
- * GFP_ATOMIC is ok, it won't sleep - */ -#define XGI_KMALLOC(ptr, size) \ - { \ - (ptr) = kmalloc(size, GFP_KERNEL); \ - } - -#define XGI_KMALLOC_ATOMIC(ptr, size) \ - { \ - (ptr) = kmalloc(size, GFP_ATOMIC); \ - } - -#define XGI_KFREE(ptr, size) \ - { \ - kfree((void *) (ptr)); \ - } - -#define XGI_GET_FREE_PAGES(ptr, order) \ - { \ - (ptr) = __get_free_pages(GFP_KERNEL, order); \ - } - -#define XGI_FREE_PAGES(ptr, order) \ - { \ - free_pages(ptr, order); \ - } - -struct xgi_pte { - unsigned long phys_addr; - unsigned long virt_addr; -}; - -/* - * AMD Athlon processors expose a subtle bug in the Linux - * kernel, that may lead to AGP memory corruption. Recent - * kernel versions had a workaround for this problem, but - * 2.4.20 is the first kernel to address it properly. The - * page_attr API provides the means to solve the problem. - */ -static inline void XGI_SET_PAGE_ATTRIB_UNCACHED(struct xgi_pte * page_ptr) -{ - struct page *page = virt_to_page(__va(page_ptr->phys_addr)); - change_page_attr(page, 1, PAGE_KERNEL_NOCACHE); -} -static inline void XGI_SET_PAGE_ATTRIB_CACHED(struct xgi_pte * page_ptr) -{ - struct page *page = virt_to_page(__va(page_ptr->phys_addr)); - change_page_attr(page, 1, PAGE_KERNEL); -} - -/* add for SUSE 9, Jill*/ -#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 4) -#define XGI_INC_PAGE_COUNT(page) atomic_inc(&(page)->count) -#define XGI_DEC_PAGE_COUNT(page) atomic_dec(&(page)->count) -#define XGI_PAGE_COUNT(page) atomic_read(&(page)->count) -#define XGI_SET_PAGE_COUNT(page,v) atomic_set(&(page)->count, v) -#else -#define XGI_INC_PAGE_COUNT(page) atomic_inc(&(page)->_count) -#define XGI_DEC_PAGE_COUNT(page) atomic_dec(&(page)->_count) -#define XGI_PAGE_COUNT(page) atomic_read(&(page)->_count) -#define XGI_SET_PAGE_COUNT(page,v) atomic_set(&(page)->_count, v) -#endif -#define XGILockPage(page) SetPageLocked(page) -#define XGIUnlockPage(page) ClearPageLocked(page) - -struct xgi_file_private { - struct xgi_info *info; - unsigned int num_events; - spinlock_t fp_lock; - wait_queue_head_t wait_queue; -}; - -#define FILE_PRIVATE(filp) ((filp)->private_data) - -#define XGI_GET_FP(filp) ((struct xgi_file_private *) FILE_PRIVATE(filp)) - -/* for the card devices */ -#define XGI_INFO_FROM_FP(filp) (XGI_GET_FP(filp)->info) - -#define INODE_FROM_FP(filp) ((filp)->f_dentry->d_inode) - -#define XGI_ATOMIC_SET(data,val) atomic_set(&(data), (val)) -#define XGI_ATOMIC_INC(data) atomic_inc(&(data)) -#define XGI_ATOMIC_DEC(data) atomic_dec(&(data)) -#define XGI_ATOMIC_DEC_AND_TEST(data) atomic_dec_and_test(&(data)) -#define XGI_ATOMIC_READ(data) atomic_read(&(data)) - -/* - * lock-related functions that should only be called from this file - */ -#define xgi_init_lock(lock) spin_lock_init(&lock) -#define xgi_lock(lock) spin_lock(&lock) -#define xgi_unlock(lock) spin_unlock(&lock) -#define xgi_down(lock) down(&lock) -#define xgi_up(lock) up(&lock) - -#define xgi_lock_irqsave(lock,flags) spin_lock_irqsave(&lock,flags) -#define xgi_unlock_irqsave(lock,flags) spin_unlock_irqrestore(&lock,flags) - -#endif diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index bb2813ca..7f3d9d6e 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -26,17 +26,21 @@ * DEALINGS IN THE SOFTWARE. 
***************************************************************************/ -#include "xgi_linux.h" #include "xgi_drv.h" #include "xgi_regs.h" -#include "xgi_pcie.h" -void xgi_ge_reset(struct xgi_info * info) +int xgi_ge_reset_ioctl(DRM_IOCTL_ARGS) { + DRM_DEVICE; + struct xgi_info *info = dev->dev_private; + xgi_disable_ge(info); xgi_enable_ge(info); + + return 0; } + /* * irq functions */ @@ -113,7 +117,7 @@ static void xgi_ge_hang_reset(volatile u8 *mmio_vbase) u8 old_index; u8 old_36; - XGI_INFO("Can not reset back 0x%x!\n", + DRM_INFO("Can not reset back 0x%x!\n", ge_3d_status[0x00]); *(mmio_vbase + 0xb057) = 0; @@ -151,7 +155,7 @@ static void xgi_ge_hang_reset(volatile u8 *mmio_vbase) bool xgi_ge_irq_handler(struct xgi_info * info) { - volatile u8 *const mmio_vbase = info->mmio.vbase; + volatile u8 *const mmio_vbase = info->mmio_map->handle; volatile u32 *const ge_3d_status = (volatile u32 *)(mmio_vbase + 0x2800); const u32 int_status = ge_3d_status[4]; @@ -185,7 +189,7 @@ bool xgi_ge_irq_handler(struct xgi_info * info) continue_int_count = 0; /* GE Hung up, need reset. */ - XGI_INFO("Reset GE!\n"); + DRM_INFO("Reset GE!\n"); xgi_ge_hang_reset(mmio_vbase); } @@ -205,23 +209,23 @@ bool xgi_ge_irq_handler(struct xgi_info * info) bool xgi_crt_irq_handler(struct xgi_info * info) { bool ret = FALSE; - u8 save_3ce = bReadReg(0x3ce); + u8 save_3ce = DRM_READ8(info->mmio_map, 0x3ce); - if (bIn3cf(0x37) & 0x01) // CRT1 interrupt just happened + if (IN3CFB(info->mmio_map, 0x37) & 0x01) // CRT1 interrupt just happened { u8 op3cf_3d; u8 op3cf_37; // What happened? - op3cf_37 = bIn3cf(0x37); + op3cf_37 = IN3CFB(info->mmio_map, 0x37); // Clear CRT interrupt - op3cf_3d = bIn3cf(0x3d); - bOut3cf(0x3d, (op3cf_3d | 0x04)); - bOut3cf(0x3d, (op3cf_3d & ~0x04)); + op3cf_3d = IN3CFB(info->mmio_map, 0x3d); + OUT3CFB(info->mmio_map, 0x3d, (op3cf_3d | 0x04)); + OUT3CFB(info->mmio_map, 0x3d, (op3cf_3d & ~0x04)); ret = TRUE; } - bWriteReg(0x3ce, save_3ce); + DRM_WRITE8(info->mmio_map, 0x3ce, save_3ce); return (ret); } @@ -229,36 +233,36 @@ bool xgi_crt_irq_handler(struct xgi_info * info) bool xgi_dvi_irq_handler(struct xgi_info * info) { bool ret = FALSE; - u8 save_3ce = bReadReg(0x3ce); + const u8 save_3ce = DRM_READ8(info->mmio_map, 0x3ce); - if (bIn3cf(0x38) & 0x20) // DVI interrupt just happened - { + if (IN3CFB(info->mmio_map, 0x38) & 0x20) { // DVI interrupt just happened + const u8 save_3x4 = DRM_READ8(info->mmio_map, 0x3d4); u8 op3cf_39; u8 op3cf_37; u8 op3x5_5a; - u8 save_3x4 = bReadReg(0x3d4);; // What happened? 
- op3cf_37 = bIn3cf(0x37); + op3cf_37 = IN3CFB(info->mmio_map, 0x37); //Notify BIOS that DVI plug/unplug happened - op3x5_5a = bIn3x5(0x5a); - bOut3x5(0x5a, op3x5_5a & 0xf7); + op3x5_5a = IN3X5B(info->mmio_map, 0x5a); + OUT3X5B(info->mmio_map, 0x5a, op3x5_5a & 0xf7); - bWriteReg(0x3d4, save_3x4); + DRM_WRITE8(info->mmio_map, 0x3d4, save_3x4); // Clear DVI interrupt - op3cf_39 = bIn3cf(0x39); - bOut3c5(0x39, (op3cf_39 & ~0x01)); //Set 3cf.39 bit 0 to 0 - bOut3c5(0x39, (op3cf_39 | 0x01)); //Set 3cf.39 bit 0 to 1 + op3cf_39 = IN3CFB(info->mmio_map, 0x39); + OUT3C5B(info->mmio_map, 0x39, (op3cf_39 & ~0x01)); //Set 3cf.39 bit 0 to 0 + OUT3C5B(info->mmio_map, 0x39, (op3cf_39 | 0x01)); //Set 3cf.39 bit 0 to 1 ret = TRUE; } - bWriteReg(0x3ce, save_3ce); + DRM_WRITE8(info->mmio_map, 0x3ce, save_3ce); return (ret); } + void xgi_dump_register(struct xgi_info * info) { int i, j; @@ -281,7 +285,7 @@ void xgi_dump_register(struct xgi_info * info) printk("%1x ", i); for (j = 0; j < 0x10; j++) { - temp = bIn3c5(i * 0x10 + j); + temp = IN3C5B(info->mmio_map, i * 0x10 + j); printk("%3x", temp); } printk("\r\n"); @@ -303,7 +307,7 @@ void xgi_dump_register(struct xgi_info * info) printk("%1x ", i); for (j = 0; j < 0x10; j++) { - temp = bIn3x5(i * 0x10 + j); + temp = IN3X5B(info->mmio_map, i * 0x10 + j); printk("%3x", temp); } printk("\r\n"); @@ -325,7 +329,7 @@ void xgi_dump_register(struct xgi_info * info) printk("%1x ", i); for (j = 0; j < 0x10; j++) { - temp = bIn3cf(i * 0x10 + j); + temp = IN3CFB(info->mmio_map, i * 0x10 + j); printk("%3x", temp); } printk("\r\n"); @@ -346,7 +350,7 @@ void xgi_dump_register(struct xgi_info * info) printk("%1x ", i); for (j = 0; j < 0x10; j++) { - temp = bReadReg(0xB000 + i * 0x10 + j); + temp = DRM_READ8(info->mmio_map, 0xB000 + i * 0x10 + j); printk("%3x", temp); } printk("\r\n"); @@ -366,7 +370,7 @@ void xgi_dump_register(struct xgi_info * info) printk("%1x ", i); for (j = 0; j < 0x10; j++) { - temp = bReadReg(0x2200 + i * 0x10 + j); + temp = DRM_READ8(info->mmio_map, 0x2200 + i * 0x10 + j); printk("%3x", temp); } printk("\r\n"); @@ -386,7 +390,7 @@ void xgi_dump_register(struct xgi_info * info) printk("%1x ", i); for (j = 0; j < 0x10; j++) { - temp = bReadReg(0x2300 + i * 0x10 + j); + temp = DRM_READ8(info->mmio_map, 0x2300 + i * 0x10 + j); printk("%3x", temp); } printk("\r\n"); @@ -406,7 +410,7 @@ void xgi_dump_register(struct xgi_info * info) printk("%1x ", i); for (j = 0; j < 0x10; j++) { - temp = bReadReg(0x2400 + i * 0x10 + j); + temp = DRM_READ8(info->mmio_map, 0x2400 + i * 0x10 + j); printk("%3x", temp); } printk("\r\n"); @@ -426,17 +430,34 @@ void xgi_dump_register(struct xgi_info * info) printk("%1x ", i); for (j = 0; j < 0x10; j++) { - temp = bReadReg(0x2800 + i * 0x10 + j); + temp = DRM_READ8(info->mmio_map, 0x2800 + i * 0x10 + j); printk("%3x", temp); } printk("\r\n"); } } -void xgi_restore_registers(struct xgi_info * info) + +int xgi_dump_register_ioctl(DRM_IOCTL_ARGS) { - bOut3x5(0x13, 0); - bOut3x5(0x8b, 2); + DRM_DEVICE; + struct xgi_info *info = dev->dev_private; + + xgi_dump_register(info); + + return 0; +} + + +int xgi_restore_registers_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + struct xgi_info *info = dev->dev_private; + + OUT3X5B(info->mmio_map, 0x13, 0); + OUT3X5B(info->mmio_map, 0x8b, 2); + + return 0; } void xgi_waitfor_pci_idle(struct xgi_info * info) @@ -446,60 +467,10 @@ void xgi_waitfor_pci_idle(struct xgi_info * info) int idleCount = 0; while (idleCount < 5) { - if (dwReadReg(WHOLD_GE_STATUS) & IDLE_MASK) { + if (DRM_READ32(info->mmio_map, 
WHOLD_GE_STATUS) & IDLE_MASK) { idleCount = 0; } else { idleCount++; } } } - - -/*memory collect function*/ -extern struct list_head xgi_mempid_list; -void xgi_mem_collect(struct xgi_info * info, unsigned int *pcnt) -{ - struct xgi_mem_pid *block; - struct xgi_mem_pid *next; - struct task_struct *p, *find; - unsigned int cnt = 0; - - list_for_each_entry_safe(block, next, &xgi_mempid_list, list) { - - find = NULL; - XGI_SCAN_PROCESS(p) { - if (p->pid == block->pid) { - XGI_INFO - ("[!]Find active pid:%ld state:%ld location:%d addr:0x%lx! \n", - block->pid, p->state, - block->location, - block->bus_addr); - find = p; - if (block->bus_addr == 0xFFFFFFFF) - ++cnt; - break; - } - } - if (!find) { - if (block->location == XGI_MEMLOC_LOCAL) { - XGI_INFO - ("Memory ProcessID free fb and delete one block pid:%ld addr:0x%lx successfully! \n", - block->pid, block->bus_addr); - xgi_fb_free(info, block->bus_addr); - } else if (block->bus_addr != 0xFFFFFFFF) { - XGI_INFO - ("Memory ProcessID free pcie and delete one block pid:%ld addr:0x%lx successfully! \n", - block->pid, block->bus_addr); - xgi_pcie_free(info, block->bus_addr); - } else { - /*only delete the memory block */ - list_del(&block->list); - XGI_INFO - ("Memory ProcessID delete one pcie block pid:%ld successfully! \n", - block->pid); - kfree(block); - } - } - } - *pcnt = cnt; -} diff --git a/linux-core/xgi_misc.h b/linux-core/xgi_misc.h index 9c0591b2..10638b2d 100644 --- a/linux-core/xgi_misc.h +++ b/linux-core/xgi_misc.h @@ -30,9 +30,7 @@ #define _XGI_MISC_H_ extern void xgi_dump_register(struct xgi_info * info); -extern void xgi_ge_reset(struct xgi_info * info); -extern void xgi_restore_registers(struct xgi_info * info); extern bool xgi_ge_irq_handler(struct xgi_info * info); extern bool xgi_crt_irq_handler(struct xgi_info * info); extern bool xgi_dvi_irq_handler(struct xgi_info * info); diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index cfc9febc..49c531fc 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -26,176 +26,81 @@ * DEALINGS IN THE SOFTWARE. 
***************************************************************************/ -#include "xgi_linux.h" #include "xgi_drv.h" #include "xgi_regs.h" -#include "xgi_pcie.h" #include "xgi_misc.h" -static struct xgi_pcie_heap *xgi_pcie_heap = NULL; -static struct kmem_cache *xgi_pcie_cache_block = NULL; -static struct xgi_pcie_block *xgi_pcie_vertex_block = NULL; -static struct xgi_pcie_block *xgi_pcie_cmdlist_block = NULL; -static struct xgi_pcie_block *xgi_pcie_scratchpad_block = NULL; -extern struct list_head xgi_mempid_list; - -static unsigned long xgi_pcie_lut_alloc(unsigned long page_order) -{ - struct page *page; - unsigned long page_addr = 0; - unsigned long page_count = 0; - int i; - - page_count = (1 << page_order); - page_addr = __get_free_pages(GFP_KERNEL, page_order); - - if (page_addr == 0UL) { - XGI_ERROR("Can't get free pages: 0x%lx from system memory !\n", - page_count); - return 0; - } - - page = virt_to_page(page_addr); - - for (i = 0; i < page_count; i++, page++) { - XGI_INC_PAGE_COUNT(page); - XGILockPage(page); - } - - XGI_INFO("page_count: 0x%lx page_order: 0x%lx page_addr: 0x%lx \n", - page_count, page_order, page_addr); - return page_addr; -} - -static void xgi_pcie_lut_free(unsigned long page_addr, unsigned long page_order) -{ - struct page *page; - unsigned long page_count = 0; - int i; - - page_count = (1 << page_order); - page = virt_to_page(page_addr); - - for (i = 0; i < page_count; i++, page++) { - XGI_DEC_PAGE_COUNT(page); - XGIUnlockPage(page); - } - - free_pages(page_addr, page_order); -} +static struct xgi_mem_block *xgi_pcie_vertex_block = NULL; +static struct xgi_mem_block *xgi_pcie_cmdlist_block = NULL; +static struct xgi_mem_block *xgi_pcie_scratchpad_block = NULL; static int xgi_pcie_lut_init(struct xgi_info * info) { - unsigned char *page_addr = NULL; - unsigned long pciePageCount, lutEntryNum, lutPageCount, lutPageOrder; - unsigned long count = 0; u8 temp = 0; + int err; + unsigned i; + struct drm_scatter_gather request; + struct drm_sg_mem *sg; + u32 *lut; - /* Jong 06/06/2006 */ - unsigned long pcie_aperture_size; - - info->pcie.size = 128 * 1024 * 1024; /* Get current FB aperture size */ - temp = In3x5(0x27); - XGI_INFO("In3x5(0x27): 0x%x \n", temp); + temp = IN3X5B(info->mmio_map, 0x27); + DRM_INFO("In3x5(0x27): 0x%x \n", temp); if (temp & 0x01) { /* 256MB; Jong 06/05/2006; 0x10000000 */ - /* Jong 06/06/2006; allocate memory */ - pcie_aperture_size = 256 * 1024 * 1024; - /* info->pcie.base = 256 * 1024 * 1024; *//* pcie base is different from fb base */ + info->pcie.base = 256 * 1024 * 1024; } else { /* 128MB; Jong 06/05/2006; 0x08000000 */ - - /* Jong 06/06/2006; allocate memory */ - pcie_aperture_size = 128 * 1024 * 1024; - /* info->pcie.base = 128 * 1024 * 1024; */ + info->pcie.base = 128 * 1024 * 1024; } - /* Jong 06/06/2006; allocate memory; it can be used for build-in kernel modules */ - /* info->pcie.base=(unsigned long)alloc_bootmem(pcie_mem_size); */ - /* total 496 MB; need 256 MB (0x10000000); start from 240 MB (0x0F000000) */ - /* info->pcie.base=ioremap(0x0F000000, 0x10000000); *//* Cause system hang */ - info->pcie.base = pcie_aperture_size; /* works */ - /* info->pcie.base=info->fb.base + info->fb.size; *//* System hang */ - /* info->pcie.base=128 * 1024 * 1024; *//* System hang */ - XGI_INFO("Jong06062006-info->pcie.base: 0x%lx \n", info->pcie.base); + DRM_INFO("info->pcie.base: 0x%lx\n", (unsigned long) info->pcie.base); /* Get current lookup table page size */ - temp = bReadReg(0xB00C); + temp = DRM_READ8(info->mmio_map, 0xB00C); if 
(temp & 0x04) { /* 8KB */ info->lutPageSize = 8 * 1024; } else { /* 4KB */ - info->lutPageSize = 4 * 1024; } - XGI_INFO("info->lutPageSize: 0x%lx \n", info->lutPageSize); + DRM_INFO("info->lutPageSize: 0x%x \n", info->lutPageSize); -#if 0 - /* Get current lookup table location */ - temp = bReadReg(0xB00C); - if (temp & 0x02) { /* LFB */ - info->isLUTInLFB = TRUE; - /* Current we only support lookup table in LFB */ - temp &= 0xFD; - bWriteReg(0xB00C, temp); - info->isLUTInLFB = FALSE; - } else { /* SFB */ - info->isLUTInLFB = FALSE; + request.size = info->pcie.size; + err = drm_sg_alloc(info->dev, & request); + if (err) { + DRM_ERROR("cannot allocate PCIE GART backing store! " + "size = %d\n", info->pcie.size); + return err; } - XGI_INFO("info->lutPageSize: 0x%lx \n", info->lutPageSize); + sg = info->dev->sg; - /* Get current SDFB page size */ - temp = bReadReg(0xB00C); - if (temp & 0x08) { /* 8MB */ - info->sdfbPageSize = 8 * 1024 * 1024; - } else { /* 4MB */ - - info->sdfbPageSize = 4 * 1024 * 1024; - } -#endif - pciePageCount = (info->pcie.size + PAGE_SIZE - 1) / PAGE_SIZE; - - /* - * Allocate memory for PCIE GART table; - */ - lutEntryNum = pciePageCount; - lutPageCount = (lutEntryNum * 4 + PAGE_SIZE - 1) / PAGE_SIZE; - - /* get page_order base on page_count */ - count = lutPageCount; - for (lutPageOrder = 0; count; count >>= 1, ++lutPageOrder) ; - - if ((lutPageCount << 1) == (1 << lutPageOrder)) { - lutPageOrder -= 1; + info->lut_handle = drm_pci_alloc(info->dev, + sizeof(u32) * sg->pages, + PAGE_SIZE, + DMA_31BIT_MASK); + if (info->lut_handle == NULL) { + DRM_ERROR("cannot allocate PCIE lut page!\n"); + return DRM_ERR(ENOMEM); } - XGI_INFO("lutEntryNum: 0x%lx lutPageCount: 0x%lx lutPageOrder 0x%lx\n", - lutEntryNum, lutPageCount, lutPageOrder); + lut = info->lut_handle->vaddr; + for (i = 0; i < sg->pages; i++) { + info->dev->sg->busaddr[i] = pci_map_page(info->dev->pdev, + sg->pagelist[i], + 0, + PAGE_SIZE, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(info->dev->sg->busaddr[i])) { + DRM_ERROR("cannot map GART backing store for DMA!\n"); + return DRM_ERR(-(info->dev->sg->busaddr[i])); + } - info->lutPageOrder = lutPageOrder; - page_addr = (unsigned char *)xgi_pcie_lut_alloc(lutPageOrder); - - if (!page_addr) { - XGI_ERROR("cannot allocate PCIE lut page!\n"); - goto fail; + lut[i] = info->dev->sg->busaddr[i]; } - info->lut_base = (unsigned long *)page_addr; - - XGI_INFO("page_addr: 0x%p virt_to_phys(page_virtual): 0x%lx \n", - page_addr, virt_to_phys(page_addr)); - - XGI_INFO - ("info->lut_base: 0x%p __pa(info->lut_base): 0x%lx info->lutPageOrder 0x%lx\n", - info->lut_base, __pa(info->lut_base), info->lutPageOrder); - - /* - * clean all PCIE GART Entry - */ - memset(page_addr, 0, PAGE_SIZE << lutPageOrder); #if defined(__i386__) || defined(__x86_64__) asm volatile ("wbinvd":::"memory"); @@ -204,675 +109,186 @@ static int xgi_pcie_lut_init(struct xgi_info * info) #endif /* Set GART in SFB */ - bWriteReg(0xB00C, bReadReg(0xB00C) & ~0x02); - /* Set GART base address to HW */ - dwWriteReg(0xB034, __pa(info->lut_base)); + temp = DRM_READ8(info->mmio_map, 0xB00C); + DRM_WRITE8(info->mmio_map, 0xB00C, temp & ~0x02); + + /* Set GART base address to HW */ + dwWriteReg(info->mmio_map, 0xB034, info->lut_handle->busaddr); - return 1; - fail: return 0; } -static void xgi_pcie_lut_cleanup(struct xgi_info * info) +void xgi_pcie_lut_cleanup(struct xgi_info * info) { - if (info->lut_base) { - XGI_INFO("info->lut_base: 0x%p info->lutPageOrder: 0x%lx \n", - info->lut_base, info->lutPageOrder); - 
xgi_pcie_lut_free((unsigned long)info->lut_base, - info->lutPageOrder); - info->lut_base = NULL; - } -} - -static struct xgi_pcie_block *xgi_pcie_new_node(void) -{ - struct xgi_pcie_block *block = - (struct xgi_pcie_block *) kmem_cache_alloc(xgi_pcie_cache_block, - GFP_KERNEL); - if (block == NULL) { - return NULL; + if (info->dev->sg) { + drm_sg_free(info->dev, info->dev->sg->handle); } - block->offset = 0; /* block's offset in pcie memory, begin from 0 */ - block->size = 0; /* The block size. */ - block->bus_addr = 0; /* CPU access address/bus address */ - block->hw_addr = 0; /* GE access address */ - block->page_count = 0; - block->page_order = 0; - block->page_block = NULL; - block->page_table = NULL; - block->owner = PCIE_INVALID; - - return block; -} - -static void xgi_pcie_block_stuff_free(struct xgi_pcie_block * block) -{ - struct page *page; - struct xgi_page_block *page_block = block->page_block; - struct xgi_page_block *free_block; - unsigned long page_count = 0; - int i; - - //XGI_INFO("block->page_block: 0x%p \n", block->page_block); - while (page_block) { - page_count = page_block->page_count; - - page = virt_to_page(page_block->virt_addr); - for (i = 0; i < page_count; i++, page++) { - XGI_DEC_PAGE_COUNT(page); - XGIUnlockPage(page); - } - free_pages(page_block->virt_addr, page_block->page_order); - - page_block->phys_addr = 0; - page_block->virt_addr = 0; - page_block->page_count = 0; - page_block->page_order = 0; - - free_block = page_block; - page_block = page_block->next; - //XGI_INFO("free free_block: 0x%p \n", free_block); - kfree(free_block); - free_block = NULL; - } - - if (block->page_table) { - //XGI_INFO("free block->page_table: 0x%p \n", block->page_table); - kfree(block->page_table); - block->page_table = NULL; + if (info->lut_handle) { + drm_pci_free(info->dev, info->lut_handle); + info->lut_handle = NULL; } } int xgi_pcie_heap_init(struct xgi_info * info) { - struct xgi_pcie_block *block; + int err; - if (!xgi_pcie_lut_init(info)) { - XGI_ERROR("xgi_pcie_lut_init failed\n"); - return 0; + err = xgi_pcie_lut_init(info); + if (err) { + DRM_ERROR("xgi_pcie_lut_init failed\n"); + return err; } - xgi_pcie_heap = - (struct xgi_pcie_heap *) kmalloc(sizeof(struct xgi_pcie_heap), GFP_KERNEL); - if (!xgi_pcie_heap) { - XGI_ERROR("xgi_pcie_heap alloc failed\n"); - goto fail1; - } - INIT_LIST_HEAD(&xgi_pcie_heap->free_list); - INIT_LIST_HEAD(&xgi_pcie_heap->used_list); - INIT_LIST_HEAD(&xgi_pcie_heap->sort_list); - xgi_pcie_heap->max_freesize = info->pcie.size; - - xgi_pcie_cache_block = - kmem_cache_create("xgi_pcie_block", sizeof(struct xgi_pcie_block), 0, - SLAB_HWCACHE_ALIGN, NULL, NULL); - - if (NULL == xgi_pcie_cache_block) { - XGI_ERROR("Fail to creat xgi_pcie_block\n"); - goto fail2; + err = xgi_mem_heap_init(&info->pcie_heap, 0, info->pcie.size); + if (err) { + xgi_pcie_lut_cleanup(info); } - block = (struct xgi_pcie_block *) xgi_pcie_new_node(); - if (!block) { - XGI_ERROR("xgi_pcie_new_node failed\n"); - goto fail3; - } - - block->offset = 0; /* block's offset in pcie memory, begin from 0 */ - block->size = info->pcie.size; - - list_add(&block->list, &xgi_pcie_heap->free_list); - - XGI_INFO("PCIE start address: 0x%lx, memory size : 0x%lx\n", - block->offset, block->size); - return 1; - fail3: - if (xgi_pcie_cache_block) { - kmem_cache_destroy(xgi_pcie_cache_block); - xgi_pcie_cache_block = NULL; - } - - fail2: - if (xgi_pcie_heap) { - kfree(xgi_pcie_heap); - xgi_pcie_heap = NULL; - } - fail1: - xgi_pcie_lut_cleanup(info); - return 0; + return err; } -void 
xgi_pcie_heap_cleanup(struct xgi_info * info) + +int xgi_pcie_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, + DRMFILE filp) { - struct list_head *free_list; - struct xgi_pcie_block *block; - struct xgi_pcie_block *next; - int j; + struct xgi_mem_block *block; - xgi_pcie_lut_cleanup(info); - XGI_INFO("xgi_pcie_lut_cleanup scceeded\n"); + down(&info->pcie_sem); + if ((alloc->owner == PCIE_3D) && (xgi_pcie_vertex_block)) { + DRM_INFO("PCIE Vertex has been created, return directly.\n"); + block = xgi_pcie_vertex_block; + } + else if ((alloc->owner == PCIE_3D_CMDLIST) && (xgi_pcie_cmdlist_block)) { + DRM_INFO("PCIE Cmdlist has been created, return directly.\n"); + block = xgi_pcie_cmdlist_block; + } + else if ((alloc->owner == PCIE_3D_SCRATCHPAD) && (xgi_pcie_scratchpad_block)) { + DRM_INFO("PCIE Scratchpad has been created, return directly.\n"); + block = xgi_pcie_scratchpad_block; + } + else { + block = xgi_mem_alloc(&info->pcie_heap, alloc->size, alloc->owner); - if (xgi_pcie_heap) { - free_list = &xgi_pcie_heap->free_list; - for (j = 0; j < 3; j++, free_list++) { - list_for_each_entry_safe(block, next, free_list, list) { - XGI_INFO - ("No. %d block offset: 0x%lx size: 0x%lx\n", - j, block->offset, block->size); - xgi_pcie_block_stuff_free(block); - block->bus_addr = 0; - block->hw_addr = 0; - - //XGI_INFO("No. %d free block: 0x%p \n", j, block); - kmem_cache_free(xgi_pcie_cache_block, block); - } + if (alloc->owner == PCIE_3D) { + xgi_pcie_vertex_block = block; } - - XGI_INFO("free xgi_pcie_heap: 0x%p \n", xgi_pcie_heap); - kfree(xgi_pcie_heap); - xgi_pcie_heap = NULL; - } - - if (xgi_pcie_cache_block) { - kmem_cache_destroy(xgi_pcie_cache_block); - xgi_pcie_cache_block = NULL; - } -} - -static struct xgi_pcie_block *xgi_pcie_mem_alloc(struct xgi_info * info, - unsigned long originalSize, - enum PcieOwner owner) -{ - struct xgi_pcie_block *block, *used_block, *free_block; - struct xgi_page_block *page_block, *prev_page_block; - struct page *page; - unsigned long page_order = 0, count = 0, index = 0; - unsigned long page_addr = 0; - u32 *lut_addr = NULL; - unsigned long lut_id = 0; - unsigned long size = (originalSize + PAGE_SIZE - 1) & PAGE_MASK; - int i, j, page_count = 0; - int temp = 0; - - XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-Begin\n"); - XGI_INFO("Original 0x%lx bytes requested, really 0x%lx allocated\n", - originalSize, size); - - if (owner == PCIE_3D) { - if (xgi_pcie_vertex_block) { - XGI_INFO - ("PCIE Vertex has been created, return directly.\n"); - return xgi_pcie_vertex_block; + else if (alloc->owner == PCIE_3D_CMDLIST) { + xgi_pcie_cmdlist_block = block; + } + else if (alloc->owner == PCIE_3D_SCRATCHPAD) { + xgi_pcie_scratchpad_block = block; } } - - if (owner == PCIE_3D_CMDLIST) { - if (xgi_pcie_cmdlist_block) { - XGI_INFO - ("PCIE Cmdlist has been created, return directly.\n"); - return xgi_pcie_cmdlist_block; - } - } - - if (owner == PCIE_3D_SCRATCHPAD) { - if (xgi_pcie_scratchpad_block) { - XGI_INFO - ("PCIE Scratchpad has been created, return directly.\n"); - return xgi_pcie_scratchpad_block; - } - } - - if (size == 0) { - XGI_ERROR("size == 0 \n"); - return (NULL); - } - - XGI_INFO("max_freesize: 0x%lx \n", xgi_pcie_heap->max_freesize); - if (size > xgi_pcie_heap->max_freesize) { - XGI_ERROR - ("size: 0x%lx bigger than PCIE total free size: 0x%lx.\n", - size, xgi_pcie_heap->max_freesize); - return (NULL); - } - - /* Jong 05/30/2006; find next free list which has enough space */ - list_for_each_entry(block, &xgi_pcie_heap->free_list, list) { - if (size <= 
block->size) { - break; - } - } - - if (&block->list == &xgi_pcie_heap->free_list) { - XGI_ERROR("Can't allocate %ldk size from PCIE memory !\n", - size / 1024); - return (NULL); - } - - free_block = block; - XGI_INFO("alloc size: 0x%lx from offset: 0x%lx size: 0x%lx \n", - size, free_block->offset, free_block->size); - - if (size == free_block->size) { - used_block = free_block; - XGI_INFO("size==free_block->size: free_block = 0x%p\n", - free_block); - list_del(&free_block->list); - } else { - used_block = xgi_pcie_new_node(); - if (used_block == NULL) { - return NULL; - } - - if (used_block == free_block) { - XGI_ERROR("used_block == free_block = 0x%p\n", - used_block); - } - - used_block->offset = free_block->offset; - used_block->size = size; - - free_block->offset += size; - free_block->size -= size; - } - - xgi_pcie_heap->max_freesize -= size; - - used_block->bus_addr = info->pcie.base + used_block->offset; - used_block->hw_addr = info->pcie.base + used_block->offset; - used_block->page_count = page_count = size / PAGE_SIZE; - - /* get page_order base on page_count */ - for (used_block->page_order = 0; page_count; page_count >>= 1) { - ++used_block->page_order; - } - - if ((used_block->page_count << 1) == (1 << used_block->page_order)) { - used_block->page_order--; - } - XGI_INFO - ("used_block->offset: 0x%lx, used_block->size: 0x%lx, used_block->bus_addr: 0x%lx, used_block->hw_addr: 0x%lx, used_block->page_count: 0x%lx used_block->page_order: 0x%lx\n", - used_block->offset, used_block->size, used_block->bus_addr, - used_block->hw_addr, used_block->page_count, - used_block->page_order); - - used_block->page_block = NULL; - //used_block->page_block = (struct xgi_pages_block *)kmalloc(sizeof(struct xgi_pages_block), GFP_KERNEL); - //if (!used_block->page_block) return NULL;_t - //used_block->page_block->next = NULL; - - used_block->page_table = - (struct xgi_pte *) kmalloc(sizeof(struct xgi_pte) * used_block->page_count, - GFP_KERNEL); - if (used_block->page_table == NULL) { - goto fail; - } - - lut_id = (used_block->offset >> PAGE_SHIFT); - lut_addr = info->lut_base; - lut_addr += lut_id; - XGI_INFO("lutAddr: 0x%p lutID: 0x%lx \n", lut_addr, lut_id); - - /* alloc free pages from system */ - page_count = used_block->page_count; - page_block = used_block->page_block; - prev_page_block = used_block->page_block; - for (i = 0; page_count > 0; i++) { - /* if size is bigger than 2M bytes, it should be split */ - if (page_count > (1 << XGI_PCIE_ALLOC_MAX_ORDER)) { - page_order = XGI_PCIE_ALLOC_MAX_ORDER; - } else { - count = page_count; - for (page_order = 0; count; count >>= 1, ++page_order) ; - - if ((page_count << 1) == (1 << page_order)) { - page_order -= 1; - } - } - - count = (1 << page_order); - page_addr = __get_free_pages(GFP_KERNEL, page_order); - XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-page_addr=0x%lx \n", - page_addr); - - if (!page_addr) { - XGI_ERROR - ("No: %d :Can't get free pages: 0x%lx from system memory !\n", - i, count); - goto fail; - } - - /* Jong 05/30/2006; test */ - memset((unsigned char *)page_addr, 0xFF, - PAGE_SIZE << page_order); - /* memset((unsigned char *)page_addr, 0, PAGE_SIZE << page_order); */ - - if (page_block == NULL) { - page_block = - (struct xgi_page_block *) - kmalloc(sizeof(struct xgi_page_block), GFP_KERNEL); - if (!page_block) { - XGI_ERROR - ("Can't get memory for page_block! 
\n"); - goto fail; - } - } - - if (prev_page_block == NULL) { - used_block->page_block = page_block; - prev_page_block = page_block; - } else { - prev_page_block->next = page_block; - prev_page_block = page_block; - } - - page_block->next = NULL; - page_block->phys_addr = __pa(page_addr); - page_block->virt_addr = page_addr; - page_block->page_count = count; - page_block->page_order = page_order; - - XGI_INFO - ("Jong05302006-xgi_pcie_mem_alloc-page_block->phys_addr=0x%lx \n", - page_block->phys_addr); - XGI_INFO - ("Jong05302006-xgi_pcie_mem_alloc-page_block->virt_addr=0x%lx \n", - page_block->virt_addr); - - page = virt_to_page(page_addr); - - //XGI_INFO("No: %d page_order: 0x%lx page_count: 0x%x count: 0x%lx index: 0x%lx lut_addr: 0x%p" - // "page_block->phys_addr: 0x%lx page_block->virt_addr: 0x%lx \n", - // i, page_order, page_count, count, index, lut_addr, page_block->phys_addr, page_block->virt_addr); - - for (j = 0; j < count; j++, page++, lut_addr++) { - used_block->page_table[index + j].phys_addr = - __pa(page_address(page)); - used_block->page_table[index + j].virt_addr = - (unsigned long)page_address(page); - - XGI_INFO - ("Jong05302006-xgi_pcie_mem_alloc-used_block->page_table[index + j].phys_addr=0x%lx \n", - used_block->page_table[index + j].phys_addr); - XGI_INFO - ("Jong05302006-xgi_pcie_mem_alloc-used_block->page_table[index + j].virt_addr=0x%lx \n", - used_block->page_table[index + j].virt_addr); - - *lut_addr = __pa(page_address(page)); - XGI_INC_PAGE_COUNT(page); - XGILockPage(page); - - if (temp) { - XGI_INFO - ("__pa(page_address(page)): 0x%lx lutAddr: 0x%p lutAddr No: 0x%x = 0x%lx \n", - __pa(page_address(page)), lut_addr, j, - *lut_addr); - temp--; - } - } - - page_block = page_block->next; - page_count -= count; - index += count; - temp = 0; - } - - used_block->owner = owner; - list_add(&used_block->list, &xgi_pcie_heap->used_list); - -#if defined(__i386__) || defined(__x86_64__) - asm volatile ("wbinvd":::"memory"); -#else - mb(); -#endif - - /* Flush GART Table */ - bWriteReg(0xB03F, 0x40); - bWriteReg(0xB03F, 0x00); - - if (owner == PCIE_3D) { - xgi_pcie_vertex_block = used_block; - } - - if (owner == PCIE_3D_CMDLIST) { - xgi_pcie_cmdlist_block = used_block; - } - - if (owner == PCIE_3D_SCRATCHPAD) { - xgi_pcie_scratchpad_block = used_block; - } - - XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-End \n"); - return (used_block); - - fail: - xgi_pcie_block_stuff_free(used_block); - kmem_cache_free(xgi_pcie_cache_block, used_block); - return NULL; -} - -static struct xgi_pcie_block *xgi_pcie_mem_free(struct xgi_info * info, - unsigned long offset) -{ - struct xgi_pcie_block *used_block, *block; - struct xgi_pcie_block *prev, *next; - unsigned long upper, lower; - - list_for_each_entry(block, &xgi_pcie_heap->used_list, list) { - if (block->offset == offset) { - break; - } - } - - if (&block->list == &xgi_pcie_heap->used_list) { - XGI_ERROR("can't find block: 0x%lx to free!\n", offset); - return (NULL); - } - - used_block = block; - XGI_INFO - ("used_block: 0x%p, offset = 0x%lx, size = 0x%lx, bus_addr = 0x%lx, hw_addr = 0x%lx\n", - used_block, used_block->offset, used_block->size, - used_block->bus_addr, used_block->hw_addr); - - xgi_pcie_block_stuff_free(used_block); - - /* update xgi_pcie_heap */ - xgi_pcie_heap->max_freesize += used_block->size; - - prev = next = NULL; - upper = used_block->offset + used_block->size; - lower = used_block->offset; - - list_for_each_entry(block, &xgi_pcie_heap->free_list, list) { - if (block->offset == upper) { - next = block; - } else 
if ((block->offset + block->size) == lower) { - prev = block; - } - } - - XGI_INFO("next = 0x%p, prev = 0x%p\n", next, prev); - list_del(&used_block->list); - - if (prev && next) { - prev->size += (used_block->size + next->size); - list_del(&next->list); - XGI_INFO("free node 0x%p\n", next); - kmem_cache_free(xgi_pcie_cache_block, next); - kmem_cache_free(xgi_pcie_cache_block, used_block); - next = NULL; - used_block = NULL; - return (prev); - } - - if (prev) { - prev->size += used_block->size; - XGI_INFO("free node 0x%p\n", used_block); - kmem_cache_free(xgi_pcie_cache_block, used_block); - used_block = NULL; - return (prev); - } - - if (next) { - next->size += used_block->size; - next->offset = used_block->offset; - XGI_INFO("free node 0x%p\n", used_block); - kmem_cache_free(xgi_pcie_cache_block, used_block); - used_block = NULL; - return (next); - } - - used_block->bus_addr = 0; - used_block->hw_addr = 0; - used_block->page_count = 0; - used_block->page_order = 0; - list_add(&used_block->list, &xgi_pcie_heap->free_list); - XGI_INFO("Recycled free node %p, offset = 0x%lx, size = 0x%lx\n", - used_block, used_block->offset, used_block->size); - return (used_block); -} - -void xgi_pcie_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, - pid_t pid) -{ - struct xgi_pcie_block *block; - - xgi_down(info->pcie_sem); - block = xgi_pcie_mem_alloc(info, alloc->size, alloc->owner); - xgi_up(info->pcie_sem); + up(&info->pcie_sem); if (block == NULL) { alloc->location = XGI_MEMLOC_INVALID; alloc->size = 0; - alloc->bus_addr = 0; - alloc->hw_addr = 0; - XGI_ERROR("PCIE RAM allocation failed\n"); + DRM_ERROR("PCIE RAM allocation failed\n"); + return DRM_ERR(ENOMEM); } else { - XGI_INFO - ("PCIE RAM allocation succeeded: offset = 0x%lx, bus_addr = 0x%lx\n", - block->offset, block->bus_addr); + DRM_INFO("PCIE RAM allocation succeeded: offset = 0x%lx\n", + block->offset); alloc->location = XGI_MEMLOC_NON_LOCAL; alloc->size = block->size; - alloc->bus_addr = block->bus_addr; - alloc->hw_addr = block->hw_addr; + alloc->hw_addr = block->offset + info->pcie.base; + alloc->offset = block->offset; - /* - manage mempid, handle PCIE_3D, PCIE_3D_TEXTURE. - PCIE_3D request means a opengl process created. - PCIE_3D_TEXTURE request means texture cannot alloc from fb. - */ - if ((alloc->owner == PCIE_3D) - || (alloc->owner == PCIE_3D_TEXTURE)) { - struct xgi_mem_pid *mempid_block = - kmalloc(sizeof(struct xgi_mem_pid), GFP_KERNEL); - if (!mempid_block) - XGI_ERROR("mempid_block alloc failed\n"); - mempid_block->location = XGI_MEMLOC_NON_LOCAL; - if (alloc->owner == PCIE_3D) - mempid_block->bus_addr = 0xFFFFFFFF; /*xgi_pcie_vertex_block has the address */ - else - mempid_block->bus_addr = alloc->bus_addr; - mempid_block->pid = pid; - - XGI_INFO - ("Memory ProcessID add one pcie block pid:%ld successfully! 
\n", - mempid_block->pid); - list_add(&mempid_block->list, &xgi_mempid_list); - } + block->filp = filp; + return 0; } } -void xgi_pcie_free(struct xgi_info * info, unsigned long bus_addr) + +int xgi_pcie_alloc_ioctl(DRM_IOCTL_ARGS) { - struct xgi_pcie_block *block; - unsigned long offset = bus_addr - info->pcie.base; - struct xgi_mem_pid *mempid_block; - struct xgi_mem_pid *mempid_freeblock = NULL; - char isvertex = 0; - int processcnt; + DRM_DEVICE; + struct xgi_mem_alloc alloc; + struct xgi_info *info = dev->dev_private; + int err; - if (xgi_pcie_vertex_block - && xgi_pcie_vertex_block->bus_addr == bus_addr) - isvertex = 1; + DRM_COPY_FROM_USER_IOCTL(alloc, (struct xgi_mem_alloc __user *) data, + sizeof(alloc)); - if (isvertex) { - /*check is there any other process using vertex */ - processcnt = 0; + err = xgi_pcie_alloc(info, & alloc, filp); + if (err) { + return err; + } + + DRM_COPY_TO_USER_IOCTL((struct xgi_mem_alloc __user *) data, + alloc, sizeof(alloc)); - list_for_each_entry(mempid_block, &xgi_mempid_list, list) { - if (mempid_block->location == XGI_MEMLOC_NON_LOCAL - && mempid_block->bus_addr == 0xFFFFFFFF) { - ++processcnt; - } - } - if (processcnt > 1) { - return; - } + return 0; +} + + +/** + * Free all blocks associated with a particular file handle. + */ +void xgi_pcie_free_all(struct xgi_info * info, DRMFILE filp) +{ + if (!info->pcie_heap.initialized) { + return; } - xgi_down(info->pcie_sem); - block = xgi_pcie_mem_free(info, offset); - xgi_up(info->pcie_sem); + down(&info->pcie_sem); - if (block == NULL) { - XGI_ERROR("xgi_pcie_free() failed at base 0x%lx\n", offset); + do { + struct xgi_mem_block *block; + + list_for_each_entry(block, &info->pcie_heap.used_list, list) { + if (block->filp == filp) { + break; + } + } + + if (&block->list == &info->pcie_heap.used_list) { + break; + } + + (void) xgi_pcie_free(info, block->offset, filp); + } while(1); + + up(&info->pcie_sem); +} + + +int xgi_pcie_free(struct xgi_info * info, unsigned long offset, DRMFILE filp) +{ + const bool isvertex = (xgi_pcie_vertex_block + && (xgi_pcie_vertex_block->offset == offset)); + int err; + + down(&info->pcie_sem); + err = xgi_mem_free(&info->pcie_heap, offset, filp); + up(&info->pcie_sem); + + if (err) { + DRM_ERROR("xgi_pcie_free() failed at base 0x%lx\n", offset); } if (isvertex) xgi_pcie_vertex_block = NULL; - /* manage mempid */ - list_for_each_entry(mempid_block, &xgi_mempid_list, list) { - if (mempid_block->location == XGI_MEMLOC_NON_LOCAL - && ((isvertex && mempid_block->bus_addr == 0xFFFFFFFF) - || (!isvertex && mempid_block->bus_addr == bus_addr))) { - mempid_freeblock = mempid_block; - break; - } - } - if (mempid_freeblock) { - list_del(&mempid_freeblock->list); - XGI_INFO - ("Memory ProcessID delete one pcie block pid:%ld successfully! \n", - mempid_freeblock->pid); - kfree(mempid_freeblock); - } + return err; } -/* - * given a bus address, fid the pcie mem block - * uses the bus address as the key. 
- */ -struct xgi_pcie_block *xgi_find_pcie_block(struct xgi_info * info, - unsigned long address) + +int xgi_pcie_free_ioctl(DRM_IOCTL_ARGS) { - struct xgi_pcie_block *block; - int i; + DRM_DEVICE; + struct xgi_info *info = dev->dev_private; + u32 offset; + DRM_COPY_FROM_USER_IOCTL(offset, (unsigned long __user *) data, + sizeof(offset)); - list_for_each_entry(block, &xgi_pcie_heap->used_list, list) { - if (block->bus_addr == address) { - return block; - } - - if (block->page_table) { - for (i = 0; i < block->page_count; i++) { - unsigned long offset = block->bus_addr; - if ((address >= offset) - && (address < (offset + PAGE_SIZE))) { - return block; - } - } - } - } - - XGI_ERROR("could not find map for vm 0x%lx\n", address); - - return NULL; + return xgi_pcie_free(info, offset, filp); } + /** * xgi_find_pcie_virt * @address: GE HW address @@ -880,60 +296,43 @@ struct xgi_pcie_block *xgi_find_pcie_block(struct xgi_info * info, * Returns CPU virtual address. Assumes the CPU VAddr is continuous in not * the same block */ -void *xgi_find_pcie_virt(struct xgi_info * info, unsigned long address) +void *xgi_find_pcie_virt(struct xgi_info * info, u32 address) { - struct xgi_pcie_block *block; - const unsigned long offset_in_page = address & (PAGE_SIZE - 1); + const unsigned long offset = address - info->pcie.base; - XGI_INFO("begin (address = 0x%lx, offset_in_page = %lu)\n", - address, offset_in_page); - - list_for_each_entry(block, &xgi_pcie_heap->used_list, list) { - XGI_INFO("block = 0x%p (hw_addr = 0x%lx, size=%lu)\n", - block, block->hw_addr, block->size); - - if ((address >= block->hw_addr) - && (address < (block->hw_addr + block->size))) { - const unsigned long loc_in_pagetable = - (address - block->hw_addr) >> PAGE_SHIFT; - void *const ret = - (void *)(block->page_table[loc_in_pagetable]. - virt_addr + offset_in_page); - - XGI_INFO("PAGE_SHIFT = %d\n", PAGE_SHIFT); - XGI_INFO("block->page_table[0x%lx].virt_addr = 0x%lx\n", - loc_in_pagetable, - block->page_table[loc_in_pagetable].virt_addr); - XGI_INFO("return 0x%p\n", ret); - - return ret; - } - } - - XGI_ERROR("could not find map for vm 0x%lx\n", address); - return NULL; + return ((u8 *) info->dev->sg->virtual) + offset; } /* address -- GE hw address */ -void xgi_test_rwinkernel(struct xgi_info * info, unsigned long address) +int xgi_test_rwinkernel_ioctl(DRM_IOCTL_ARGS) { + DRM_DEVICE; + struct xgi_info *info = dev->dev_private; + u32 address; u32 *virtaddr = 0; - XGI_INFO("input GE HW addr is 0x%x\n", address); + DRM_COPY_FROM_USER_IOCTL(address, (unsigned long __user *) data, + sizeof(address)); + + DRM_INFO("input GE HW addr is 0x%x\n", address); if (address == 0) { - return; + return DRM_ERR(EFAULT); } virtaddr = (u32 *)xgi_find_pcie_virt(info, address); - XGI_INFO("convert to CPU virt addr 0x%p\n", virtaddr); + DRM_INFO("convert to CPU virt addr 0x%p\n", virtaddr); if (virtaddr != NULL) { - XGI_INFO("original [virtaddr] = 0x%x\n", *virtaddr); + DRM_INFO("original [virtaddr] = 0x%x\n", *virtaddr); *virtaddr = 0x00f00fff; - XGI_INFO("modified [virtaddr] = 0x%x\n", *virtaddr); + DRM_INFO("modified [virtaddr] = 0x%x\n", *virtaddr); + } else { + return DRM_ERR(EFAULT); } + + return 0; } diff --git a/linux-core/xgi_pcie.h b/linux-core/xgi_pcie.h deleted file mode 100644 index b66d6a28..00000000 --- a/linux-core/xgi_pcie.h +++ /dev/null @@ -1,68 +0,0 @@ - -/**************************************************************************** - * Copyright (C) 2003-2006 by XGI Technology, Taiwan. - * * - * All Rights Reserved. 
* - * * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation on the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - ***************************************************************************/ - -#ifndef _XGI_PCIE_H_ -#define _XGI_PCIE_H_ - -#ifndef XGI_PCIE_ALLOC_MAX_ORDER -#define XGI_PCIE_ALLOC_MAX_ORDER 1 /* 8K in Kernel 2.4.* */ -#endif - -struct xgi_page_block { - struct xgi_page_block *next; - unsigned long phys_addr; - unsigned long virt_addr; - unsigned long page_count; - unsigned long page_order; -}; - -struct xgi_pcie_block { - struct list_head list; - unsigned long offset; /* block's offset in pcie memory, begin from 0 */ - unsigned long size; /* The block size. */ - unsigned long bus_addr; /* CPU access address/bus address */ - unsigned long hw_addr; /* GE access address */ - - unsigned long page_count; - unsigned long page_order; - struct xgi_page_block *page_block; - struct xgi_pte *page_table; /* list of physical pages allocated */ - - atomic_t use_count; - enum PcieOwner owner; - unsigned long processID; -}; - -struct xgi_pcie_heap { - struct list_head free_list; - struct list_head used_list; - struct list_head sort_list; - unsigned long max_freesize; -}; - -#endif diff --git a/linux-core/xgi_regs.h b/linux-core/xgi_regs.h index bc3e2a1e..b211626a 100644 --- a/linux-core/xgi_regs.h +++ b/linux-core/xgi_regs.h @@ -29,269 +29,100 @@ #ifndef _XGI_REGS_H_ #define _XGI_REGS_H_ -#ifndef XGI_MMIO -#define XGI_MMIO 1 -#endif +#include "drmP.h" +#include "drm.h" -#if XGI_MMIO -#define OUTB(port, value) writeb(value, info->mmio.vbase + port) -#define INB(port) readb(info->mmio.vbase + port) -#define OUTW(port, value) writew(value, info->mmio.vbase + port) -#define INW(port) readw(info->mmio.vbase + port) -#define OUTDW(port, value) writel(value, info->mmio.vbase + port) -#define INDW(port) readl(info->mmio.vbase + port) -#else -#define OUTB(port, value) outb(value, port) -#define INB(port) inb(port) -#define OUTW(port, value) outw(value, port) -#define INW(port) inw(port) -#define OUTDW(port, value) outl(value, port) -#define INDW(port) inl(port) -#endif /* Hardware access functions */ -static inline void OUT3C5B(struct xgi_info * info, u8 index, u8 data) +static inline void OUT3C5B(struct drm_map * map, u8 index, u8 data) { - OUTB(0x3C4, index); - OUTB(0x3C5, data); + DRM_WRITE8(map, 0x3C4, index); + DRM_WRITE8(map, 0x3C5, data); } -static inline void OUT3X5B(struct xgi_info * info, u8 index, u8 data) +static inline void OUT3X5B(struct drm_map * map, u8 index, u8 data) { - 
OUTB(0x3D4, index); - OUTB(0x3D5, data); + DRM_WRITE8(map, 0x3D4, index); + DRM_WRITE8(map, 0x3D5, data); } -static inline void OUT3CFB(struct xgi_info * info, u8 index, u8 data) +static inline void OUT3CFB(struct drm_map * map, u8 index, u8 data) { - OUTB(0x3CE, index); - OUTB(0x3CF, data); + DRM_WRITE8(map, 0x3CE, index); + DRM_WRITE8(map, 0x3CF, data); } -static inline u8 IN3C5B(struct xgi_info * info, u8 index) +static inline u8 IN3C5B(struct drm_map * map, u8 index) { - volatile u8 data = 0; - OUTB(0x3C4, index); - data = INB(0x3C5); - return data; + DRM_WRITE8(map, 0x3C4, index); + return DRM_READ8(map, 0x3C5); } -static inline u8 IN3X5B(struct xgi_info * info, u8 index) +static inline u8 IN3X5B(struct drm_map * map, u8 index) { - volatile u8 data = 0; - OUTB(0x3D4, index); - data = INB(0x3D5); - return data; + DRM_WRITE8(map, 0x3D4, index); + return DRM_READ8(map, 0x3D5); } -static inline u8 IN3CFB(struct xgi_info * info, u8 index) +static inline u8 IN3CFB(struct drm_map * map, u8 index) { - volatile u8 data = 0; - OUTB(0x3CE, index); - data = INB(0x3CF); - return data; + DRM_WRITE8(map, 0x3CE, index); + return DRM_READ8(map, 0x3CF); } -static inline void OUT3C5W(struct xgi_info * info, u8 index, u16 data) -{ - OUTB(0x3C4, index); - OUTB(0x3C5, data); -} - -static inline void OUT3X5W(struct xgi_info * info, u8 index, u16 data) -{ - OUTB(0x3D4, index); - OUTB(0x3D5, data); -} - -static inline void OUT3CFW(struct xgi_info * info, u8 index, u8 data) -{ - OUTB(0x3CE, index); - OUTB(0x3CF, data); -} - -static inline u8 IN3C5W(struct xgi_info * info, u8 index) -{ - volatile u8 data = 0; - OUTB(0x3C4, index); - data = INB(0x3C5); - return data; -} - -static inline u8 IN3X5W(struct xgi_info * info, u8 index) -{ - volatile u8 data = 0; - OUTB(0x3D4, index); - data = INB(0x3D5); - return data; -} - -static inline u8 IN3CFW(struct xgi_info * info, u8 index) -{ - volatile u8 data = 0; - OUTB(0x3CE, index); - data = INB(0x3CF); - return data; -} - -static inline u8 readAttr(struct xgi_info * info, u8 index) -{ - INB(0x3DA); /* flip-flop to index */ - OUTB(0x3C0, index); - return INB(0x3C1); -} - -static inline void writeAttr(struct xgi_info * info, u8 index, u8 value) -{ - INB(0x3DA); /* flip-flop to index */ - OUTB(0x3C0, index); - OUTB(0x3C0, value); -} /* * Graphic engine register (2d/3d) acessing interface */ -static inline void WriteRegDWord(struct xgi_info * info, u32 addr, u32 data) +static inline void dwWriteReg(struct drm_map * map, u32 addr, u32 data) { - XGI_INFO("mmio vbase = 0x%p, addr = 0x%x, data = 0x%x\n", - info->mmio->vbase, addr, data); + DRM_INFO("mmio_map->handle = 0x%p, addr = 0x%x, data = 0x%x\n", + map->handle, addr, data); - *(volatile u32 *)(info->mmio.vbase + addr) = (data); + DRM_WRITE32(map, addr, data); } -static inline void WriteRegWord(struct xgi_info * info, u32 addr, u16 data) -{ - *(volatile u16 *)(info->mmio.vbase + addr) = (data); -} - -static inline void WriteRegByte(struct xgi_info * info, u32 addr, u8 data) -{ - *(volatile u8 *)(info->mmio.vbase + addr) = (data); -} - -static inline u32 ReadRegDWord(struct xgi_info * info, u32 addr) -{ - volatile u32 data; - data = *(volatile u32 *)(info->mmio.vbase + addr); - return data; -} - -static inline u16 ReadRegWord(struct xgi_info * info, u32 addr) -{ - volatile u16 data; - data = *(volatile u16 *)(info->mmio.vbase + addr); - return data; -} - -static inline u8 ReadRegByte(struct xgi_info * info, u32 addr) -{ - volatile u8 data; - data = *(volatile u8 *)(info->mmio.vbase + addr); - return data; -} - -#if 0 
-extern void OUT3C5B(struct xgi_info * info, u8 index, u8 data); -extern void OUT3X5B(struct xgi_info * info, u8 index, u8 data); -extern void OUT3CFB(struct xgi_info * info, u8 index, u8 data); -extern u8 IN3C5B(struct xgi_info * info, u8 index); -extern u8 IN3X5B(struct xgi_info * info, u8 index); -extern u8 IN3CFB(struct xgi_info * info, u8 index); -extern void OUT3C5W(struct xgi_info * info, u8 index, u8 data); -extern void OUT3X5W(struct xgi_info * info, u8 index, u8 data); -extern void OUT3CFW(struct xgi_info * info, u8 index, u8 data); -extern u8 IN3C5W(struct xgi_info * info, u8 index); -extern u8 IN3X5W(struct xgi_info * info, u8 index); -extern u8 IN3CFW(struct xgi_info * info, u8 index); - -extern void WriteRegDWord(struct xgi_info * info, u32 addr, u32 data); -extern void WriteRegWord(struct xgi_info * info, u32 addr, u16 data); -extern void WriteRegByte(struct xgi_info * info, u32 addr, u8 data); -extern u32 ReadRegDWord(struct xgi_info * info, u32 addr); -extern u16 ReadRegWord(struct xgi_info * info, u32 addr); -extern u8 ReadRegByte(struct xgi_info * info, u32 addr); - -extern void EnableProtect(); -extern void DisableProtect(); -#endif - -#define Out(port, data) OUTB(port, data) -#define bOut(port, data) OUTB(port, data) -#define wOut(port, data) OUTW(port, data) -#define dwOut(port, data) OUTDW(port, data) - -#define Out3x5(index, data) OUT3X5B(info, index, data) -#define bOut3x5(index, data) OUT3X5B(info, index, data) -#define wOut3x5(index, data) OUT3X5W(info, index, data) - -#define Out3c5(index, data) OUT3C5B(info, index, data) -#define bOut3c5(index, data) OUT3C5B(info, index, data) -#define wOut3c5(index, data) OUT3C5W(info, index, data) - -#define Out3cf(index, data) OUT3CFB(info, index, data) -#define bOut3cf(index, data) OUT3CFB(info, index, data) -#define wOut3cf(index, data) OUT3CFW(info, index, data) - -#define In(port) INB(port) -#define bIn(port) INB(port) -#define wIn(port) INW(port) -#define dwIn(port) INDW(port) - -#define In3x5(index) IN3X5B(info, index) -#define bIn3x5(index) IN3X5B(info, index) -#define wIn3x5(index) IN3X5W(info, index) - -#define In3c5(index) IN3C5B(info, index) -#define bIn3c5(index) IN3C5B(info, index) -#define wIn3c5(index) IN3C5W(info, index) - -#define In3cf(index) IN3CFB(info, index) -#define bIn3cf(index) IN3CFB(info, index) -#define wIn3cf(index) IN3CFW(info, index) - -#define dwWriteReg(addr, data) WriteRegDWord(info, addr, data) -#define wWriteReg(addr, data) WriteRegWord(info, addr, data) -#define bWriteReg(addr, data) WriteRegByte(info, addr, data) -#define dwReadReg(addr) ReadRegDWord(info, addr) -#define wReadReg(addr) ReadRegWord(info, addr) -#define bReadReg(addr) ReadRegByte(info, addr) static inline void xgi_enable_mmio(struct xgi_info * info) { u8 protect = 0; + u8 temp; /* Unprotect registers */ - outb(0x11, 0x3C4); - protect = inb(0x3C5); - outb(0x92, 0x3C5); + DRM_WRITE8(info->mmio_map, 0x3C4, 0x11); + protect = DRM_READ8(info->mmio_map, 0x3C5); + DRM_WRITE8(info->mmio_map, 0x3C5, 0x92); - outb(0x3A, 0x3D4); - outb(inb(0x3D5) | 0x20, 0x3D5); + DRM_WRITE8(info->mmio_map, 0x3D4, 0x3A); + temp = DRM_READ8(info->mmio_map, 0x3D5); + DRM_WRITE8(info->mmio_map, 0x3D5, temp | 0x20); /* Enable MMIO */ - outb(0x39, 0x3D4); - outb(inb(0x3D5) | 0x01, 0x3D5); + DRM_WRITE8(info->mmio_map, 0x3D4, 0x39); + temp = DRM_READ8(info->mmio_map, 0x3D5); + DRM_WRITE8(info->mmio_map, 0x3D5, temp | 0x01); - OUTB(0x3C4, 0x11); - OUTB(0x3C5, protect); + /* Protect registers */ + OUT3C5B(info->mmio_map, 0x11, protect); } static inline void 
xgi_disable_mmio(struct xgi_info * info) { u8 protect = 0; + u8 temp; - /* unprotect registers */ - OUTB(0x3C4, 0x11); - protect = INB(0x3C5); - OUTB(0x3C5, 0x92); + /* Unprotect registers */ + DRM_WRITE8(info->mmio_map, 0x3C4, 0x11); + protect = DRM_READ8(info->mmio_map, 0x3C5); + DRM_WRITE8(info->mmio_map, 0x3C5, 0x92); /* Disable MMIO access */ - OUTB(0x3D4, 0x39); - OUTB(0x3D5, INB(0x3D5) & 0xFE); + DRM_WRITE8(info->mmio_map, 0x3D4, 0x39); + temp = DRM_READ8(info->mmio_map, 0x3D5); + DRM_WRITE8(info->mmio_map, 0x3D5, temp & 0xFE); /* Protect registers */ - outb(0x11, 0x3C4); - outb(protect, 0x3C5); + OUT3C5B(info->mmio_map, 0x11, protect); } static inline void xgi_enable_ge(struct xgi_info * info) @@ -300,36 +131,36 @@ static inline void xgi_enable_ge(struct xgi_info * info) int wait = 0; // Enable GE - OUTW(0x3C4, 0x9211); + DRM_WRITE16(info->mmio_map, 0x3C4, 0x9211); // Save and close dynamic gating - bOld3cf2a = bIn3cf(0x2a); - bOut3cf(0x2a, bOld3cf2a & 0xfe); + bOld3cf2a = IN3CFB(info->mmio_map, 0x2a); + OUT3CFB(info->mmio_map, 0x2a, bOld3cf2a & 0xfe); // Reset both 3D and 2D engine - bOut3x5(0x36, 0x84); + OUT3X5B(info->mmio_map, 0x36, 0x84); wait = 10; while (wait--) { - bIn(0x36); + DRM_READ8(info->mmio_map, 0x36); } - bOut3x5(0x36, 0x94); + OUT3X5B(info->mmio_map, 0x36, 0x94); wait = 10; while (wait--) { - bIn(0x36); + DRM_READ8(info->mmio_map, 0x36); } - bOut3x5(0x36, 0x84); + OUT3X5B(info->mmio_map, 0x36, 0x84); wait = 10; while (wait--) { - bIn(0x36); + DRM_READ8(info->mmio_map, 0x36); } // Enable 2D engine only - bOut3x5(0x36, 0x80); + OUT3X5B(info->mmio_map, 0x36, 0x80); // Enable 2D+3D engine - bOut3x5(0x36, 0x84); + OUT3X5B(info->mmio_map, 0x36, 0x84); // Restore dynamic gating - bOut3cf(0x2a, bOld3cf2a); + OUT3CFB(info->mmio_map, 0x2a, bOld3cf2a); } static inline void xgi_disable_ge(struct xgi_info * info) @@ -337,50 +168,50 @@ static inline void xgi_disable_ge(struct xgi_info * info) int wait = 0; // Reset both 3D and 2D engine - bOut3x5(0x36, 0x84); + OUT3X5B(info->mmio_map, 0x36, 0x84); wait = 10; while (wait--) { - bIn(0x36); + DRM_READ8(info->mmio_map, 0x36); } - bOut3x5(0x36, 0x94); + OUT3X5B(info->mmio_map, 0x36, 0x94); wait = 10; while (wait--) { - bIn(0x36); + DRM_READ8(info->mmio_map, 0x36); } - bOut3x5(0x36, 0x84); + OUT3X5B(info->mmio_map, 0x36, 0x84); wait = 10; while (wait--) { - bIn(0x36); + DRM_READ8(info->mmio_map, 0x36); } // Disable 2D engine only - bOut3x5(0x36, 0); + OUT3X5B(info->mmio_map, 0x36, 0); } static inline void xgi_enable_dvi_interrupt(struct xgi_info * info) { - Out3cf(0x39, In3cf(0x39) & ~0x01); //Set 3cf.39 bit 0 to 0 - Out3cf(0x39, In3cf(0x39) | 0x01); //Set 3cf.39 bit 0 to 1 - Out3cf(0x39, In3cf(0x39) | 0x02); + OUT3CFB(info->mmio_map, 0x39, IN3CFB(info->mmio_map, 0x39) & ~0x01); //Set 3cf.39 bit 0 to 0 + OUT3CFB(info->mmio_map, 0x39, IN3CFB(info->mmio_map, 0x39) | 0x01); //Set 3cf.39 bit 0 to 1 + OUT3CFB(info->mmio_map, 0x39, IN3CFB(info->mmio_map, 0x39) | 0x02); } static inline void xgi_disable_dvi_interrupt(struct xgi_info * info) { - Out3cf(0x39, In3cf(0x39) & ~0x02); + OUT3CFB(info->mmio_map, 0x39, IN3CFB(info->mmio_map, 0x39) & ~0x02); } static inline void xgi_enable_crt1_interrupt(struct xgi_info * info) { - Out3cf(0x3d, In3cf(0x3d) | 0x04); - Out3cf(0x3d, In3cf(0x3d) & ~0x04); - Out3cf(0x3d, In3cf(0x3d) | 0x08); + OUT3CFB(info->mmio_map, 0x3d, IN3CFB(info->mmio_map, 0x3d) | 0x04); + OUT3CFB(info->mmio_map, 0x3d, IN3CFB(info->mmio_map, 0x3d) & ~0x04); + OUT3CFB(info->mmio_map, 0x3d, IN3CFB(info->mmio_map, 0x3d) | 0x08); } 
static inline void xgi_disable_crt1_interrupt(struct xgi_info * info) { - Out3cf(0x3d, In3cf(0x3d) & ~0x08); + OUT3CFB(info->mmio_map, 0x3d, IN3CFB(info->mmio_map, 0x3d) & ~0x08); } #endif diff --git a/shared-core/xgi_drm.h b/shared-core/xgi_drm.h index 67118884..66cb4efb 100644 --- a/shared-core/xgi_drm.h +++ b/shared-core/xgi_drm.h @@ -44,6 +44,15 @@ struct drm_xgi_sarea { unsigned int scrn_pitch; }; + +struct xgi_bootstrap { + /** + * Size of PCI-e GART range in megabytes. + */ + unsigned int gart_size; +}; + + enum xgi_mem_location { XGI_MEMLOC_NON_LOCAL = 0, XGI_MEMLOC_LOCAL = 1, @@ -62,9 +71,9 @@ struct xgi_mem_alloc { __u32 hw_addr; /** - * Physical address of the memory from the processor's point of view. + * Offset of the allocation in the mapping. */ - unsigned long bus_addr; + unsigned long offset; }; enum xgi_batch_type { @@ -95,38 +104,31 @@ struct xgi_state_info { * Ioctl definitions */ -#define XGI_IOCTL_MAGIC 'x' /* use 'x' as magic number */ +#define DRM_XGI_BOOTSTRAP 0 +#define DRM_XGI_FB_ALLOC 1 +#define DRM_XGI_FB_FREE 2 +#define DRM_XGI_PCIE_ALLOC 3 +#define DRM_XGI_PCIE_FREE 4 +#define DRM_XGI_SUBMIT_CMDLIST 5 +#define DRM_XGI_GE_RESET 6 +#define DRM_XGI_DUMP_REGISTER 7 +#define DRM_XGI_DEBUG_INFO 8 +#define DRM_XGI_TEST_RWINKERNEL 9 +#define DRM_XGI_STATE_CHANGE 10 -#define XGI_IOCTL_BASE 0 -#define XGI_ESC_POST_VBIOS (XGI_IOCTL_BASE + 0) +#define XGI_IOCTL_BOOTSTRAP DRM_IOW(DRM_COMMAND_BASE + DRM_XGI_BOOTSTRAP, struct xgi_bootstrap) -#define XGI_ESC_FB_ALLOC (XGI_IOCTL_BASE + 1) -#define XGI_ESC_FB_FREE (XGI_IOCTL_BASE + 2) -#define XGI_ESC_PCIE_ALLOC (XGI_IOCTL_BASE + 3) -#define XGI_ESC_PCIE_FREE (XGI_IOCTL_BASE + 4) -#define XGI_ESC_SUBMIT_CMDLIST (XGI_IOCTL_BASE + 5) -#define XGI_ESC_GE_RESET (XGI_IOCTL_BASE + 6) -#define XGI_ESC_DUMP_REGISTER (XGI_IOCTL_BASE + 7) -#define XGI_ESC_DEBUG_INFO (XGI_IOCTL_BASE + 8) -#define XGI_ESC_TEST_RWINKERNEL (XGI_IOCTL_BASE + 9) -#define XGI_ESC_STATE_CHANGE (XGI_IOCTL_BASE + 10) +#define XGI_IOCTL_FB_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_XGI_FB_ALLOC, struct xgi_mem_alloc) +#define XGI_IOCTL_FB_FREE DRM_IOW(DRM_COMMAND_BASE + DRM_XGI_FB_FREE, __u32) -#define XGI_IOCTL_POST_VBIOS _IO(XGI_IOCTL_MAGIC, XGI_ESC_POST_VBIOS) +#define XGI_IOCTL_PCIE_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_XGI_PCIE_ALLOC, struct xgi_mem_alloc) +#define XGI_IOCTL_PCIE_FREE DRM_IOW(DRM_COMMAND_BASE + DRM_XGI_PCIE_FREE, __u32) -#define XGI_IOCTL_FB_ALLOC _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_FB_ALLOC, struct xgi_mem_alloc) -#define XGI_IOCTL_FB_FREE _IOW(XGI_IOCTL_MAGIC, XGI_ESC_FB_FREE, unsigned long) - -#define XGI_IOCTL_PCIE_ALLOC _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_ALLOC, struct xgi_mem_alloc) -#define XGI_IOCTL_PCIE_FREE _IOW(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_FREE, unsigned long) - -#define XGI_IOCTL_GE_RESET _IO(XGI_IOCTL_MAGIC, XGI_ESC_GE_RESET) -#define XGI_IOCTL_DUMP_REGISTER _IO(XGI_IOCTL_MAGIC, XGI_ESC_DUMP_REGISTER) -#define XGI_IOCTL_DEBUG_INFO _IO(XGI_IOCTL_MAGIC, XGI_ESC_DEBUG_INFO) - -#define XGI_IOCTL_SUBMIT_CMDLIST _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_SUBMIT_CMDLIST, struct xgi_cmd_info) -#define XGI_IOCTL_TEST_RWINKERNEL _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_TEST_RWINKERNEL, unsigned long) -#define XGI_IOCTL_STATE_CHANGE _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_STATE_CHANGE, struct xgi_state_info) - -#define XGI_IOCTL_MAXNR 30 +#define XGI_IOCTL_GE_RESET DRM_IO(DRM_COMMAND_BASE + DRM_XGI_GE_RESET) +#define XGI_IOCTL_DUMP_REGISTER DRM_IO(DRM_COMMAND_BASE + DRM_XGI_DUMP_REGISTER) +#define XGI_IOCTL_DEBUG_INFO DRM_IO(DRM_COMMAND_BASE + DRM_XGI_DEBUG_INFO) +#define 
XGI_IOCTL_SUBMIT_CMDLIST DRM_IOW(DRM_COMMAND_BASE + DRM_XGI_SUBMIT_CMDLIST, struct xgi_cmd_info) +#define XGI_IOCTL_TEST_RWINKERNEL DRM_IOW(DRM_COMMAND_BASE + DRM_XGI_TEST_RWINKERNEL, __u32) +#define XGI_IOCTL_STATE_CHANGE DRM_IOW(DRM_COMMAND_BASE + DRM_XGI_STATE_CHANGE, struct xgi_state_info) #endif /* _XGI_DRM_H_ */ From 2f53ce4af2f7db911d908ff382738f30be004e8b Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Thu, 19 Jul 2007 11:05:13 -0700 Subject: [PATCH 153/437] Move MMIO drm_addmap (and code that depends on it) to xgi_bootstrap. For reasons that I don't understand, the drm_addmap call would succeed in xgi_driver_load, but writes to the map later would oops. Moving it to xgi_bootstrap fixes this problem. --- linux-core/xgi_drv.c | 62 ++++++++++++++++++++++---------------------- 1 file changed, 31 insertions(+), 31 deletions(-) diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index 3b9f4cb1..13e79169 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -188,7 +188,38 @@ int xgi_bootstrap(DRM_IOCTL_ARGS) return 0; } + err = drm_addmap(dev, info->mmio.base, info->mmio.size, + _DRM_REGISTERS, _DRM_KERNEL, + &info->mmio_map); + if (err) { + DRM_ERROR("Unable to map MMIO region: %d\n", err); + return err; + } + xgi_enable_mmio(info); + //xgi_enable_ge(info); + + info->fb.size = IN3CFB(info->mmio_map, 0x54) * 8 * 1024 * 1024; + + DRM_INFO("fb base: 0x%lx, size: 0x%x (probed)\n", + (unsigned long) info->fb.base, info->fb.size); + + + if ((info->fb.base == 0) || (info->fb.size == 0)) { + DRM_ERROR("frame buffer appears to be wrong: 0x%lx 0x%x\n", + (unsigned long) info->fb.base, info->fb.size); + return DRM_ERR(EINVAL); + } + + + /* Init the resource manager */ + err = xgi_fb_heap_init(info); + if (err) { + DRM_ERROR("xgi_fb_heap_init() failed\n"); + return err; + } + + info->pcie.size = bs.gart_size * (1024 * 1024); @@ -280,36 +311,12 @@ int xgi_driver_load(struct drm_device *dev, unsigned long flags) } - err = drm_addmap(dev, info->mmio.base, info->mmio.size, - _DRM_REGISTERS, _DRM_KERNEL | _DRM_READ_ONLY, - &info->mmio_map); - if (err) { - DRM_ERROR("Unable to map MMIO region: %d\n", err); - return err; - } - - xgi_enable_mmio(info); - //xgi_enable_ge(info); - info->fb.base = drm_get_resource_start(dev, 0); info->fb.size = drm_get_resource_len(dev, 0); DRM_INFO("fb base: 0x%lx, size: 0x%x\n", (unsigned long) info->fb.base, info->fb.size); - info->fb.size = IN3CFB(info->mmio_map, 0x54) * 8 * 1024 * 1024; - - DRM_INFO("fb base: 0x%lx, size: 0x%x (probed)\n", - (unsigned long) info->fb.base, info->fb.size); - - - if ((info->fb.base == 0) || (info->fb.size == 0)) { - DRM_ERROR("frame buffer appears to be wrong: 0x%lx 0x%x\n", - (unsigned long) info->fb.base, info->fb.size); - return DRM_ERR(EINVAL); - } - - xgi_mem_block_cache = kmem_cache_create("xgi_mem_block", sizeof(struct xgi_mem_block), @@ -321,13 +328,6 @@ int xgi_driver_load(struct drm_device *dev, unsigned long flags) } - /* Init the resource manager */ - err = xgi_fb_heap_init(info); - if (err) { - DRM_ERROR("xgi_fb_heap_init() failed\n"); - return err; - } - return 0; } From 15245b670e5359a7dbf9151aa9f160e929e0b46b Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Thu, 19 Jul 2007 11:38:56 -0700 Subject: [PATCH 154/437] Rework xgi_(pcie|fb)_free_all to prevent deadlock. 
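
The deadlock comes from the free_all paths introduced earlier in this
series: xgi_pcie_free_all() takes info->pcie_sem and, while still
holding it, calls xgi_pcie_free(), which immediately does
down(&info->pcie_sem) again. These are ordinary non-recursive
semaphores, so the second down() never returns; xgi_fb_free_all() has
the same problem with fb_sem. A condensed sketch of the broken pattern
and of the fix (names match the driver code above; this is illustration
only, the real hunks follow):

    /* Before: free_all calls the public free routine with pcie_sem held. */
    void xgi_pcie_free_all(struct xgi_info * info, DRMFILE filp)
    {
            struct xgi_mem_block *block;

            down(&info->pcie_sem);
            /* ... walk pcie_heap.used_list to find a block owned by filp ... */
            xgi_pcie_free(info, block->offset, filp);  /* down()s pcie_sem again: deadlock */
            up(&info->pcie_sem);
    }

    /* After: a *_locked helper assumes the caller already holds pcie_sem,
     * free_all calls it directly, and the public xgi_pcie_free() stays a
     * thin down()/up() wrapper around it.
     */
    static int xgi_pcie_free_locked(struct xgi_info * info,
                                    unsigned long offset, DRMFILE filp)
    {
            return xgi_mem_free(&info->pcie_heap, offset, filp);
    }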
--- linux-core/xgi_fb.c | 2 +- linux-core/xgi_pcie.c | 24 ++++++++++++++++++------ 2 files changed, 19 insertions(+), 7 deletions(-) diff --git a/linux-core/xgi_fb.c b/linux-core/xgi_fb.c index ce689847..a5885198 100644 --- a/linux-core/xgi_fb.c +++ b/linux-core/xgi_fb.c @@ -365,7 +365,7 @@ void xgi_fb_free_all(struct xgi_info * info, DRMFILE filp) break; } - (void) xgi_fb_free(info, block->offset, filp); + (void) xgi_mem_free(&info->fb_heap, block->offset, filp); } while(1); up(&info->fb_sem); diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index 49c531fc..9dee888b 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -34,6 +34,9 @@ static struct xgi_mem_block *xgi_pcie_vertex_block = NULL; static struct xgi_mem_block *xgi_pcie_cmdlist_block = NULL; static struct xgi_mem_block *xgi_pcie_scratchpad_block = NULL; +static int xgi_pcie_free_locked(struct xgi_info * info, + unsigned long offset, DRMFILE filp); + static int xgi_pcie_lut_init(struct xgi_info * info) { u8 temp = 0; @@ -248,30 +251,39 @@ void xgi_pcie_free_all(struct xgi_info * info, DRMFILE filp) break; } - (void) xgi_pcie_free(info, block->offset, filp); + (void) xgi_pcie_free_locked(info, block->offset, filp); } while(1); up(&info->pcie_sem); } -int xgi_pcie_free(struct xgi_info * info, unsigned long offset, DRMFILE filp) +int xgi_pcie_free_locked(struct xgi_info * info, + unsigned long offset, DRMFILE filp) { const bool isvertex = (xgi_pcie_vertex_block && (xgi_pcie_vertex_block->offset == offset)); + int err = xgi_mem_free(&info->pcie_heap, offset, filp); + + if (!err && isvertex) + xgi_pcie_vertex_block = NULL; + + return err; +} + + +int xgi_pcie_free(struct xgi_info * info, unsigned long offset, DRMFILE filp) +{ int err; down(&info->pcie_sem); - err = xgi_mem_free(&info->pcie_heap, offset, filp); + err = xgi_pcie_free_locked(info, offset, filp); up(&info->pcie_sem); if (err) { DRM_ERROR("xgi_pcie_free() failed at base 0x%lx\n", offset); } - if (isvertex) - xgi_pcie_vertex_block = NULL; - return err; } From a33f5487296eacf503f5b27ba829f5fbdae8e63b Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Thu, 19 Jul 2007 19:05:52 -0700 Subject: [PATCH 155/437] Debug message and comment clean up in xgi_submit_cmdlist. 
--- linux-core/xgi_cmdlist.c | 25 +++++-------------------- 1 file changed, 5 insertions(+), 20 deletions(-) diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index d2018057..2fdfcc91 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -66,58 +66,43 @@ static void xgi_submit_cmdlist(struct xgi_info * info, { const unsigned int beginPort = getCurBatchBeginPort(pCmdInfo); - DRM_INFO("After getCurBatchBeginPort()\n"); if (s_cmdring._lastBatchStartAddr == 0) { const unsigned int portOffset = BASE_3D_ENG + beginPort; - /* Jong 06/13/2006; remove marked for system hang test */ - /* xgi_waitfor_pci_idle(info); */ - // Enable PCI Trigger Mode + /* Enable PCI Trigger Mode + */ DRM_INFO("Enable PCI Trigger Mode \n"); - - /* Jong 06/14/2006; 0x400001a */ dwWriteReg(info->mmio_map, BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS, (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | M2REG_CLEAR_COUNTERS_MASK | 0x08 | M2REG_PCI_TRIGGER_MODE_MASK); - /* Jong 06/14/2006; 0x400000a */ dwWriteReg(info->mmio_map, BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS, (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | 0x08 | M2REG_PCI_TRIGGER_MODE_MASK); - // Send PCI begin command - DRM_INFO("Send PCI begin command \n"); + /* Send PCI begin command + */ DRM_INFO("portOffset=%d, beginPort=%d\n", portOffset, beginPort); - /* beginPort = 48; */ - /* 0xc100000 */ dwWriteReg(info->mmio_map, portOffset, (beginPort << 22) + (BEGIN_VALID_MASK) + pCmdInfo->_curDebugID); - DRM_INFO("Send PCI begin command- After\n"); - - /* 0x80000024 */ dwWriteReg(info->mmio_map, portOffset + 4, BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize); - /* 0x1010000 */ dwWriteReg(info->mmio_map, portOffset + 8, (pCmdInfo->_firstBeginAddr >> 4)); - /* Jong 06/12/2006; system hang; marked for test */ dwWriteReg(info->mmio_map, portOffset + 12, 0); - - /* Jong 06/13/2006; remove marked for system hang test */ - /* xgi_waitfor_pci_idle(info); */ } else { u32 *lastBatchVirtAddr; @@ -154,7 +139,7 @@ static void xgi_submit_cmdlist(struct xgi_info * info, } s_cmdring._lastBatchStartAddr = pCmdInfo->_lastBeginAddr; - DRM_INFO("End\n"); + DRM_INFO("%s: exit\n", __func__); } From 970674f4867d65bd16cf3585d46930b72a827cce Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Thu, 19 Jul 2007 19:08:47 -0700 Subject: [PATCH 156/437] Fix error handing related to xgi_cmdlist_initialize. xgi_cmdlist_initialize wasn't correctly checking for errors from xgi_pcie_alloc. Furthermore, xgi_bootstrap, the one caller of xgi_cmdlist_initialize, wasn't check its return value. 
--- linux-core/xgi_cmdlist.c | 10 +++++----- linux-core/xgi_drv.c | 6 +++++- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index 2fdfcc91..885b5066 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -45,11 +45,11 @@ int xgi_cmdlist_initialize(struct xgi_info * info, size_t size) .size = size, .owner = PCIE_2D, }; + int err; - xgi_pcie_alloc(info, &mem_alloc, 0); - - if ((mem_alloc.size == 0) && (mem_alloc.hw_addr == 0)) { - return -1; + err = xgi_pcie_alloc(info, &mem_alloc, 0); + if (err) { + return err; } s_cmdring._cmdRingSize = mem_alloc.size; @@ -58,7 +58,7 @@ int xgi_cmdlist_initialize(struct xgi_info * info, size_t size) s_cmdring._lastBatchStartAddr = 0; s_cmdring._cmdRingOffset = 0; - return 1; + return 0; } static void xgi_submit_cmdlist(struct xgi_info * info, diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index 13e79169..c4e7daae 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -231,7 +231,11 @@ int xgi_bootstrap(DRM_IOCTL_ARGS) } /* Alloc 1M bytes for cmdbuffer which is flush2D batch array */ - xgi_cmdlist_initialize(info, 0x100000); + err = xgi_cmdlist_initialize(info, 0x100000); + if (err) { + DRM_ERROR("xgi_cmdlist_initialize() failed\n"); + return err; + } info->bootstrap_done = 1; return 0; From 56665a42f470d5cf8cb4865558cb658dff15a9dd Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Thu, 19 Jul 2007 19:09:24 -0700 Subject: [PATCH 157/437] Delete unused variable in xgi_driver_load. --- linux-core/xgi_drv.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index c4e7daae..bcb6946d 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -286,11 +286,8 @@ void xgi_kern_isr_bh(struct drm_device *dev) int xgi_driver_load(struct drm_device *dev, unsigned long flags) { - struct xgi_info *info; - int err; + struct xgi_info *info = drm_alloc(sizeof(*info), DRM_MEM_DRIVER); - - info = drm_alloc(sizeof(*info), DRM_MEM_DRIVER); if (!info) return DRM_ERR(ENOMEM); From 6bd848307485f678915913f282e2ea59ae3ca1a8 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Fri, 20 Jul 2007 10:57:40 -0700 Subject: [PATCH 158/437] Change handling of begin types slightly. Moved getCurBatchBeginPort before its only caller. Modified function to return the command ID instead of the port offset. Function also now assumes the input begin type is valid. Added code to ioctl handler to validate begin type. --- linux-core/xgi_cmdlist.c | 56 ++++++++++++++++++++++------------------ 1 file changed, 31 insertions(+), 25 deletions(-) diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index 885b5066..6cc4c142 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -34,7 +34,7 @@ struct xgi_cmdring_info s_cmdring; static void addFlush2D(struct xgi_info * info); -static unsigned int getCurBatchBeginPort(struct xgi_cmd_info * pCmdInfo); +static unsigned int get_batch_command(enum xgi_batch_type type); static void triggerHWCommandList(struct xgi_info * info, unsigned int triggerCounter); static void xgi_cmdlist_reset(void); @@ -61,14 +61,33 @@ int xgi_cmdlist_initialize(struct xgi_info * info, size_t size) return 0; } -static void xgi_submit_cmdlist(struct xgi_info * info, - struct xgi_cmd_info * pCmdInfo) + +/** + * get_batch_command - Get the command ID for the current begin type. + * @type: Type of the current batch + * + * See section 3.2.2 "Begin" (page 15) of the 3D SPG.
+ * + * This function assumes that @type is on the range [0,3]. + */ +unsigned int get_batch_command(enum xgi_batch_type type) { - const unsigned int beginPort = getCurBatchBeginPort(pCmdInfo); + static const unsigned int ports[4] = { + 0x30 >> 2, 0x40 >> 2, 0x50 >> 2, 0x20 >> 2 + }; + + return ports[type]; +} + + +static void xgi_submit_cmdlist(struct xgi_info * info, + const struct xgi_cmd_info * pCmdInfo) +{ + const unsigned int cmd = get_batch_command(pCmdInfo->_firstBeginType); if (s_cmdring._lastBatchStartAddr == 0) { - const unsigned int portOffset = BASE_3D_ENG + beginPort; + const unsigned int portOffset = BASE_3D_ENG + (cmd << 2); /* Enable PCI Trigger Mode @@ -90,10 +109,10 @@ static void xgi_submit_cmdlist(struct xgi_info * info, /* Send PCI begin command */ DRM_INFO("portOffset=%d, beginPort=%d\n", - portOffset, beginPort); + portOffset, cmd << 2); dwWriteReg(info->mmio_map, portOffset, - (beginPort << 22) + (BEGIN_VALID_MASK) + + (cmd << 24) + (BEGIN_VALID_MASK) + pCmdInfo->_curDebugID); dwWriteReg(info->mmio_map, portOffset + 4, @@ -128,7 +147,7 @@ static void xgi_submit_cmdlist(struct xgi_info * info, lastBatchVirtAddr[3] = 0; //barrier(); lastBatchVirtAddr[0] = - (beginPort << 22) + (BEGIN_VALID_MASK) + + (cmd << 24) + (BEGIN_VALID_MASK) + (0xffff & pCmdInfo->_curDebugID); /* Jong 06/12/2006; system hang; marked for test */ @@ -153,6 +172,10 @@ int xgi_submit_cmdlist_ioctl(DRM_IOCTL_ARGS) (struct xgi_cmd_info __user *) data, sizeof(cmd_list)); + if (cmd_list._firstBeginType > BTYPE_CTRL) { + return DRM_ERR(EINVAL); + } + xgi_submit_cmdlist(info, &cmd_list); return 0; } @@ -238,23 +261,6 @@ static void triggerHWCommandList(struct xgi_info * info, } } -static unsigned int getCurBatchBeginPort(struct xgi_cmd_info * pCmdInfo) -{ - // Convert the batch type to begin port ID - switch (pCmdInfo->_firstBeginType) { - case BTYPE_2D: - return 0x30; - case BTYPE_3D: - return 0x40; - case BTYPE_FLIP: - return 0x50; - case BTYPE_CTRL: - return 0x20; - default: - //ASSERT(0); - return 0xff; - } -} static void addFlush2D(struct xgi_info * info) { From 659209cb2d59c7b25df58d130d0649f8f899b693 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Fri, 20 Jul 2007 11:29:16 -0700 Subject: [PATCH 159/437] Clean up generation of begin commands in xgi_submit_cmdlist Generate the begin command once in a temporary buffer. Then, depending on whether the command is to be written directly to the hardware or to a secondary buffer, copy the command to the correct place.
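A rough user-space sketch of the flow described above: build the four-word begin command once, then either write it to the hardware port or patch it into the previous batch's link slot, storing word 0 last. All names and mask values here are placeholders invented for the sketch; the real driver uses dwWriteReg, the real mask layout and a write barrier.

/* Illustrative sketch only (not part of the patch). */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define DEMO_BEGIN_VALID (1u << 20)	/* placeholder, not the real bit layout */
#define DEMO_LINK_ENABLE (1u << 31)

static uint32_t demo_mmio[4];		/* stands in for the MMIO begin port */
static uint32_t demo_prev_batch[4];	/* stands in for lastBatchVirtAddr   */

static void demo_submit(uint32_t cmd, uint32_t debug_id, uint32_t first_size,
			uint32_t first_addr, int first_batch)
{
	uint32_t begin[4];

	/* Build the begin command once, in a temporary buffer. */
	begin[0] = (cmd << 24) | DEMO_BEGIN_VALID | (debug_id & 0xffff);
	begin[1] = DEMO_LINK_ENABLE | first_size;
	begin[2] = first_addr >> 4;
	begin[3] = 0;

	if (first_batch) {
		/* First batch: write it straight to the hardware port. */
		memcpy(demo_mmio, begin, sizeof(begin));
	} else {
		/* Otherwise patch the previous batch's link slot; word 0 is
		 * stored last so a half-built link is never marked valid.
		 */
		demo_prev_batch[1] = begin[1];
		demo_prev_batch[2] = begin[2];
		demo_prev_batch[3] = begin[3];
		demo_prev_batch[0] = begin[0];
	}
}

int main(void)
{
	demo_submit(0x0c, 1, 0x24, 0x1000, 1);
	printf("begin word 0 = 0x%08x\n", demo_mmio[0]);
	return 0;
}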
--- linux-core/xgi_cmdlist.c | 47 +++++++++++++++------------------------- linux-core/xgi_cmdlist.h | 1 + 2 files changed, 18 insertions(+), 30 deletions(-) diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index 6cc4c142..682c4ac1 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -84,8 +84,15 @@ static void xgi_submit_cmdlist(struct xgi_info * info, const struct xgi_cmd_info * pCmdInfo) { const unsigned int cmd = get_batch_command(pCmdInfo->_firstBeginType); + u32 begin[4]; + begin[0] = (cmd << 24) | (BEGIN_VALID_MASK) | + (BEGIN_BEGIN_IDENTIFICATION_MASK & pCmdInfo->_curDebugID); + begin[1] = BEGIN_LINK_ENABLE_MASK | pCmdInfo->_firstSize; + begin[2] = pCmdInfo->_firstBeginAddr >> 4; + begin[3] = 0; + if (s_cmdring._lastBatchStartAddr == 0) { const unsigned int portOffset = BASE_3D_ENG + (cmd << 2); @@ -111,17 +118,10 @@ static void xgi_submit_cmdlist(struct xgi_info * info, DRM_INFO("portOffset=%d, beginPort=%d\n", portOffset, cmd << 2); - dwWriteReg(info->mmio_map, portOffset, - (cmd << 24) + (BEGIN_VALID_MASK) + - pCmdInfo->_curDebugID); - - dwWriteReg(info->mmio_map, portOffset + 4, - BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize); - - dwWriteReg(info->mmio_map, portOffset + 8, - (pCmdInfo->_firstBeginAddr >> 4)); - - dwWriteReg(info->mmio_map, portOffset + 12, 0); + dwWriteReg(info->mmio_map, portOffset, begin[0]); + dwWriteReg(info->mmio_map, portOffset + 4, begin[1]); + dwWriteReg(info->mmio_map, portOffset + 8, begin[2]); + dwWriteReg(info->mmio_map, portOffset + 12, begin[3]); } else { u32 *lastBatchVirtAddr; @@ -135,26 +135,13 @@ static void xgi_submit_cmdlist(struct xgi_info * info, xgi_find_pcie_virt(info, s_cmdring._lastBatchStartAddr); - /* lastBatchVirtAddr should *never* be NULL. However, there - * are currently some bugs that cause this to happen. The - * if-statement here prevents some fatal (i.e., hard lock - * requiring the reset button) oopses. - */ - if (lastBatchVirtAddr) { - lastBatchVirtAddr[1] = - BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize; - lastBatchVirtAddr[2] = pCmdInfo->_firstBeginAddr >> 4; - lastBatchVirtAddr[3] = 0; - //barrier(); - lastBatchVirtAddr[0] = - (cmd << 24) + (BEGIN_VALID_MASK) + - (0xffff & pCmdInfo->_curDebugID); + lastBatchVirtAddr[1] = begin[1]; + lastBatchVirtAddr[2] = begin[2]; + lastBatchVirtAddr[3] = begin[3]; + wmb(); + lastBatchVirtAddr[0] = begin[0]; - /* Jong 06/12/2006; system hang; marked for test */ - triggerHWCommandList(info, pCmdInfo->_beginCount); - } else { - DRM_ERROR("lastBatchVirtAddr is NULL\n"); - } + triggerHWCommandList(info, pCmdInfo->_beginCount); } s_cmdring._lastBatchStartAddr = pCmdInfo->_lastBeginAddr; diff --git a/linux-core/xgi_cmdlist.h b/linux-core/xgi_cmdlist.h index 4bc56ec1..08029386 100644 --- a/linux-core/xgi_cmdlist.h +++ b/linux-core/xgi_cmdlist.h @@ -40,6 +40,7 @@ #define M2REG_PCI_TRIGGER_MODE_MASK (ONE_BIT_MASK<<1) #define BEGIN_VALID_MASK (ONE_BIT_MASK<<20) #define BEGIN_LINK_ENABLE_MASK (ONE_BIT_MASK<<31) +#define BEGIN_BEGIN_IDENTIFICATION_MASK (TWENTY_BIT_MASK<<0) #define M2REG_PCI_TRIGGER_REGISTER_ADDRESS 0x14 typedef enum { From ed82d5398a751cf755cf4168cbb79b181facc86f Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Fri, 20 Jul 2007 11:31:01 -0700 Subject: [PATCH 160/437] Clean up flush command generation in addFlush2D. 
--- linux-core/xgi_cmdlist.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index 682c4ac1..b93541f3 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -280,11 +280,9 @@ static void addFlush2D(struct xgi_info * info) lastBatchVirtAddr[1] = BEGIN_LINK_ENABLE_MASK + 0x08; lastBatchVirtAddr[2] = flushBatchHWAddr >> 4; lastBatchVirtAddr[3] = 0; - - //barrier(); - - // BTYPE_CTRL & NO debugID - lastBatchVirtAddr[0] = (0x20 << 22) + (BEGIN_VALID_MASK); + wmb(); + lastBatchVirtAddr[0] = (get_batch_command(BTYPE_CTRL) << 24) + | (BEGIN_VALID_MASK); triggerHWCommandList(info, 1); From 5dc9fd96d7bf48003db832f145ad8acb4bcb73b4 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Fri, 20 Jul 2007 12:55:51 -0700 Subject: [PATCH 161/437] Fix linux spinlock macros after the last commit. --- linux-core/drmP.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/linux-core/drmP.h b/linux-core/drmP.h index 575e6255..af859c38 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -1266,12 +1266,12 @@ static inline void drm_ctl_free(void *pt, size_t size, int area) * Initialize the lock for use. name is an optional string describing the * lock */ -#define DRM_SPININIT(l,name) spin_lock_init(l); +#define DRM_SPININIT(l,name) spin_lock_init(l) #define DRM_SPINUNINIT(l) -#define DRM_SPINLOCK(l) spin_lock(l); -#define DRM_SPINUNLOCK(u) spin_unlock(l); -#define DRM_SPINLOCK_IRQSAVE(l, flags) spin_lock_irqflags(l, _flags); -#define DRM_SPINUNLOCK_IRQRESTORE(u, flags) spin_unlock_irqrestore(l, _flags); +#define DRM_SPINLOCK(l) spin_lock(l) +#define DRM_SPINUNLOCK(l) spin_unlock(l) +#define DRM_SPINLOCK_IRQSAVE(l, _flags) spin_lock_irqsave(l, _flags); +#define DRM_SPINUNLOCK_IRQRESTORE(l, _flags) spin_unlock_irqrestore(l, _flags); #define DRM_SPINLOCK_ASSERT(l) do {} while (0) #endif /* __KERNEL__ */ From e39286eb5eab8846a228863abf8f1b8b07a9e29d Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Thu, 19 Jul 2007 17:00:17 -0700 Subject: [PATCH 162/437] Remove DRM_ERR OS macro. This was used to make all ioctl handlers return -errno on linux and errno on *BSD. Instead, just return -errno in shared code, and flip sign on return from shared code to *BSD code. 
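A rough user-space sketch of the calling convention this patch establishes: shared handlers return 0 or -errno, and the *BSD dispatch layer negates the result so BSD callers keep seeing positive errno values. The demo_* names are invented for the sketch and are not part of the DRM code.

/* Illustrative sketch only (not part of the patch). */
#include <errno.h>
#include <stdio.h>

static int demo_shared_ioctl(int fail)
{
	return fail ? -EINVAL : 0;	/* Linux-style: 0 or -errno */
}

static int demo_bsd_dispatch(int fail)
{
	int retcode = demo_shared_ioctl(fail);

	/* Shared handlers follow the Linux convention of returning -errno,
	 * so flip the sign before handing the result back to *BSD code,
	 * which expects plain errno values.
	 */
	return -retcode;
}

int main(void)
{
	printf("BSD-side result: %d (EINVAL is %d)\n",
	       demo_bsd_dispatch(1), EINVAL);
	return 0;
}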
--- bsd-core/drmP.h | 3 +- bsd-core/drm_auth.c | 6 +- bsd-core/drm_bufs.c | 84 ++++++++-------- bsd-core/drm_context.c | 12 +-- bsd-core/drm_dma.c | 4 +- bsd-core/drm_drv.c | 18 ++-- bsd-core/drm_fops.c | 4 +- bsd-core/drm_ioctl.c | 16 +-- bsd-core/drm_irq.c | 14 +-- bsd-core/drm_vm.c | 2 +- linux-core/drm_drawable.c | 6 +- linux-core/drm_ioctl.c | 2 +- linux-core/drm_lock.c | 2 +- linux-core/drm_os_linux.h | 1 - linux-core/i810_dma.c | 2 +- linux-core/nouveau_sgdma.c | 10 +- linux-core/sis_drv.c | 2 +- linux-core/sis_mm.c | 4 +- linux-core/via_buffer.c | 2 +- linux-core/via_dmablit.c | 32 +++--- linux-core/via_fence.c | 2 +- linux-core/via_mm.c | 6 +- shared-core/i915_dma.c | 72 +++++++------- shared-core/i915_irq.c | 32 +++--- shared-core/i915_mem.c | 28 +++--- shared-core/mach64_dma.c | 50 +++++----- shared-core/mach64_drv.h | 18 ++-- shared-core/mach64_state.c | 36 +++---- shared-core/mga_dma.c | 42 ++++---- shared-core/mga_drv.h | 4 +- shared-core/mga_state.c | 36 +++---- shared-core/mga_warp.c | 8 +- shared-core/nouveau_fifo.c | 10 +- shared-core/nouveau_mem.c | 18 ++-- shared-core/nouveau_notifier.c | 10 +- shared-core/nouveau_object.c | 58 +++++------ shared-core/nouveau_state.c | 12 +-- shared-core/nv04_instmem.c | 6 +- shared-core/nv20_graph.c | 4 +- shared-core/nv30_graph.c | 6 +- shared-core/nv40_graph.c | 6 +- shared-core/nv50_fifo.c | 6 +- shared-core/nv50_graph.c | 2 +- shared-core/nv50_instmem.c | 16 +-- shared-core/r128_cce.c | 54 +++++------ shared-core/r128_drv.h | 2 +- shared-core/r128_state.c | 100 +++++++++---------- shared-core/r300_cmdbuf.c | 60 ++++++------ shared-core/radeon_cp.c | 50 +++++----- shared-core/radeon_irq.c | 12 +-- shared-core/radeon_mem.c | 26 ++--- shared-core/radeon_state.c | 172 ++++++++++++++++----------------- shared-core/savage_bci.c | 60 ++++++------ shared-core/savage_state.c | 96 +++++++++--------- shared-core/sis_mm.c | 28 +++--- shared-core/via_dma.c | 44 ++++----- shared-core/via_irq.c | 14 +-- shared-core/via_map.c | 2 +- shared-core/via_verifier.c | 8 +- 59 files changed, 723 insertions(+), 719 deletions(-) diff --git a/bsd-core/drmP.h b/bsd-core/drmP.h index ff8ad473..e9271ff7 100644 --- a/bsd-core/drmP.h +++ b/bsd-core/drmP.h @@ -377,7 +377,6 @@ typedef vaddr_t vm_offset_t; #define cpu_to_le32(x) htole32(x) #define le32_to_cpu(x) le32toh(x) -#define DRM_ERR(v) v #define DRM_HZ hz #define DRM_UDELAY(udelay) DELAY(udelay) #define DRM_TIME_SLICE (hz/20) /* Time slice for GLXContexts */ @@ -415,7 +414,7 @@ for ( ret = 0 ; !ret && !(condition) ; ) { \ DRM_UNLOCK(); \ mtx_lock(&dev->irq_lock); \ if (!(condition)) \ - ret = msleep(&(queue), &dev->irq_lock, \ + ret = -msleep(&(queue), &dev->irq_lock, \ PZERO | PCATCH, "drmwtq", (timeout)); \ mtx_unlock(&dev->irq_lock); \ DRM_LOCK(); \ diff --git a/bsd-core/drm_auth.c b/bsd-core/drm_auth.c index aa0e29c0..556bf891 100644 --- a/bsd-core/drm_auth.c +++ b/bsd-core/drm_auth.c @@ -66,7 +66,7 @@ static int drm_add_magic(drm_device_t *dev, drm_file_t *priv, drm_magic_t magic) hash = drm_hash_magic(magic); entry = malloc(sizeof(*entry), M_DRM, M_ZERO | M_NOWAIT); - if (!entry) return DRM_ERR(ENOMEM); + if (!entry) return ENOMEM; entry->magic = magic; entry->priv = priv; entry->next = NULL; @@ -112,7 +112,7 @@ static int drm_remove_magic(drm_device_t *dev, drm_magic_t magic) DRM_UNLOCK(); free(pt, M_DRM); - return DRM_ERR(EINVAL); + return EINVAL; } int drm_getmagic(DRM_IOCTL_ARGS) @@ -168,5 +168,5 @@ int drm_authmagic(DRM_IOCTL_ARGS) drm_remove_magic(dev, auth.magic); return 0; } - return 
DRM_ERR(EINVAL); + return EINVAL; } diff --git a/bsd-core/drm_bufs.c b/bsd-core/drm_bufs.c index 343ab1e8..de28a2cf 100644 --- a/bsd-core/drm_bufs.c +++ b/bsd-core/drm_bufs.c @@ -149,7 +149,7 @@ int drm_addmap(drm_device_t * dev, unsigned long offset, unsigned long size, */ map = malloc(sizeof(*map), M_DRM, M_ZERO | M_NOWAIT); if ( !map ) - return DRM_ERR(ENOMEM); + return ENOMEM; map->offset = offset; map->size = size; @@ -172,7 +172,7 @@ int drm_addmap(drm_device_t * dev, unsigned long offset, unsigned long size, map->size, drm_order(map->size), map->handle ); if ( !map->handle ) { free(map, M_DRM); - return DRM_ERR(ENOMEM); + return ENOMEM; } map->offset = (unsigned long)map->handle; if ( map->flags & _DRM_CONTAINS_LOCK ) { @@ -182,7 +182,7 @@ int drm_addmap(drm_device_t * dev, unsigned long offset, unsigned long size, DRM_UNLOCK(); free(map->handle, M_DRM); free(map, M_DRM); - return DRM_ERR(EBUSY); + return EBUSY; } dev->lock.hw_lock = map->handle; /* Pointer to lock */ DRM_UNLOCK(); @@ -202,13 +202,13 @@ int drm_addmap(drm_device_t * dev, unsigned long offset, unsigned long size, } if (!valid) { free(map, M_DRM); - return DRM_ERR(EACCES); + return EACCES; }*/ break; case _DRM_SCATTER_GATHER: if (!dev->sg) { free(map, M_DRM); - return DRM_ERR(EINVAL); + return EINVAL; } map->offset = map->offset + dev->sg->handle; break; @@ -225,7 +225,7 @@ int drm_addmap(drm_device_t * dev, unsigned long offset, unsigned long size, map->dmah = drm_pci_alloc(dev, map->size, align, 0xfffffffful); if (map->dmah == NULL) { free(map, M_DRM); - return DRM_ERR(ENOMEM); + return ENOMEM; } map->handle = map->dmah->vaddr; map->offset = map->dmah->busaddr; @@ -233,7 +233,7 @@ int drm_addmap(drm_device_t * dev, unsigned long offset, unsigned long size, default: DRM_ERROR("Bad map type %d\n", map->type); free(map, M_DRM); - return DRM_ERR(EINVAL); + return EINVAL; } DRM_LOCK(); @@ -258,12 +258,12 @@ int drm_addmap_ioctl(DRM_IOCTL_ARGS) DRM_DEVICE; if (!(dev->flags & (FREAD|FWRITE))) - return DRM_ERR(EACCES); /* Require read/write */ + return EACCES; /* Require read/write */ DRM_COPY_FROM_USER_IOCTL(request, (drm_map_t *)data, sizeof(drm_map_t)); if (!DRM_SUSER(p) && request.type != _DRM_AGP) - return DRM_ERR(EACCES); + return EACCES; DRM_LOCK(); err = drm_addmap(dev, request.offset, request.size, request.type, @@ -351,7 +351,7 @@ int drm_rmmap_ioctl(DRM_IOCTL_ARGS) /* No match found. */ if (map == NULL) { DRM_UNLOCK(); - return DRM_ERR(EINVAL); + return EINVAL; } drm_rmmap(dev, map); @@ -441,7 +441,7 @@ static int drm_do_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request) } if (!valid) { DRM_DEBUG("zone invalid\n"); - return DRM_ERR(EINVAL); + return EINVAL; }*/ entry = &dma->bufs[order]; @@ -449,7 +449,7 @@ static int drm_do_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request) entry->buflist = malloc(count * sizeof(*entry->buflist), M_DRM, M_NOWAIT | M_ZERO); if ( !entry->buflist ) { - return DRM_ERR(ENOMEM); + return ENOMEM; } entry->buf_size = size; @@ -478,7 +478,7 @@ static int drm_do_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request) /* Set count correctly so we free the proper amount. 
*/ entry->buf_count = count; drm_cleanup_buf_error(dev, entry); - return DRM_ERR(ENOMEM); + return ENOMEM; } offset += alignment; @@ -494,7 +494,7 @@ static int drm_do_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request) if (temp_buflist == NULL) { /* Free the entry because it isn't valid */ drm_cleanup_buf_error(dev, entry); - return DRM_ERR(ENOMEM); + return ENOMEM; } dma->buflist = temp_buflist; @@ -563,7 +563,7 @@ static int drm_do_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request) temp_pagelist == NULL) { free(entry->buflist, M_DRM); free(entry->seglist, M_DRM); - return DRM_ERR(ENOMEM); + return ENOMEM; } memcpy(temp_pagelist, dma->pagelist, dma->page_count * @@ -586,7 +586,7 @@ static int drm_do_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request) entry->seg_count = count; drm_cleanup_buf_error(dev, entry); free(temp_pagelist, M_DRM); - return DRM_ERR(ENOMEM); + return ENOMEM; } entry->seglist[entry->seg_count++] = dmah; @@ -621,7 +621,7 @@ static int drm_do_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request) entry->seg_count = count; drm_cleanup_buf_error(dev, entry); free(temp_pagelist, M_DRM); - return DRM_ERR(ENOMEM); + return ENOMEM; } DRM_DEBUG( "buffer %d @ %p\n", @@ -637,7 +637,7 @@ static int drm_do_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request) /* Free the entry because it isn't valid */ drm_cleanup_buf_error(dev, entry); free(temp_pagelist, M_DRM); - return DRM_ERR(ENOMEM); + return ENOMEM; } dma->buflist = temp_buflist; @@ -705,7 +705,7 @@ static int drm_do_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request) entry->buflist = malloc(count * sizeof(*entry->buflist), M_DRM, M_NOWAIT | M_ZERO); if (entry->buflist == NULL) - return DRM_ERR(ENOMEM); + return ENOMEM; entry->buf_size = size; entry->page_order = page_order; @@ -733,7 +733,7 @@ static int drm_do_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request) /* Set count correctly so we free the proper amount. */ entry->buf_count = count; drm_cleanup_buf_error(dev, entry); - return DRM_ERR(ENOMEM); + return ENOMEM; } DRM_DEBUG( "buffer %d @ %p\n", @@ -752,7 +752,7 @@ static int drm_do_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request) if (temp_buflist == NULL) { /* Free the entry because it isn't valid */ drm_cleanup_buf_error(dev, entry); - return DRM_ERR(ENOMEM); + return ENOMEM; } dma->buflist = temp_buflist; @@ -781,21 +781,21 @@ int drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request) DRM_SPINLOCK(&dev->dma_lock); if (request->count < 0 || request->count > 4096) - return DRM_ERR(EINVAL); + return EINVAL; order = drm_order(request->size); if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) - return DRM_ERR(EINVAL); + return EINVAL; /* No more allocations after first buffer-using ioctl. */ if (dev->buf_use != 0) { DRM_SPINUNLOCK(&dev->dma_lock); - return DRM_ERR(EBUSY); + return EBUSY; } /* No more than one allocation per order */ if (dev->dma->bufs[order].buf_count != 0) { DRM_SPINUNLOCK(&dev->dma_lock); - return DRM_ERR(ENOMEM); + return ENOMEM; } ret = drm_do_addbufs_agp(dev, request); @@ -812,24 +812,24 @@ int drm_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request) DRM_SPINLOCK(&dev->dma_lock); if (!DRM_SUSER(DRM_CURPROC)) - return DRM_ERR(EACCES); + return EACCES; if (request->count < 0 || request->count > 4096) - return DRM_ERR(EINVAL); + return EINVAL; order = drm_order(request->size); if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) - return DRM_ERR(EINVAL); + return EINVAL; /* No more allocations after first buffer-using ioctl. 
*/ if (dev->buf_use != 0) { DRM_SPINUNLOCK(&dev->dma_lock); - return DRM_ERR(EBUSY); + return EBUSY; } /* No more than one allocation per order */ if (dev->dma->bufs[order].buf_count != 0) { DRM_SPINUNLOCK(&dev->dma_lock); - return DRM_ERR(ENOMEM); + return ENOMEM; } ret = drm_do_addbufs_sg(dev, request); @@ -846,24 +846,24 @@ int drm_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request) DRM_SPINLOCK(&dev->dma_lock); if (!DRM_SUSER(DRM_CURPROC)) - return DRM_ERR(EACCES); + return EACCES; if (request->count < 0 || request->count > 4096) - return DRM_ERR(EINVAL); + return EINVAL; order = drm_order(request->size); if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) - return DRM_ERR(EINVAL); + return EINVAL; /* No more allocations after first buffer-using ioctl. */ if (dev->buf_use != 0) { DRM_SPINUNLOCK(&dev->dma_lock); - return DRM_ERR(EBUSY); + return EBUSY; } /* No more than one allocation per order */ if (dev->dma->bufs[order].buf_count != 0) { DRM_SPINUNLOCK(&dev->dma_lock); - return DRM_ERR(ENOMEM); + return ENOMEM; } ret = drm_do_addbufs_pci(dev, request); @@ -928,7 +928,7 @@ int drm_infobufs(DRM_IOCTL_ARGS) if (DRM_COPY_TO_USER(&request.list[count], &from, sizeof(drm_buf_desc_t)) != 0) { - retcode = DRM_ERR(EFAULT); + retcode = EFAULT; break; } @@ -965,13 +965,13 @@ int drm_markbufs(DRM_IOCTL_ARGS) order = drm_order(request.size); if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER || request.low_mark < 0 || request.high_mark < 0) { - return DRM_ERR(EINVAL); + return EINVAL; } DRM_SPINLOCK(&dev->dma_lock); if (request.low_mark > dma->bufs[order].buf_count || request.high_mark > dma->bufs[order].buf_count) { - return DRM_ERR(EINVAL); + return EINVAL; } dma->bufs[order].freelist.low_mark = request.low_mark; @@ -998,20 +998,20 @@ int drm_freebufs(DRM_IOCTL_ARGS) DRM_SPINLOCK(&dev->dma_lock); for ( i = 0 ; i < request.count ; i++ ) { if (DRM_COPY_FROM_USER(&idx, &request.list[i], sizeof(idx))) { - retcode = DRM_ERR(EFAULT); + retcode = EFAULT; break; } if ( idx < 0 || idx >= dma->buf_count ) { DRM_ERROR( "Index %d (of %d max)\n", idx, dma->buf_count - 1 ); - retcode = DRM_ERR(EINVAL); + retcode = EINVAL; break; } buf = dma->buflist[idx]; if ( buf->filp != filp ) { DRM_ERROR("Process %d freeing buffer not owned\n", DRM_CURRENTPID); - retcode = DRM_ERR(EINVAL); + retcode = EINVAL; break; } drm_free_buffer(dev, buf); @@ -1129,5 +1129,5 @@ int drm_mapbufs(DRM_IOCTL_ARGS) DRM_COPY_TO_USER_IOCTL((drm_buf_map_t *)data, request, sizeof(request)); - return DRM_ERR(retcode); + return retcode; } diff --git a/bsd-core/drm_context.c b/bsd-core/drm_context.c index 8e009540..e8df7df5 100644 --- a/bsd-core/drm_context.c +++ b/bsd-core/drm_context.c @@ -109,7 +109,7 @@ int drm_ctxbitmap_init(drm_device_t *dev) dev->ctx_bitmap = malloc(PAGE_SIZE, M_DRM, M_NOWAIT | M_ZERO); if ( dev->ctx_bitmap == NULL ) { DRM_UNLOCK(); - return DRM_ERR(ENOMEM); + return ENOMEM; } dev->context_sareas = NULL; dev->max_context = -1; @@ -148,7 +148,7 @@ int drm_getsareactx( DRM_IOCTL_ARGS ) DRM_LOCK(); if (dev->max_context < 0 || request.ctx_id >= (unsigned) dev->max_context) { DRM_UNLOCK(); - return DRM_ERR(EINVAL); + return EINVAL; } map = dev->context_sareas[request.ctx_id]; @@ -185,7 +185,7 @@ int drm_setsareactx( DRM_IOCTL_ARGS ) bad: DRM_UNLOCK(); - return DRM_ERR(EINVAL); + return EINVAL; } /* ================================================================ @@ -196,7 +196,7 @@ int drm_context_switch(drm_device_t *dev, int old, int new) { if ( test_and_set_bit( 0, &dev->context_flag ) ) { DRM_ERROR( "Reentering -- 
FIXME\n" ); - return DRM_ERR(EBUSY); + return EBUSY; } DRM_DEBUG( "Context switch from %d to %d\n", old, new ); @@ -239,7 +239,7 @@ int drm_resctx(DRM_IOCTL_ARGS) ctx.handle = i; if ( DRM_COPY_TO_USER( &res.contexts[i], &ctx, sizeof(ctx) ) ) - return DRM_ERR(EFAULT); + return EFAULT; } } res.count = DRM_RESERVED_CONTEXTS; @@ -265,7 +265,7 @@ int drm_addctx(DRM_IOCTL_ARGS) if ( ctx.handle == -1 ) { DRM_DEBUG( "Not enough free contexts.\n" ); /* Should this return -EBUSY instead? */ - return DRM_ERR(ENOMEM); + return ENOMEM; } if (dev->driver.context_ctor && ctx.handle != DRM_KERNEL_CONTEXT) { diff --git a/bsd-core/drm_dma.c b/bsd-core/drm_dma.c index 086a9fa2..90678dfc 100644 --- a/bsd-core/drm_dma.c +++ b/bsd-core/drm_dma.c @@ -38,7 +38,7 @@ int drm_dma_setup(drm_device_t *dev) dev->dma = malloc(sizeof(*dev->dma), M_DRM, M_NOWAIT | M_ZERO); if (dev->dma == NULL) - return DRM_ERR(ENOMEM); + return ENOMEM; DRM_SPININIT(&dev->dma_lock, "drmdma"); @@ -122,7 +122,7 @@ int drm_dma(DRM_IOCTL_ARGS) DRM_DEVICE; if (dev->driver.dma_ioctl) { - return dev->driver.dma_ioctl(kdev, cmd, data, flags, p, filp); + return -dev->driver.dma_ioctl(kdev, cmd, data, flags, p, filp); } else { DRM_DEBUG("DMA ioctl on driver with no dma handler\n"); return EINVAL; diff --git a/bsd-core/drm_drv.c b/bsd-core/drm_drv.c index 75866b1b..fa2958b9 100644 --- a/bsd-core/drm_drv.c +++ b/bsd-core/drm_drv.c @@ -532,7 +532,8 @@ static int drm_load(drm_device_t *dev) if (dev->driver.load != NULL) { DRM_LOCK(); - retcode = dev->driver.load(dev, dev->id_entry->driver_private); + retcode = -dev->driver.load(dev, + dev->id_entry->driver_private); DRM_UNLOCK(); if (retcode != 0) goto error; @@ -544,7 +545,7 @@ static int drm_load(drm_device_t *dev) if (dev->driver.require_agp && dev->agp == NULL) { DRM_ERROR("Card isn't AGP, or couldn't initialize " "AGP.\n"); - retcode = DRM_ERR(ENOMEM); + retcode = ENOMEM; goto error; } if (dev->agp != NULL) { @@ -660,7 +661,7 @@ int drm_version(DRM_IOCTL_ARGS) name##_len = strlen( value ); \ if ( len && name ) { \ if ( DRM_COPY_TO_USER( name, value, len ) ) \ - return DRM_ERR(EFAULT); \ + return EFAULT; \ } version.version_major = dev->driver.major; @@ -755,7 +756,7 @@ int drm_close(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p) for (;;) { if ( !dev->lock.hw_lock ) { /* Device has been unregistered */ - retcode = DRM_ERR(EINTR); + retcode = EINTR; break; } if (drm_lock_take(&dev->lock.hw_lock->lock, @@ -914,13 +915,18 @@ int drm_ioctl(struct cdev *kdev, u_long cmd, caddr_t data, int flags, if (is_driver_ioctl) DRM_LOCK(); retcode = func(kdev, cmd, data, flags, p, filp); - if (is_driver_ioctl) + if (is_driver_ioctl) { DRM_UNLOCK(); + /* Driver ioctls in shared code follow the linux convention of + * returning -errno instead of errno. 
+ */ + retcode = -retcode; + } if (retcode != 0) DRM_DEBUG(" returning %d\n", retcode); - return DRM_ERR(retcode); + return retcode; } drm_local_map_t *drm_getsarea(drm_device_t *dev) diff --git a/bsd-core/drm_fops.c b/bsd-core/drm_fops.c index f5c9349b..870e4d29 100644 --- a/bsd-core/drm_fops.c +++ b/bsd-core/drm_fops.c @@ -75,7 +75,7 @@ int drm_open_helper(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p, priv = malloc(sizeof(*priv), M_DRM, M_NOWAIT | M_ZERO); if (priv == NULL) { DRM_UNLOCK(); - return DRM_ERR(ENOMEM); + return ENOMEM; } #if __FreeBSD_version >= 500000 priv->uid = p->td_ucred->cr_svuid; @@ -93,7 +93,7 @@ int drm_open_helper(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p, priv->authenticated = DRM_SUSER(p); if (dev->driver.open) { - retcode = dev->driver.open(dev, priv); + retcode = -dev->driver.open(dev, priv); if (retcode != 0) { free(priv, M_DRM); DRM_UNLOCK(); diff --git a/bsd-core/drm_ioctl.c b/bsd-core/drm_ioctl.c index b5b5cf58..e450066c 100644 --- a/bsd-core/drm_ioctl.c +++ b/bsd-core/drm_ioctl.c @@ -48,7 +48,7 @@ int drm_getunique(DRM_IOCTL_ARGS) if (u.unique_len >= dev->unique_len) { if (DRM_COPY_TO_USER(u.unique, dev->unique, dev->unique_len)) - return DRM_ERR(EFAULT); + return EFAULT; } u.unique_len = dev->unique_len; @@ -71,15 +71,15 @@ int drm_setunique(DRM_IOCTL_ARGS) /* Check and copy in the submitted Bus ID */ if (!u.unique_len || u.unique_len > 1024) - return DRM_ERR(EINVAL); + return EINVAL; busid = malloc(u.unique_len + 1, M_DRM, M_WAITOK); if (busid == NULL) - return DRM_ERR(ENOMEM); + return ENOMEM; if (DRM_COPY_FROM_USER(busid, u.unique, u.unique_len)) { free(busid, M_DRM); - return DRM_ERR(EFAULT); + return EFAULT; } busid[u.unique_len] = '\0'; @@ -89,7 +89,7 @@ int drm_setunique(DRM_IOCTL_ARGS) ret = sscanf(busid, "PCI:%d:%d:%d", &bus, &slot, &func); if (ret != 3) { free(busid, M_DRM); - return DRM_ERR(EINVAL); + return EINVAL; } domain = bus >> 8; bus &= 0xff; @@ -99,14 +99,14 @@ int drm_setunique(DRM_IOCTL_ARGS) (slot != dev->pci_slot) || (func != dev->pci_func)) { free(busid, M_DRM); - return DRM_ERR(EINVAL); + return EINVAL; } /* Actually set the device's busid now. 
*/ DRM_LOCK(); if (dev->unique_len || dev->unique) { DRM_UNLOCK(); - return DRM_ERR(EBUSY); + return EBUSY; } dev->unique_len = u.unique_len; @@ -158,7 +158,7 @@ int drm_getmap(DRM_IOCTL_ARGS) DRM_LOCK(); if (idx < 0) { DRM_UNLOCK(); - return DRM_ERR(EINVAL); + return EINVAL; } TAILQ_FOREACH(mapinlist, &dev->maplist, link) { diff --git a/bsd-core/drm_irq.c b/bsd-core/drm_irq.c index 2a69e014..215eb0c9 100644 --- a/bsd-core/drm_irq.c +++ b/bsd-core/drm_irq.c @@ -76,14 +76,14 @@ int drm_irq_install(drm_device_t *dev) #endif if (dev->irq == 0 || dev->dev_private == NULL) - return DRM_ERR(EINVAL); + return EINVAL; DRM_DEBUG( "%s: irq=%d\n", __FUNCTION__, dev->irq ); DRM_LOCK(); if (dev->irq_enabled) { DRM_UNLOCK(); - return DRM_ERR(EBUSY); + return EBUSY; } dev->irq_enabled = 1; @@ -157,7 +157,7 @@ int drm_irq_uninstall(drm_device_t *dev) #endif if (!dev->irq_enabled) - return DRM_ERR(EINVAL); + return EINVAL; dev->irq_enabled = 0; #ifdef __FreeBSD__ @@ -199,7 +199,7 @@ int drm_control(DRM_IOCTL_ARGS) return 0; if (dev->if_version < DRM_IF_VERSION(1, 2) && ctl.irq != dev->irq) - return DRM_ERR(EINVAL); + return EINVAL; return drm_irq_install(dev); case DRM_UNINST_HANDLER: if (!dev->driver.use_irq) @@ -209,7 +209,7 @@ int drm_control(DRM_IOCTL_ARGS) DRM_UNLOCK(); return err; default: - return DRM_ERR(EINVAL); + return EINVAL; } } @@ -221,7 +221,7 @@ int drm_wait_vblank(DRM_IOCTL_ARGS) int ret; if (!dev->irq_enabled) - return DRM_ERR(EINVAL); + return EINVAL; DRM_COPY_FROM_USER_IOCTL( vblwait, (drm_wait_vblank_t *)data, sizeof(vblwait) ); @@ -253,7 +253,7 @@ int drm_wait_vblank(DRM_IOCTL_ARGS) ret = EINVAL; } else { DRM_LOCK(); - ret = dev->driver.vblank_wait(dev, &vblwait.request.sequence); + ret = -dev->driver.vblank_wait(dev, &vblwait.request.sequence); DRM_UNLOCK(); microtime(&now); diff --git a/bsd-core/drm_vm.c b/bsd-core/drm_vm.c index 7f732c9b..d8561699 100644 --- a/bsd-core/drm_vm.c +++ b/bsd-core/drm_vm.c @@ -52,7 +52,7 @@ paddr_t drm_mmap(dev_t kdev, off_t offset, int prot) } if (!priv->authenticated) - return DRM_ERR(EACCES); + return EACCES; if (dev->dma && offset >= 0 && offset < ptoa(dev->dma->page_count)) { drm_device_dma_t *dma = dev->dma; diff --git a/linux-core/drm_drawable.c b/linux-core/drm_drawable.c index d6cdba56..2787c9a3 100644 --- a/linux-core/drm_drawable.c +++ b/linux-core/drm_drawable.c @@ -130,7 +130,7 @@ int drm_update_drawable_info(DRM_IOCTL_ARGS) if (update.num && !rects) { DRM_ERROR("Failed to allocate cliprect memory\n"); - err = DRM_ERR(ENOMEM); + err = -ENOMEM; goto error; } @@ -140,7 +140,7 @@ int drm_update_drawable_info(DRM_IOCTL_ARGS) update.num * sizeof(*rects))) { DRM_ERROR("Failed to copy cliprects from userspace\n"); - err = DRM_ERR(EFAULT); + err = -EFAULT; goto error; } @@ -161,7 +161,7 @@ int drm_update_drawable_info(DRM_IOCTL_ARGS) break; default: DRM_ERROR("Invalid update type %d\n", update.type); - return DRM_ERR(EINVAL); + return -EINVAL; } return 0; diff --git a/linux-core/drm_ioctl.c b/linux-core/drm_ioctl.c index a7bacbb8..a2c3952c 100644 --- a/linux-core/drm_ioctl.c +++ b/linux-core/drm_ioctl.c @@ -121,7 +121,7 @@ int drm_setunique(struct inode *inode, struct file *filp, */ ret = sscanf(dev->unique, "PCI:%d:%d:%d", &bus, &slot, &func); if (ret != 3) - return DRM_ERR(EINVAL); + return -EINVAL; domain = bus >> 8; bus &= 0xff; diff --git a/linux-core/drm_lock.c b/linux-core/drm_lock.c index 1ba01aab..f3685ce0 100644 --- a/linux-core/drm_lock.c +++ b/linux-core/drm_lock.c @@ -125,7 +125,7 @@ int drm_lock(struct inode *inode, struct file *filp, 
if (dev->driver->dma_quiescent && (lock.flags & _DRM_LOCK_QUIESCENT)) { if (dev->driver->dma_quiescent(dev)) { DRM_DEBUG( "%d waiting for DMA quiescent\n", lock.context); - return DRM_ERR(EBUSY); + return -EBUSY; } } diff --git a/linux-core/drm_os_linux.h b/linux-core/drm_os_linux.h index 9d0d3f69..3d2ad779 100644 --- a/linux-core/drm_os_linux.h +++ b/linux-core/drm_os_linux.h @@ -10,7 +10,6 @@ #define DRMFILE struct file * /** Ioctl arguments */ #define DRM_IOCTL_ARGS struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data -#define DRM_ERR(d) -(d) /** Current process ID */ #define DRM_CURRENTPID current->pid #define DRM_SUSER(p) capable(CAP_SYS_ADMIN) diff --git a/linux-core/i810_dma.c b/linux-core/i810_dma.c index 1e6d8cd3..4b43647e 100644 --- a/linux-core/i810_dma.c +++ b/linux-core/i810_dma.c @@ -399,7 +399,7 @@ static int i810_dma_initialize(struct drm_device * dev, i810_dma_cleanup(dev); DRM_ERROR("can not ioremap virtual address for" " ring buffer\n"); - return DRM_ERR(ENOMEM); + return -ENOMEM; } dev_priv->ring.virtual_start = dev_priv->ring.map.handle; diff --git a/linux-core/nouveau_sgdma.c b/linux-core/nouveau_sgdma.c index a65317cd..0ddac952 100644 --- a/linux-core/nouveau_sgdma.c +++ b/linux-core/nouveau_sgdma.c @@ -33,7 +33,7 @@ nouveau_sgdma_populate(struct drm_ttm_backend *be, unsigned long num_pages, DRM_DEBUG("num_pages = %ld\n", num_pages); if (nvbe->pagelist) - return DRM_ERR(EINVAL); + return -EINVAL; nvbe->pages = (num_pages << PAGE_SHIFT) >> NV_CTXDMA_PAGE_SHIFT; nvbe->pagelist = drm_alloc(nvbe->pages*sizeof(dma_addr_t), DRM_MEM_PAGES); @@ -48,7 +48,7 @@ nouveau_sgdma_populate(struct drm_ttm_backend *be, unsigned long num_pages, if (pci_dma_mapping_error(nvbe->pagelist[d])) { be->func->clear(be); DRM_ERROR("pci_map_page failed\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } nvbe->pages_populated = ++d; } @@ -92,7 +92,7 @@ nouveau_sgdma_bind(struct drm_ttm_backend *be, unsigned long pg_start, DRM_DEBUG("pg=0x%lx (0x%llx), cached=%d\n", pg_start, offset, cached); if (offset & NV_CTXDMA_PAGE_MASK) - return DRM_ERR(EINVAL); + return -EINVAL; nvbe->pte_start = (offset >> NV_CTXDMA_PAGE_SHIFT); if (dev_priv->card_type < NV_50) nvbe->pte_start += 2; /* skip ctxdma header */ @@ -102,7 +102,7 @@ nouveau_sgdma_bind(struct drm_ttm_backend *be, unsigned long pg_start, if (pteval & NV_CTXDMA_PAGE_MASK) { DRM_ERROR("Bad pteval 0x%llx\n", pteval); - return DRM_ERR(EINVAL); + return -EINVAL; } if (dev_priv->card_type < NV_50) { @@ -282,7 +282,7 @@ nouveau_sgdma_nottm_hack_init(struct drm_device *dev) dev_priv->gart_info.sg_be = nouveau_sgdma_init_ttm(dev); if (!dev_priv->gart_info.sg_be) - return DRM_ERR(ENOMEM); + return -ENOMEM; be = dev_priv->gart_info.sg_be; /* Hack the aperture size down to the amount of system memory diff --git a/linux-core/sis_drv.c b/linux-core/sis_drv.c index b4c3f93b..c9112c63 100644 --- a/linux-core/sis_drv.c +++ b/linux-core/sis_drv.c @@ -43,7 +43,7 @@ static int sis_driver_load(struct drm_device *dev, unsigned long chipset) dev_priv = drm_calloc(1, sizeof(drm_sis_private_t), DRM_MEM_DRIVER); if (dev_priv == NULL) - return DRM_ERR(ENOMEM); + return -ENOMEM; dev->dev_private = (void *)dev_priv; dev_priv->chipset = chipset; diff --git a/linux-core/sis_mm.c b/linux-core/sis_mm.c index edbf8bf4..f9c7a7e2 100644 --- a/linux-core/sis_mm.c +++ b/linux-core/sis_mm.c @@ -139,7 +139,7 @@ static int sis_drm_alloc(struct drm_device * dev, struct drm_file * priv, dev_priv->agp_initialized)) { DRM_ERROR ("Attempt to allocate from uninitialized 
memory manager.\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } mem.size = (mem.size + SIS_MM_ALIGN_MASK) >> SIS_MM_ALIGN_SHIFT; @@ -158,7 +158,7 @@ static int sis_drm_alloc(struct drm_device * dev, struct drm_file * priv, mem.offset = 0; mem.size = 0; mem.free = 0; - retval = DRM_ERR(ENOMEM); + retval = -ENOMEM; } DRM_COPY_TO_USER_IOCTL(argp, mem, sizeof(mem)); diff --git a/linux-core/via_buffer.c b/linux-core/via_buffer.c index 0461b3c7..eb5ea826 100644 --- a/linux-core/via_buffer.c +++ b/linux-core/via_buffer.c @@ -60,7 +60,7 @@ static int via_vram_info(struct drm_device *dev, struct pci_dev *pdev = dev->pdev; unsigned long flags; - int ret = DRM_ERR(EINVAL); + int ret = -EINVAL; int i; for (i=0; i<6; ++i) { flags = pci_resource_flags(pdev, i); diff --git a/linux-core/via_dmablit.c b/linux-core/via_dmablit.c index 6422609c..5e73bd1a 100644 --- a/linux-core/via_dmablit.c +++ b/linux-core/via_dmablit.c @@ -236,7 +236,7 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer) first_pfn + 1; if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages))) - return DRM_ERR(ENOMEM); + return -ENOMEM; memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages); down_read(¤t->mm->mmap_sem); ret = get_user_pages(current, current->mm, (unsigned long) xfer->mem_addr, @@ -248,7 +248,7 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer) if (ret < 0) return ret; vsg->state = dr_via_pages_locked; - return DRM_ERR(EINVAL); + return -EINVAL; } vsg->state = dr_via_pages_locked; DRM_DEBUG("DMA pages locked\n"); @@ -271,14 +271,14 @@ via_alloc_desc_pages(drm_via_sg_info_t *vsg) vsg->descriptors_per_page; if (NULL == (vsg->desc_pages = kmalloc(sizeof(void *) * vsg->num_desc_pages, GFP_KERNEL))) - return DRM_ERR(ENOMEM); + return -ENOMEM; memset(vsg->desc_pages, 0, sizeof(void *) * vsg->num_desc_pages); vsg->state = dr_via_desc_pages_alloc; for (i=0; inum_desc_pages; ++i) { if (NULL == (vsg->desc_pages[i] = (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL))) - return DRM_ERR(ENOMEM); + return -ENOMEM; } DRM_DEBUG("Allocated %d pages for %d descriptors.\n", vsg->num_desc_pages, vsg->num_desc); @@ -606,7 +606,7 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli if (xfer->num_lines <= 0 || xfer->line_length <= 0) { DRM_ERROR("Zero size bitblt.\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } /* @@ -619,7 +619,7 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli if ((xfer->mem_stride - xfer->line_length) >= PAGE_SIZE) { DRM_ERROR("Too large system memory stride. 
Stride: %d, " "Length: %d\n", xfer->mem_stride, xfer->line_length); - return DRM_ERR(EINVAL); + return -EINVAL; } if ((xfer->mem_stride == xfer->line_length) && @@ -637,7 +637,7 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) { DRM_ERROR("Too large PCI DMA bitblt.\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } /* @@ -648,7 +648,7 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli if (xfer->mem_stride < xfer->line_length || abs(xfer->fb_stride) < xfer->line_length) { DRM_ERROR("Invalid frame-buffer / memory stride.\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } /* @@ -661,13 +661,13 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) || ((xfer->num_lines > 1) && ((xfer->mem_stride & 3) != (xfer->fb_stride & 3)))) { DRM_ERROR("Invalid DRM bitblt alignment.\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } #else if ((((unsigned long)xfer->mem_addr & 15) || ((unsigned long)xfer->fb_addr & 3)) || ((xfer->num_lines > 1) && ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) { DRM_ERROR("Invalid DRM bitblt alignment.\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } #endif @@ -707,7 +707,7 @@ via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine) DRM_WAIT_ON(ret, blitq->busy_queue, DRM_HZ, blitq->num_free > 0); if (ret) { - return (DRM_ERR(EINTR) == ret) ? DRM_ERR(EAGAIN) : ret; + return (-EINTR == ret) ? -EAGAIN : ret; } spin_lock_irqsave(&blitq->blit_lock, irqsave); @@ -751,7 +751,7 @@ via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer) if (dev_priv == NULL) { DRM_ERROR("Called without initialization.\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } engine = (xfer->to_fb) ? 0 : 1; @@ -761,7 +761,7 @@ via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer) } if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) { via_dmablit_release_slot(blitq); - return DRM_ERR(ENOMEM); + return -ENOMEM; } if (0 != (ret = via_build_sg_info(dev, vsg, xfer))) { via_dmablit_release_slot(blitq); @@ -801,12 +801,12 @@ via_dma_blit_sync( DRM_IOCTL_ARGS ) DRM_COPY_FROM_USER_IOCTL(sync, (drm_via_blitsync_t *)data, sizeof(sync)); if (sync.engine >= VIA_NUM_BLIT_ENGINES) - return DRM_ERR(EINVAL); + return -EINVAL; err = via_dmablit_sync(dev, sync.sync_handle, sync.engine); - if (DRM_ERR(EINTR) == err) - err = DRM_ERR(EAGAIN); + if (-EINTR) == err + err = -EAGAIN; return err; } diff --git a/linux-core/via_fence.c b/linux-core/via_fence.c index a8db3d12..a6d4ece9 100644 --- a/linux-core/via_fence.c +++ b/linux-core/via_fence.c @@ -142,7 +142,7 @@ int via_fence_emit_sequence(struct drm_device * dev, uint32_t class, uint32_t fl *native_type = DRM_FENCE_TYPE_EXE; break; default: - ret = DRM_ERR(EINVAL); + ret = -EINVAL; break; } return ret; diff --git a/linux-core/via_mm.c b/linux-core/via_mm.c index 1ac51050..7cb8651d 100644 --- a/linux-core/via_mm.c +++ b/linux-core/via_mm.c @@ -138,7 +138,7 @@ int via_mem_alloc(DRM_IOCTL_ARGS) if (mem.type > VIA_MEM_AGP) { DRM_ERROR("Unknown memory type allocation\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } mutex_lock(&dev->struct_mutex); if (0 == ((mem.type == VIA_MEM_VIDEO) ? 
dev_priv->vram_initialized : @@ -146,7 +146,7 @@ int via_mem_alloc(DRM_IOCTL_ARGS) DRM_ERROR ("Attempt to allocate from uninitialized memory manager.\n"); mutex_unlock(&dev->struct_mutex); - return DRM_ERR(EINVAL); + return -EINVAL; } tmpSize = (mem.size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT; @@ -164,7 +164,7 @@ int via_mem_alloc(DRM_IOCTL_ARGS) mem.size = 0; mem.index = 0; DRM_DEBUG("Video memory allocation failed\n"); - retval = DRM_ERR(ENOMEM); + retval = -ENOMEM; } DRM_COPY_TO_USER_IOCTL((drm_via_mem_t __user *) data, mem, sizeof(mem)); diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c index 5fb9fcff..81e6981d 100644 --- a/shared-core/i915_dma.c +++ b/shared-core/i915_dma.c @@ -71,7 +71,7 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller) DRM_UDELAY(1); } - return DRM_ERR(EBUSY); + return -EBUSY; } void i915_kernel_lost_context(struct drm_device * dev) @@ -136,7 +136,7 @@ static int i915_initialize(struct drm_device * dev, DRM_ERROR("can not find sarea!\n"); dev->dev_private = (void *)dev_priv; i915_dma_cleanup(dev); - return DRM_ERR(EINVAL); + return -EINVAL; } dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset); @@ -144,7 +144,7 @@ static int i915_initialize(struct drm_device * dev, dev->dev_private = (void *)dev_priv; i915_dma_cleanup(dev); DRM_ERROR("can not find mmio map!\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } dev_priv->sarea_priv = (drm_i915_sarea_t *) @@ -168,7 +168,7 @@ static int i915_initialize(struct drm_device * dev, i915_dma_cleanup(dev); DRM_ERROR("can not ioremap virtual address for" " ring buffer\n"); - return DRM_ERR(ENOMEM); + return -ENOMEM; } dev_priv->ring.virtual_start = dev_priv->ring.map.handle; @@ -198,7 +198,7 @@ static int i915_initialize(struct drm_device * dev, dev->dev_private = (void *)dev_priv; i915_dma_cleanup(dev); DRM_ERROR("Can not allocate hardware status page\n"); - return DRM_ERR(ENOMEM); + return -ENOMEM; } dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr; dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr; @@ -220,24 +220,24 @@ static int i915_dma_resume(struct drm_device * dev) if (!dev_priv->sarea) { DRM_ERROR("can not find sarea!\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } if (!dev_priv->mmio_map) { DRM_ERROR("can not find mmio map!\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } if (dev_priv->ring.map.handle == NULL) { DRM_ERROR("can not ioremap virtual address for" " ring buffer\n"); - return DRM_ERR(ENOMEM); + return -ENOMEM; } /* Program Hardware Status Page */ if (!dev_priv->hw_status_page) { DRM_ERROR("Can not find hardware status page\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page); @@ -265,7 +265,7 @@ static int i915_dma_init(DRM_IOCTL_ARGS) dev_priv = drm_alloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER); if (dev_priv == NULL) - return DRM_ERR(ENOMEM); + return -ENOMEM; retcode = i915_initialize(dev, dev_priv, &init); break; case I915_CLEANUP_DMA: @@ -275,7 +275,7 @@ static int i915_dma_init(DRM_IOCTL_ARGS) retcode = i915_dma_resume(dev); break; default: - retcode = DRM_ERR(EINVAL); + retcode = -EINVAL; break; } @@ -365,7 +365,7 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor RING_LOCALS; if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8) - return DRM_ERR(EINVAL); + return -EINVAL; BEGIN_LP_RING((dwords+1)&~1); @@ -373,17 +373,17 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor int cmd, sz; if 
(DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd))) - return DRM_ERR(EINVAL); + return -EINVAL; if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords) - return DRM_ERR(EINVAL); + return -EINVAL; OUT_RING(cmd); while (++i, --sz) { if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd))) { - return DRM_ERR(EINVAL); + return -EINVAL; } OUT_RING(cmd); } @@ -406,13 +406,13 @@ static int i915_emit_box(struct drm_device * dev, RING_LOCALS; if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) { - return DRM_ERR(EFAULT); + return -EFAULT; } if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) { DRM_ERROR("Bad box %d,%d..%d,%d\n", box.x1, box.y1, box.x2, box.y2); - return DRM_ERR(EINVAL); + return -EINVAL; } if (IS_I965G(dev)) { @@ -493,7 +493,7 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev, if (cmd->sz & 0x3) { DRM_ERROR("alignment"); - return DRM_ERR(EINVAL); + return -EINVAL; } i915_kernel_lost_context(dev); @@ -531,7 +531,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev, if ((batch->start | batch->used) & 0x7) { DRM_ERROR("alignment"); - return DRM_ERR(EINVAL); + return -EINVAL; } i915_kernel_lost_context(dev); @@ -670,7 +670,7 @@ static int i915_batchbuffer(DRM_IOCTL_ARGS) if (!dev_priv->allow_batchbuffer) { DRM_ERROR("Batchbuffer ioctl disabled\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } DRM_COPY_FROM_USER_IOCTL(batch, (drm_i915_batchbuffer_t __user *) data, @@ -684,7 +684,7 @@ static int i915_batchbuffer(DRM_IOCTL_ARGS) if (batch.num_cliprects && DRM_VERIFYAREA_READ(batch.cliprects, batch.num_cliprects * sizeof(struct drm_clip_rect))) - return DRM_ERR(EFAULT); + return -EFAULT; ret = i915_dispatch_batchbuffer(dev, &batch); @@ -714,7 +714,7 @@ static int i915_cmdbuffer(DRM_IOCTL_ARGS) cmdbuf.num_cliprects * sizeof(struct drm_clip_rect))) { DRM_ERROR("Fault accessing cliprects\n"); - return DRM_ERR(EFAULT); + return -EFAULT; } ret = i915_dispatch_cmdbuffer(dev, &cmdbuf); @@ -764,7 +764,7 @@ static int i915_flip_bufs(DRM_IOCTL_ARGS) if (param.pipes & ~0x3) { DRM_ERROR("Invalid pipes 0x%x, only <= 0x3 is valid\n", param.pipes); - return DRM_ERR(EINVAL); + return -EINVAL; } i915_dispatch_flip(dev, param.pipes, 0); @@ -782,7 +782,7 @@ static int i915_getparam(DRM_IOCTL_ARGS) if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); - return DRM_ERR(EINVAL); + return -EINVAL; } DRM_COPY_FROM_USER_IOCTL(param, (drm_i915_getparam_t __user *) data, @@ -800,12 +800,12 @@ static int i915_getparam(DRM_IOCTL_ARGS) break; default: DRM_ERROR("Unknown parameter %d\n", param.param); - return DRM_ERR(EINVAL); + return -EINVAL; } if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) { DRM_ERROR("DRM_COPY_TO_USER failed\n"); - return DRM_ERR(EFAULT); + return -EFAULT; } return 0; @@ -819,7 +819,7 @@ static int i915_setparam(DRM_IOCTL_ARGS) if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); - return DRM_ERR(EINVAL); + return -EINVAL; } DRM_COPY_FROM_USER_IOCTL(param, (drm_i915_setparam_t __user *) data, @@ -837,7 +837,7 @@ static int i915_setparam(DRM_IOCTL_ARGS) break; default: DRM_ERROR("unknown parameter %d\n", param.param); - return DRM_ERR(EINVAL); + return -EINVAL; } return 0; @@ -865,13 +865,13 @@ static int i915_mmio(DRM_IOCTL_ARGS) if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); - return DRM_ERR(EINVAL); + return -EINVAL; } DRM_COPY_FROM_USER_IOCTL(mmio, (drm_i915_mmio_t __user *) data, sizeof(mmio)); if (mmio.reg >= mmio_table_size) - 
return DRM_ERR(EINVAL); + return -EINVAL; e = &mmio_table[mmio.reg]; base = (u8 *) dev_priv->mmio_map->handle + e->offset; @@ -879,21 +879,21 @@ static int i915_mmio(DRM_IOCTL_ARGS) switch (mmio.read_write) { case I915_MMIO_READ: if (!(e->flag & I915_MMIO_MAY_READ)) - return DRM_ERR(EINVAL); + return -EINVAL; for (i = 0; i < e->size / 4; i++) buf[i] = I915_READ(e->offset + i * 4); if (DRM_COPY_TO_USER(mmio.data, buf, e->size)) { DRM_ERROR("DRM_COPY_TO_USER failed\n"); - return DRM_ERR(EFAULT); + return -EFAULT; } break; case I915_MMIO_WRITE: if (!(e->flag & I915_MMIO_MAY_WRITE)) - return DRM_ERR(EINVAL); + return -EINVAL; if(DRM_COPY_FROM_USER(buf, mmio.data, e->size)) { DRM_ERROR("DRM_COPY_TO_USER failed\n"); - return DRM_ERR(EFAULT); + return -EFAULT; } for (i = 0; i < e->size / 4; i++) I915_WRITE(e->offset + i * 4, buf[i]); @@ -910,7 +910,7 @@ static int i915_set_status_page(DRM_IOCTL_ARGS) if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); - return DRM_ERR(EINVAL); + return -EINVAL; } DRM_COPY_FROM_USER_IOCTL(hws, (drm_i915_hws_addr_t __user *) data, sizeof(hws)); @@ -931,7 +931,7 @@ static int i915_set_status_page(DRM_IOCTL_ARGS) dev_priv->status_gfx_addr = 0; DRM_ERROR("can not ioremap virtual address for" " G33 hw status page\n"); - return DRM_ERR(ENOMEM); + return -ENOMEM; } dev_priv->hw_status_page = dev_priv->hws_map.handle; diff --git a/shared-core/i915_irq.c b/shared-core/i915_irq.c index 698ecced..f4775b75 100644 --- a/shared-core/i915_irq.c +++ b/shared-core/i915_irq.c @@ -406,7 +406,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr) READ_BREADCRUMB(dev_priv) >= irq_nr); i915_user_irq_off(dev_priv); - if (ret == DRM_ERR(EBUSY)) { + if (ret == -EBUSY) { DRM_ERROR("%s: EBUSY -- rec: %d emitted: %d\n", __FUNCTION__, READ_BREADCRUMB(dev_priv), (int)dev_priv->counter); @@ -425,7 +425,7 @@ static int i915_driver_vblank_do_wait(struct drm_device *dev, unsigned int *sequ if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); - return DRM_ERR(EINVAL); + return -EINVAL; } DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ, @@ -460,7 +460,7 @@ int i915_irq_emit(DRM_IOCTL_ARGS) if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); - return DRM_ERR(EINVAL); + return -EINVAL; } DRM_COPY_FROM_USER_IOCTL(emit, (drm_i915_irq_emit_t __user *) data, @@ -470,7 +470,7 @@ int i915_irq_emit(DRM_IOCTL_ARGS) if (DRM_COPY_TO_USER(emit.irq_seq, &result, sizeof(int))) { DRM_ERROR("copy_to_user\n"); - return DRM_ERR(EFAULT); + return -EFAULT; } return 0; @@ -486,7 +486,7 @@ int i915_irq_wait(DRM_IOCTL_ARGS) if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); - return DRM_ERR(EINVAL); + return -EINVAL; } DRM_COPY_FROM_USER_IOCTL(irqwait, (drm_i915_irq_wait_t __user *) data, @@ -519,7 +519,7 @@ int i915_vblank_pipe_set(DRM_IOCTL_ARGS) if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); - return DRM_ERR(EINVAL); + return -EINVAL; } DRM_COPY_FROM_USER_IOCTL(pipe, (drm_i915_vblank_pipe_t __user *) data, @@ -528,7 +528,7 @@ int i915_vblank_pipe_set(DRM_IOCTL_ARGS) if (pipe.pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) { DRM_ERROR("%s called with invalid pipe 0x%x\n", __FUNCTION__, pipe.pipe); - return DRM_ERR(EINVAL); + return -EINVAL; } dev_priv->vblank_pipe = pipe.pipe; @@ -547,7 +547,7 @@ int i915_vblank_pipe_get(DRM_IOCTL_ARGS) if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); - return DRM_ERR(EINVAL); + return -EINVAL; } 
flag = I915_READ(I915REG_INT_ENABLE_R); @@ -576,12 +576,12 @@ int i915_vblank_swap(DRM_IOCTL_ARGS) if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __func__); - return DRM_ERR(EINVAL); + return -EINVAL; } if (dev_priv->sarea_priv->rotation) { DRM_DEBUG("Rotation not supported\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } DRM_COPY_FROM_USER_IOCTL(swap, (drm_i915_vblank_swap_t __user *) data, @@ -591,7 +591,7 @@ int i915_vblank_swap(DRM_IOCTL_ARGS) _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS | _DRM_VBLANK_FLIP)) { DRM_ERROR("Invalid sequence type 0x%x\n", swap.seqtype); - return DRM_ERR(EINVAL); + return -EINVAL; } pipe = (swap.seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0; @@ -600,7 +600,7 @@ int i915_vblank_swap(DRM_IOCTL_ARGS) if (!(dev_priv->vblank_pipe & (1 << pipe))) { DRM_ERROR("Invalid pipe %d\n", pipe); - return DRM_ERR(EINVAL); + return -EINVAL; } curseq = atomic_read(pipe ? &dev->vbl_received2 : &dev->vbl_received); @@ -613,7 +613,7 @@ int i915_vblank_swap(DRM_IOCTL_ARGS) swap.sequence = curseq + 1; } else { DRM_DEBUG("Missed target sequence\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } } @@ -634,7 +634,7 @@ int i915_vblank_swap(DRM_IOCTL_ARGS) irqflags); DRM_DEBUG("Invalid drawable ID %d\n", swap.drawable); - return DRM_ERR(EINVAL); + return -EINVAL; } i915_dispatch_vsync_flip(dev, drw, pipe); @@ -664,14 +664,14 @@ int i915_vblank_swap(DRM_IOCTL_ARGS) if (dev_priv->swaps_pending >= 100) { DRM_DEBUG("Too many swaps queued\n"); - return DRM_ERR(EBUSY); + return -EBUSY; } vbl_swap = drm_calloc(1, sizeof(vbl_swap), DRM_MEM_DRIVER); if (!vbl_swap) { DRM_ERROR("Failed to allocate memory to queue swap\n"); - return DRM_ERR(ENOMEM); + return -ENOMEM; } DRM_DEBUG("\n"); diff --git a/shared-core/i915_mem.c b/shared-core/i915_mem.c index 582687ad..381562d8 100644 --- a/shared-core/i915_mem.c +++ b/shared-core/i915_mem.c @@ -276,7 +276,7 @@ int i915_mem_alloc(DRM_IOCTL_ARGS) if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); - return DRM_ERR(EINVAL); + return -EINVAL; } DRM_COPY_FROM_USER_IOCTL(alloc, (drm_i915_mem_alloc_t __user *) data, @@ -284,7 +284,7 @@ int i915_mem_alloc(DRM_IOCTL_ARGS) heap = get_heap(dev_priv, alloc.region); if (!heap || !*heap) - return DRM_ERR(EFAULT); + return -EFAULT; /* Make things easier on ourselves: all allocations at least * 4k aligned. 
@@ -295,13 +295,13 @@ int i915_mem_alloc(DRM_IOCTL_ARGS) block = alloc_block(*heap, alloc.size, alloc.alignment, filp); if (!block) - return DRM_ERR(ENOMEM); + return -ENOMEM; mark_block(dev, block, 1); if (DRM_COPY_TO_USER(alloc.region_offset, &block->start, sizeof(int))) { DRM_ERROR("copy_to_user\n"); - return DRM_ERR(EFAULT); + return -EFAULT; } return 0; @@ -316,7 +316,7 @@ int i915_mem_free(DRM_IOCTL_ARGS) if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); - return DRM_ERR(EINVAL); + return -EINVAL; } DRM_COPY_FROM_USER_IOCTL(memfree, (drm_i915_mem_free_t __user *) data, @@ -324,14 +324,14 @@ int i915_mem_free(DRM_IOCTL_ARGS) heap = get_heap(dev_priv, memfree.region); if (!heap || !*heap) - return DRM_ERR(EFAULT); + return -EFAULT; block = find_block(*heap, memfree.region_offset); if (!block) - return DRM_ERR(EFAULT); + return -EFAULT; if (block->filp != filp) - return DRM_ERR(EPERM); + return -EPERM; mark_block(dev, block, 0); free_block(block); @@ -347,7 +347,7 @@ int i915_mem_init_heap(DRM_IOCTL_ARGS) if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); - return DRM_ERR(EINVAL); + return -EINVAL; } DRM_COPY_FROM_USER_IOCTL(initheap, @@ -356,11 +356,11 @@ int i915_mem_init_heap(DRM_IOCTL_ARGS) heap = get_heap(dev_priv, initheap.region); if (!heap) - return DRM_ERR(EFAULT); + return -EFAULT; if (*heap) { DRM_ERROR("heap already initialized?"); - return DRM_ERR(EFAULT); + return -EFAULT; } return init_heap(heap, initheap.start, initheap.size); @@ -375,7 +375,7 @@ int i915_mem_destroy_heap( DRM_IOCTL_ARGS ) if ( !dev_priv ) { DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); - return DRM_ERR(EINVAL); + return -EINVAL; } DRM_COPY_FROM_USER_IOCTL( destroyheap, (drm_i915_mem_destroy_heap_t *)data, @@ -384,12 +384,12 @@ int i915_mem_destroy_heap( DRM_IOCTL_ARGS ) heap = get_heap( dev_priv, destroyheap.region ); if (!heap) { DRM_ERROR("get_heap failed"); - return DRM_ERR(EFAULT); + return -EFAULT; } if (!*heap) { DRM_ERROR("heap not initialized?"); - return DRM_ERR(EFAULT); + return -EFAULT; } i915_mem_takedown( heap ); diff --git a/shared-core/mach64_dma.c b/shared-core/mach64_dma.c index d833475f..fec73076 100644 --- a/shared-core/mach64_dma.c +++ b/shared-core/mach64_dma.c @@ -70,7 +70,7 @@ int mach64_do_wait_for_fifo(drm_mach64_private_t * dev_priv, int entries) DRM_INFO("%s failed! slots=%d entries=%d\n", __FUNCTION__, slots, entries); - return DRM_ERR(EBUSY); + return -EBUSY; } /** @@ -94,7 +94,7 @@ int mach64_do_wait_for_idle(drm_mach64_private_t * dev_priv) DRM_INFO("%s failed! GUI_STAT=0x%08x\n", __FUNCTION__, MACH64_READ(MACH64_GUI_STAT)); mach64_dump_ring_info(dev_priv); - return DRM_ERR(EBUSY); + return -EBUSY; } /** @@ -135,7 +135,7 @@ int mach64_wait_ring(drm_mach64_private_t * dev_priv, int n) /* FIXME: This is being ignored... */ DRM_ERROR("failed!\n"); mach64_dump_ring_info(dev_priv); - return DRM_ERR(EBUSY); + return -EBUSY; } /** @@ -172,7 +172,7 @@ static int mach64_ring_idle(drm_mach64_private_t * dev_priv) DRM_INFO("%s failed! 
GUI_STAT=0x%08x\n", __FUNCTION__, MACH64_READ(MACH64_GUI_STAT)); mach64_dump_ring_info(dev_priv); - return DRM_ERR(EBUSY); + return -EBUSY; } /** @@ -592,7 +592,7 @@ static int mach64_bm_dma_test(struct drm_device * dev) drm_pci_alloc(dev, 0x1000, 0x1000, 0xfffffffful); if (!cpu_addr_dmah) { DRM_INFO("data-memory allocation failed!\n"); - return DRM_ERR(ENOMEM); + return -ENOMEM; } else { data = (u32 *) cpu_addr_dmah->vaddr; data_addr = (u32) cpu_addr_dmah->busaddr; @@ -624,7 +624,7 @@ static int mach64_bm_dma_test(struct drm_device * dev) mach64_do_engine_reset(dev_priv); DRM_INFO("freeing data buffer memory.\n"); drm_pci_free(dev, cpu_addr_dmah); - return DRM_ERR(EIO); + return -EIO; } } @@ -762,7 +762,7 @@ static int mach64_do_dma_init(struct drm_device * dev, drm_mach64_init_t * init) dev_priv = drm_alloc(sizeof(drm_mach64_private_t), DRM_MEM_DRIVER); if (dev_priv == NULL) - return DRM_ERR(ENOMEM); + return -ENOMEM; memset(dev_priv, 0, sizeof(drm_mach64_private_t)); @@ -797,21 +797,21 @@ static int mach64_do_dma_init(struct drm_device * dev, drm_mach64_init_t * init) DRM_ERROR("can not find sarea!\n"); dev->dev_private = (void *)dev_priv; mach64_do_cleanup_dma(dev); - return DRM_ERR(EINVAL); + return -EINVAL; } dev_priv->fb = drm_core_findmap(dev, init->fb_offset); if (!dev_priv->fb) { DRM_ERROR("can not find frame buffer map!\n"); dev->dev_private = (void *)dev_priv; mach64_do_cleanup_dma(dev); - return DRM_ERR(EINVAL); + return -EINVAL; } dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset); if (!dev_priv->mmio) { DRM_ERROR("can not find mmio map!\n"); dev->dev_private = (void *)dev_priv; mach64_do_cleanup_dma(dev); - return DRM_ERR(EINVAL); + return -EINVAL; } dev_priv->ring_map = drm_core_findmap(dev, init->ring_offset); @@ -819,7 +819,7 @@ static int mach64_do_dma_init(struct drm_device * dev, drm_mach64_init_t * init) DRM_ERROR("can not find ring map!\n"); dev->dev_private = (void *)dev_priv; mach64_do_cleanup_dma(dev); - return DRM_ERR(EINVAL); + return -EINVAL; } dev_priv->sarea_priv = (drm_mach64_sarea_t *) @@ -832,7 +832,7 @@ static int mach64_do_dma_init(struct drm_device * dev, drm_mach64_init_t * init) " descriptor ring\n"); dev->dev_private = (void *)dev_priv; mach64_do_cleanup_dma(dev); - return DRM_ERR(ENOMEM); + return -ENOMEM; } dev->agp_buffer_token = init->buffers_offset; dev->agp_buffer_map = @@ -841,7 +841,7 @@ static int mach64_do_dma_init(struct drm_device * dev, drm_mach64_init_t * init) DRM_ERROR("can not find dma buffer map!\n"); dev->dev_private = (void *)dev_priv; mach64_do_cleanup_dma(dev); - return DRM_ERR(EINVAL); + return -EINVAL; } /* there might be a nicer way to do this - dev isn't passed all the way though the mach64 - DA */ @@ -853,7 +853,7 @@ static int mach64_do_dma_init(struct drm_device * dev, drm_mach64_init_t * init) " dma buffer\n"); dev->dev_private = (void *)dev_priv; mach64_do_cleanup_dma(dev); - return DRM_ERR(ENOMEM); + return -ENOMEM; } dev_priv->agp_textures = drm_core_findmap(dev, init->agp_textures_offset); @@ -861,7 +861,7 @@ static int mach64_do_dma_init(struct drm_device * dev, drm_mach64_init_t * init) DRM_ERROR("can not find agp texture region!\n"); dev->dev_private = (void *)dev_priv; mach64_do_cleanup_dma(dev); - return DRM_ERR(EINVAL); + return -EINVAL; } } @@ -1035,7 +1035,7 @@ int mach64_do_dispatch_pseudo_dma(drm_mach64_private_t * dev_priv) head, ring->tail, buf_addr, (eol ? 
"eol" : "")); mach64_dump_ring_info(dev_priv); mach64_do_engine_reset(dev_priv); - return DRM_ERR(EINVAL); + return -EINVAL; } /* Hand feed the buffer to the card via MMIO, waiting for the fifo @@ -1177,7 +1177,7 @@ int mach64_dma_init(DRM_IOCTL_ARGS) return mach64_do_cleanup_dma(dev); } - return DRM_ERR(EINVAL); + return -EINVAL; } int mach64_dma_idle(DRM_IOCTL_ARGS) @@ -1239,7 +1239,7 @@ int mach64_init_freelist(struct drm_device * dev) (drm_mach64_freelist_t *) drm_alloc(sizeof(drm_mach64_freelist_t), DRM_MEM_BUFLISTS)) == NULL) - return DRM_ERR(ENOMEM); + return -ENOMEM; memset(entry, 0, sizeof(drm_mach64_freelist_t)); entry->buf = dma->buflist[i]; ptr = &entry->list; @@ -1438,7 +1438,7 @@ int mach64_freelist_put(drm_mach64_private_t * dev_priv, struct drm_buf * copy_b if (copy_buf == entry->buf) { DRM_ERROR("%s: Trying to release a pending buf\n", __FUNCTION__); - return DRM_ERR(EFAULT); + return -EFAULT; } } #endif @@ -1472,20 +1472,20 @@ static int mach64_dma_get_buffers(DRMFILE filp, struct drm_device * dev, buf = mach64_freelist_get(dev_priv); #if MACH64_EXTRA_CHECKING if (!buf) - return DRM_ERR(EFAULT); + return -EFAULT; #else if (!buf) - return DRM_ERR(EAGAIN); + return -EAGAIN; #endif buf->filp = filp; if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx, sizeof(buf->idx))) - return DRM_ERR(EFAULT); + return -EFAULT; if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total, sizeof(buf->total))) - return DRM_ERR(EFAULT); + return -EFAULT; d->granted_count++; } @@ -1508,7 +1508,7 @@ int mach64_dma_buffers(DRM_IOCTL_ARGS) if (d.send_count != 0) { DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", DRM_CURRENTPID, d.send_count); - return DRM_ERR(EINVAL); + return -EINVAL; } /* We'll send you buffers. @@ -1516,7 +1516,7 @@ int mach64_dma_buffers(DRM_IOCTL_ARGS) if (d.request_count < 0 || d.request_count > dma->buf_count) { DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", DRM_CURRENTPID, d.request_count, dma->buf_count); - ret = DRM_ERR(EINVAL); + ret = -EINVAL; } d.granted_count = 0; diff --git a/shared-core/mach64_drv.h b/shared-core/mach64_drv.h index a1b36751..5d83c861 100644 --- a/shared-core/mach64_drv.h +++ b/shared-core/mach64_drv.h @@ -819,14 +819,14 @@ static __inline__ int mach64_find_pending_buf_entry(drm_mach64_private_t * #if MACH64_EXTRA_CHECKING if (list_empty(&dev_priv->pending)) { DRM_ERROR("Empty pending list in %s\n", __FUNCTION__); - return DRM_ERR(EINVAL); + return -EINVAL; } #endif ptr = dev_priv->pending.prev; *entry = list_entry(ptr, drm_mach64_freelist_t, list); while ((*entry)->buf != buf) { if (ptr == &dev_priv->pending) { - return DRM_ERR(EFAULT); + return -EFAULT; } ptr = ptr->prev; *entry = list_entry(ptr, drm_mach64_freelist_t, list); @@ -852,12 +852,12 @@ do { \ if (_buf == NULL) { \ DRM_ERROR("%s: couldn't get buffer in DMAGETPTR\n", \ __FUNCTION__ ); \ - return DRM_ERR(EAGAIN); \ + return -EAGAIN; \ } \ if (_buf->pending) { \ DRM_ERROR("%s: pending buf in DMAGETPTR\n", \ __FUNCTION__ ); \ - return DRM_ERR(EFAULT); \ + return -EFAULT; \ } \ _buf->filp = filp; \ _outcount = 0; \ @@ -888,7 +888,7 @@ do { \ if (_buf->used <= 0) { \ DRM_ERROR( "DMAADVANCE() in %s: sending empty buf %d\n", \ __FUNCTION__, _buf->idx ); \ - return DRM_ERR(EFAULT); \ + return -EFAULT; \ } \ if (_buf->pending) { \ /* This is a resued buffer, so we need to find it in the pending list */ \ @@ -901,13 +901,13 @@ do { \ if (_entry->discard) { \ DRM_ERROR( "DMAADVANCE() in %s: sending discarded pending buf %d\n", \ __FUNCTION__, _buf->idx ); \ - return 
DRM_ERR(EFAULT); \ + return -EFAULT; \ } \ } else { \ if (list_empty(&dev_priv->placeholders)) { \ DRM_ERROR( "DMAADVANCE() in %s: empty placeholder list\n", \ __FUNCTION__ ); \ - return DRM_ERR(EFAULT); \ + return -EFAULT; \ } \ ptr = dev_priv->placeholders.next; \ list_del(ptr); \ @@ -983,12 +983,12 @@ do { \ if (_buf->used <= 0) { \ DRM_ERROR( "DMAADVANCEHOSTDATA() in %s: sending empty buf %d\n", \ __FUNCTION__, _buf->idx ); \ - return DRM_ERR(EFAULT); \ + return -EFAULT; \ } \ if (list_empty(&dev_priv->placeholders)) { \ DRM_ERROR( "%s: empty placeholder list in DMAADVANCEHOSTDATA()\n", \ __FUNCTION__ ); \ - return DRM_ERR(EFAULT); \ + return -EFAULT; \ } \ \ ptr = dev_priv->placeholders.next; \ diff --git a/shared-core/mach64_state.c b/shared-core/mach64_state.c index 95ad1ec3..397faaaa 100644 --- a/shared-core/mach64_state.c +++ b/shared-core/mach64_state.c @@ -237,7 +237,7 @@ static int mach64_dma_dispatch_clear(DRMFILE filp, struct drm_device * dev, fb_bpp = MACH64_DATATYPE_ARGB8888; break; default: - return DRM_ERR(EINVAL); + return -EINVAL; } switch (dev_priv->depth_bpp) { case 16: @@ -248,7 +248,7 @@ static int mach64_dma_dispatch_clear(DRMFILE filp, struct drm_device * dev, depth_bpp = MACH64_DATATYPE_ARGB8888; break; default: - return DRM_ERR(EINVAL); + return -EINVAL; } if (!nbox) @@ -489,11 +489,11 @@ static __inline__ int copy_from_user_vertex(u32 *to, from = drm_alloc(bytes, DRM_MEM_DRIVER); if (from == NULL) - return DRM_ERR(ENOMEM); + return -ENOMEM; if (DRM_COPY_FROM_USER(from, ufrom, bytes)) { drm_free(from, bytes, DRM_MEM_DRIVER); - return DRM_ERR(EFAULT); + return -EFAULT; } orig_from = from; /* we'll be modifying the "from" ptr, so save it */ @@ -525,14 +525,14 @@ static __inline__ int copy_from_user_vertex(u32 *to, DRM_ERROR("%s: Got bad command: 0x%04x\n", __FUNCTION__, reg); drm_free(orig_from, bytes, DRM_MEM_DRIVER); - return DRM_ERR(EACCES); + return -EACCES; } } else { DRM_ERROR ("%s: Got bad command count(=%u) dwords remaining=%lu\n", __FUNCTION__, count, n); drm_free(orig_from, bytes, DRM_MEM_DRIVER); - return DRM_ERR(EINVAL); + return -EINVAL; } } @@ -541,7 +541,7 @@ static __inline__ int copy_from_user_vertex(u32 *to, return 0; else { DRM_ERROR("%s: Bad buf->used(=%lu)\n", __FUNCTION__, bytes); - return DRM_ERR(EINVAL); + return -EINVAL; } } @@ -568,7 +568,7 @@ static int mach64_dma_dispatch_vertex(DRMFILE filp, struct drm_device * dev, copy_buf = mach64_freelist_get(dev_priv); if (copy_buf == NULL) { DRM_ERROR("%s: couldn't get buffer\n", __FUNCTION__); - return DRM_ERR(EAGAIN); + return -EAGAIN; } verify_ret = copy_from_user_vertex(GETBUFPTR(copy_buf), buf, used); @@ -634,7 +634,7 @@ static __inline__ int copy_from_user_blit(u32 *to, to = (u32 *)((char *)to + MACH64_HOSTDATA_BLIT_OFFSET); if (DRM_COPY_FROM_USER(to, ufrom, bytes)) { - return DRM_ERR(EFAULT); + return -EFAULT; } return 0; @@ -671,7 +671,7 @@ static int mach64_dma_dispatch_blit(DRMFILE filp, struct drm_device * dev, break; default: DRM_ERROR("invalid blit format %d\n", blit->format); - return DRM_ERR(EINVAL); + return -EINVAL; } /* Set buf->used to the bytes of blit data based on the blit dimensions @@ -684,13 +684,13 @@ static int mach64_dma_dispatch_blit(DRMFILE filp, struct drm_device * dev, if (used <= 0 || used > MACH64_BUFFER_SIZE - MACH64_HOSTDATA_BLIT_OFFSET) { DRM_ERROR("Invalid blit size: %lu bytes\n", used); - return DRM_ERR(EINVAL); + return -EINVAL; } copy_buf = mach64_freelist_get(dev_priv); if (copy_buf == NULL) { DRM_ERROR("%s: couldn't get buffer\n", __FUNCTION__); - return 
DRM_ERR(EAGAIN); + return -EAGAIN; } verify_ret = copy_from_user_blit(GETBUFPTR(copy_buf), blit->buf, used); @@ -814,7 +814,7 @@ int mach64_dma_vertex(DRM_IOCTL_ARGS) if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); - return DRM_ERR(EINVAL); + return -EINVAL; } DRM_COPY_FROM_USER_IOCTL(vertex, (drm_mach64_vertex_t *) data, @@ -826,13 +826,13 @@ int mach64_dma_vertex(DRM_IOCTL_ARGS) if (vertex.prim < 0 || vertex.prim > MACH64_PRIM_POLYGON) { DRM_ERROR("buffer prim %d\n", vertex.prim); - return DRM_ERR(EINVAL); + return -EINVAL; } if (vertex.used > MACH64_BUFFER_SIZE || (vertex.used & 3) != 0) { DRM_ERROR("Invalid vertex buffer size: %lu bytes\n", vertex.used); - return DRM_ERR(EINVAL); + return -EINVAL; } if (sarea_priv->nbox > MACH64_NR_SAREA_CLIPRECTS) @@ -875,7 +875,7 @@ int mach64_get_param(DRM_IOCTL_ARGS) if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); - return DRM_ERR(EINVAL); + return -EINVAL; } DRM_COPY_FROM_USER_IOCTL(param, (drm_mach64_getparam_t *) data, @@ -891,12 +891,12 @@ int mach64_get_param(DRM_IOCTL_ARGS) value = dev->irq; break; default: - return DRM_ERR(EINVAL); + return -EINVAL; } if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) { DRM_ERROR("copy_to_user\n"); - return DRM_ERR(EFAULT); + return -EFAULT; } return 0; diff --git a/shared-core/mga_dma.c b/shared-core/mga_dma.c index 9bed3b34..cbcb6380 100644 --- a/shared-core/mga_dma.c +++ b/shared-core/mga_dma.c @@ -71,7 +71,7 @@ int mga_do_wait_for_idle(drm_mga_private_t * dev_priv) DRM_ERROR("failed!\n"); DRM_INFO(" status=0x%08x\n", status); #endif - return DRM_ERR(EBUSY); + return -EBUSY; } static int mga_do_dma_reset(drm_mga_private_t * dev_priv) @@ -256,7 +256,7 @@ static int mga_freelist_init(struct drm_device * dev, drm_mga_private_t * dev_pr dev_priv->head = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER); if (dev_priv->head == NULL) - return DRM_ERR(ENOMEM); + return -ENOMEM; memset(dev_priv->head, 0, sizeof(drm_mga_freelist_t)); SET_AGE(&dev_priv->head->age, MGA_BUFFER_USED, 0); @@ -267,7 +267,7 @@ static int mga_freelist_init(struct drm_device * dev, drm_mga_private_t * dev_pr entry = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER); if (entry == NULL) - return DRM_ERR(ENOMEM); + return -ENOMEM; memset(entry, 0, sizeof(drm_mga_freelist_t)); @@ -399,7 +399,7 @@ int mga_driver_load(struct drm_device *dev, unsigned long flags) dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER); if (!dev_priv) - return DRM_ERR(ENOMEM); + return -ENOMEM; dev->dev_private = (void *)dev_priv; memset(dev_priv, 0, sizeof(drm_mga_private_t)); @@ -579,7 +579,7 @@ static int mga_do_agp_dma_bootstrap(struct drm_device *dev, DRM_ERROR("failed to ioremap agp regions! 
(%p, %p, %p)\n", dev_priv->warp->handle, dev_priv->primary->handle, dev->agp_buffer_map->handle); - return DRM_ERR(ENOMEM); + return -ENOMEM; } dev_priv->dma_access = MGA_PAGPXFER; @@ -616,7 +616,7 @@ static int mga_do_pci_dma_bootstrap(struct drm_device * dev, if (dev->dma == NULL) { DRM_ERROR("dev->dma is NULL\n"); - return DRM_ERR(EFAULT); + return -EFAULT; } /* Make drm_addbufs happy by not trying to create a mapping for less @@ -651,7 +651,7 @@ static int mga_do_pci_dma_bootstrap(struct drm_device * dev, if (err != 0) { DRM_ERROR("Unable to allocate primary DMA region: %d\n", err); - return DRM_ERR(ENOMEM); + return -ENOMEM; } if (dev_priv->primary->size != dma_bs->primary_size) { @@ -833,7 +833,7 @@ static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init) dev_priv->sarea = drm_getsarea(dev); if (!dev_priv->sarea) { DRM_ERROR("failed to find sarea!\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } if (! dev_priv->used_new_dma_init) { @@ -844,28 +844,28 @@ static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init) dev_priv->status = drm_core_findmap(dev, init->status_offset); if (!dev_priv->status) { DRM_ERROR("failed to find status page!\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset); if (!dev_priv->mmio) { DRM_ERROR("failed to find mmio region!\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } dev_priv->warp = drm_core_findmap(dev, init->warp_offset); if (!dev_priv->warp) { DRM_ERROR("failed to find warp microcode region!\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } dev_priv->primary = drm_core_findmap(dev, init->primary_offset); if (!dev_priv->primary) { DRM_ERROR("failed to find primary dma region!\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } dev->agp_buffer_token = init->buffers_offset; dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); if (!dev->agp_buffer_map) { DRM_ERROR("failed to find dma buffer region!\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } drm_core_ioremap(dev_priv->warp, dev); @@ -883,7 +883,7 @@ static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init) ((dev->agp_buffer_map == NULL) || (dev->agp_buffer_map->handle == NULL)))) { DRM_ERROR("failed to ioremap agp regions!\n"); - return DRM_ERR(ENOMEM); + return -ENOMEM; } ret = mga_warp_install_microcode(dev_priv); @@ -933,7 +933,7 @@ static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init) if (mga_freelist_init(dev, dev_priv) < 0) { DRM_ERROR("could not initialize freelist\n"); - return DRM_ERR(ENOMEM); + return -ENOMEM; } return 0; @@ -1032,7 +1032,7 @@ int mga_dma_init(DRM_IOCTL_ARGS) return mga_do_cleanup_dma(dev, FULL_CLEANUP); } - return DRM_ERR(EINVAL); + return -EINVAL; } /* ================================================================ @@ -1097,16 +1097,16 @@ static int mga_dma_get_buffers(DRMFILE filp, struct drm_device * dev, struct drm for (i = d->granted_count; i < d->request_count; i++) { buf = mga_freelist_get(dev); if (!buf) - return DRM_ERR(EAGAIN); + return -EAGAIN; buf->filp = filp; if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx, sizeof(buf->idx))) - return DRM_ERR(EFAULT); + return -EFAULT; if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total, sizeof(buf->total))) - return DRM_ERR(EFAULT); + return -EFAULT; d->granted_count++; } @@ -1131,7 +1131,7 @@ int mga_dma_buffers(DRM_IOCTL_ARGS) if (d.send_count != 0) { DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", DRM_CURRENTPID, d.send_count); - return 
DRM_ERR(EINVAL); + return -EINVAL; } /* We'll send you buffers. @@ -1139,7 +1139,7 @@ int mga_dma_buffers(DRM_IOCTL_ARGS) if (d.request_count < 0 || d.request_count > dma->buf_count) { DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", DRM_CURRENTPID, d.request_count, dma->buf_count); - return DRM_ERR(EINVAL); + return -EINVAL; } WRAP_TEST_WITH_RETURN(dev_priv); diff --git a/shared-core/mga_drv.h b/shared-core/mga_drv.h index 10096a95..2da31194 100644 --- a/shared-core/mga_drv.h +++ b/shared-core/mga_drv.h @@ -245,7 +245,7 @@ do { \ dev_priv->prim.high_mark ) { \ if ( MGA_DMA_DEBUG ) \ DRM_INFO( "%s: wrap...\n", __FUNCTION__ ); \ - return DRM_ERR(EBUSY); \ + return -EBUSY; \ } \ } \ } while (0) @@ -256,7 +256,7 @@ do { \ if ( mga_do_wait_for_idle( dev_priv ) < 0 ) { \ if ( MGA_DMA_DEBUG ) \ DRM_INFO( "%s: wrap...\n", __FUNCTION__ ); \ - return DRM_ERR(EBUSY); \ + return -EBUSY; \ } \ mga_do_dma_wrap_end( dev_priv ); \ } \ diff --git a/shared-core/mga_state.c b/shared-core/mga_state.c index 6d93c9e4..f77883db 100644 --- a/shared-core/mga_state.c +++ b/shared-core/mga_state.c @@ -416,7 +416,7 @@ static int mga_verify_context(drm_mga_private_t * dev_priv) ctx->dstorg, dev_priv->front_offset, dev_priv->back_offset); ctx->dstorg = 0; - return DRM_ERR(EINVAL); + return -EINVAL; } return 0; @@ -435,7 +435,7 @@ static int mga_verify_tex(drm_mga_private_t * dev_priv, int unit) if (org == (MGA_TEXORGMAP_SYSMEM | MGA_TEXORGACC_PCI)) { DRM_ERROR("*** bad TEXORG: 0x%x, unit %d\n", tex->texorg, unit); tex->texorg = 0; - return DRM_ERR(EINVAL); + return -EINVAL; } return 0; @@ -477,13 +477,13 @@ static int mga_verify_iload(drm_mga_private_t * dev_priv, dstorg + length > (dev_priv->texture_offset + dev_priv->texture_size)) { DRM_ERROR("*** bad iload DSTORG: 0x%x\n", dstorg); - return DRM_ERR(EINVAL); + return -EINVAL; } if (length & MGA_ILOAD_MASK) { DRM_ERROR("*** bad iload length: 0x%x\n", length & MGA_ILOAD_MASK); - return DRM_ERR(EINVAL); + return -EINVAL; } return 0; @@ -495,7 +495,7 @@ static int mga_verify_blit(drm_mga_private_t * dev_priv, if ((srcorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM) || (dstorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM)) { DRM_ERROR("*** bad blit: src=0x%x dst=0x%x\n", srcorg, dstorg); - return DRM_ERR(EINVAL); + return -EINVAL; } return 0; } @@ -929,7 +929,7 @@ static int mga_dma_vertex(DRM_IOCTL_ARGS) sizeof(vertex)); if (vertex.idx < 0 || vertex.idx > dma->buf_count) - return DRM_ERR(EINVAL); + return -EINVAL; buf = dma->buflist[vertex.idx]; buf_priv = buf->dev_private; @@ -943,7 +943,7 @@ static int mga_dma_vertex(DRM_IOCTL_ARGS) buf_priv->dispatched = 0; mga_freelist_put(dev, buf); } - return DRM_ERR(EINVAL); + return -EINVAL; } WRAP_TEST_WITH_RETURN(dev_priv); @@ -969,7 +969,7 @@ static int mga_dma_indices(DRM_IOCTL_ARGS) sizeof(indices)); if (indices.idx < 0 || indices.idx > dma->buf_count) - return DRM_ERR(EINVAL); + return -EINVAL; buf = dma->buflist[indices.idx]; buf_priv = buf->dev_private; @@ -983,7 +983,7 @@ static int mga_dma_indices(DRM_IOCTL_ARGS) buf_priv->dispatched = 0; mga_freelist_put(dev, buf); } - return DRM_ERR(EINVAL); + return -EINVAL; } WRAP_TEST_WITH_RETURN(dev_priv); @@ -1012,18 +1012,18 @@ static int mga_dma_iload(DRM_IOCTL_ARGS) if (mga_do_wait_for_idle(dev_priv) < 0) { if (MGA_DMA_DEBUG) DRM_INFO("%s: -EBUSY\n", __FUNCTION__); - return DRM_ERR(EBUSY); + return -EBUSY; } #endif if (iload.idx < 0 || iload.idx > dma->buf_count) - return DRM_ERR(EINVAL); + return -EINVAL; buf = dma->buflist[iload.idx]; buf_priv = 
buf->dev_private; if (mga_verify_iload(dev_priv, iload.dstorg, iload.length)) { mga_freelist_put(dev, buf); - return DRM_ERR(EINVAL); + return -EINVAL; } WRAP_TEST_WITH_RETURN(dev_priv); @@ -1054,7 +1054,7 @@ static int mga_dma_blit(DRM_IOCTL_ARGS) sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS; if (mga_verify_blit(dev_priv, blit.srcorg, blit.dstorg)) - return DRM_ERR(EINVAL); + return -EINVAL; WRAP_TEST_WITH_RETURN(dev_priv); @@ -1076,7 +1076,7 @@ static int mga_getparam(DRM_IOCTL_ARGS) if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); - return DRM_ERR(EINVAL); + return -EINVAL; } DRM_COPY_FROM_USER_IOCTL(param, (drm_mga_getparam_t __user *) data, @@ -1092,12 +1092,12 @@ static int mga_getparam(DRM_IOCTL_ARGS) value = dev_priv->chipset; break; default: - return DRM_ERR(EINVAL); + return -EINVAL; } if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) { DRM_ERROR("copy_to_user\n"); - return DRM_ERR(EFAULT); + return -EFAULT; } return 0; @@ -1112,7 +1112,7 @@ static int mga_set_fence(DRM_IOCTL_ARGS) if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); - return DRM_ERR(EINVAL); + return -EINVAL; } DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); @@ -1144,7 +1144,7 @@ static int mga_wait_fence(DRM_IOCTL_ARGS) if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); - return DRM_ERR(EINVAL); + return -EINVAL; } DRM_COPY_FROM_USER_IOCTL(fence, (u32 __user *) data, sizeof(u32)); diff --git a/shared-core/mga_warp.c b/shared-core/mga_warp.c index 05ed8058..9a44bddb 100644 --- a/shared-core/mga_warp.c +++ b/shared-core/mga_warp.c @@ -146,7 +146,7 @@ int mga_warp_install_microcode(drm_mga_private_t * dev_priv) if (size > dev_priv->warp->size) { DRM_ERROR("microcode too large! (%u > %lu)\n", size, dev_priv->warp->size); - return DRM_ERR(ENOMEM); + return -ENOMEM; } switch (dev_priv->chipset) { @@ -156,7 +156,7 @@ int mga_warp_install_microcode(drm_mga_private_t * dev_priv) case MGA_CARD_TYPE_G200: return mga_warp_install_g200_microcode(dev_priv); default: - return DRM_ERR(EINVAL); + return -EINVAL; } } @@ -182,7 +182,7 @@ int mga_warp_init(drm_mga_private_t * dev_priv) MGA_WRITE(MGA_WVRTXSZ, 7); break; default: - return DRM_ERR(EINVAL); + return -EINVAL; } MGA_WRITE(MGA_WMISC, (MGA_WUCODECACHE_ENABLE | @@ -191,7 +191,7 @@ int mga_warp_init(drm_mga_private_t * dev_priv) if (wmisc != WMISC_EXPECTED) { DRM_ERROR("WARP engine config failed! 0x%x != 0x%x\n", wmisc, WMISC_EXPECTED); - return DRM_ERR(EINVAL); + return -EINVAL; } return 0; diff --git a/shared-core/nouveau_fifo.c b/shared-core/nouveau_fifo.c index 230c8298..e3a6674d 100644 --- a/shared-core/nouveau_fifo.c +++ b/shared-core/nouveau_fifo.c @@ -207,7 +207,7 @@ nouveau_fifo_cmdbuf_alloc(struct drm_device *dev, int channel) (DRMFILE)-2); if (!cb) { DRM_ERROR("Couldn't allocate DMA command buffer.\n"); - return DRM_ERR(ENOMEM); + return -ENOMEM; } if (cb->flags & NOUVEAU_MEM_AGP) { @@ -289,13 +289,13 @@ int nouveau_fifo_alloc(struct drm_device *dev, int *chan_ret, DRMFILE filp, } /* no more fifos. you lost. 
*/ if (channel==nouveau_fifo_number(dev)) - return DRM_ERR(EINVAL); + return -EINVAL; (*chan_ret) = channel; dev_priv->fifos[channel] = drm_calloc(1, sizeof(struct nouveau_fifo), DRM_MEM_DRIVER); if (!dev_priv->fifos[channel]) - return DRM_ERR(ENOMEM); + return -ENOMEM; dev_priv->fifo_alloc_count++; chan = dev_priv->fifos[channel]; chan->filp = filp; @@ -483,7 +483,7 @@ static int nouveau_ioctl_fifo_alloc(DRM_IOCTL_ARGS) sizeof(init)); if (init.fb_ctxdma_handle == ~0 || init.tt_ctxdma_handle == ~0) - return DRM_ERR(EINVAL); + return -EINVAL; res = nouveau_fifo_alloc(dev, &init.channel, filp, init.fb_ctxdma_handle, @@ -511,7 +511,7 @@ static int nouveau_ioctl_fifo_alloc(DRM_IOCTL_ARGS) entry = drm_find_matching_map(dev, chan->regs); if (!entry) - return DRM_ERR(EINVAL); + return -EINVAL; init.ctrl = entry->user_token; /* pass back FIFO map info to the caller */ diff --git a/shared-core/nouveau_mem.c b/shared-core/nouveau_mem.c index 7a923e17..143378ff 100644 --- a/shared-core/nouveau_mem.c +++ b/shared-core/nouveau_mem.c @@ -138,12 +138,12 @@ int nouveau_mem_init_heap(struct mem_block **heap, uint64_t start, struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFS); if (!blocks) - return DRM_ERR(ENOMEM); + return -ENOMEM; *heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFS); if (!*heap) { drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFS); - return DRM_ERR(ENOMEM); + return -ENOMEM; } blocks->start = start; @@ -363,13 +363,13 @@ int nouveau_mem_init(struct drm_device *dev) * So we create a second FB heap for that type of memory */ if (nouveau_mem_init_heap(&dev_priv->fb_heap, 0, 256*1024*1024)) - return DRM_ERR(ENOMEM); + return -ENOMEM; if (nouveau_mem_init_heap(&dev_priv->fb_nomap_heap, 256*1024*1024, fb_size-256*1024*1024)) - return DRM_ERR(ENOMEM); + return -ENOMEM; } else { if (nouveau_mem_init_heap(&dev_priv->fb_heap, 0, fb_size)) - return DRM_ERR(ENOMEM); + return -ENOMEM; dev_priv->fb_nomap_heap=NULL; } @@ -549,7 +549,7 @@ int nouveau_ioctl_mem_alloc(DRM_IOCTL_ARGS) if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); - return DRM_ERR(EINVAL); + return -EINVAL; } DRM_COPY_FROM_USER_IOCTL(alloc, @@ -558,7 +558,7 @@ int nouveau_ioctl_mem_alloc(DRM_IOCTL_ARGS) block=nouveau_mem_alloc(dev, alloc.alignment, alloc.size, alloc.flags, filp); if (!block) - return DRM_ERR(ENOMEM); + return -ENOMEM; alloc.map_handle=block->map_handle; alloc.offset=block->start; alloc.flags=block->flags; @@ -588,9 +588,9 @@ int nouveau_ioctl_mem_free(DRM_IOCTL_ARGS) else if (memfree.flags&NOUVEAU_MEM_PCI) block = find_block(dev_priv->pci_heap, memfree.offset); if (!block) - return DRM_ERR(EFAULT); + return -EFAULT; if (block->filp != filp) - return DRM_ERR(EPERM); + return -EPERM; nouveau_mem_free(dev, block); return 0; diff --git a/shared-core/nouveau_notifier.c b/shared-core/nouveau_notifier.c index 238e3c8b..425e471c 100644 --- a/shared-core/nouveau_notifier.c +++ b/shared-core/nouveau_notifier.c @@ -46,7 +46,7 @@ nouveau_notifier_init_channel(struct drm_device *dev, int channel, DRMFILE filp) chan->notifier_block = nouveau_mem_alloc(dev, 0, PAGE_SIZE, flags,filp); if (!chan->notifier_block) - return DRM_ERR(ENOMEM); + return -ENOMEM; ret = nouveau_mem_init_heap(&chan->notifier_heap, 0, chan->notifier_block->size); @@ -84,13 +84,13 @@ nouveau_notifier_alloc(struct drm_device *dev, int channel, uint32_t handle, if (!chan->notifier_heap) { DRM_ERROR("Channel %d doesn't have a notifier heap!\n", channel); - return DRM_ERR(EINVAL); + return -EINVAL; } mem = 
nouveau_mem_alloc_block(chan->notifier_heap, 32, 0, chan->filp); if (!mem) { DRM_ERROR("Channel %d notifier block full\n", channel); - return DRM_ERR(ENOMEM); + return -ENOMEM; } mem->flags = NOUVEAU_MEM_NOTIFIER; @@ -102,7 +102,7 @@ nouveau_notifier_alloc(struct drm_device *dev, int channel, uint32_t handle, } else { DRM_ERROR("Bad DMA target, flags 0x%08x!\n", chan->notifier_block->flags); - return DRM_ERR(EINVAL); + return -EINVAL; } if ((ret = nouveau_gpuobj_dma_new(dev, channel, NV_CLASS_DMA_IN_MEMORY, @@ -138,7 +138,7 @@ nouveau_ioctl_notifier_alloc(DRM_IOCTL_ARGS) if (!nouveau_fifo_owner(dev, filp, na.channel)) { DRM_ERROR("pid %d doesn't own channel %d\n", DRM_CURRENTPID, na.channel); - return DRM_ERR(EPERM); + return -EPERM; } ret = nouveau_notifier_alloc(dev, na.channel, na.handle, diff --git a/shared-core/nouveau_object.c b/shared-core/nouveau_object.c index f0025d7a..30d515f0 100644 --- a/shared-core/nouveau_object.c +++ b/shared-core/nouveau_object.c @@ -107,7 +107,7 @@ nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) if (!ramht) { DRM_ERROR("No hash table!\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } if (dev_priv->card_type < NV_40) { @@ -142,7 +142,7 @@ nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) } while (co != ho); DRM_ERROR("RAMHT space exhausted. ch=%d\n", ref->channel); - return DRM_ERR(ENOMEM); + return -ENOMEM; } static void @@ -194,17 +194,17 @@ nouveau_gpuobj_new(struct drm_device *dev, int channel, int size, int align, channel, size, align, flags); if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL) - return DRM_ERR(EINVAL); + return -EINVAL; if (channel >= 0) { if (channel > nouveau_fifo_number(dev)) - return DRM_ERR(EINVAL); + return -EINVAL; chan = dev_priv->fifos[channel]; } gpuobj = drm_calloc(1, sizeof(*gpuobj), DRM_MEM_DRIVER); if (!gpuobj) - return DRM_ERR(ENOMEM); + return -ENOMEM; DRM_DEBUG("gpuobj %p\n", gpuobj); gpuobj->flags = flags; gpuobj->im_channel = channel; @@ -230,7 +230,7 @@ nouveau_gpuobj_new(struct drm_device *dev, int channel, int size, int align, if (!pramin) { DRM_ERROR("No PRAMIN heap!\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } if (!chan && (ret = engine->instmem.populate(dev, gpuobj, &size))) { @@ -244,7 +244,7 @@ nouveau_gpuobj_new(struct drm_device *dev, int channel, int size, int align, (DRMFILE)-2); if (!gpuobj->im_pramin) { nouveau_gpuobj_del(dev, &gpuobj); - return DRM_ERR(ENOMEM); + return -ENOMEM; } gpuobj->im_pramin->flags = NOUVEAU_MEM_INSTANCE; @@ -294,12 +294,12 @@ int nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj) DRM_DEBUG("gpuobj %p\n", pgpuobj ? 
*pgpuobj : NULL); if (!dev_priv || !pgpuobj || !(*pgpuobj)) - return DRM_ERR(EINVAL); + return -EINVAL; gpuobj = *pgpuobj; if (gpuobj->refcount != 0) { DRM_ERROR("gpuobj refcount is %d\n", gpuobj->refcount); - return DRM_ERR(EINVAL); + return -EINVAL; } engine->instmem.clear(dev, gpuobj); @@ -340,7 +340,7 @@ nouveau_gpuobj_instance_get(struct drm_device *dev, int channel, if ((channel > 0) && gpuobj->im_channel != channel) { DRM_ERROR("Channel mismatch: obj %d, ref %d\n", gpuobj->im_channel, channel); - return DRM_ERR(EINVAL); + return -EINVAL; } /* NV50 channel-local instance */ @@ -355,7 +355,7 @@ nouveau_gpuobj_instance_get(struct drm_device *dev, int channel, /* ...from global heap */ if (!gpuobj->im_backing) { DRM_ERROR("AII, no VRAM backing gpuobj\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } *inst = gpuobj->im_backing->start; return 0; @@ -367,7 +367,7 @@ nouveau_gpuobj_instance_get(struct drm_device *dev, int channel, return 0; } - return DRM_ERR(EINVAL); + return -EINVAL; } int @@ -383,15 +383,15 @@ nouveau_gpuobj_ref_add(struct drm_device *dev, int channel, uint32_t handle, DRM_DEBUG("ch%d h=0x%08x gpuobj=%p\n", channel, handle, gpuobj); if (!dev_priv || !gpuobj || (ref_ret && *ref_ret != NULL)) - return DRM_ERR(EINVAL); + return -EINVAL; if (channel >= 0) { if (channel > nouveau_fifo_number(dev)) - return DRM_ERR(EINVAL); + return -EINVAL; chan = dev_priv->fifos[channel]; } else if (!ref_ret) - return DRM_ERR(EINVAL); + return -EINVAL; ret = nouveau_gpuobj_instance_get(dev, channel, gpuobj, &instance); if (ret) @@ -399,7 +399,7 @@ nouveau_gpuobj_ref_add(struct drm_device *dev, int channel, uint32_t handle, ref = drm_calloc(1, sizeof(*ref), DRM_MEM_DRIVER); if (!ref) - return DRM_ERR(ENOMEM); + return -ENOMEM; ref->gpuobj = gpuobj; ref->channel = channel; ref->instance = instance; @@ -431,7 +431,7 @@ int nouveau_gpuobj_ref_del(struct drm_device *dev, struct nouveau_gpuobj_ref **p DRM_DEBUG("ref %p\n", pref ? 
*pref : NULL); if (!dev || !pref || *pref == NULL) - return DRM_ERR(EINVAL); + return -EINVAL; ref = *pref; if (ref->handle != ~0) @@ -487,7 +487,7 @@ nouveau_gpuobj_ref_find(struct drm_device *dev, int channel, uint32_t handle, ref = ref->next; } - return DRM_ERR(EINVAL); + return -EINVAL; } int @@ -504,7 +504,7 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t offset, uint32_t size, gpuobj = drm_calloc(1, sizeof(*gpuobj), DRM_MEM_DRIVER); if (!gpuobj) - return DRM_ERR(ENOMEM); + return -ENOMEM; DRM_DEBUG("gpuobj %p\n", gpuobj); gpuobj->im_channel = -1; gpuobj->flags = flags | NVOBJ_FLAG_FAKE; @@ -513,7 +513,7 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t offset, uint32_t size, DRM_MEM_DRIVER); if (!gpuobj->im_pramin) { nouveau_gpuobj_del(dev, &gpuobj); - return DRM_ERR(ENOMEM); + return -ENOMEM; } gpuobj->im_pramin->start = offset; gpuobj->im_pramin->size = size; @@ -650,7 +650,7 @@ nouveau_gpuobj_dma_new(struct drm_device *dev, int channel, int class, if ((idx + page_count) > dev->sg->pages) { DRM_ERROR("Requested page range exceedes " "allocated scatter-gather range!"); - return DRM_ERR(E2BIG); + return -E2BIG; } DRM_DEBUG("Creating PCI DMA object using virtual zone starting at %#llx, size %d\n", offset, (uint32_t)size); @@ -676,7 +676,7 @@ nouveau_gpuobj_dma_new(struct drm_device *dev, int channel, int class, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev->sg->busaddr[idx])) { - return DRM_ERR(ENOMEM); + return -ENOMEM; } } @@ -733,14 +733,14 @@ nouveau_gpuobj_gart_dma_new(struct drm_device *dev, int channel, *gpuobj = dev_priv->gart_info.sg_ctxdma; if (offset & ~0xffffffffULL) { DRM_ERROR("obj offset exceeds 32-bits\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } if (o_ret) *o_ret = (uint32_t)offset; - ret = (*gpuobj != NULL) ? 0 : DRM_ERR(EINVAL); + ret = (*gpuobj != NULL) ? 
0 : -EINVAL; } else { DRM_ERROR("Invalid GART type %d\n", dev_priv->gart_info.type); - return DRM_ERR(EINVAL); + return -EINVAL; } return ret; @@ -982,7 +982,7 @@ nouveau_gpuobj_channel_init(struct drm_device *dev, int channel, NV_DMA_TARGET_PCI_NONLINEAR, &tt); } else { DRM_ERROR("Invalid GART type %d\n", dev_priv->gart_info.type); - ret = DRM_ERR(EINVAL); + ret = -EINVAL; } if (ret) { @@ -1038,15 +1038,15 @@ int nouveau_ioctl_grobj_alloc(DRM_IOCTL_ARGS) if (!nouveau_fifo_owner(dev, filp, init.channel)) { DRM_ERROR("pid %d doesn't own channel %d\n", DRM_CURRENTPID, init.channel); - return DRM_ERR(EINVAL); + return -EINVAL; } //FIXME: check args, only allow trusted objects to be created if (init.handle == ~0) - return DRM_ERR(EINVAL); + return -EINVAL; if (nouveau_gpuobj_ref_find(dev, init.channel, init.handle, NULL) == 0) - return DRM_ERR(EEXIST); + return -EEXIST; if ((ret = nouveau_gpuobj_gr_new(dev, init.channel, init.class, &gr))) { DRM_ERROR("Error creating gr object: %d (%d/0x%08x)\n", diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c index a26ecea3..b6459957 100644 --- a/shared-core/nouveau_state.c +++ b/shared-core/nouveau_state.c @@ -383,11 +383,11 @@ int nouveau_load(struct drm_device *dev, unsigned long flags) struct drm_nouveau_private *dev_priv; if (flags==NV_UNKNOWN) - return DRM_ERR(EINVAL); + return -EINVAL; dev_priv = drm_calloc(1, sizeof(*dev_priv), DRM_MEM_DRIVER); if (!dev_priv) - return DRM_ERR(ENOMEM); + return -ENOMEM; dev_priv->card_type=flags&NOUVEAU_FAMILY; dev_priv->flags=flags&NOUVEAU_FLAGS; @@ -463,7 +463,7 @@ int nouveau_ioctl_getparam(DRM_IOCTL_ARGS) else { DRM_ERROR("Requested PCIGART address, while no PCIGART was created\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } break; case NOUVEAU_GETPARAM_FB_SIZE: @@ -474,7 +474,7 @@ int nouveau_ioctl_getparam(DRM_IOCTL_ARGS) break; default: DRM_ERROR("unknown parameter %lld\n", getparam.param); - return DRM_ERR(EINVAL); + return -EINVAL; } DRM_COPY_TO_USER_IOCTL((struct drm_nouveau_getparam __user *)data, @@ -503,7 +503,7 @@ int nouveau_ioctl_setparam(DRM_IOCTL_ARGS) default: DRM_ERROR("invalid CMDBUF_LOCATION value=%lld\n", setparam.value); - return DRM_ERR(EINVAL); + return -EINVAL; } dev_priv->config.cmdbuf.location = setparam.value; break; @@ -512,7 +512,7 @@ int nouveau_ioctl_setparam(DRM_IOCTL_ARGS) break; default: DRM_ERROR("unknown parameter %lld\n", setparam.param); - return DRM_ERR(EINVAL); + return -EINVAL; } return 0; diff --git a/shared-core/nv04_instmem.c b/shared-core/nv04_instmem.c index 7cf06269..35b20abd 100644 --- a/shared-core/nv04_instmem.c +++ b/shared-core/nv04_instmem.c @@ -126,7 +126,7 @@ int nv04_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, uint32_t *sz) { if (gpuobj->im_backing) - return DRM_ERR(EINVAL); + return -EINVAL; return 0; } @@ -148,7 +148,7 @@ int nv04_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) { if (!gpuobj->im_pramin || gpuobj->im_bound) - return DRM_ERR(EINVAL); + return -EINVAL; gpuobj->im_bound = 1; return 0; @@ -158,7 +158,7 @@ int nv04_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) { if (gpuobj->im_bound == 0) - return DRM_ERR(EINVAL); + return -EINVAL; gpuobj->im_bound = 0; return 0; diff --git a/shared-core/nv20_graph.c b/shared-core/nv20_graph.c index 8af3bd12..1670c527 100644 --- a/shared-core/nv20_graph.c +++ b/shared-core/nv20_graph.c @@ -81,7 +81,7 @@ int nv20_graph_save_context(struct drm_device *dev, int channel) { instance = 
INSTANCE_RD(dev_priv->ctx_table->gpuobj, channel); if (!instance) { - return DRM_ERR(EINVAL); + return -EINVAL; } if (instance != (chan->ramin_grctx->instance >> 4)) DRM_ERROR("nv20_graph_save_context : bad instance\n"); @@ -102,7 +102,7 @@ int nv20_graph_load_context(struct drm_device *dev, int channel) { instance = INSTANCE_RD(dev_priv->ctx_table->gpuobj, channel); if (!instance) { - return DRM_ERR(EINVAL); + return -EINVAL; } if (instance != (chan->ramin_grctx->instance >> 4)) DRM_ERROR("nv20_graph_load_context_current : bad instance\n"); diff --git a/shared-core/nv30_graph.c b/shared-core/nv30_graph.c index d7138772..4ed2e2ba 100644 --- a/shared-core/nv30_graph.c +++ b/shared-core/nv30_graph.c @@ -156,7 +156,7 @@ nouveau_graph_wait_idle(struct drm_device *dev) if (NV_READ(0x400700)) { DRM_ERROR("timeout!\n"); - return DRM_ERR(EBUSY); + return -EBUSY; } return 0; } @@ -168,7 +168,7 @@ int nv30_graph_load_context(struct drm_device *dev, int channel) uint32_t inst; if (!chan->ramin_grctx) - return DRM_ERR(EINVAL); + return -EINVAL; inst = chan->ramin_grctx->instance >> 4; NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst); @@ -185,7 +185,7 @@ int nv30_graph_save_context(struct drm_device *dev, int channel) uint32_t inst; if (!chan->ramin_grctx) - return DRM_ERR(EINVAL); + return -EINVAL; inst = chan->ramin_grctx->instance >> 4; NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst); diff --git a/shared-core/nv40_graph.c b/shared-core/nv40_graph.c index d8fccb7e..441dbae7 100644 --- a/shared-core/nv40_graph.c +++ b/shared-core/nv40_graph.c @@ -1317,7 +1317,7 @@ nv40_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save) DRM_ERROR("failed: inst=0x%08x save=%d\n", inst, save); DRM_ERROR("0x40030C = 0x%08x\n", NV_READ(NV40_PGRAPH_CTXCTL_030C)); - return DRM_ERR(EBUSY); + return -EBUSY; } return 0; @@ -1334,7 +1334,7 @@ nv40_graph_save_context(struct drm_device *dev, int channel) uint32_t inst; if (!chan->ramin_grctx) - return DRM_ERR(EINVAL); + return -EINVAL; inst = chan->ramin_grctx->instance >> 4; return nv40_graph_transfer_context(dev, inst, 1); @@ -1352,7 +1352,7 @@ nv40_graph_load_context(struct drm_device *dev, int channel) int ret; if (!chan->ramin_grctx) - return DRM_ERR(EINVAL); + return -EINVAL; inst = chan->ramin_grctx->instance >> 4; ret = nv40_graph_transfer_context(dev, inst, 0); diff --git a/shared-core/nv50_fifo.c b/shared-core/nv50_fifo.c index 4933bbf3..f7b98220 100644 --- a/shared-core/nv50_fifo.c +++ b/shared-core/nv50_fifo.c @@ -69,14 +69,14 @@ nv50_fifo_channel_enable(struct drm_device *dev, int channel) if (IS_G80) { if (!chan->ramin) - return DRM_ERR(EINVAL); + return -EINVAL; NV_WRITE(NV50_PFIFO_CTX_TABLE(channel), (chan->ramin->instance >> 12) | NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED); } else { if (!chan->ramfc) - return DRM_ERR(EINVAL); + return -EINVAL; NV_WRITE(NV50_PFIFO_CTX_TABLE(channel), (chan->ramfc->instance >> 8) | @@ -186,7 +186,7 @@ nv50_fifo_init(struct drm_device *dev) priv = drm_calloc(1, sizeof(*priv), DRM_MEM_DRIVER); if (!priv) - return DRM_ERR(ENOMEM); + return -ENOMEM; dev_priv->Engine.fifo.priv = priv; nv50_fifo_init_reset(dev); diff --git a/shared-core/nv50_graph.c b/shared-core/nv50_graph.c index 6a04c158..8df5df25 100644 --- a/shared-core/nv50_graph.c +++ b/shared-core/nv50_graph.c @@ -259,7 +259,7 @@ nv50_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save) DRM_ERROR("failed: inst=0x%08x save=%d\n", inst, save); DRM_ERROR("0x40030C = 0x%08x\n", NV_READ(NV40_PGRAPH_CTXCTL_030C)); - return DRM_ERR(EBUSY); + return 
-EBUSY; } return 0; diff --git a/shared-core/nv50_instmem.c b/shared-core/nv50_instmem.c index 027d3ffb..ad77f441 100644 --- a/shared-core/nv50_instmem.c +++ b/shared-core/nv50_instmem.c @@ -49,7 +49,7 @@ nv50_instmem_init(struct drm_device *dev) priv = drm_calloc(1, sizeof(*priv), DRM_MEM_DRIVER); if (!priv) - return DRM_ERR(ENOMEM); + return -ENOMEM; dev_priv->Engine.instmem.priv = priv; /* Save current state */ @@ -126,14 +126,14 @@ nv50_instmem_init(struct drm_device *dev) NV_WRITE(0x1700, pt >> 16); if (NV_READ(0x700000) != NV_RI32(0)) { DRM_ERROR("Failed to init PRAMIN page table\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } /* Create a heap to manage PRAMIN aperture allocations */ ret = nouveau_mem_init_heap(&dev_priv->ramin_heap, pts, as-pts); if (ret) { DRM_ERROR("Failed to init PRAMIN heap\n"); - return DRM_ERR(ENOMEM); + return -ENOMEM; } DRM_DEBUG("NV50: PRAMIN setup ok\n"); @@ -171,18 +171,18 @@ int nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, uint32_t *sz) { if (gpuobj->im_backing) - return DRM_ERR(EINVAL); + return -EINVAL; *sz = (*sz + (NV50_INSTMEM_PAGE_SIZE-1)) & ~(NV50_INSTMEM_PAGE_SIZE-1); if (*sz == 0) - return DRM_ERR(EINVAL); + return -EINVAL; gpuobj->im_backing = nouveau_mem_alloc(dev, NV50_INSTMEM_PAGE_SIZE, *sz, NOUVEAU_MEM_FB, (DRMFILE)-2); if (!gpuobj->im_backing) { DRM_ERROR("Couldn't allocate vram to back PRAMIN pages\n"); - return DRM_ERR(ENOMEM); + return -ENOMEM; } return 0; @@ -208,7 +208,7 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) uint32_t pte, pte_end, vram; if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound) - return DRM_ERR(EINVAL); + return -EINVAL; DRM_DEBUG("st=0x%0llx sz=0x%0llx\n", gpuobj->im_pramin->start, gpuobj->im_pramin->size); @@ -246,7 +246,7 @@ nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) uint32_t pte, pte_end; if (gpuobj->im_bound == 0) - return DRM_ERR(EINVAL); + return -EINVAL; pte = (gpuobj->im_pramin->start >> 12) << 3; pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte; diff --git a/shared-core/r128_cce.c b/shared-core/r128_cce.c index 167fc070..51b290b4 100644 --- a/shared-core/r128_cce.c +++ b/shared-core/r128_cce.c @@ -129,7 +129,7 @@ static int r128_do_pixcache_flush(drm_r128_private_t * dev_priv) #if R128_FIFO_DEBUG DRM_ERROR("failed!\n"); #endif - return DRM_ERR(EBUSY); + return -EBUSY; } static int r128_do_wait_for_fifo(drm_r128_private_t * dev_priv, int entries) @@ -146,7 +146,7 @@ static int r128_do_wait_for_fifo(drm_r128_private_t * dev_priv, int entries) #if R128_FIFO_DEBUG DRM_ERROR("failed!\n"); #endif - return DRM_ERR(EBUSY); + return -EBUSY; } static int r128_do_wait_for_idle(drm_r128_private_t * dev_priv) @@ -168,7 +168,7 @@ static int r128_do_wait_for_idle(drm_r128_private_t * dev_priv) #if R128_FIFO_DEBUG DRM_ERROR("failed!\n"); #endif - return DRM_ERR(EBUSY); + return -EBUSY; } /* ================================================================ @@ -227,7 +227,7 @@ int r128_do_cce_idle(drm_r128_private_t * dev_priv) DRM_ERROR("failed!\n"); r128_status(dev_priv); #endif - return DRM_ERR(EBUSY); + return -EBUSY; } /* Start the Concurrent Command Engine. 
@@ -355,7 +355,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init) dev_priv = drm_alloc(sizeof(drm_r128_private_t), DRM_MEM_DRIVER); if (dev_priv == NULL) - return DRM_ERR(ENOMEM); + return -ENOMEM; memset(dev_priv, 0, sizeof(drm_r128_private_t)); @@ -365,7 +365,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init) DRM_ERROR("PCI GART memory not allocated!\n"); dev->dev_private = (void *)dev_priv; r128_do_cleanup_cce(dev); - return DRM_ERR(EINVAL); + return -EINVAL; } dev_priv->usec_timeout = init->usec_timeout; @@ -374,7 +374,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init) DRM_DEBUG("TIMEOUT problem!\n"); dev->dev_private = (void *)dev_priv; r128_do_cleanup_cce(dev); - return DRM_ERR(EINVAL); + return -EINVAL; } dev_priv->cce_mode = init->cce_mode; @@ -394,7 +394,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init) DRM_DEBUG("Bad cce_mode!\n"); dev->dev_private = (void *)dev_priv; r128_do_cleanup_cce(dev); - return DRM_ERR(EINVAL); + return -EINVAL; } switch (init->cce_mode) { @@ -461,7 +461,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init) DRM_ERROR("could not find sarea!\n"); dev->dev_private = (void *)dev_priv; r128_do_cleanup_cce(dev); - return DRM_ERR(EINVAL); + return -EINVAL; } dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset); @@ -469,21 +469,21 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init) DRM_ERROR("could not find mmio region!\n"); dev->dev_private = (void *)dev_priv; r128_do_cleanup_cce(dev); - return DRM_ERR(EINVAL); + return -EINVAL; } dev_priv->cce_ring = drm_core_findmap(dev, init->ring_offset); if (!dev_priv->cce_ring) { DRM_ERROR("could not find cce ring region!\n"); dev->dev_private = (void *)dev_priv; r128_do_cleanup_cce(dev); - return DRM_ERR(EINVAL); + return -EINVAL; } dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset); if (!dev_priv->ring_rptr) { DRM_ERROR("could not find ring read pointer!\n"); dev->dev_private = (void *)dev_priv; r128_do_cleanup_cce(dev); - return DRM_ERR(EINVAL); + return -EINVAL; } dev->agp_buffer_token = init->buffers_offset; dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); @@ -491,7 +491,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init) DRM_ERROR("could not find dma buffer region!\n"); dev->dev_private = (void *)dev_priv; r128_do_cleanup_cce(dev); - return DRM_ERR(EINVAL); + return -EINVAL; } if (!dev_priv->is_pci) { @@ -501,7 +501,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init) DRM_ERROR("could not find agp texture region!\n"); dev->dev_private = (void *)dev_priv; r128_do_cleanup_cce(dev); - return DRM_ERR(EINVAL); + return -EINVAL; } } @@ -520,7 +520,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init) DRM_ERROR("Could not ioremap agp regions!\n"); dev->dev_private = (void *)dev_priv; r128_do_cleanup_cce(dev); - return DRM_ERR(ENOMEM); + return -ENOMEM; } } else #endif @@ -567,7 +567,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init) DRM_ERROR("failed to init PCI GART!\n"); dev->dev_private = (void *)dev_priv; r128_do_cleanup_cce(dev); - return DRM_ERR(ENOMEM); + return -ENOMEM; } R128_WRITE(R128_PCI_GART_PAGE, dev_priv->gart_info.bus_addr); #if __OS_HAS_AGP @@ -642,7 +642,7 @@ int r128_cce_init(DRM_IOCTL_ARGS) return r128_do_cleanup_cce(dev); } - return DRM_ERR(EINVAL); + return -EINVAL; } 
int r128_cce_start(DRM_IOCTL_ARGS) @@ -719,7 +719,7 @@ int r128_cce_reset(DRM_IOCTL_ARGS) if (!dev_priv) { DRM_DEBUG("%s called before init done\n", __FUNCTION__); - return DRM_ERR(EINVAL); + return -EINVAL; } r128_do_cce_reset(dev_priv); @@ -757,7 +757,7 @@ int r128_engine_reset(DRM_IOCTL_ARGS) int r128_fullscreen(DRM_IOCTL_ARGS) { - return DRM_ERR(EINVAL); + return -EINVAL; } /* ================================================================ @@ -778,7 +778,7 @@ static int r128_freelist_init(struct drm_device * dev) dev_priv->head = drm_alloc(sizeof(drm_r128_freelist_t), DRM_MEM_DRIVER); if (dev_priv->head == NULL) - return DRM_ERR(ENOMEM); + return -ENOMEM; memset(dev_priv->head, 0, sizeof(drm_r128_freelist_t)); dev_priv->head->age = R128_BUFFER_USED; @@ -789,7 +789,7 @@ static int r128_freelist_init(struct drm_device * dev) entry = drm_alloc(sizeof(drm_r128_freelist_t), DRM_MEM_DRIVER); if (!entry) - return DRM_ERR(ENOMEM); + return -ENOMEM; entry->age = R128_BUFFER_FREE; entry->buf = buf; @@ -881,7 +881,7 @@ int r128_wait_ring(drm_r128_private_t * dev_priv, int n) /* FIXME: This is being ignored... */ DRM_ERROR("failed!\n"); - return DRM_ERR(EBUSY); + return -EBUSY; } static int r128_cce_get_buffers(DRMFILE filp, struct drm_device * dev, struct drm_dma * d) @@ -892,16 +892,16 @@ static int r128_cce_get_buffers(DRMFILE filp, struct drm_device * dev, struct dr for (i = d->granted_count; i < d->request_count; i++) { buf = r128_freelist_get(dev); if (!buf) - return DRM_ERR(EAGAIN); + return -EAGAIN; buf->filp = filp; if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx, sizeof(buf->idx))) - return DRM_ERR(EFAULT); + return -EFAULT; if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total, sizeof(buf->total))) - return DRM_ERR(EFAULT); + return -EFAULT; d->granted_count++; } @@ -925,7 +925,7 @@ int r128_cce_buffers(DRM_IOCTL_ARGS) if (d.send_count != 0) { DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", DRM_CURRENTPID, d.send_count); - return DRM_ERR(EINVAL); + return -EINVAL; } /* We'll send you buffers. @@ -933,7 +933,7 @@ int r128_cce_buffers(DRM_IOCTL_ARGS) if (d.request_count < 0 || d.request_count > dma->buf_count) { DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", DRM_CURRENTPID, d.request_count, dma->buf_count); - return DRM_ERR(EINVAL); + return -EINVAL; } d.granted_count = 0; diff --git a/shared-core/r128_drv.h b/shared-core/r128_drv.h index c9abd67b..077b2763 100644 --- a/shared-core/r128_drv.h +++ b/shared-core/r128_drv.h @@ -428,7 +428,7 @@ do { \ DRM_UDELAY(1); \ } \ DRM_ERROR( "ring space check failed!\n" ); \ - return DRM_ERR(EBUSY); \ + return -EBUSY; \ } \ __ring_space_done: \ ; \ diff --git a/shared-core/r128_state.c b/shared-core/r128_state.c index b793d94b..6b19c4d3 100644 --- a/shared-core/r128_state.c +++ b/shared-core/r128_state.c @@ -809,7 +809,7 @@ static int r128_cce_dispatch_blit(DRMFILE filp, break; default: DRM_ERROR("invalid blit format %d\n", blit->format); - return DRM_ERR(EINVAL); + return -EINVAL; } /* Flush the pixel cache, and mark the contents as Read Invalid. 
@@ -832,11 +832,11 @@ static int r128_cce_dispatch_blit(DRMFILE filp, if (buf->filp != filp) { DRM_ERROR("process %d using buffer owned by %p\n", DRM_CURRENTPID, buf->filp); - return DRM_ERR(EINVAL); + return -EINVAL; } if (buf->pending) { DRM_ERROR("sending pending buffer %d\n", blit->idx); - return DRM_ERR(EINVAL); + return -EINVAL; } buf_priv->discard = 1; @@ -900,22 +900,22 @@ static int r128_cce_dispatch_write_span(struct drm_device * dev, count = depth->n; if (count > 4096 || count <= 0) - return DRM_ERR(EMSGSIZE); + return -EMSGSIZE; if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) { - return DRM_ERR(EFAULT); + return -EFAULT; } if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) { - return DRM_ERR(EFAULT); + return -EFAULT; } buffer_size = depth->n * sizeof(u32); buffer = drm_alloc(buffer_size, DRM_MEM_BUFS); if (buffer == NULL) - return DRM_ERR(ENOMEM); + return -ENOMEM; if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) { drm_free(buffer, buffer_size, DRM_MEM_BUFS); - return DRM_ERR(EFAULT); + return -EFAULT; } mask_size = depth->n * sizeof(u8); @@ -923,12 +923,12 @@ static int r128_cce_dispatch_write_span(struct drm_device * dev, mask = drm_alloc(mask_size, DRM_MEM_BUFS); if (mask == NULL) { drm_free(buffer, buffer_size, DRM_MEM_BUFS); - return DRM_ERR(ENOMEM); + return -ENOMEM; } if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) { drm_free(buffer, buffer_size, DRM_MEM_BUFS); drm_free(mask, mask_size, DRM_MEM_BUFS); - return DRM_ERR(EFAULT); + return -EFAULT; } for (i = 0; i < count; i++, x++) { @@ -996,28 +996,28 @@ static int r128_cce_dispatch_write_pixels(struct drm_device * dev, count = depth->n; if (count > 4096 || count <= 0) - return DRM_ERR(EMSGSIZE); + return -EMSGSIZE; xbuf_size = count * sizeof(*x); ybuf_size = count * sizeof(*y); x = drm_alloc(xbuf_size, DRM_MEM_BUFS); if (x == NULL) { - return DRM_ERR(ENOMEM); + return -ENOMEM; } y = drm_alloc(ybuf_size, DRM_MEM_BUFS); if (y == NULL) { drm_free(x, xbuf_size, DRM_MEM_BUFS); - return DRM_ERR(ENOMEM); + return -ENOMEM; } if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) { drm_free(x, xbuf_size, DRM_MEM_BUFS); drm_free(y, ybuf_size, DRM_MEM_BUFS); - return DRM_ERR(EFAULT); + return -EFAULT; } if (DRM_COPY_FROM_USER(y, depth->y, xbuf_size)) { drm_free(x, xbuf_size, DRM_MEM_BUFS); drm_free(y, ybuf_size, DRM_MEM_BUFS); - return DRM_ERR(EFAULT); + return -EFAULT; } buffer_size = depth->n * sizeof(u32); @@ -1025,13 +1025,13 @@ static int r128_cce_dispatch_write_pixels(struct drm_device * dev, if (buffer == NULL) { drm_free(x, xbuf_size, DRM_MEM_BUFS); drm_free(y, ybuf_size, DRM_MEM_BUFS); - return DRM_ERR(ENOMEM); + return -ENOMEM; } if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) { drm_free(x, xbuf_size, DRM_MEM_BUFS); drm_free(y, ybuf_size, DRM_MEM_BUFS); drm_free(buffer, buffer_size, DRM_MEM_BUFS); - return DRM_ERR(EFAULT); + return -EFAULT; } if (depth->mask) { @@ -1041,14 +1041,14 @@ static int r128_cce_dispatch_write_pixels(struct drm_device * dev, drm_free(x, xbuf_size, DRM_MEM_BUFS); drm_free(y, ybuf_size, DRM_MEM_BUFS); drm_free(buffer, buffer_size, DRM_MEM_BUFS); - return DRM_ERR(ENOMEM); + return -ENOMEM; } if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) { drm_free(x, xbuf_size, DRM_MEM_BUFS); drm_free(y, ybuf_size, DRM_MEM_BUFS); drm_free(buffer, buffer_size, DRM_MEM_BUFS); drm_free(mask, mask_size, DRM_MEM_BUFS); - return DRM_ERR(EFAULT); + return -EFAULT; } for (i = 0; i < count; i++) { @@ -1115,13 +1115,13 @@ static int r128_cce_dispatch_read_span(struct drm_device * dev, count = 
depth->n; if (count > 4096 || count <= 0) - return DRM_ERR(EMSGSIZE); + return -EMSGSIZE; if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) { - return DRM_ERR(EFAULT); + return -EFAULT; } if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) { - return DRM_ERR(EFAULT); + return -EFAULT; } BEGIN_RING(7); @@ -1159,7 +1159,7 @@ static int r128_cce_dispatch_read_pixels(struct drm_device * dev, count = depth->n; if (count > 4096 || count <= 0) - return DRM_ERR(EMSGSIZE); + return -EMSGSIZE; if (count > dev_priv->depth_pitch) { count = dev_priv->depth_pitch; @@ -1169,22 +1169,22 @@ static int r128_cce_dispatch_read_pixels(struct drm_device * dev, ybuf_size = count * sizeof(*y); x = drm_alloc(xbuf_size, DRM_MEM_BUFS); if (x == NULL) { - return DRM_ERR(ENOMEM); + return -ENOMEM; } y = drm_alloc(ybuf_size, DRM_MEM_BUFS); if (y == NULL) { drm_free(x, xbuf_size, DRM_MEM_BUFS); - return DRM_ERR(ENOMEM); + return -ENOMEM; } if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) { drm_free(x, xbuf_size, DRM_MEM_BUFS); drm_free(y, ybuf_size, DRM_MEM_BUFS); - return DRM_ERR(EFAULT); + return -EFAULT; } if (DRM_COPY_FROM_USER(y, depth->y, ybuf_size)) { drm_free(x, xbuf_size, DRM_MEM_BUFS); drm_free(y, ybuf_size, DRM_MEM_BUFS); - return DRM_ERR(EFAULT); + return -EFAULT; } for (i = 0; i < count; i++) { @@ -1363,7 +1363,7 @@ static int r128_cce_vertex(DRM_IOCTL_ARGS) if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); - return DRM_ERR(EINVAL); + return -EINVAL; } DRM_COPY_FROM_USER_IOCTL(vertex, (drm_r128_vertex_t __user *) data, @@ -1375,12 +1375,12 @@ static int r128_cce_vertex(DRM_IOCTL_ARGS) if (vertex.idx < 0 || vertex.idx >= dma->buf_count) { DRM_ERROR("buffer index %d (of %d max)\n", vertex.idx, dma->buf_count - 1); - return DRM_ERR(EINVAL); + return -EINVAL; } if (vertex.prim < 0 || vertex.prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) { DRM_ERROR("buffer prim %d\n", vertex.prim); - return DRM_ERR(EINVAL); + return -EINVAL; } RING_SPACE_TEST_WITH_RETURN(dev_priv); @@ -1392,11 +1392,11 @@ static int r128_cce_vertex(DRM_IOCTL_ARGS) if (buf->filp != filp) { DRM_ERROR("process %d using buffer owned by %p\n", DRM_CURRENTPID, buf->filp); - return DRM_ERR(EINVAL); + return -EINVAL; } if (buf->pending) { DRM_ERROR("sending pending buffer %d\n", vertex.idx); - return DRM_ERR(EINVAL); + return -EINVAL; } buf->used = vertex.count; @@ -1423,7 +1423,7 @@ static int r128_cce_indices(DRM_IOCTL_ARGS) if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); - return DRM_ERR(EINVAL); + return -EINVAL; } DRM_COPY_FROM_USER_IOCTL(elts, (drm_r128_indices_t __user *) data, @@ -1435,11 +1435,11 @@ static int r128_cce_indices(DRM_IOCTL_ARGS) if (elts.idx < 0 || elts.idx >= dma->buf_count) { DRM_ERROR("buffer index %d (of %d max)\n", elts.idx, dma->buf_count - 1); - return DRM_ERR(EINVAL); + return -EINVAL; } if (elts.prim < 0 || elts.prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) { DRM_ERROR("buffer prim %d\n", elts.prim); - return DRM_ERR(EINVAL); + return -EINVAL; } RING_SPACE_TEST_WITH_RETURN(dev_priv); @@ -1451,11 +1451,11 @@ static int r128_cce_indices(DRM_IOCTL_ARGS) if (buf->filp != filp) { DRM_ERROR("process %d using buffer owned by %p\n", DRM_CURRENTPID, buf->filp); - return DRM_ERR(EINVAL); + return -EINVAL; } if (buf->pending) { DRM_ERROR("sending pending buffer %d\n", elts.idx); - return DRM_ERR(EINVAL); + return -EINVAL; } count = (elts.end - elts.start) / sizeof(u16); @@ -1463,11 +1463,11 @@ static int r128_cce_indices(DRM_IOCTL_ARGS) if (elts.start & 0x7) { 
DRM_ERROR("misaligned buffer 0x%x\n", elts.start); - return DRM_ERR(EINVAL); + return -EINVAL; } if (elts.start < buf->used) { DRM_ERROR("no header 0x%x - 0x%x\n", elts.start, buf->used); - return DRM_ERR(EINVAL); + return -EINVAL; } buf->used = elts.end; @@ -1498,7 +1498,7 @@ static int r128_cce_blit(DRM_IOCTL_ARGS) if (blit.idx < 0 || blit.idx >= dma->buf_count) { DRM_ERROR("buffer index %d (of %d max)\n", blit.idx, dma->buf_count - 1); - return DRM_ERR(EINVAL); + return -EINVAL; } RING_SPACE_TEST_WITH_RETURN(dev_priv); @@ -1524,7 +1524,7 @@ static int r128_cce_depth(DRM_IOCTL_ARGS) RING_SPACE_TEST_WITH_RETURN(dev_priv); - ret = DRM_ERR(EINVAL); + ret = -EINVAL; switch (depth.func) { case R128_WRITE_SPAN: ret = r128_cce_dispatch_write_span(dev, &depth); @@ -1557,7 +1557,7 @@ static int r128_cce_stipple(DRM_IOCTL_ARGS) sizeof(stipple)); if (DRM_COPY_FROM_USER(&mask, stipple.mask, 32 * sizeof(u32))) - return DRM_ERR(EFAULT); + return -EFAULT; RING_SPACE_TEST_WITH_RETURN(dev_priv); @@ -1583,7 +1583,7 @@ static int r128_cce_indirect(DRM_IOCTL_ARGS) if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); - return DRM_ERR(EINVAL); + return -EINVAL; } DRM_COPY_FROM_USER_IOCTL(indirect, (drm_r128_indirect_t __user *) data, @@ -1595,7 +1595,7 @@ static int r128_cce_indirect(DRM_IOCTL_ARGS) if (indirect.idx < 0 || indirect.idx >= dma->buf_count) { DRM_ERROR("buffer index %d (of %d max)\n", indirect.idx, dma->buf_count - 1); - return DRM_ERR(EINVAL); + return -EINVAL; } buf = dma->buflist[indirect.idx]; @@ -1604,17 +1604,17 @@ static int r128_cce_indirect(DRM_IOCTL_ARGS) if (buf->filp != filp) { DRM_ERROR("process %d using buffer owned by %p\n", DRM_CURRENTPID, buf->filp); - return DRM_ERR(EINVAL); + return -EINVAL; } if (buf->pending) { DRM_ERROR("sending pending buffer %d\n", indirect.idx); - return DRM_ERR(EINVAL); + return -EINVAL; } if (indirect.start < buf->used) { DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n", indirect.start, buf->used); - return DRM_ERR(EINVAL); + return -EINVAL; } RING_SPACE_TEST_WITH_RETURN(dev_priv); @@ -1651,7 +1651,7 @@ static int r128_getparam(DRM_IOCTL_ARGS) if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); - return DRM_ERR(EINVAL); + return -EINVAL; } DRM_COPY_FROM_USER_IOCTL(param, (drm_r128_getparam_t __user *) data, @@ -1664,12 +1664,12 @@ static int r128_getparam(DRM_IOCTL_ARGS) value = dev->irq; break; default: - return DRM_ERR(EINVAL); + return -EINVAL; } if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) { DRM_ERROR("copy_to_user\n"); - return DRM_ERR(EFAULT); + return -EFAULT; } return 0; diff --git a/shared-core/r300_cmdbuf.c b/shared-core/r300_cmdbuf.c index 9cf352ae..8fee22e9 100644 --- a/shared-core/r300_cmdbuf.c +++ b/shared-core/r300_cmdbuf.c @@ -74,7 +74,7 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv, if (DRM_COPY_FROM_USER_UNCHECKED (&box, &cmdbuf->boxes[n + i], sizeof(box))) { DRM_ERROR("copy cliprect faulted\n"); - return DRM_ERR(EFAULT); + return -EFAULT; } box.x1 = @@ -263,7 +263,7 @@ static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t * DRM_ERROR ("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n", reg, sz); - return DRM_ERR(EINVAL); + return -EINVAL; } for (i = 0; i < sz; i++) { values[i] = ((int *)cmdbuf->buf)[i]; @@ -275,13 +275,13 @@ static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t * DRM_ERROR ("Offset failed range check (reg=%04x sz=%d)\n", reg, sz); - return DRM_ERR(EINVAL); + return 
-EINVAL; } break; default: DRM_ERROR("Register %04x failed check as flag=%02x\n", reg + i * 4, r300_reg_flags[(reg >> 2) + i]); - return DRM_ERR(EINVAL); + return -EINVAL; } } @@ -317,12 +317,12 @@ static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv, return 0; if (sz * 4 > cmdbuf->bufsz) - return DRM_ERR(EINVAL); + return -EINVAL; if (reg + sz * 4 >= 0x10000) { DRM_ERROR("No such registers in hardware reg=%04x sz=%d\n", reg, sz); - return DRM_ERR(EINVAL); + return -EINVAL; } if (r300_check_range(reg, sz)) { @@ -362,7 +362,7 @@ static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv, if (!sz) return 0; if (sz * 16 > cmdbuf->bufsz) - return DRM_ERR(EINVAL); + return -EINVAL; BEGIN_RING(5 + sz * 4); /* Wait for VAP to come to senses.. */ @@ -391,7 +391,7 @@ static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv, RING_LOCALS; if (8 * 4 > cmdbuf->bufsz) - return DRM_ERR(EINVAL); + return -EINVAL; BEGIN_RING(10); OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 8)); @@ -421,7 +421,7 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv, if ((count + 1) > MAX_ARRAY_PACKET) { DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n", count); - return DRM_ERR(EINVAL); + return -EINVAL; } memset(payload, 0, MAX_ARRAY_PACKET * 4); memcpy(payload, cmdbuf->buf + 4, (count + 1) * 4); @@ -437,7 +437,7 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv, DRM_ERROR ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", k, i); - return DRM_ERR(EINVAL); + return -EINVAL; } k++; i++; @@ -448,7 +448,7 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv, DRM_ERROR ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", k, i); - return DRM_ERR(EINVAL); + return -EINVAL; } k++; i++; @@ -458,7 +458,7 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv, DRM_ERROR ("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n", k, i, narrays, count + 1); - return DRM_ERR(EINVAL); + return -EINVAL; } /* all clear, output packet */ @@ -492,7 +492,7 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv, ret = !radeon_check_offset(dev_priv, offset); if (ret) { DRM_ERROR("Invalid bitblt first offset is %08X\n", offset); - return DRM_ERR(EINVAL); + return -EINVAL; } } @@ -502,7 +502,7 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv, ret = !radeon_check_offset(dev_priv, offset); if (ret) { DRM_ERROR("Invalid bitblt second offset is %08X\n", offset); - return DRM_ERR(EINVAL); + return -EINVAL; } } @@ -530,12 +530,12 @@ static __inline__ int r300_emit_indx_buffer(drm_radeon_private_t *dev_priv, if ((cmd[1] & 0x8000ffff) != 0x80000810) { DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]); - return DRM_ERR(EINVAL); + return -EINVAL; } ret = !radeon_check_offset(dev_priv, cmd[2]); if (ret) { DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]); - return DRM_ERR(EINVAL); + return -EINVAL; } BEGIN_RING(count+2); @@ -557,7 +557,7 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv, RING_LOCALS; if (4 > cmdbuf->bufsz) - return DRM_ERR(EINVAL); + return -EINVAL; /* Fixme !! This simply emits a packet without much checking. We need to be smarter. */ @@ -568,7 +568,7 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv, /* Is it packet 3 ? 
*/ if ((header >> 30) != 0x3) { DRM_ERROR("Not a packet3 header (0x%08x)\n", header); - return DRM_ERR(EINVAL); + return -EINVAL; } count = (header >> 16) & 0x3fff; @@ -578,7 +578,7 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv, DRM_ERROR ("Expected packet3 of length %d but have only %d bytes left\n", (count + 2) * 4, cmdbuf->bufsz); - return DRM_ERR(EINVAL); + return -EINVAL; } /* Is it a packet type we know about ? */ @@ -600,7 +600,7 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv, break; default: DRM_ERROR("Unknown packet3 header (0x%08x)\n", header); - return DRM_ERR(EINVAL); + return -EINVAL; } BEGIN_RING(count + 2); @@ -664,7 +664,7 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv, DRM_ERROR("bad packet3 type %i at %p\n", header.packet3.packet, cmdbuf->buf - sizeof(header)); - return DRM_ERR(EINVAL); + return -EINVAL; } n += R300_SIMULTANEOUS_CLIPRECTS; @@ -725,11 +725,11 @@ static int r300_scratch(drm_radeon_private_t *dev_priv, RING_LOCALS; if (cmdbuf->bufsz < sizeof(uint64_t) + header.scratch.n_bufs * sizeof(buf_idx) ) { - return DRM_ERR(EINVAL); + return -EINVAL; } if (header.scratch.reg >= 5) { - return DRM_ERR(EINVAL); + return -EINVAL; } dev_priv->scratch_ages[header.scratch.reg] ++; @@ -744,21 +744,21 @@ static int r300_scratch(drm_radeon_private_t *dev_priv, buf_idx *= 2; /* 8 bytes per buf */ if (DRM_COPY_TO_USER(ref_age_base + buf_idx, &dev_priv->scratch_ages[header.scratch.reg], sizeof(u32))) { - return DRM_ERR(EINVAL); + return -EINVAL; } if (DRM_COPY_FROM_USER(&h_pending, ref_age_base + buf_idx + 1, sizeof(u32))) { - return DRM_ERR(EINVAL); + return -EINVAL; } if (h_pending == 0) { - return DRM_ERR(EINVAL); + return -EINVAL; } h_pending--; if (DRM_COPY_TO_USER(ref_age_base + buf_idx + 1, &h_pending, sizeof(u32))) { - return DRM_ERR(EINVAL); + return -EINVAL; } cmdbuf->buf += sizeof(buf_idx); @@ -878,7 +878,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev, if (idx < 0 || idx >= dma->buf_count) { DRM_ERROR("buffer index %d (of %d max)\n", idx, dma->buf_count - 1); - ret = DRM_ERR(EINVAL); + ret = -EINVAL; goto cleanup; } @@ -886,7 +886,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev, if (buf->filp != filp || buf->pending) { DRM_ERROR("bad buffer %p %p %d\n", buf->filp, filp, buf->pending); - ret = DRM_ERR(EINVAL); + ret = -EINVAL; goto cleanup; } @@ -923,7 +923,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev, DRM_ERROR("bad cmd_type %i at %p\n", header.header.cmd_type, cmdbuf->buf - sizeof(header)); - ret = DRM_ERR(EINVAL); + ret = -EINVAL; goto cleanup; } } diff --git a/shared-core/radeon_cp.c b/shared-core/radeon_cp.c index 40a20e6c..723d41c3 100644 --- a/shared-core/radeon_cp.c +++ b/shared-core/radeon_cp.c @@ -889,7 +889,7 @@ static int radeon_do_pixcache_flush(drm_radeon_private_t * dev_priv) DRM_ERROR("failed!\n"); radeon_status(dev_priv); #endif - return DRM_ERR(EBUSY); + return -EBUSY; } static int radeon_do_wait_for_fifo(drm_radeon_private_t * dev_priv, int entries) @@ -910,7 +910,7 @@ static int radeon_do_wait_for_fifo(drm_radeon_private_t * dev_priv, int entries) DRM_ERROR("failed!\n"); radeon_status(dev_priv); #endif - return DRM_ERR(EBUSY); + return -EBUSY; } static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv) @@ -936,7 +936,7 @@ static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv) DRM_ERROR("failed!\n"); radeon_status(dev_priv); #endif - return DRM_ERR(EBUSY); + return -EBUSY; } /* 
================================================================ @@ -1400,7 +1400,7 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init) if ((dev_priv->flags & RADEON_NEW_MEMMAP) && !dev_priv->new_memmap) { DRM_ERROR("Cannot initialise DRM on this card\nThis card requires a new X.org DDX for 3D\n"); radeon_do_cleanup_cp(dev); - return DRM_ERR(EINVAL); + return -EINVAL; } if (init->is_pci && (dev_priv->flags & RADEON_IS_AGP)) @@ -1418,7 +1418,7 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init) if ((!(dev_priv->flags & RADEON_IS_AGP)) && !dev->sg) { DRM_ERROR("PCI GART memory not allocated!\n"); radeon_do_cleanup_cp(dev); - return DRM_ERR(EINVAL); + return -EINVAL; } dev_priv->usec_timeout = init->usec_timeout; @@ -1426,7 +1426,7 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init) dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT) { DRM_DEBUG("TIMEOUT problem!\n"); radeon_do_cleanup_cp(dev); - return DRM_ERR(EINVAL); + return -EINVAL; } /* Enable vblank on CRTC1 for older X servers @@ -1455,7 +1455,7 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init) (init->cp_mode != RADEON_CSQ_PRIBM_INDBM)) { DRM_DEBUG("BAD cp_mode (%x)!\n", init->cp_mode); radeon_do_cleanup_cp(dev); - return DRM_ERR(EINVAL); + return -EINVAL; } switch (init->fb_bpp) { @@ -1524,27 +1524,27 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init) if (!dev_priv->sarea) { DRM_ERROR("could not find sarea!\n"); radeon_do_cleanup_cp(dev); - return DRM_ERR(EINVAL); + return -EINVAL; } dev_priv->cp_ring = drm_core_findmap(dev, init->ring_offset); if (!dev_priv->cp_ring) { DRM_ERROR("could not find cp ring region!\n"); radeon_do_cleanup_cp(dev); - return DRM_ERR(EINVAL); + return -EINVAL; } dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset); if (!dev_priv->ring_rptr) { DRM_ERROR("could not find ring read pointer!\n"); radeon_do_cleanup_cp(dev); - return DRM_ERR(EINVAL); + return -EINVAL; } dev->agp_buffer_token = init->buffers_offset; dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); if (!dev->agp_buffer_map) { DRM_ERROR("could not find dma buffer region!\n"); radeon_do_cleanup_cp(dev); - return DRM_ERR(EINVAL); + return -EINVAL; } if (init->gart_textures_offset) { @@ -1553,7 +1553,7 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init) if (!dev_priv->gart_textures) { DRM_ERROR("could not find GART texture region!\n"); radeon_do_cleanup_cp(dev); - return DRM_ERR(EINVAL); + return -EINVAL; } } @@ -1571,7 +1571,7 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init) !dev->agp_buffer_map->handle) { DRM_ERROR("could not find ioremap agp regions!\n"); radeon_do_cleanup_cp(dev); - return DRM_ERR(EINVAL); + return -EINVAL; } } else #endif @@ -1725,14 +1725,14 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init) DRM_ERROR ("Cannot use PCI Express without GART in FB memory\n"); radeon_do_cleanup_cp(dev); - return DRM_ERR(EINVAL); + return -EINVAL; } } if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) { DRM_ERROR("failed to init PCI GART!\n"); radeon_do_cleanup_cp(dev); - return DRM_ERR(ENOMEM); + return -ENOMEM; } /* Turn on PCI GART */ @@ -1812,7 +1812,7 @@ static int radeon_do_resume_cp(struct drm_device * dev) if (!dev_priv) { DRM_ERROR("Called with no initialization\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } DRM_DEBUG("Starting radeon_do_resume_cp()\n"); @@ 
-1860,7 +1860,7 @@ int radeon_cp_init(DRM_IOCTL_ARGS) return radeon_do_cleanup_cp(dev); } - return DRM_ERR(EINVAL); + return -EINVAL; } int radeon_cp_start(DRM_IOCTL_ARGS) @@ -1993,7 +1993,7 @@ int radeon_cp_reset(DRM_IOCTL_ARGS) if (!dev_priv) { DRM_DEBUG("%s called before init done\n", __FUNCTION__); - return DRM_ERR(EINVAL); + return -EINVAL; } radeon_do_cp_reset(dev_priv); @@ -2187,7 +2187,7 @@ int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n) radeon_status(dev_priv); DRM_ERROR("failed!\n"); #endif - return DRM_ERR(EBUSY); + return -EBUSY; } static int radeon_cp_get_buffers(DRMFILE filp, struct drm_device * dev, @@ -2199,16 +2199,16 @@ static int radeon_cp_get_buffers(DRMFILE filp, struct drm_device * dev, for (i = d->granted_count; i < d->request_count; i++) { buf = radeon_freelist_get(dev); if (!buf) - return DRM_ERR(EBUSY); /* NOTE: broken client */ + return -EBUSY; /* NOTE: broken client */ buf->filp = filp; if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx, sizeof(buf->idx))) - return DRM_ERR(EFAULT); + return -EFAULT; if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total, sizeof(buf->total))) - return DRM_ERR(EFAULT); + return -EFAULT; d->granted_count++; } @@ -2232,7 +2232,7 @@ int radeon_cp_buffers(DRM_IOCTL_ARGS) if (d.send_count != 0) { DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", DRM_CURRENTPID, d.send_count); - return DRM_ERR(EINVAL); + return -EINVAL; } /* We'll send you buffers. @@ -2240,7 +2240,7 @@ int radeon_cp_buffers(DRM_IOCTL_ARGS) if (d.request_count < 0 || d.request_count > dma->buf_count) { DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", DRM_CURRENTPID, d.request_count, dma->buf_count); - return DRM_ERR(EINVAL); + return -EINVAL; } d.granted_count = 0; @@ -2261,7 +2261,7 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags) dev_priv = drm_alloc(sizeof(drm_radeon_private_t), DRM_MEM_DRIVER); if (dev_priv == NULL) - return DRM_ERR(ENOMEM); + return -ENOMEM; memset(dev_priv, 0, sizeof(drm_radeon_private_t)); dev->dev_private = (void *)dev_priv; diff --git a/shared-core/radeon_irq.c b/shared-core/radeon_irq.c index b973b968..8266d11a 100644 --- a/shared-core/radeon_irq.c +++ b/shared-core/radeon_irq.c @@ -156,7 +156,7 @@ static int radeon_driver_vblank_do_wait(struct drm_device * dev, atomic_t *counter; if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); - return DRM_ERR(EINVAL); + return -EINVAL; } if (crtc == DRM_RADEON_VBLANK_CRTC1) { @@ -166,7 +166,7 @@ static int radeon_driver_vblank_do_wait(struct drm_device * dev, counter = &dev->vbl_received2; ack |= RADEON_CRTC2_VBLANK_STAT; } else - return DRM_ERR(EINVAL); + return -EINVAL; radeon_acknowledge_irqs(dev_priv, ack); @@ -208,7 +208,7 @@ int radeon_irq_emit(DRM_IOCTL_ARGS) if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); - return DRM_ERR(EINVAL); + return -EINVAL; } DRM_COPY_FROM_USER_IOCTL(emit, (drm_radeon_irq_emit_t __user *) data, @@ -218,7 +218,7 @@ int radeon_irq_emit(DRM_IOCTL_ARGS) if (DRM_COPY_TO_USER(emit.irq_seq, &result, sizeof(int))) { DRM_ERROR("copy_to_user\n"); - return DRM_ERR(EFAULT); + return -EFAULT; } return 0; @@ -234,7 +234,7 @@ int radeon_irq_wait(DRM_IOCTL_ARGS) if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); - return DRM_ERR(EINVAL); + return -EINVAL; } DRM_COPY_FROM_USER_IOCTL(irqwait, (drm_radeon_irq_wait_t __user *) data, @@ -321,7 +321,7 @@ int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value) drm_radeon_private_t 
*dev_priv = (drm_radeon_private_t *) dev->dev_private; if (value & ~(DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) { DRM_ERROR("called with invalid crtc 0x%x\n", (unsigned int)value); - return DRM_ERR(EINVAL); + return -EINVAL; } dev_priv->vblank_crtc = (unsigned int)value; radeon_enable_interrupt(dev); diff --git a/shared-core/radeon_mem.c b/shared-core/radeon_mem.c index dbc91c9f..63d4b8c9 100644 --- a/shared-core/radeon_mem.c +++ b/shared-core/radeon_mem.c @@ -137,12 +137,12 @@ static int init_heap(struct mem_block **heap, int start, int size) struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFS); if (!blocks) - return DRM_ERR(ENOMEM); + return -ENOMEM; *heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFS); if (!*heap) { drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFS); - return DRM_ERR(ENOMEM); + return -ENOMEM; } blocks->start = start; @@ -226,7 +226,7 @@ int radeon_mem_alloc(DRM_IOCTL_ARGS) if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); - return DRM_ERR(EINVAL); + return -EINVAL; } DRM_COPY_FROM_USER_IOCTL(alloc, (drm_radeon_mem_alloc_t __user *) data, @@ -234,7 +234,7 @@ int radeon_mem_alloc(DRM_IOCTL_ARGS) heap = get_heap(dev_priv, alloc.region); if (!heap || !*heap) - return DRM_ERR(EFAULT); + return -EFAULT; /* Make things easier on ourselves: all allocations at least * 4k aligned. @@ -245,11 +245,11 @@ int radeon_mem_alloc(DRM_IOCTL_ARGS) block = alloc_block(*heap, alloc.size, alloc.alignment, filp); if (!block) - return DRM_ERR(ENOMEM); + return -ENOMEM; if (DRM_COPY_TO_USER(alloc.region_offset, &block->start, sizeof(int))) { DRM_ERROR("copy_to_user\n"); - return DRM_ERR(EFAULT); + return -EFAULT; } return 0; @@ -264,7 +264,7 @@ int radeon_mem_free(DRM_IOCTL_ARGS) if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); - return DRM_ERR(EINVAL); + return -EINVAL; } DRM_COPY_FROM_USER_IOCTL(memfree, (drm_radeon_mem_free_t __user *) data, @@ -272,14 +272,14 @@ int radeon_mem_free(DRM_IOCTL_ARGS) heap = get_heap(dev_priv, memfree.region); if (!heap || !*heap) - return DRM_ERR(EFAULT); + return -EFAULT; block = find_block(*heap, memfree.region_offset); if (!block) - return DRM_ERR(EFAULT); + return -EFAULT; if (block->filp != filp) - return DRM_ERR(EPERM); + return -EPERM; free_block(block); return 0; @@ -294,7 +294,7 @@ int radeon_mem_init_heap(DRM_IOCTL_ARGS) if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); - return DRM_ERR(EINVAL); + return -EINVAL; } DRM_COPY_FROM_USER_IOCTL(initheap, @@ -303,11 +303,11 @@ int radeon_mem_init_heap(DRM_IOCTL_ARGS) heap = get_heap(dev_priv, initheap.region); if (!heap) - return DRM_ERR(EFAULT); + return -EFAULT; if (*heap) { DRM_ERROR("heap already initialized?"); - return DRM_ERR(EFAULT); + return -EFAULT; } return init_heap(heap, initheap.start, initheap.size); diff --git a/shared-core/radeon_state.c b/shared-core/radeon_state.c index 13b09d44..e351656d 100644 --- a/shared-core/radeon_state.c +++ b/shared-core/radeon_state.c @@ -85,7 +85,7 @@ static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t * *offset = off; return 0; } - return DRM_ERR(EINVAL); + return -EINVAL; } static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t * @@ -99,7 +99,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t * if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &data[(RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4])) { DRM_ERROR("Invalid depth buffer offset\n"); - return DRM_ERR(EINVAL); + return 
-EINVAL; } break; @@ -107,7 +107,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t * if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &data[(RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4])) { DRM_ERROR("Invalid colour buffer offset\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } break; @@ -120,7 +120,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t * if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &data[0])) { DRM_ERROR("Invalid R200 texture offset\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } break; @@ -130,7 +130,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t * if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &data[(RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4])) { DRM_ERROR("Invalid R100 texture offset\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } break; @@ -147,7 +147,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t * &data[i])) { DRM_ERROR ("Invalid R200 cubic texture offset\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } } break; @@ -163,7 +163,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t * &data[i])) { DRM_ERROR ("Invalid R100 cubic texture offset\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } } } @@ -256,7 +256,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t * default: DRM_ERROR("Unknown state packet ID %d\n", id); - return DRM_ERR(EINVAL); + return -EINVAL; } return 0; @@ -277,12 +277,12 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * if ((cmd[0] & 0xc0000000) != RADEON_CP_PACKET3) { DRM_ERROR("Not a type 3 packet\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } if (4 * *cmdsz > cmdbuf->bufsz) { DRM_ERROR("Packet size larger than size of data provided\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } switch(cmd[0] & 0xff00) { @@ -307,7 +307,7 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * /* safe but r200 only */ if (dev_priv->microcode_version != UCODE_R200) { DRM_ERROR("Invalid 3d packet for r100-class chip\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } break; @@ -317,7 +317,7 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * if (count > 18) { /* 12 arrays max */ DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n", count); - return DRM_ERR(EINVAL); + return -EINVAL; } /* carefully check packet contents */ @@ -330,7 +330,7 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * DRM_ERROR ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n", k, i); - return DRM_ERR(EINVAL); + return -EINVAL; } k++; i++; @@ -341,7 +341,7 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * DRM_ERROR ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n", k, i); - return DRM_ERR(EINVAL); + return -EINVAL; } k++; i++; @@ -351,33 +351,33 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * DRM_ERROR ("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n", k, i, narrays, count + 1); - return DRM_ERR(EINVAL); + return -EINVAL; } break; case RADEON_3D_RNDR_GEN_INDX_PRIM: if (dev_priv->microcode_version != UCODE_R100) { DRM_ERROR("Invalid 3d packet for r200-class chip\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &cmd[1])) { DRM_ERROR("Invalid rndr_gen_indx offset\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } 
break; case RADEON_CP_INDX_BUFFER: if (dev_priv->microcode_version != UCODE_R200) { DRM_ERROR("Invalid 3d packet for r100-class chip\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } if ((cmd[1] & 0x8000ffff) != 0x80000810) { DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]); - return DRM_ERR(EINVAL); + return -EINVAL; } if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &cmd[2])) { DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]); - return DRM_ERR(EINVAL); + return -EINVAL; } break; @@ -391,7 +391,7 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * if (radeon_check_and_fixup_offset (dev_priv, filp_priv, &offset)) { DRM_ERROR("Invalid first packet offset\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } cmd[2] = (cmd[2] & 0xffc00000) | offset >> 10; } @@ -402,7 +402,7 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * if (radeon_check_and_fixup_offset (dev_priv, filp_priv, &offset)) { DRM_ERROR("Invalid second packet offset\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } cmd[3] = (cmd[3] & 0xffc00000) | offset >> 10; } @@ -410,7 +410,7 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * default: DRM_ERROR("Invalid packet type %x\n", cmd[0] & 0xff00); - return DRM_ERR(EINVAL); + return -EINVAL; } return 0; @@ -451,13 +451,13 @@ static int radeon_emit_state(drm_radeon_private_t * dev_priv, if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &ctx->rb3d_depthoffset)) { DRM_ERROR("Invalid depth buffer offset\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &ctx->rb3d_coloroffset)) { DRM_ERROR("Invalid depth buffer offset\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } BEGIN_RING(14); @@ -546,7 +546,7 @@ static int radeon_emit_state(drm_radeon_private_t * dev_priv, if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &tex[0].pp_txoffset)) { DRM_ERROR("Invalid texture offset for unit 0\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } BEGIN_RING(9); @@ -566,7 +566,7 @@ static int radeon_emit_state(drm_radeon_private_t * dev_priv, if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &tex[1].pp_txoffset)) { DRM_ERROR("Invalid texture offset for unit 1\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } BEGIN_RING(9); @@ -586,7 +586,7 @@ static int radeon_emit_state(drm_radeon_private_t * dev_priv, if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &tex[2].pp_txoffset)) { DRM_ERROR("Invalid texture offset for unit 2\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } BEGIN_RING(9); @@ -1668,7 +1668,7 @@ static int radeon_cp_dispatch_texture(DRMFILE filp, if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &tex->offset)) { DRM_ERROR("Invalid destination offset\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } dev_priv->stats.boxes |= RADEON_BOX_TEXTURE_LOAD; @@ -1711,11 +1711,11 @@ static int radeon_cp_dispatch_texture(DRMFILE filp, break; default: DRM_ERROR("invalid texture format %d\n", tex->format); - return DRM_ERR(EINVAL); + return -EINVAL; } spitch = blit_width >> 6; if (spitch == 0 && image->height > 1) - return DRM_ERR(EINVAL); + return -EINVAL; texpitch = tex->pitch; if ((texpitch << 22) & RADEON_DST_TILE_MICRO) { @@ -1760,8 +1760,8 @@ static int radeon_cp_dispatch_texture(DRMFILE filp, if (!buf) { DRM_DEBUG("radeon_cp_dispatch_texture: EAGAIN\n"); if (DRM_COPY_TO_USER(tex->image, image, sizeof(*image))) - return DRM_ERR(EFAULT); - return DRM_ERR(EAGAIN); + return -EFAULT; + return -EAGAIN; } /* Dispatch the 
indirect buffer. @@ -1774,7 +1774,7 @@ static int radeon_cp_dispatch_texture(DRMFILE filp, do { \ if (DRM_COPY_FROM_USER(_buf, _data, (_width))) {\ DRM_ERROR("EFAULT on pad, %d bytes\n", (_width)); \ - return DRM_ERR(EFAULT); \ + return -EFAULT; \ } \ } while(0) @@ -2080,7 +2080,7 @@ static int radeon_surface_alloc(DRM_IOCTL_ARGS) if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); - return DRM_ERR(EINVAL); + return -EINVAL; } DRM_COPY_FROM_USER_IOCTL(alloc, @@ -2088,7 +2088,7 @@ static int radeon_surface_alloc(DRM_IOCTL_ARGS) sizeof(alloc)); if (alloc_surface(&alloc, dev_priv, filp) == -1) - return DRM_ERR(EINVAL); + return -EINVAL; else return 0; } @@ -2101,14 +2101,14 @@ static int radeon_surface_free(DRM_IOCTL_ARGS) if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); - return DRM_ERR(EINVAL); + return -EINVAL; } DRM_COPY_FROM_USER_IOCTL(memfree, (drm_radeon_surface_free_t __user *) data, sizeof(memfree)); if (free_surface(filp, dev_priv, memfree.address)) - return DRM_ERR(EINVAL); + return -EINVAL; else return 0; } @@ -2134,7 +2134,7 @@ static int radeon_cp_clear(DRM_IOCTL_ARGS) if (DRM_COPY_FROM_USER(&depth_boxes, clear.depth_boxes, sarea_priv->nbox * sizeof(depth_boxes[0]))) - return DRM_ERR(EFAULT); + return -EFAULT; radeon_cp_dispatch_clear(dev, &clear, depth_boxes); @@ -2227,7 +2227,7 @@ static int radeon_cp_vertex(DRM_IOCTL_ARGS) if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); - return DRM_ERR(EINVAL); + return -EINVAL; } sarea_priv = dev_priv->sarea_priv; @@ -2243,11 +2243,11 @@ static int radeon_cp_vertex(DRM_IOCTL_ARGS) if (vertex.idx < 0 || vertex.idx >= dma->buf_count) { DRM_ERROR("buffer index %d (of %d max)\n", vertex.idx, dma->buf_count - 1); - return DRM_ERR(EINVAL); + return -EINVAL; } if (vertex.prim < 0 || vertex.prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) { DRM_ERROR("buffer prim %d\n", vertex.prim); - return DRM_ERR(EINVAL); + return -EINVAL; } RING_SPACE_TEST_WITH_RETURN(dev_priv); @@ -2258,11 +2258,11 @@ static int radeon_cp_vertex(DRM_IOCTL_ARGS) if (buf->filp != filp) { DRM_ERROR("process %d using buffer owned by %p\n", DRM_CURRENTPID, buf->filp); - return DRM_ERR(EINVAL); + return -EINVAL; } if (buf->pending) { DRM_ERROR("sending pending buffer %d\n", vertex.idx); - return DRM_ERR(EINVAL); + return -EINVAL; } /* Build up a prim_t record: @@ -2276,7 +2276,7 @@ static int radeon_cp_vertex(DRM_IOCTL_ARGS) sarea_priv->tex_state, sarea_priv->dirty)) { DRM_ERROR("radeon_emit_state failed\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES | @@ -2318,7 +2318,7 @@ static int radeon_cp_indices(DRM_IOCTL_ARGS) if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); - return DRM_ERR(EINVAL); + return -EINVAL; } sarea_priv = dev_priv->sarea_priv; @@ -2333,11 +2333,11 @@ static int radeon_cp_indices(DRM_IOCTL_ARGS) if (elts.idx < 0 || elts.idx >= dma->buf_count) { DRM_ERROR("buffer index %d (of %d max)\n", elts.idx, dma->buf_count - 1); - return DRM_ERR(EINVAL); + return -EINVAL; } if (elts.prim < 0 || elts.prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) { DRM_ERROR("buffer prim %d\n", elts.prim); - return DRM_ERR(EINVAL); + return -EINVAL; } RING_SPACE_TEST_WITH_RETURN(dev_priv); @@ -2348,11 +2348,11 @@ static int radeon_cp_indices(DRM_IOCTL_ARGS) if (buf->filp != filp) { DRM_ERROR("process %d using buffer owned by %p\n", DRM_CURRENTPID, buf->filp); - return DRM_ERR(EINVAL); + return -EINVAL; } if (buf->pending) { 
DRM_ERROR("sending pending buffer %d\n", elts.idx); - return DRM_ERR(EINVAL); + return -EINVAL; } count = (elts.end - elts.start) / sizeof(u16); @@ -2360,11 +2360,11 @@ static int radeon_cp_indices(DRM_IOCTL_ARGS) if (elts.start & 0x7) { DRM_ERROR("misaligned buffer 0x%x\n", elts.start); - return DRM_ERR(EINVAL); + return -EINVAL; } if (elts.start < buf->used) { DRM_ERROR("no header 0x%x - 0x%x\n", elts.start, buf->used); - return DRM_ERR(EINVAL); + return -EINVAL; } buf->used = elts.end; @@ -2375,7 +2375,7 @@ static int radeon_cp_indices(DRM_IOCTL_ARGS) sarea_priv->tex_state, sarea_priv->dirty)) { DRM_ERROR("radeon_emit_state failed\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES | @@ -2417,13 +2417,13 @@ static int radeon_cp_texture(DRM_IOCTL_ARGS) if (tex.image == NULL) { DRM_ERROR("null texture image!\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } if (DRM_COPY_FROM_USER(&image, (drm_radeon_tex_image_t __user *) tex.image, sizeof(image))) - return DRM_ERR(EFAULT); + return -EFAULT; RING_SPACE_TEST_WITH_RETURN(dev_priv); VB_AGE_TEST_WITH_RETURN(dev_priv); @@ -2447,7 +2447,7 @@ static int radeon_cp_stipple(DRM_IOCTL_ARGS) sizeof(stipple)); if (DRM_COPY_FROM_USER(&mask, stipple.mask, 32 * sizeof(u32))) - return DRM_ERR(EFAULT); + return -EFAULT; RING_SPACE_TEST_WITH_RETURN(dev_priv); @@ -2470,7 +2470,7 @@ static int radeon_cp_indirect(DRM_IOCTL_ARGS) if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); - return DRM_ERR(EINVAL); + return -EINVAL; } DRM_COPY_FROM_USER_IOCTL(indirect, @@ -2483,7 +2483,7 @@ static int radeon_cp_indirect(DRM_IOCTL_ARGS) if (indirect.idx < 0 || indirect.idx >= dma->buf_count) { DRM_ERROR("buffer index %d (of %d max)\n", indirect.idx, dma->buf_count - 1); - return DRM_ERR(EINVAL); + return -EINVAL; } buf = dma->buflist[indirect.idx]; @@ -2491,17 +2491,17 @@ static int radeon_cp_indirect(DRM_IOCTL_ARGS) if (buf->filp != filp) { DRM_ERROR("process %d using buffer owned by %p\n", DRM_CURRENTPID, buf->filp); - return DRM_ERR(EINVAL); + return -EINVAL; } if (buf->pending) { DRM_ERROR("sending pending buffer %d\n", indirect.idx); - return DRM_ERR(EINVAL); + return -EINVAL; } if (indirect.start < buf->used) { DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n", indirect.start, buf->used); - return DRM_ERR(EINVAL); + return -EINVAL; } RING_SPACE_TEST_WITH_RETURN(dev_priv); @@ -2547,7 +2547,7 @@ static int radeon_cp_vertex2(DRM_IOCTL_ARGS) if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); - return DRM_ERR(EINVAL); + return -EINVAL; } sarea_priv = dev_priv->sarea_priv; @@ -2563,7 +2563,7 @@ static int radeon_cp_vertex2(DRM_IOCTL_ARGS) if (vertex.idx < 0 || vertex.idx >= dma->buf_count) { DRM_ERROR("buffer index %d (of %d max)\n", vertex.idx, dma->buf_count - 1); - return DRM_ERR(EINVAL); + return -EINVAL; } RING_SPACE_TEST_WITH_RETURN(dev_priv); @@ -2574,23 +2574,23 @@ static int radeon_cp_vertex2(DRM_IOCTL_ARGS) if (buf->filp != filp) { DRM_ERROR("process %d using buffer owned by %p\n", DRM_CURRENTPID, buf->filp); - return DRM_ERR(EINVAL); + return -EINVAL; } if (buf->pending) { DRM_ERROR("sending pending buffer %d\n", vertex.idx); - return DRM_ERR(EINVAL); + return -EINVAL; } if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS) - return DRM_ERR(EINVAL); + return -EINVAL; for (laststate = 0xff, i = 0; i < vertex.nr_prims; i++) { drm_radeon_prim_t prim; drm_radeon_tcl_prim_t tclprim; if (DRM_COPY_FROM_USER(&prim, &vertex.prim[i], sizeof(prim))) - return 
DRM_ERR(EFAULT); + return -EFAULT; if (prim.stateidx != laststate) { drm_radeon_state_t state; @@ -2598,11 +2598,11 @@ static int radeon_cp_vertex2(DRM_IOCTL_ARGS) if (DRM_COPY_FROM_USER(&state, &vertex.state[prim.stateidx], sizeof(state))) - return DRM_ERR(EFAULT); + return -EFAULT; if (radeon_emit_state2(dev_priv, filp_priv, &state)) { DRM_ERROR("radeon_emit_state2 failed\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } laststate = prim.stateidx; @@ -2648,19 +2648,19 @@ static int radeon_emit_packets(drm_radeon_private_t * dev_priv, RING_LOCALS; if (id >= RADEON_MAX_STATE_PACKETS) - return DRM_ERR(EINVAL); + return -EINVAL; sz = packet[id].len; reg = packet[id].start; if (sz * sizeof(int) > cmdbuf->bufsz) { DRM_ERROR("Packet size provided larger than data provided\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } if (radeon_check_and_fixup_packets(dev_priv, filp_priv, id, data)) { DRM_ERROR("Packet verification failed\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } BEGIN_RING(sz + 1); @@ -2748,7 +2748,7 @@ static __inline__ int radeon_emit_veclinear(drm_radeon_private_t *dev_priv, if (!sz) return 0; if (sz * 4 > cmdbuf->bufsz) - return DRM_ERR(EINVAL); + return -EINVAL; BEGIN_RING(5 + sz); OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0); @@ -2816,7 +2816,7 @@ static int radeon_emit_packet3_cliprect(struct drm_device *dev, do { if (i < cmdbuf->nbox) { if (DRM_COPY_FROM_USER(&box, &boxes[i], sizeof(box))) - return DRM_ERR(EFAULT); + return -EFAULT; /* FIXME The second and subsequent times round * this loop, send a WAIT_UNTIL_3D_IDLE before * calling emit_clip_rect(). This fixes a @@ -2874,7 +2874,7 @@ static int radeon_emit_wait(struct drm_device * dev, int flags) ADVANCE_RING(); break; default: - return DRM_ERR(EINVAL); + return -EINVAL; } return 0; @@ -2897,7 +2897,7 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS) if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); - return DRM_ERR(EINVAL); + return -EINVAL; } DRM_GET_PRIV_WITH_RETURN(filp_priv, filp); @@ -2910,7 +2910,7 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS) VB_AGE_TEST_WITH_RETURN(dev_priv); if (cmdbuf.bufsz > 64 * 1024 || cmdbuf.bufsz < 0) { - return DRM_ERR(EINVAL); + return -EINVAL; } /* Allocate an in-kernel area and copy in the cmdbuf. 
Do this to avoid @@ -2921,11 +2921,11 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS) if (orig_bufsz != 0) { kbuf = drm_alloc(cmdbuf.bufsz, DRM_MEM_DRIVER); if (kbuf == NULL) - return DRM_ERR(ENOMEM); + return -ENOMEM; if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf.buf, cmdbuf.bufsz)) { drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER); - return DRM_ERR(EFAULT); + return -EFAULT; } cmdbuf.buf = kbuf; } @@ -3052,7 +3052,7 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS) err: if (orig_bufsz != 0) drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER); - return DRM_ERR(EINVAL); + return -EINVAL; } static int radeon_cp_getparam(DRM_IOCTL_ARGS) @@ -3064,7 +3064,7 @@ static int radeon_cp_getparam(DRM_IOCTL_ARGS) if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); - return DRM_ERR(EINVAL); + return -EINVAL; } DRM_COPY_FROM_USER_IOCTL(param, (drm_radeon_getparam_t __user *) data, @@ -3119,7 +3119,7 @@ static int radeon_cp_getparam(DRM_IOCTL_ARGS) break; case RADEON_PARAM_SCRATCH_OFFSET: if (!dev_priv->writeback_works) - return DRM_ERR(EINVAL); + return -EINVAL; value = RADEON_SCRATCH_REG_OFFSET; break; @@ -3136,12 +3136,12 @@ static int radeon_cp_getparam(DRM_IOCTL_ARGS) break; default: DRM_DEBUG( "Invalid parameter %d\n", param.param ); - return DRM_ERR(EINVAL); + return -EINVAL; } if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) { DRM_ERROR("copy_to_user\n"); - return DRM_ERR(EFAULT); + return -EFAULT; } return 0; @@ -3157,7 +3157,7 @@ static int radeon_cp_setparam(DRM_IOCTL_ARGS) if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); - return DRM_ERR(EINVAL); + return -EINVAL; } DRM_GET_PRIV_WITH_RETURN(filp_priv, filp); @@ -3200,7 +3200,7 @@ static int radeon_cp_setparam(DRM_IOCTL_ARGS) break; default: DRM_DEBUG("Invalid parameter %d\n", sp.param); - return DRM_ERR(EINVAL); + return -EINVAL; } return 0; diff --git a/shared-core/savage_bci.c b/shared-core/savage_bci.c index a3fd8994..1835d758 100644 --- a/shared-core/savage_bci.c +++ b/shared-core/savage_bci.c @@ -60,7 +60,7 @@ savage_bci_wait_fifo_shadow(drm_savage_private_t *dev_priv, unsigned int n) DRM_ERROR("failed!\n"); DRM_INFO(" status=0x%08x, threshold=0x%08x\n", status, threshold); #endif - return DRM_ERR(EBUSY); + return -EBUSY; } static int @@ -81,7 +81,7 @@ savage_bci_wait_fifo_s3d(drm_savage_private_t *dev_priv, unsigned int n) DRM_ERROR("failed!\n"); DRM_INFO(" status=0x%08x\n", status); #endif - return DRM_ERR(EBUSY); + return -EBUSY; } static int @@ -102,7 +102,7 @@ savage_bci_wait_fifo_s4(drm_savage_private_t *dev_priv, unsigned int n) DRM_ERROR("failed!\n"); DRM_INFO(" status=0x%08x\n", status); #endif - return DRM_ERR(EBUSY); + return -EBUSY; } /* @@ -136,7 +136,7 @@ savage_bci_wait_event_shadow(drm_savage_private_t *dev_priv, uint16_t e) DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e); #endif - return DRM_ERR(EBUSY); + return -EBUSY; } static int @@ -158,7 +158,7 @@ savage_bci_wait_event_reg(drm_savage_private_t *dev_priv, uint16_t e) DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e); #endif - return DRM_ERR(EBUSY); + return -EBUSY; } uint16_t savage_bci_emit_event(drm_savage_private_t *dev_priv, @@ -301,7 +301,7 @@ static int savage_dma_init(drm_savage_private_t *dev_priv) dev_priv->dma_pages = drm_alloc(sizeof(drm_savage_dma_page_t) * dev_priv->nr_dma_pages, DRM_MEM_DRIVER); if (dev_priv->dma_pages == NULL) - return DRM_ERR(ENOMEM); + return -ENOMEM; for (i = 0; i < dev_priv->nr_dma_pages; ++i) { SET_AGE(&dev_priv->dma_pages[i].age, 0, 0); @@ -541,7 +541,7 @@ int 
savage_driver_load(struct drm_device *dev, unsigned long chipset) dev_priv = drm_alloc(sizeof(drm_savage_private_t), DRM_MEM_DRIVER); if (dev_priv == NULL) - return DRM_ERR(ENOMEM); + return -ENOMEM; memset(dev_priv, 0, sizeof(drm_savage_private_t)); dev->dev_private = (void *)dev_priv; @@ -681,16 +681,16 @@ static int savage_do_init_bci(struct drm_device *dev, drm_savage_init_t *init) if (init->fb_bpp != 16 && init->fb_bpp != 32) { DRM_ERROR("invalid frame buffer bpp %d!\n", init->fb_bpp); - return DRM_ERR(EINVAL); + return -EINVAL; } if (init->depth_bpp != 16 && init->depth_bpp != 32) { DRM_ERROR("invalid depth buffer bpp %d!\n", init->fb_bpp); - return DRM_ERR(EINVAL); + return -EINVAL; } if (init->dma_type != SAVAGE_DMA_AGP && init->dma_type != SAVAGE_DMA_PCI) { DRM_ERROR("invalid dma memory type %d!\n", init->dma_type); - return DRM_ERR(EINVAL); + return -EINVAL; } dev_priv->cob_size = init->cob_size; @@ -714,14 +714,14 @@ static int savage_do_init_bci(struct drm_device *dev, drm_savage_init_t *init) if (!dev_priv->sarea) { DRM_ERROR("could not find sarea!\n"); savage_do_cleanup_bci(dev); - return DRM_ERR(EINVAL); + return -EINVAL; } if (init->status_offset != 0) { dev_priv->status = drm_core_findmap(dev, init->status_offset); if (!dev_priv->status) { DRM_ERROR("could not find shadow status region!\n"); savage_do_cleanup_bci(dev); - return DRM_ERR(EINVAL); + return -EINVAL; } } else { dev_priv->status = NULL; @@ -733,13 +733,13 @@ static int savage_do_init_bci(struct drm_device *dev, drm_savage_init_t *init) if (!dev->agp_buffer_map) { DRM_ERROR("could not find DMA buffer region!\n"); savage_do_cleanup_bci(dev); - return DRM_ERR(EINVAL); + return -EINVAL; } drm_core_ioremap(dev->agp_buffer_map, dev); if (!dev->agp_buffer_map) { DRM_ERROR("failed to ioremap DMA buffer region!\n"); savage_do_cleanup_bci(dev); - return DRM_ERR(ENOMEM); + return -ENOMEM; } } if (init->agp_textures_offset) { @@ -748,7 +748,7 @@ static int savage_do_init_bci(struct drm_device *dev, drm_savage_init_t *init) if (!dev_priv->agp_textures) { DRM_ERROR("could not find agp texture region!\n"); savage_do_cleanup_bci(dev); - return DRM_ERR(EINVAL); + return -EINVAL; } } else { dev_priv->agp_textures = NULL; @@ -759,39 +759,39 @@ static int savage_do_init_bci(struct drm_device *dev, drm_savage_init_t *init) DRM_ERROR("command DMA not supported on " "Savage3D/MX/IX.\n"); savage_do_cleanup_bci(dev); - return DRM_ERR(EINVAL); + return -EINVAL; } if (dev->dma && dev->dma->buflist) { DRM_ERROR("command and vertex DMA not supported " "at the same time.\n"); savage_do_cleanup_bci(dev); - return DRM_ERR(EINVAL); + return -EINVAL; } dev_priv->cmd_dma = drm_core_findmap(dev, init->cmd_dma_offset); if (!dev_priv->cmd_dma) { DRM_ERROR("could not find command DMA region!\n"); savage_do_cleanup_bci(dev); - return DRM_ERR(EINVAL); + return -EINVAL; } if (dev_priv->dma_type == SAVAGE_DMA_AGP) { if (dev_priv->cmd_dma->type != _DRM_AGP) { DRM_ERROR("AGP command DMA region is not a " "_DRM_AGP map!\n"); savage_do_cleanup_bci(dev); - return DRM_ERR(EINVAL); + return -EINVAL; } drm_core_ioremap(dev_priv->cmd_dma, dev); if (!dev_priv->cmd_dma->handle) { DRM_ERROR("failed to ioremap command " "DMA region!\n"); savage_do_cleanup_bci(dev); - return DRM_ERR(ENOMEM); + return -ENOMEM; } } else if (dev_priv->cmd_dma->type != _DRM_CONSISTENT) { DRM_ERROR("PCI command DMA region is not a " "_DRM_CONSISTENT map!\n"); savage_do_cleanup_bci(dev); - return DRM_ERR(EINVAL); + return -EINVAL; } } else { dev_priv->cmd_dma = NULL; @@ -808,7 +808,7 @@ 
static int savage_do_init_bci(struct drm_device *dev, drm_savage_init_t *init) if (!dev_priv->fake_dma.handle) { DRM_ERROR("could not allocate faked DMA buffer!\n"); savage_do_cleanup_bci(dev); - return DRM_ERR(ENOMEM); + return -ENOMEM; } dev_priv->cmd_dma = &dev_priv->fake_dma; dev_priv->dma_flush = savage_fake_dma_flush; @@ -885,13 +885,13 @@ static int savage_do_init_bci(struct drm_device *dev, drm_savage_init_t *init) if (savage_freelist_init(dev) < 0) { DRM_ERROR("could not initialize freelist\n"); savage_do_cleanup_bci(dev); - return DRM_ERR(ENOMEM); + return -ENOMEM; } if (savage_dma_init(dev_priv) < 0) { DRM_ERROR("could not initialize command DMA\n"); savage_do_cleanup_bci(dev); - return DRM_ERR(ENOMEM); + return -ENOMEM; } return 0; @@ -944,7 +944,7 @@ static int savage_bci_init(DRM_IOCTL_ARGS) return savage_do_cleanup_bci(dev); } - return DRM_ERR(EINVAL); + return -EINVAL; } static int savage_bci_event_emit(DRM_IOCTL_ARGS) @@ -1014,16 +1014,16 @@ static int savage_bci_get_buffers(DRMFILE filp, struct drm_device *dev, struct d for (i = d->granted_count; i < d->request_count; i++) { buf = savage_freelist_get(dev); if (!buf) - return DRM_ERR(EAGAIN); + return -EAGAIN; buf->filp = filp; if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx, sizeof(buf->idx))) - return DRM_ERR(EFAULT); + return -EFAULT; if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total, sizeof(buf->total))) - return DRM_ERR(EFAULT); + return -EFAULT; d->granted_count++; } @@ -1046,7 +1046,7 @@ int savage_bci_buffers(DRM_IOCTL_ARGS) if (d.send_count != 0) { DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", DRM_CURRENTPID, d.send_count); - return DRM_ERR(EINVAL); + return -EINVAL; } /* We'll send you buffers. @@ -1054,7 +1054,7 @@ int savage_bci_buffers(DRM_IOCTL_ARGS) if (d.request_count < 0 || d.request_count > dma->buf_count) { DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", DRM_CURRENTPID, d.request_count, dma->buf_count); - return DRM_ERR(EINVAL); + return -EINVAL; } d.granted_count = 0; diff --git a/shared-core/savage_state.c b/shared-core/savage_state.c index 290796ee..753fe7d3 100644 --- a/shared-core/savage_state.c +++ b/shared-core/savage_state.c @@ -83,7 +83,7 @@ static int savage_verify_texaddr(drm_savage_private_t *dev_priv, int unit, { if ((addr & 6) != 2) { /* reserved bits */ DRM_ERROR("bad texAddr%d %08x (reserved bits)\n", unit, addr); - return DRM_ERR(EINVAL); + return -EINVAL; } if (!(addr & 1)) { /* local */ addr &= ~7; @@ -92,13 +92,13 @@ static int savage_verify_texaddr(drm_savage_private_t *dev_priv, int unit, DRM_ERROR ("bad texAddr%d %08x (local addr out of range)\n", unit, addr); - return DRM_ERR(EINVAL); + return -EINVAL; } } else { /* AGP */ if (!dev_priv->agp_textures) { DRM_ERROR("bad texAddr%d %08x (AGP not available)\n", unit, addr); - return DRM_ERR(EINVAL); + return -EINVAL; } addr &= ~7; if (addr < dev_priv->agp_textures->offset || @@ -107,7 +107,7 @@ static int savage_verify_texaddr(drm_savage_private_t *dev_priv, int unit, DRM_ERROR ("bad texAddr%d %08x (AGP addr out of range)\n", unit, addr); - return DRM_ERR(EINVAL); + return -EINVAL; } } return 0; @@ -132,7 +132,7 @@ static int savage_verify_state_s3d(drm_savage_private_t *dev_priv, start+count-1 > SAVAGE_DESTTEXRWWATERMARK_S3D) { DRM_ERROR("invalid register range (0x%04x-0x%04x)\n", start, start+count-1); - return DRM_ERR(EINVAL); + return -EINVAL; } SAVE_STATE_MASK(SAVAGE_SCSTART_S3D, s3d.new_scstart, @@ -164,7 +164,7 @@ static int savage_verify_state_s4(drm_savage_private_t *dev_priv, 
start+count-1 > SAVAGE_TEXBLENDCOLOR_S4) { DRM_ERROR("invalid register range (0x%04x-0x%04x)\n", start, start+count-1); - return DRM_ERR(EINVAL); + return -EINVAL; } SAVE_STATE_MASK(SAVAGE_DRAWCTRL0_S4, s4.new_drawctrl0, @@ -287,7 +287,7 @@ static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv, if (!dmabuf) { DRM_ERROR("called without dma buffers!\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } if (!n) @@ -301,7 +301,7 @@ static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv, if (n % 3 != 0) { DRM_ERROR("wrong number of vertices %u in TRILIST\n", n); - return DRM_ERR(EINVAL); + return -EINVAL; } break; case SAVAGE_PRIM_TRISTRIP: @@ -310,18 +310,18 @@ static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv, DRM_ERROR ("wrong number of vertices %u in TRIFAN/STRIP\n", n); - return DRM_ERR(EINVAL); + return -EINVAL; } break; default: DRM_ERROR("invalid primitive type %u\n", prim); - return DRM_ERR(EINVAL); + return -EINVAL; } if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { if (skip != 0) { DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip); - return DRM_ERR(EINVAL); + return -EINVAL; } } else { unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) - @@ -329,18 +329,18 @@ static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv, (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1); if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) { DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip); - return DRM_ERR(EINVAL); + return -EINVAL; } if (reorder) { DRM_ERROR("TRILIST_201 used on Savage4 hardware\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } } if (start + n > dmabuf->total/32) { DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n", start, start + n - 1, dmabuf->total/32); - return DRM_ERR(EINVAL); + return -EINVAL; } /* Vertex DMA doesn't work with command DMA at the same time, @@ -438,7 +438,7 @@ static int savage_dispatch_vb_prim(drm_savage_private_t *dev_priv, if (n % 3 != 0) { DRM_ERROR("wrong number of vertices %u in TRILIST\n", n); - return DRM_ERR(EINVAL); + return -EINVAL; } break; case SAVAGE_PRIM_TRISTRIP: @@ -447,24 +447,24 @@ static int savage_dispatch_vb_prim(drm_savage_private_t *dev_priv, DRM_ERROR ("wrong number of vertices %u in TRIFAN/STRIP\n", n); - return DRM_ERR(EINVAL); + return -EINVAL; } break; default: DRM_ERROR("invalid primitive type %u\n", prim); - return DRM_ERR(EINVAL); + return -EINVAL; } if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { if (skip > SAVAGE_SKIP_ALL_S3D) { DRM_ERROR("invalid skip flags 0x%04x\n", skip); - return DRM_ERR(EINVAL); + return -EINVAL; } vtx_size = 8; /* full vertex */ } else { if (skip > SAVAGE_SKIP_ALL_S4) { DRM_ERROR("invalid skip flags 0x%04x\n", skip); - return DRM_ERR(EINVAL); + return -EINVAL; } vtx_size = 10; /* full vertex */ } @@ -476,13 +476,13 @@ static int savage_dispatch_vb_prim(drm_savage_private_t *dev_priv, if (vtx_size > vb_stride) { DRM_ERROR("vertex size greater than vb stride (%u > %u)\n", vtx_size, vb_stride); - return DRM_ERR(EINVAL); + return -EINVAL; } if (start + n > vb_size / (vb_stride*4)) { DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n", start, start + n - 1, vb_size / (vb_stride*4)); - return DRM_ERR(EINVAL); + return -EINVAL; } prim <<= 25; @@ -545,7 +545,7 @@ static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv, if (!dmabuf) { DRM_ERROR("called without dma buffers!\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } if (!n) @@ -558,7 +558,7 @@ static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv, case SAVAGE_PRIM_TRILIST: if (n 
% 3 != 0) { DRM_ERROR("wrong number of indices %u in TRILIST\n", n); - return DRM_ERR(EINVAL); + return -EINVAL; } break; case SAVAGE_PRIM_TRISTRIP: @@ -566,18 +566,18 @@ static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv, if (n < 3) { DRM_ERROR ("wrong number of indices %u in TRIFAN/STRIP\n", n); - return DRM_ERR(EINVAL); + return -EINVAL; } break; default: DRM_ERROR("invalid primitive type %u\n", prim); - return DRM_ERR(EINVAL); + return -EINVAL; } if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { if (skip != 0) { DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip); - return DRM_ERR(EINVAL); + return -EINVAL; } } else { unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) - @@ -585,11 +585,11 @@ static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv, (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1); if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) { DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip); - return DRM_ERR(EINVAL); + return -EINVAL; } if (reorder) { DRM_ERROR("TRILIST_201 used on Savage4 hardware\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } } @@ -626,7 +626,7 @@ static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv, if (idx[i] > dmabuf->total/32) { DRM_ERROR("idx[%u]=%u out of range (0-%u)\n", i, idx[i], dmabuf->total/32); - return DRM_ERR(EINVAL); + return -EINVAL; } } @@ -696,7 +696,7 @@ static int savage_dispatch_vb_idx(drm_savage_private_t *dev_priv, case SAVAGE_PRIM_TRILIST: if (n % 3 != 0) { DRM_ERROR("wrong number of indices %u in TRILIST\n", n); - return DRM_ERR(EINVAL); + return -EINVAL; } break; case SAVAGE_PRIM_TRISTRIP: @@ -704,24 +704,24 @@ static int savage_dispatch_vb_idx(drm_savage_private_t *dev_priv, if (n < 3) { DRM_ERROR ("wrong number of indices %u in TRIFAN/STRIP\n", n); - return DRM_ERR(EINVAL); + return -EINVAL; } break; default: DRM_ERROR("invalid primitive type %u\n", prim); - return DRM_ERR(EINVAL); + return -EINVAL; } if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { if (skip > SAVAGE_SKIP_ALL_S3D) { DRM_ERROR("invalid skip flags 0x%04x\n", skip); - return DRM_ERR(EINVAL); + return -EINVAL; } vtx_size = 8; /* full vertex */ } else { if (skip > SAVAGE_SKIP_ALL_S4) { DRM_ERROR("invalid skip flags 0x%04x\n", skip); - return DRM_ERR(EINVAL); + return -EINVAL; } vtx_size = 10; /* full vertex */ } @@ -733,7 +733,7 @@ static int savage_dispatch_vb_idx(drm_savage_private_t *dev_priv, if (vtx_size > vb_stride) { DRM_ERROR("vertex size greater than vb stride (%u > %u)\n", vtx_size, vb_stride); - return DRM_ERR(EINVAL); + return -EINVAL; } prim <<= 25; @@ -746,7 +746,7 @@ static int savage_dispatch_vb_idx(drm_savage_private_t *dev_priv, if (idx[i] > vb_size / (vb_stride*4)) { DRM_ERROR("idx[%u]=%u out of range (0-%u)\n", i, idx[i], vb_size / (vb_stride*4)); - return DRM_ERR(EINVAL); + return -EINVAL; } } @@ -941,7 +941,7 @@ static int savage_dispatch_draw(drm_savage_private_t *dev_priv, DRM_ERROR("IMPLEMENTATION ERROR: " "non-drawing-command %d\n", cmd_header.cmd.cmd); - return DRM_ERR(EINVAL); + return -EINVAL; } if (ret != 0) @@ -978,7 +978,7 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS) DRM_ERROR ("vertex buffer index %u out of range (0-%u)\n", cmdbuf.dma_idx, dma->buf_count-1); - return DRM_ERR(EINVAL); + return -EINVAL; } dmabuf = dma->buflist[cmdbuf.dma_idx]; } else { @@ -993,26 +993,26 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS) if (cmdbuf.size) { kcmd_addr = drm_alloc(cmdbuf.size * 8, DRM_MEM_DRIVER); if (kcmd_addr == NULL) - return DRM_ERR(ENOMEM); + return -ENOMEM; if (DRM_COPY_FROM_USER(kcmd_addr, cmdbuf.cmd_addr, 
cmdbuf.size * 8)) { drm_free(kcmd_addr, cmdbuf.size * 8, DRM_MEM_DRIVER); - return DRM_ERR(EFAULT); + return -EFAULT; } cmdbuf.cmd_addr = kcmd_addr; } if (cmdbuf.vb_size) { kvb_addr = drm_alloc(cmdbuf.vb_size, DRM_MEM_DRIVER); if (kvb_addr == NULL) { - ret = DRM_ERR(ENOMEM); + ret = -ENOMEM; goto done; } if (DRM_COPY_FROM_USER(kvb_addr, cmdbuf.vb_addr, cmdbuf.vb_size)) { - ret = DRM_ERR(EFAULT); + ret = -EFAULT; goto done; } cmdbuf.vb_addr = kvb_addr; @@ -1021,13 +1021,13 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS) kbox_addr = drm_alloc(cmdbuf.nbox * sizeof(struct drm_clip_rect), DRM_MEM_DRIVER); if (kbox_addr == NULL) { - ret = DRM_ERR(ENOMEM); + ret = -ENOMEM; goto done; } if (DRM_COPY_FROM_USER(kbox_addr, cmdbuf.box_addr, cmdbuf.nbox * sizeof(struct drm_clip_rect))) { - ret = DRM_ERR(EFAULT); + ret = -EFAULT; goto done; } cmdbuf.box_addr = kbox_addr; @@ -1060,7 +1060,7 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS) DRM_ERROR("indexed drawing command extends " "beyond end of command buffer\n"); DMA_FLUSH(); - return DRM_ERR(EINVAL); + return -EINVAL; } /* fall through */ case SAVAGE_CMD_DMA_PRIM: @@ -1093,7 +1093,7 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS) DRM_ERROR("command SAVAGE_CMD_STATE extends " "beyond end of command buffer\n"); DMA_FLUSH(); - ret = DRM_ERR(EINVAL); + ret = -EINVAL; goto done; } ret = savage_dispatch_state(dev_priv, &cmd_header, @@ -1106,7 +1106,7 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS) DRM_ERROR("command SAVAGE_CMD_CLEAR extends " "beyond end of command buffer\n"); DMA_FLUSH(); - ret = DRM_ERR(EINVAL); + ret = -EINVAL; goto done; } ret = savage_dispatch_clear(dev_priv, &cmd_header, @@ -1122,7 +1122,7 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS) default: DRM_ERROR("invalid command 0x%x\n", cmd_header.cmd.cmd); DMA_FLUSH(); - ret = DRM_ERR(EINVAL); + ret = -EINVAL; goto done; } diff --git a/shared-core/sis_mm.c b/shared-core/sis_mm.c index 6d074d6f..b62e3e27 100644 --- a/shared-core/sis_mm.c +++ b/shared-core/sis_mm.c @@ -104,7 +104,7 @@ static int sis_fb_alloc(DRM_IOCTL_ARGS) if (!add_alloc_set(fb.context, VIDEO_TYPE, fb.free)) { DRM_DEBUG("adding to allocation set fails\n"); sis_free(req.offset); - retval = DRM_ERR(EINVAL); + retval = -EINVAL; } } else { fb.offset = 0; @@ -127,10 +127,10 @@ static int sis_fb_free(DRM_IOCTL_ARGS) DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_mem_t __user *) data, sizeof(fb)); if (!fb.free) - return DRM_ERR(EINVAL); + return -EINVAL; if (!del_alloc_set(fb.context, VIDEO_TYPE, fb.free)) - retval = DRM_ERR(EINVAL); + retval = -EINVAL; sis_free(fb.free); DRM_DEBUG("free fb, offset = 0x%lx\n", fb.free); @@ -167,7 +167,7 @@ static int sis_fb_init(DRM_IOCTL_ARGS) } if (dev_priv->FBHeap != NULL) - return DRM_ERR(EINVAL); + return -EINVAL; dev_priv->FBHeap = mmInit(fb.offset, fb.size); @@ -186,7 +186,7 @@ static int sis_fb_alloc(DRM_IOCTL_ARGS) int retval = 0; if (dev_priv == NULL || dev_priv->FBHeap == NULL) - return DRM_ERR(EINVAL); + return -EINVAL; DRM_COPY_FROM_USER_IOCTL(fb, argp, sizeof(fb)); @@ -198,7 +198,7 @@ static int sis_fb_alloc(DRM_IOCTL_ARGS) if (!add_alloc_set(fb.context, VIDEO_TYPE, fb.free)) { DRM_DEBUG("adding to allocation set fails\n"); mmFreeMem((PMemBlock) fb.free); - retval = DRM_ERR(EINVAL); + retval = -EINVAL; } } else { fb.offset = 0; @@ -220,15 +220,15 @@ static int sis_fb_free(DRM_IOCTL_ARGS) drm_sis_mem_t fb; if (dev_priv == NULL || dev_priv->FBHeap == NULL) - return DRM_ERR(EINVAL); + return -EINVAL; DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_mem_t __user *) data, sizeof(fb)); if (!mmBlockInHeap(dev_priv->FBHeap, (PMemBlock) 
fb.free)) - return DRM_ERR(EINVAL); + return -EINVAL; if (!del_alloc_set(fb.context, VIDEO_TYPE, fb.free)) - return DRM_ERR(EINVAL); + return -EINVAL; mmFreeMem((PMemBlock) fb.free); DRM_DEBUG("free fb, free = 0x%lx\n", fb.free); @@ -255,7 +255,7 @@ static int sis_ioctl_agp_init(DRM_IOCTL_ARGS) } if (dev_priv->AGPHeap != NULL) - return DRM_ERR(EINVAL); + return -EINVAL; DRM_COPY_FROM_USER_IOCTL(agp, (drm_sis_agp_t __user *) data, sizeof(agp)); @@ -277,7 +277,7 @@ static int sis_ioctl_agp_alloc(DRM_IOCTL_ARGS) int retval = 0; if (dev_priv == NULL || dev_priv->AGPHeap == NULL) - return DRM_ERR(EINVAL); + return -EINVAL; DRM_COPY_FROM_USER_IOCTL(agp, argp, sizeof(agp)); @@ -311,17 +311,17 @@ static int sis_ioctl_agp_free(DRM_IOCTL_ARGS) drm_sis_mem_t agp; if (dev_priv == NULL || dev_priv->AGPHeap == NULL) - return DRM_ERR(EINVAL); + return -EINVAL; DRM_COPY_FROM_USER_IOCTL(agp, (drm_sis_mem_t __user *) data, sizeof(agp)); if (!mmBlockInHeap(dev_priv->AGPHeap, (PMemBlock) agp.free)) - return DRM_ERR(EINVAL); + return -EINVAL; mmFreeMem((PMemBlock) agp.free); if (!del_alloc_set(agp.context, AGP_TYPE, agp.free)) - return DRM_ERR(EINVAL); + return -EINVAL; DRM_DEBUG("free agp, free = 0x%lx\n", agp.free); diff --git a/shared-core/via_dma.c b/shared-core/via_dma.c index 48f46938..895c78bf 100644 --- a/shared-core/via_dma.c +++ b/shared-core/via_dma.c @@ -163,24 +163,24 @@ static int via_initialize(struct drm_device * dev, { if (!dev_priv || !dev_priv->mmio) { DRM_ERROR("via_dma_init called before via_map_init\n"); - return DRM_ERR(EFAULT); + return -EFAULT; } if (dev_priv->ring.virtual_start != NULL) { DRM_ERROR("%s called again without calling cleanup\n", __FUNCTION__); - return DRM_ERR(EFAULT); + return -EFAULT; } if (!dev->agp || !dev->agp->base) { DRM_ERROR("%s called with no agp memory available\n", __FUNCTION__); - return DRM_ERR(EFAULT); + return -EFAULT; } if (dev_priv->chipset == VIA_DX9_0) { DRM_ERROR("AGP DMA is not supported on this chip\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } dev_priv->ring.map.offset = dev->agp->base + init->offset; @@ -195,7 +195,7 @@ static int via_initialize(struct drm_device * dev, via_dma_cleanup(dev); DRM_ERROR("can not ioremap virtual address for" " ring buffer\n"); - return DRM_ERR(ENOMEM); + return -ENOMEM; } dev_priv->ring.virtual_start = dev_priv->ring.map.handle; @@ -228,22 +228,22 @@ static int via_dma_init(DRM_IOCTL_ARGS) switch (init.func) { case VIA_INIT_DMA: if (!DRM_SUSER(DRM_CURPROC)) - retcode = DRM_ERR(EPERM); + retcode = -EPERM; else retcode = via_initialize(dev, dev_priv, &init); break; case VIA_CLEANUP_DMA: if (!DRM_SUSER(DRM_CURPROC)) - retcode = DRM_ERR(EPERM); + retcode = -EPERM; else retcode = via_dma_cleanup(dev); break; case VIA_DMA_INITIALIZED: retcode = (dev_priv->ring.virtual_start != NULL) ? - 0 : DRM_ERR(EFAULT); + 0 : -EFAULT; break; default: - retcode = DRM_ERR(EINVAL); + retcode = -EINVAL; break; } @@ -263,15 +263,15 @@ static int via_dispatch_cmdbuffer(struct drm_device * dev, drm_via_cmdbuffer_t * if (dev_priv->ring.virtual_start == NULL) { DRM_ERROR("%s called without initializing AGP ring buffer.\n", __FUNCTION__); - return DRM_ERR(EFAULT); + return -EFAULT; } if (cmd->size > VIA_PCI_BUF_SIZE) { - return DRM_ERR(ENOMEM); + return -ENOMEM; } if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size)) - return DRM_ERR(EFAULT); + return -EFAULT; /* * Running this function on AGP memory is dead slow. 
Therefore @@ -287,7 +287,7 @@ static int via_dispatch_cmdbuffer(struct drm_device * dev, drm_via_cmdbuffer_t * vb = via_check_dma(dev_priv, (cmd->size < 0x100) ? 0x102 : cmd->size); if (vb == NULL) { - return DRM_ERR(EAGAIN); + return -EAGAIN; } memcpy(vb, dev_priv->pci_buf, cmd->size); @@ -311,7 +311,7 @@ int via_driver_dma_quiescent(struct drm_device * dev) drm_via_private_t *dev_priv = dev->dev_private; if (!via_wait_idle(dev_priv)) { - return DRM_ERR(EBUSY); + return -EBUSY; } return 0; } @@ -353,10 +353,10 @@ static int via_dispatch_pci_cmdbuffer(struct drm_device * dev, int ret; if (cmd->size > VIA_PCI_BUF_SIZE) { - return DRM_ERR(ENOMEM); + return -ENOMEM; } if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size)) - return DRM_ERR(EFAULT); + return -EFAULT; if ((ret = via_verify_command_stream((uint32_t *) dev_priv->pci_buf, @@ -661,7 +661,7 @@ static int via_cmdbuf_size(DRM_IOCTL_ARGS) if (dev_priv->ring.virtual_start == NULL) { DRM_ERROR("%s called without initializing AGP ring buffer.\n", __FUNCTION__); - return DRM_ERR(EFAULT); + return -EFAULT; } DRM_COPY_FROM_USER_IOCTL(d_siz, (drm_via_cmdbuf_size_t __user *) data, @@ -680,7 +680,7 @@ static int via_cmdbuf_size(DRM_IOCTL_ARGS) } if (!count) { DRM_ERROR("VIA_CMDBUF_SPACE timed out.\n"); - ret = DRM_ERR(EAGAIN); + ret = -EAGAIN; } break; case VIA_CMDBUF_LAG: @@ -692,11 +692,11 @@ static int via_cmdbuf_size(DRM_IOCTL_ARGS) } if (!count) { DRM_ERROR("VIA_CMDBUF_LAG timed out.\n"); - ret = DRM_ERR(EAGAIN); + ret = -EAGAIN; } break; default: - ret = DRM_ERR(EFAULT); + ret = -EFAULT; } d_siz.size = tmp_size; @@ -709,12 +709,12 @@ static int via_cmdbuf_size(DRM_IOCTL_ARGS) int via_dma_blit_sync( DRM_IOCTL_ARGS ) { DRM_ERROR("PCI DMA BitBlt is not implemented for this system.\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } int via_dma_blit( DRM_IOCTL_ARGS ) { DRM_ERROR("PCI DMA BitBlt is not implemented for this system.\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } #endif diff --git a/shared-core/via_irq.c b/shared-core/via_irq.c index 040df548..68ee5226 100644 --- a/shared-core/via_irq.c +++ b/shared-core/via_irq.c @@ -213,13 +213,13 @@ via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequenc if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); - return DRM_ERR(EINVAL); + return -EINVAL; } if (irq >= drm_via_irq_num ) { DRM_ERROR("%s Trying to wait on unknown irq %d\n", __FUNCTION__, irq); - return DRM_ERR(EINVAL); + return -EINVAL; } real_irq = dev_priv->irq_map[irq]; @@ -227,7 +227,7 @@ via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequenc if (real_irq < 0) { DRM_ERROR("%s Video IRQ %d not available on this hardware.\n", __FUNCTION__, irq); - return DRM_ERR(EINVAL); + return -EINVAL; } masks = dev_priv->irq_masks; @@ -352,13 +352,13 @@ int via_wait_irq(DRM_IOCTL_ARGS) int force_sequence; if (!dev->irq) - return DRM_ERR(EINVAL); + return -EINVAL; DRM_COPY_FROM_USER_IOCTL(irqwait, argp, sizeof(irqwait)); if (irqwait.request.irq >= dev_priv->num_irqs) { DRM_ERROR("%s Trying to wait on unknown irq %d\n", __FUNCTION__, irqwait.request.irq); - return DRM_ERR(EINVAL); + return -EINVAL; } cur_irq += irqwait.request.irq; @@ -370,13 +370,13 @@ int via_wait_irq(DRM_IOCTL_ARGS) case VIA_IRQ_ABSOLUTE: break; default: - return DRM_ERR(EINVAL); + return -EINVAL; } if (irqwait.request.type & VIA_IRQ_SIGNAL) { DRM_ERROR("%s Signals on Via IRQs not implemented yet.\n", __FUNCTION__); - return DRM_ERR(EINVAL); + return -EINVAL; } force_sequence = 
(irqwait.request.type & VIA_IRQ_FORCE_SEQUENCE); diff --git a/shared-core/via_map.c b/shared-core/via_map.c index 2381eaa9..1aed10f5 100644 --- a/shared-core/via_map.c +++ b/shared-core/via_map.c @@ -118,7 +118,7 @@ int via_driver_load(struct drm_device *dev, unsigned long chipset) dev_priv = drm_calloc(1, sizeof(drm_via_private_t), DRM_MEM_DRIVER); if (dev_priv == NULL) - return DRM_ERR(ENOMEM); + return -ENOMEM; dev->dev_private = (void *)dev_priv; diff --git a/shared-core/via_verifier.c b/shared-core/via_verifier.c index 038bea2f..ded5c4e1 100644 --- a/shared-core/via_verifier.c +++ b/shared-core/via_verifier.c @@ -1031,12 +1031,12 @@ via_verify_command_stream(const uint32_t * buf, unsigned int size, case state_error: default: *hc_state = saved_state; - return DRM_ERR(EINVAL); + return -EINVAL; } } if (state == state_error) { *hc_state = saved_state; - return DRM_ERR(EINVAL); + return -EINVAL; } return 0; } @@ -1087,11 +1087,11 @@ via_parse_command_stream(struct drm_device * dev, const uint32_t * buf, break; case state_error: default: - return DRM_ERR(EINVAL); + return -EINVAL; } } if (state == state_error) { - return DRM_ERR(EINVAL); + return -EINVAL; } return 0; } From 35de4868361ce1fb515cf33f27e6be4c59b07f89 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Fri, 20 Jul 2007 06:42:18 -0700 Subject: [PATCH 163/437] BSD: Replace symlink building with symlinks in git. --- .gitignore | 58 ------------------------------------- bsd-core/Makefile | 64 ++--------------------------------------- bsd-core/drm.h | 1 + bsd-core/drm_sarea.h | 1 + bsd-core/i915_dma.c | 1 + bsd-core/i915_drm.h | 1 + bsd-core/i915_drv.h | 1 + bsd-core/i915_irq.c | 1 + bsd-core/i915_mem.c | 1 + bsd-core/mach64_dma.c | 1 + bsd-core/mach64_drm.h | 1 + bsd-core/mach64_drv.h | 1 + bsd-core/mach64_irq.c | 1 + bsd-core/mach64_state.c | 1 + bsd-core/mga_dma.c | 1 + bsd-core/mga_drm.h | 1 + bsd-core/mga_drv.h | 1 + bsd-core/mga_irq.c | 1 + bsd-core/mga_state.c | 1 + bsd-core/mga_ucode.h | 1 + bsd-core/mga_warp.c | 1 + bsd-core/r128_cce.c | 1 + bsd-core/r128_drm.h | 1 + bsd-core/r128_drv.h | 1 + bsd-core/r128_irq.c | 1 + bsd-core/r128_state.c | 1 + bsd-core/r300_cmdbuf.c | 1 + bsd-core/r300_reg.h | 1 + bsd-core/radeon_cp.c | 1 + bsd-core/radeon_drm.h | 1 + bsd-core/radeon_drv.h | 1 + bsd-core/radeon_irq.c | 1 + bsd-core/radeon_mem.c | 1 + bsd-core/radeon_state.c | 1 + bsd-core/savage_bci.c | 1 + bsd-core/savage_drm.h | 1 + bsd-core/savage_drv.h | 1 + bsd-core/savage_state.c | 1 + bsd-core/sis_drm.h | 1 + bsd-core/sis_drv.h | 1 + bsd-core/sis_ds.c | 1 + bsd-core/sis_ds.h | 1 + bsd-core/sis_mm.c | 1 + bsd-core/tdfx_drv.h | 1 + bsd-core/via_3d_reg.h | 1 + bsd-core/via_dma.c | 1 + bsd-core/via_drm.h | 1 + bsd-core/via_drv.h | 1 + bsd-core/via_ds.c | 1 + bsd-core/via_ds.h | 1 + bsd-core/via_irq.c | 1 + bsd-core/via_map.c | 1 + bsd-core/via_mm.c | 1 + bsd-core/via_mm.h | 1 + bsd-core/via_verifier.c | 1 + bsd-core/via_verifier.h | 1 + bsd-core/via_video.c | 1 + 57 files changed, 57 insertions(+), 120 deletions(-) create mode 120000 bsd-core/drm.h create mode 120000 bsd-core/drm_sarea.h create mode 120000 bsd-core/i915_dma.c create mode 120000 bsd-core/i915_drm.h create mode 120000 bsd-core/i915_drv.h create mode 120000 bsd-core/i915_irq.c create mode 120000 bsd-core/i915_mem.c create mode 120000 bsd-core/mach64_dma.c create mode 120000 bsd-core/mach64_drm.h create mode 120000 bsd-core/mach64_drv.h create mode 120000 bsd-core/mach64_irq.c create mode 120000 bsd-core/mach64_state.c create mode 120000 bsd-core/mga_dma.c create mode 120000 
bsd-core/mga_drm.h create mode 120000 bsd-core/mga_drv.h create mode 120000 bsd-core/mga_irq.c create mode 120000 bsd-core/mga_state.c create mode 120000 bsd-core/mga_ucode.h create mode 120000 bsd-core/mga_warp.c create mode 120000 bsd-core/r128_cce.c create mode 120000 bsd-core/r128_drm.h create mode 120000 bsd-core/r128_drv.h create mode 120000 bsd-core/r128_irq.c create mode 120000 bsd-core/r128_state.c create mode 120000 bsd-core/r300_cmdbuf.c create mode 120000 bsd-core/r300_reg.h create mode 120000 bsd-core/radeon_cp.c create mode 120000 bsd-core/radeon_drm.h create mode 120000 bsd-core/radeon_drv.h create mode 120000 bsd-core/radeon_irq.c create mode 120000 bsd-core/radeon_mem.c create mode 120000 bsd-core/radeon_state.c create mode 120000 bsd-core/savage_bci.c create mode 120000 bsd-core/savage_drm.h create mode 120000 bsd-core/savage_drv.h create mode 120000 bsd-core/savage_state.c create mode 120000 bsd-core/sis_drm.h create mode 120000 bsd-core/sis_drv.h create mode 120000 bsd-core/sis_ds.c create mode 120000 bsd-core/sis_ds.h create mode 120000 bsd-core/sis_mm.c create mode 120000 bsd-core/tdfx_drv.h create mode 120000 bsd-core/via_3d_reg.h create mode 120000 bsd-core/via_dma.c create mode 120000 bsd-core/via_drm.h create mode 120000 bsd-core/via_drv.h create mode 120000 bsd-core/via_ds.c create mode 120000 bsd-core/via_ds.h create mode 120000 bsd-core/via_irq.c create mode 120000 bsd-core/via_map.c create mode 120000 bsd-core/via_mm.c create mode 120000 bsd-core/via_mm.h create mode 120000 bsd-core/via_verifier.c create mode 120000 bsd-core/via_verifier.h create mode 120000 bsd-core/via_video.c diff --git a/.gitignore b/.gitignore index 7ab6ced0..8447db2b 100644 --- a/.gitignore +++ b/.gitignore @@ -1,61 +1,3 @@ -bsd-core/linux -bsd-core/drm.h -bsd-core/drm_sarea.h -bsd-core/i915_dma.c -bsd-core/i915_drm.h -bsd-core/i915_drv.h -bsd-core/i915_irq.c -bsd-core/i915_mem.c -bsd-core/mach64_dma.c -bsd-core/mach64_drm.h -bsd-core/mach64_drv.h -bsd-core/mach64_irq.c -bsd-core/mach64_state.c -bsd-core/mga_dma.c -bsd-core/mga_drm.h -bsd-core/mga_drv.h -bsd-core/mga_irq.c -bsd-core/mga_state.c -bsd-core/mga_ucode.h -bsd-core/mga_warp.c -bsd-core/nv_drv.h -bsd-core/r128_cce.c -bsd-core/r128_drm.h -bsd-core/r128_drv.h -bsd-core/r128_irq.c -bsd-core/r128_state.c -bsd-core/r300_cmdbuf.c -bsd-core/r300_reg.h -bsd-core/radeon_cp.c -bsd-core/radeon_drm.h -bsd-core/radeon_drv.h -bsd-core/radeon_irq.c -bsd-core/radeon_mem.c -bsd-core/radeon_state.c -bsd-core/savage_bci.c -bsd-core/savage_drm.h -bsd-core/savage_drv.h -bsd-core/savage_state.c -bsd-core/sis_drm.h -bsd-core/sis_drv.h -bsd-core/sis_ds.c -bsd-core/sis_ds.h -bsd-core/sis_mm.c -bsd-core/tdfx_drv.h -bsd-core/via_3d_reg.h -bsd-core/via_dma.c -bsd-core/via_drm.h -bsd-core/via_drv.c -bsd-core/via_drv.h -bsd-core/via_ds.c -bsd-core/via_ds.h -bsd-core/via_irq.c -bsd-core/via_map.c -bsd-core/via_mm.c -bsd-core/via_mm.h -bsd-core/via_verifier.c -bsd-core/via_verifier.h -bsd-core/via_video.c bsd-core/*/@ bsd-core/*/machine *.flags diff --git a/bsd-core/Makefile b/bsd-core/Makefile index 00889dae..a58ac0a5 100644 --- a/bsd-core/Makefile +++ b/bsd-core/Makefile @@ -1,71 +1,11 @@ SHARED= ../shared-core -SHAREDFILES= drm.h \ - drm_sarea.h \ - i915_dma.c \ - i915_drm.h \ - i915_drv.h \ - i915_irq.c \ - i915_mem.c \ - mach64_dma.c \ - mach64_drm.h \ - mach64_drv.h \ - mach64_irq.c \ - mach64_state.c \ - mga_dma.c \ - mga_drm.h \ - mga_drv.h \ - mga_irq.c \ - mga_state.c \ - mga_ucode.h \ - mga_warp.c \ - r128_cce.c \ - r128_drm.h \ - r128_drv.h \ - 
r128_irq.c \ - r128_state.c \ - radeon_cp.c \ - radeon_drm.h \ - radeon_drv.h \ - radeon_irq.c \ - radeon_mem.c \ - radeon_state.c \ - r300_cmdbuf.c \ - r300_reg.h \ - savage_bci.c \ - savage_drm.h \ - savage_drv.h \ - savage_state.c \ - sis_drm.h \ - sis_drv.h \ - sis_ds.c \ - sis_ds.h \ - sis_mm.c \ - tdfx_drv.h \ - via_3d_reg.h \ - via_dma.c \ - via_drm.h \ - via_drv.h \ - via_ds.c \ - via_ds.h \ - via_irq.c \ - via_map.c \ - via_mm.c \ - via_mm.h \ - via_verifier.c \ - via_verifier.h \ - via_video.c SUBDIR = drm mach64 mga r128 radeon savage sis tdfx i915 # via -CLEANFILES+= ${SHAREDFILES} - .include -depend: drm_pciids.h ${SHAREDFILES} -all: drm_pciids.h ${SHAREDFILES} +depend: drm_pciids.h +all: drm_pciids.h drm_pciids.h: ${SHARED}/drm_pciids.txt sh ../scripts/create_bsd_pci_lists.sh < ${SHARED}/drm_pciids.txt - -${SHAREDFILES}: - ln -sf ${SHARED}/$@ $@ diff --git a/bsd-core/drm.h b/bsd-core/drm.h new file mode 120000 index 00000000..29636692 --- /dev/null +++ b/bsd-core/drm.h @@ -0,0 +1 @@ +../shared-core/drm.h \ No newline at end of file diff --git a/bsd-core/drm_sarea.h b/bsd-core/drm_sarea.h new file mode 120000 index 00000000..fd428f42 --- /dev/null +++ b/bsd-core/drm_sarea.h @@ -0,0 +1 @@ +../shared-core/drm_sarea.h \ No newline at end of file diff --git a/bsd-core/i915_dma.c b/bsd-core/i915_dma.c new file mode 120000 index 00000000..c61d967e --- /dev/null +++ b/bsd-core/i915_dma.c @@ -0,0 +1 @@ +../shared-core/i915_dma.c \ No newline at end of file diff --git a/bsd-core/i915_drm.h b/bsd-core/i915_drm.h new file mode 120000 index 00000000..ed53f01d --- /dev/null +++ b/bsd-core/i915_drm.h @@ -0,0 +1 @@ +../shared-core/i915_drm.h \ No newline at end of file diff --git a/bsd-core/i915_drv.h b/bsd-core/i915_drv.h new file mode 120000 index 00000000..085558ca --- /dev/null +++ b/bsd-core/i915_drv.h @@ -0,0 +1 @@ +../shared-core/i915_drv.h \ No newline at end of file diff --git a/bsd-core/i915_irq.c b/bsd-core/i915_irq.c new file mode 120000 index 00000000..2058a2e4 --- /dev/null +++ b/bsd-core/i915_irq.c @@ -0,0 +1 @@ +../shared-core/i915_irq.c \ No newline at end of file diff --git a/bsd-core/i915_mem.c b/bsd-core/i915_mem.c new file mode 120000 index 00000000..e8e56553 --- /dev/null +++ b/bsd-core/i915_mem.c @@ -0,0 +1 @@ +../shared-core/i915_mem.c \ No newline at end of file diff --git a/bsd-core/mach64_dma.c b/bsd-core/mach64_dma.c new file mode 120000 index 00000000..e5c28975 --- /dev/null +++ b/bsd-core/mach64_dma.c @@ -0,0 +1 @@ +../shared-core/mach64_dma.c \ No newline at end of file diff --git a/bsd-core/mach64_drm.h b/bsd-core/mach64_drm.h new file mode 120000 index 00000000..136ea936 --- /dev/null +++ b/bsd-core/mach64_drm.h @@ -0,0 +1 @@ +../shared-core/mach64_drm.h \ No newline at end of file diff --git a/bsd-core/mach64_drv.h b/bsd-core/mach64_drv.h new file mode 120000 index 00000000..85222cc2 --- /dev/null +++ b/bsd-core/mach64_drv.h @@ -0,0 +1 @@ +../shared-core/mach64_drv.h \ No newline at end of file diff --git a/bsd-core/mach64_irq.c b/bsd-core/mach64_irq.c new file mode 120000 index 00000000..a1235d58 --- /dev/null +++ b/bsd-core/mach64_irq.c @@ -0,0 +1 @@ +../shared-core/mach64_irq.c \ No newline at end of file diff --git a/bsd-core/mach64_state.c b/bsd-core/mach64_state.c new file mode 120000 index 00000000..b11f202c --- /dev/null +++ b/bsd-core/mach64_state.c @@ -0,0 +1 @@ +../shared-core/mach64_state.c \ No newline at end of file diff --git a/bsd-core/mga_dma.c b/bsd-core/mga_dma.c new file mode 120000 index 00000000..f290be9b --- /dev/null +++ 
b/bsd-core/mga_dma.c @@ -0,0 +1 @@ +../shared-core/mga_dma.c \ No newline at end of file diff --git a/bsd-core/mga_drm.h b/bsd-core/mga_drm.h new file mode 120000 index 00000000..1c87036f --- /dev/null +++ b/bsd-core/mga_drm.h @@ -0,0 +1 @@ +../shared-core/mga_drm.h \ No newline at end of file diff --git a/bsd-core/mga_drv.h b/bsd-core/mga_drv.h new file mode 120000 index 00000000..cb0c9e1d --- /dev/null +++ b/bsd-core/mga_drv.h @@ -0,0 +1 @@ +../shared-core/mga_drv.h \ No newline at end of file diff --git a/bsd-core/mga_irq.c b/bsd-core/mga_irq.c new file mode 120000 index 00000000..cf521d29 --- /dev/null +++ b/bsd-core/mga_irq.c @@ -0,0 +1 @@ +../shared-core/mga_irq.c \ No newline at end of file diff --git a/bsd-core/mga_state.c b/bsd-core/mga_state.c new file mode 120000 index 00000000..8bda8ba9 --- /dev/null +++ b/bsd-core/mga_state.c @@ -0,0 +1 @@ +../shared-core/mga_state.c \ No newline at end of file diff --git a/bsd-core/mga_ucode.h b/bsd-core/mga_ucode.h new file mode 120000 index 00000000..728b9aca --- /dev/null +++ b/bsd-core/mga_ucode.h @@ -0,0 +1 @@ +../shared-core/mga_ucode.h \ No newline at end of file diff --git a/bsd-core/mga_warp.c b/bsd-core/mga_warp.c new file mode 120000 index 00000000..d35b3255 --- /dev/null +++ b/bsd-core/mga_warp.c @@ -0,0 +1 @@ +../shared-core/mga_warp.c \ No newline at end of file diff --git a/bsd-core/r128_cce.c b/bsd-core/r128_cce.c new file mode 120000 index 00000000..0c1d659e --- /dev/null +++ b/bsd-core/r128_cce.c @@ -0,0 +1 @@ +../shared-core/r128_cce.c \ No newline at end of file diff --git a/bsd-core/r128_drm.h b/bsd-core/r128_drm.h new file mode 120000 index 00000000..363852cb --- /dev/null +++ b/bsd-core/r128_drm.h @@ -0,0 +1 @@ +../shared-core/r128_drm.h \ No newline at end of file diff --git a/bsd-core/r128_drv.h b/bsd-core/r128_drv.h new file mode 120000 index 00000000..4f7e822d --- /dev/null +++ b/bsd-core/r128_drv.h @@ -0,0 +1 @@ +../shared-core/r128_drv.h \ No newline at end of file diff --git a/bsd-core/r128_irq.c b/bsd-core/r128_irq.c new file mode 120000 index 00000000..66d28b05 --- /dev/null +++ b/bsd-core/r128_irq.c @@ -0,0 +1 @@ +../shared-core/r128_irq.c \ No newline at end of file diff --git a/bsd-core/r128_state.c b/bsd-core/r128_state.c new file mode 120000 index 00000000..e83d84b5 --- /dev/null +++ b/bsd-core/r128_state.c @@ -0,0 +1 @@ +../shared-core/r128_state.c \ No newline at end of file diff --git a/bsd-core/r300_cmdbuf.c b/bsd-core/r300_cmdbuf.c new file mode 120000 index 00000000..6674d056 --- /dev/null +++ b/bsd-core/r300_cmdbuf.c @@ -0,0 +1 @@ +../shared-core/r300_cmdbuf.c \ No newline at end of file diff --git a/bsd-core/r300_reg.h b/bsd-core/r300_reg.h new file mode 120000 index 00000000..ef54eba2 --- /dev/null +++ b/bsd-core/r300_reg.h @@ -0,0 +1 @@ +../shared-core/r300_reg.h \ No newline at end of file diff --git a/bsd-core/radeon_cp.c b/bsd-core/radeon_cp.c new file mode 120000 index 00000000..ee860943 --- /dev/null +++ b/bsd-core/radeon_cp.c @@ -0,0 +1 @@ +../shared-core/radeon_cp.c \ No newline at end of file diff --git a/bsd-core/radeon_drm.h b/bsd-core/radeon_drm.h new file mode 120000 index 00000000..54f595a3 --- /dev/null +++ b/bsd-core/radeon_drm.h @@ -0,0 +1 @@ +../shared-core/radeon_drm.h \ No newline at end of file diff --git a/bsd-core/radeon_drv.h b/bsd-core/radeon_drv.h new file mode 120000 index 00000000..5b415ea8 --- /dev/null +++ b/bsd-core/radeon_drv.h @@ -0,0 +1 @@ +../shared-core/radeon_drv.h \ No newline at end of file diff --git a/bsd-core/radeon_irq.c b/bsd-core/radeon_irq.c new file 
mode 120000 index 00000000..2f394a5e --- /dev/null +++ b/bsd-core/radeon_irq.c @@ -0,0 +1 @@ +../shared-core/radeon_irq.c \ No newline at end of file diff --git a/bsd-core/radeon_mem.c b/bsd-core/radeon_mem.c new file mode 120000 index 00000000..8cc27989 --- /dev/null +++ b/bsd-core/radeon_mem.c @@ -0,0 +1 @@ +../shared-core/radeon_mem.c \ No newline at end of file diff --git a/bsd-core/radeon_state.c b/bsd-core/radeon_state.c new file mode 120000 index 00000000..ccee8761 --- /dev/null +++ b/bsd-core/radeon_state.c @@ -0,0 +1 @@ +../shared-core/radeon_state.c \ No newline at end of file diff --git a/bsd-core/savage_bci.c b/bsd-core/savage_bci.c new file mode 120000 index 00000000..b8436713 --- /dev/null +++ b/bsd-core/savage_bci.c @@ -0,0 +1 @@ +../shared-core/savage_bci.c \ No newline at end of file diff --git a/bsd-core/savage_drm.h b/bsd-core/savage_drm.h new file mode 120000 index 00000000..0dab2e3b --- /dev/null +++ b/bsd-core/savage_drm.h @@ -0,0 +1 @@ +../shared-core/savage_drm.h \ No newline at end of file diff --git a/bsd-core/savage_drv.h b/bsd-core/savage_drv.h new file mode 120000 index 00000000..8397009c --- /dev/null +++ b/bsd-core/savage_drv.h @@ -0,0 +1 @@ +../shared-core/savage_drv.h \ No newline at end of file diff --git a/bsd-core/savage_state.c b/bsd-core/savage_state.c new file mode 120000 index 00000000..e55dc5d4 --- /dev/null +++ b/bsd-core/savage_state.c @@ -0,0 +1 @@ +../shared-core/savage_state.c \ No newline at end of file diff --git a/bsd-core/sis_drm.h b/bsd-core/sis_drm.h new file mode 120000 index 00000000..36c77aac --- /dev/null +++ b/bsd-core/sis_drm.h @@ -0,0 +1 @@ +../shared-core/sis_drm.h \ No newline at end of file diff --git a/bsd-core/sis_drv.h b/bsd-core/sis_drv.h new file mode 120000 index 00000000..3fddfdae --- /dev/null +++ b/bsd-core/sis_drv.h @@ -0,0 +1 @@ +../shared-core/sis_drv.h \ No newline at end of file diff --git a/bsd-core/sis_ds.c b/bsd-core/sis_ds.c new file mode 120000 index 00000000..242310a0 --- /dev/null +++ b/bsd-core/sis_ds.c @@ -0,0 +1 @@ +../shared-core/sis_ds.c \ No newline at end of file diff --git a/bsd-core/sis_ds.h b/bsd-core/sis_ds.h new file mode 120000 index 00000000..8cbdaf3b --- /dev/null +++ b/bsd-core/sis_ds.h @@ -0,0 +1 @@ +../shared-core/sis_ds.h \ No newline at end of file diff --git a/bsd-core/sis_mm.c b/bsd-core/sis_mm.c new file mode 120000 index 00000000..8f802ec3 --- /dev/null +++ b/bsd-core/sis_mm.c @@ -0,0 +1 @@ +../shared-core/sis_mm.c \ No newline at end of file diff --git a/bsd-core/tdfx_drv.h b/bsd-core/tdfx_drv.h new file mode 120000 index 00000000..8df70329 --- /dev/null +++ b/bsd-core/tdfx_drv.h @@ -0,0 +1 @@ +../shared-core/tdfx_drv.h \ No newline at end of file diff --git a/bsd-core/via_3d_reg.h b/bsd-core/via_3d_reg.h new file mode 120000 index 00000000..90d238ec --- /dev/null +++ b/bsd-core/via_3d_reg.h @@ -0,0 +1 @@ +../shared-core/via_3d_reg.h \ No newline at end of file diff --git a/bsd-core/via_dma.c b/bsd-core/via_dma.c new file mode 120000 index 00000000..1f4d920f --- /dev/null +++ b/bsd-core/via_dma.c @@ -0,0 +1 @@ +../shared-core/via_dma.c \ No newline at end of file diff --git a/bsd-core/via_drm.h b/bsd-core/via_drm.h new file mode 120000 index 00000000..7cd175d3 --- /dev/null +++ b/bsd-core/via_drm.h @@ -0,0 +1 @@ +../shared-core/via_drm.h \ No newline at end of file diff --git a/bsd-core/via_drv.h b/bsd-core/via_drv.h new file mode 120000 index 00000000..8954fe88 --- /dev/null +++ b/bsd-core/via_drv.h @@ -0,0 +1 @@ +../shared-core/via_drv.h \ No newline at end of file diff --git 
a/bsd-core/via_ds.c b/bsd-core/via_ds.c new file mode 120000 index 00000000..b0fbb694 --- /dev/null +++ b/bsd-core/via_ds.c @@ -0,0 +1 @@ +../shared-core/via_ds.c \ No newline at end of file diff --git a/bsd-core/via_ds.h b/bsd-core/via_ds.h new file mode 120000 index 00000000..dc8f2f44 --- /dev/null +++ b/bsd-core/via_ds.h @@ -0,0 +1 @@ +../shared-core/via_ds.h \ No newline at end of file diff --git a/bsd-core/via_irq.c b/bsd-core/via_irq.c new file mode 120000 index 00000000..f615af87 --- /dev/null +++ b/bsd-core/via_irq.c @@ -0,0 +1 @@ +../shared-core/via_irq.c \ No newline at end of file diff --git a/bsd-core/via_map.c b/bsd-core/via_map.c new file mode 120000 index 00000000..b5056634 --- /dev/null +++ b/bsd-core/via_map.c @@ -0,0 +1 @@ +../shared-core/via_map.c \ No newline at end of file diff --git a/bsd-core/via_mm.c b/bsd-core/via_mm.c new file mode 120000 index 00000000..f9ec0f37 --- /dev/null +++ b/bsd-core/via_mm.c @@ -0,0 +1 @@ +../shared-core/via_mm.c \ No newline at end of file diff --git a/bsd-core/via_mm.h b/bsd-core/via_mm.h new file mode 120000 index 00000000..fe2234f6 --- /dev/null +++ b/bsd-core/via_mm.h @@ -0,0 +1 @@ +../shared-core/via_mm.h \ No newline at end of file diff --git a/bsd-core/via_verifier.c b/bsd-core/via_verifier.c new file mode 120000 index 00000000..00b411bd --- /dev/null +++ b/bsd-core/via_verifier.c @@ -0,0 +1 @@ +../shared-core/via_verifier.c \ No newline at end of file diff --git a/bsd-core/via_verifier.h b/bsd-core/via_verifier.h new file mode 120000 index 00000000..62d3e287 --- /dev/null +++ b/bsd-core/via_verifier.h @@ -0,0 +1 @@ +../shared-core/via_verifier.h \ No newline at end of file diff --git a/bsd-core/via_video.c b/bsd-core/via_video.c new file mode 120000 index 00000000..a6d27947 --- /dev/null +++ b/bsd-core/via_video.c @@ -0,0 +1 @@ +../shared-core/via_video.c \ No newline at end of file From c1119b1b092527fbb6950d0b5e51e076ddb00f29 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Fri, 20 Jul 2007 06:39:25 -0700 Subject: [PATCH 164/437] Replace filp in ioctl arguments with drm_file *file_priv. As a fallout, replace filp storage with file_priv storage for "unique identifier of a client" all over the DRM. There is a 1:1 mapping, so this should be a noop. This could be a minor performance improvement, as everything on Linux dereferenced filp to get file_priv anyway, while only the mmap ioctls went the other direction. 
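The conversion itself is mechanical. The sketch below is a hypothetical handler (not part of the patch) written against the post-patch interface; it shows where the old filp->private_data dereference disappears and how the filp back-pointer added to struct drm_file covers the reverse direction for paths that still start from a struct file:

#include "drmP.h"

/* Hypothetical ioctl handler using the post-patch calling convention. */
static int example_ioctl(struct inode *inode, struct drm_file *file_priv,
			 unsigned int cmd, unsigned long arg)
{
	struct drm_device *dev = file_priv->head->dev;

	/* Old style: struct drm_file *priv = filp->private_data;
	 * the mapping is 1:1, so receiving file_priv directly only
	 * saves that dereference. */

	/* Lock ownership is now compared as drm_file pointers
	 * (dev->lock.file_priv) rather than struct file pointers. */
	LOCK_TEST_WITH_RETURN(dev, file_priv);

	/* The reverse mapping remains available through the filp
	 * member this patch adds, e.g. for mmap-style paths. */
	if (file_priv->filp == NULL)
		return -EINVAL;

	return 0;
}
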
--- bsd-core/drmP.h | 40 +++---- bsd-core/drm_bufs.c | 8 +- bsd-core/drm_dma.c | 9 +- bsd-core/drm_drv.c | 56 +++++----- bsd-core/drm_irq.c | 2 +- bsd-core/drm_lock.c | 6 +- linux-core/drmP.h | 133 +++++++++++----------- linux-core/drm_agpsupport.c | 53 ++++----- linux-core/drm_auth.c | 26 ++--- linux-core/drm_bo.c | 100 ++++++++--------- linux-core/drm_bufs.c | 66 +++++------ linux-core/drm_context.c | 56 +++++----- linux-core/drm_dma.c | 11 +- linux-core/drm_drv.c | 29 +++-- linux-core/drm_fence.c | 32 +++--- linux-core/drm_fops.c | 34 +++--- linux-core/drm_ioc32.c | 2 +- linux-core/drm_ioctl.c | 39 +++---- linux-core/drm_irq.c | 19 ++-- linux-core/drm_lock.c | 26 ++--- linux-core/drm_os_linux.h | 9 +- linux-core/drm_scatter.c | 13 +-- linux-core/drm_vm.c | 6 +- linux-core/i810_dma.c | 127 ++++++++++----------- linux-core/i810_drv.h | 7 +- linux-core/sis_mm.c | 16 +-- linux-core/via_dmablit.c | 2 +- linux-core/via_mm.c | 10 +- shared-core/i915_dma.c | 12 +- shared-core/i915_drv.h | 8 +- shared-core/i915_irq.c | 4 +- shared-core/i915_mem.c | 41 +++---- shared-core/mach64_dma.c | 17 +-- shared-core/mach64_drv.h | 4 +- shared-core/mach64_state.c | 47 ++++---- shared-core/mga_dma.c | 15 +-- shared-core/mga_state.c | 12 +- shared-core/nouveau_drv.h | 28 +++-- shared-core/nouveau_fifo.c | 25 +++-- shared-core/nouveau_mem.c | 62 ++++++----- shared-core/nouveau_notifier.c | 11 +- shared-core/nouveau_object.c | 4 +- shared-core/nouveau_state.c | 13 ++- shared-core/nv50_instmem.c | 2 +- shared-core/r128_cce.c | 24 ++-- shared-core/r128_drv.h | 3 +- shared-core/r128_state.c | 43 ++++---- shared-core/r300_cmdbuf.c | 8 +- shared-core/radeon_cp.c | 31 +++--- shared-core/radeon_drv.h | 20 ++-- shared-core/radeon_irq.c | 2 +- shared-core/radeon_mem.c | 40 +++---- shared-core/radeon_state.c | 196 ++++++++++++++++----------------- shared-core/savage_bci.c | 20 ++-- shared-core/savage_drv.h | 3 +- shared-core/savage_state.c | 2 +- shared-core/sis_drv.h | 3 +- shared-core/via_dma.c | 8 +- shared-core/via_drv.h | 3 +- 59 files changed, 815 insertions(+), 833 deletions(-) diff --git a/bsd-core/drmP.h b/bsd-core/drmP.h index e9271ff7..84baf5f0 100644 --- a/bsd-core/drmP.h +++ b/bsd-core/drmP.h @@ -217,10 +217,6 @@ MALLOC_DECLARE(M_DRM); #define spldrm() spltty() #endif /* __NetBSD__ || __OpenBSD__ */ -/* Currently our DRMFILE (filp) is a void * which is actually the pid - * of the current process. 
It should be a per-open unique pointer, but - * code for that is not yet written */ -#define DRMFILE void * #define DRM_IRQ_ARGS void *arg typedef void irqreturn_t; #define IRQ_HANDLED /* nothing */ @@ -237,7 +233,8 @@ enum { #define DRM_DEVICE \ drm_device_t *dev = kdev->si_drv1 #define DRM_IOCTL_ARGS struct cdev *kdev, u_long cmd, caddr_t data, \ - int flags, DRM_STRUCTPROC *p, DRMFILE filp + int flags, DRM_STRUCTPROC *p, \ + struct drm_file *file_priv #define PAGE_ALIGN(addr) round_page(addr) /* DRM_SUSER returns true if the user is superuser */ @@ -260,8 +257,9 @@ enum { drm_device_t *dev = (device_lookup(&drm_cd, \ minor(kdev)))->dv_cfdata->cf_driver->cd_devs[minor(kdev)] #endif /* __OpenBSD__ */ -#define DRM_IOCTL_ARGS dev_t kdev, u_long cmd, caddr_t data, \ - int flags, DRM_STRUCTPROC *p, DRMFILE filp +#define DRM_IOCTL_ARGS dev_t kdev, u_long cmd, caddr_t data, \ + int flags, DRM_STRUCTPROC *p, \ + struct drm_file *file_priv #define CDEV_MAJOR 34 #define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK) @@ -385,23 +383,10 @@ typedef vaddr_t vm_offset_t; (_map) = (_dev)->context_sareas[_ctx]; \ } while(0) -#define DRM_GET_PRIV_WITH_RETURN(_priv, _filp) \ -do { \ - if (_filp != (DRMFILE)(intptr_t)DRM_CURRENTPID) { \ - DRM_ERROR("filp doesn't match curproc\n"); \ - return EINVAL; \ - } \ - _priv = drm_find_file_by_proc(dev, DRM_CURPROC); \ - if (_priv == NULL) { \ - DRM_ERROR("can't find authenticator\n"); \ - return EINVAL; \ - } \ -} while (0) - -#define LOCK_TEST_WITH_RETURN(dev, filp) \ +#define LOCK_TEST_WITH_RETURN(dev, file_priv) \ do { \ if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) || \ - dev->lock.filp != filp) { \ + dev->lock.file_priv != file_priv) { \ DRM_ERROR("%s called without lock held\n", \ __FUNCTION__); \ return EINVAL; \ @@ -479,7 +464,7 @@ typedef struct drm_buf { unsigned long bus_address; /* Bus address of buffer */ struct drm_buf *next; /* Kernel-only: used for free list */ __volatile__ int pending; /* On hardware DMA queue */ - DRMFILE filp; /* Unique identifier of holding process */ + struct drm_file *file_priv; /* Unique identifier of holding process */ int context; /* Kernel queue for this buffer */ enum { DRM_LIST_NONE = 0, @@ -541,7 +526,7 @@ struct drm_file { typedef struct drm_lock_data { drm_hw_lock_t *hw_lock; /* Hardware lock */ - DRMFILE filp; /* Unique identifier of holding process (NULL is kernel)*/ + struct drm_file *file_priv; /* Unique identifier of holding process (NULL is kernel)*/ int lock_queue; /* Queue of blocked processes */ unsigned long lock_time; /* Time of last lock in jiffies */ } drm_lock_data_t; @@ -645,11 +630,12 @@ struct drm_driver_info { int (*load)(struct drm_device *, unsigned long flags); int (*firstopen)(struct drm_device *); int (*open)(struct drm_device *, drm_file_t *); - void (*preclose)(struct drm_device *, void *filp); + void (*preclose)(struct drm_device *, struct drm_file *file_priv); void (*postclose)(struct drm_device *, drm_file_t *); void (*lastclose)(struct drm_device *); int (*unload)(struct drm_device *); - void (*reclaim_buffers_locked)(struct drm_device *, void *filp); + void (*reclaim_buffers_locked)(struct drm_device *, + struct drm_file *file_priv); int (*dma_ioctl)(DRM_IOCTL_ARGS); void (*dma_ready)(struct drm_device *); int (*dma_quiescent)(struct drm_device *); @@ -900,7 +886,7 @@ int drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request); int drm_dma_setup(drm_device_t *dev); void drm_dma_takedown(drm_device_t *dev); void drm_free_buffer(drm_device_t *dev, drm_buf_t *buf); -void 
drm_reclaim_buffers(drm_device_t *dev, DRMFILE filp); +void drm_reclaim_buffers(drm_device_t *dev, struct drm_file *file_priv); #define drm_core_reclaim_buffers drm_reclaim_buffers /* IRQ support (drm_irq.c) */ diff --git a/bsd-core/drm_bufs.c b/bsd-core/drm_bufs.c index de28a2cf..bc019741 100644 --- a/bsd-core/drm_bufs.c +++ b/bsd-core/drm_bufs.c @@ -469,7 +469,7 @@ static int drm_do_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request) buf->address = (void *)(agp_offset + offset); buf->next = NULL; buf->pending = 0; - buf->filp = NULL; + buf->file_priv = NULL; buf->dev_priv_size = dev->driver.buf_priv_size; buf->dev_private = malloc(buf->dev_priv_size, M_DRM, @@ -610,7 +610,7 @@ static int drm_do_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request) buf->bus_address = dmah->busaddr + offset; buf->next = NULL; buf->pending = 0; - buf->filp = NULL; + buf->file_priv = NULL; buf->dev_priv_size = dev->driver.buf_priv_size; buf->dev_private = malloc(buf->dev_priv_size, M_DRM, @@ -724,7 +724,7 @@ static int drm_do_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request) buf->address = (void *)(agp_offset + offset + dev->sg->handle); buf->next = NULL; buf->pending = 0; - buf->filp = NULL; + buf->file_priv = NULL; buf->dev_priv_size = dev->driver.buf_priv_size; buf->dev_private = malloc(buf->dev_priv_size, M_DRM, @@ -1008,7 +1008,7 @@ int drm_freebufs(DRM_IOCTL_ARGS) break; } buf = dma->buflist[idx]; - if ( buf->filp != filp ) { + if ( buf->file_priv != file_priv ) { DRM_ERROR("Process %d freeing buffer not owned\n", DRM_CURRENTPID); retcode = EINVAL; diff --git a/bsd-core/drm_dma.c b/bsd-core/drm_dma.c index 90678dfc..fc219039 100644 --- a/bsd-core/drm_dma.c +++ b/bsd-core/drm_dma.c @@ -89,18 +89,18 @@ void drm_free_buffer(drm_device_t *dev, drm_buf_t *buf) if (!buf) return; buf->pending = 0; - buf->filp = NULL; + buf->file_priv= NULL; buf->used = 0; } -void drm_reclaim_buffers(drm_device_t *dev, DRMFILE filp) +void drm_reclaim_buffers(drm_device_t *dev, struct drm_file *file_priv) { drm_device_dma_t *dma = dev->dma; int i; if (!dma) return; for (i = 0; i < dma->buf_count; i++) { - if (dma->buflist[i]->filp == filp) { + if (dma->buflist[i]->file_priv == file_priv) { switch (dma->buflist[i]->list) { case DRM_LIST_NONE: drm_free_buffer(dev, dma->buflist[i]); @@ -122,7 +122,8 @@ int drm_dma(DRM_IOCTL_ARGS) DRM_DEVICE; if (dev->driver.dma_ioctl) { - return -dev->driver.dma_ioctl(kdev, cmd, data, flags, p, filp); + return -dev->driver.dma_ioctl(kdev, cmd, data, flags, p, + file_priv); } else { DRM_DEBUG("DMA ioctl on driver with no dma handler\n"); return EINVAL; diff --git a/bsd-core/drm_drv.c b/bsd-core/drm_drv.c index fa2958b9..0b33ba34 100644 --- a/bsd-core/drm_drv.c +++ b/bsd-core/drm_drv.c @@ -499,7 +499,7 @@ static int drm_lastclose(drm_device_t *dev) drm_dma_takedown(dev); if ( dev->lock.hw_lock ) { dev->lock.hw_lock = NULL; /* SHM removed */ - dev->lock.filp = NULL; + dev->lock.file_priv = NULL; DRM_WAKEUP_INT((void *)&dev->lock.lock_queue); } @@ -704,24 +704,23 @@ int drm_open(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p) int drm_close(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p) { - drm_file_t *priv; + drm_file_t *file_priv; DRM_DEVICE; int retcode = 0; - DRMFILE filp = (void *)(uintptr_t)(DRM_CURRENTPID); - + DRM_DEBUG( "open_count = %d\n", dev->open_count ); DRM_LOCK(); - priv = drm_find_file_by_proc(dev, p); - if (!priv) { + file_priv = drm_find_file_by_proc(dev, p); + if (!file_priv) { DRM_UNLOCK(); DRM_ERROR("can't find authenticator\n"); return EINVAL; } if 
(dev->driver.preclose != NULL) - dev->driver.preclose(dev, filp); + dev->driver.preclose(dev, file_priv); /* ======================================================== * Begin inline drm_release @@ -736,12 +735,12 @@ int drm_close(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p) #endif if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) - && dev->lock.filp == filp) { + && dev->lock.file_priv == file_priv) { DRM_DEBUG("Process %d dead, freeing lock for context %d\n", DRM_CURRENTPID, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); if (dev->driver.reclaim_buffers_locked != NULL) - dev->driver.reclaim_buffers_locked(dev, filp); + dev->driver.reclaim_buffers_locked(dev, file_priv); drm_lock_free(dev, &dev->lock.hw_lock->lock, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); @@ -761,7 +760,7 @@ int drm_close(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p) } if (drm_lock_take(&dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT)) { - dev->lock.filp = filp; + dev->lock.file_priv = file_priv; dev->lock.lock_time = jiffies; atomic_inc( &dev->counts[_DRM_STAT_LOCKS] ); break; /* Got lock */ @@ -778,14 +777,14 @@ int drm_close(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p) break; } if (retcode == 0) { - dev->driver.reclaim_buffers_locked(dev, filp); + dev->driver.reclaim_buffers_locked(dev, file_priv); drm_lock_free(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT); } } if (dev->driver.use_dma && !dev->driver.reclaim_buffers_locked) - drm_reclaim_buffers(dev, filp); + drm_reclaim_buffers(dev, file_priv); #if defined (__FreeBSD__) && (__FreeBSD_version >= 500000) funsetown(&dev->buf_sigio); @@ -795,11 +794,11 @@ int drm_close(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p) dev->buf_pgid = 0; #endif /* __NetBSD__ || __OpenBSD__ */ - if (--priv->refs == 0) { + if (--file_priv->refs == 0) { if (dev->driver.postclose != NULL) - dev->driver.postclose(dev, priv); - TAILQ_REMOVE(&dev->files, priv, link); - free(priv, M_DRM); + dev->driver.postclose(dev, file_priv); + TAILQ_REMOVE(&dev->files, file_priv, link); + free(file_priv, M_DRM); } /* ======================================================== @@ -830,26 +829,27 @@ int drm_ioctl(struct cdev *kdev, u_long cmd, caddr_t data, int flags, int (*func)(DRM_IOCTL_ARGS); int nr = DRM_IOCTL_NR(cmd); int is_driver_ioctl = 0; - drm_file_t *priv; - DRMFILE filp = (DRMFILE)(uintptr_t)DRM_CURRENTPID; + drm_file_t *file_priv; DRM_LOCK(); - priv = drm_find_file_by_proc(dev, p); + file_priv = drm_find_file_by_proc(dev, p); DRM_UNLOCK(); - if (priv == NULL) { + if (file_priv == NULL) { DRM_ERROR("can't find authenticator\n"); return EINVAL; } atomic_inc( &dev->counts[_DRM_STAT_IOCTLS] ); - ++priv->ioctl_count; + ++file_priv->ioctl_count; #ifdef __FreeBSD__ DRM_DEBUG( "pid=%d, cmd=0x%02lx, nr=0x%02x, dev 0x%lx, auth=%d\n", - DRM_CURRENTPID, cmd, nr, (long)dev->device, priv->authenticated ); + DRM_CURRENTPID, cmd, nr, (long)dev->device, + file_priv->authenticated ); #elif defined(__NetBSD__) || defined(__OpenBSD__) DRM_DEBUG( "pid=%d, cmd=0x%02lx, nr=0x%02x, dev 0x%lx, auth=%d\n", - DRM_CURRENTPID, cmd, nr, (long)&dev->device, priv->authenticated ); + DRM_CURRENTPID, cmd, nr, (long)&dev->device, + file_priv->authenticated ); #endif switch (cmd) { @@ -904,17 +904,15 @@ int drm_ioctl(struct cdev *kdev, u_long cmd, caddr_t data, int flags, DRM_DEBUG( "no function\n" ); return EINVAL; } - /* ioctl->master check should be against something in the filp set up - * for the first opener, but it doesn't matter yet. 
- */ + if (((ioctl->flags & DRM_ROOT_ONLY) && !DRM_SUSER(p)) || - ((ioctl->flags & DRM_AUTH) && !priv->authenticated) || - ((ioctl->flags & DRM_MASTER) && !priv->master)) + ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) || + ((ioctl->flags & DRM_MASTER) && !file_priv->master)) return EACCES; if (is_driver_ioctl) DRM_LOCK(); - retcode = func(kdev, cmd, data, flags, p, filp); + retcode = func(kdev, cmd, data, flags, p, file_priv); if (is_driver_ioctl) { DRM_UNLOCK(); /* Driver ioctls in shared code follow the linux convention of diff --git a/bsd-core/drm_irq.c b/bsd-core/drm_irq.c index 215eb0c9..95c84ab5 100644 --- a/bsd-core/drm_irq.c +++ b/bsd-core/drm_irq.c @@ -306,7 +306,7 @@ static void drm_locked_task(void *context, int pending __unused) if (drm_lock_take(&dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT)) { - dev->lock.filp = (void *)(uintptr_t)DRM_CURRENTPID; + dev->lock.file_priv = NULL; /* kernel owned */ dev->lock.lock_time = jiffies; atomic_inc(&dev->counts[_DRM_STAT_LOCKS]); break; /* Got lock */ diff --git a/bsd-core/drm_lock.c b/bsd-core/drm_lock.c index d0e61d3a..54b64806 100644 --- a/bsd-core/drm_lock.c +++ b/bsd-core/drm_lock.c @@ -66,7 +66,7 @@ int drm_lock_transfer(drm_device_t *dev, { unsigned int old, new; - dev->lock.filp = NULL; + dev->lock.file_priv = NULL; do { old = *lock; new = context | _DRM_LOCK_HELD; @@ -80,7 +80,7 @@ int drm_lock_free(drm_device_t *dev, { unsigned int old, new; - dev->lock.filp = NULL; + dev->lock.file_priv = NULL; do { old = *lock; new = 0; @@ -118,7 +118,7 @@ int drm_lock(DRM_IOCTL_ARGS) DRM_LOCK(); for (;;) { if (drm_lock_take(&dev->lock.hw_lock->lock, lock.context)) { - dev->lock.filp = (void *)(uintptr_t)DRM_CURRENTPID; + dev->lock.file_priv = file_priv; dev->lock.lock_time = jiffies; atomic_inc(&dev->counts[_DRM_STAT_LOCKS]); break; /* Got lock */ diff --git a/linux-core/drmP.h b/linux-core/drmP.h index af859c38..f4367955 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -84,6 +84,8 @@ #include "drm_os_linux.h" #include "drm_hashtab.h" +struct drm_file; + /* If you want the memory alloc debug functionality, change define below */ /* #define DEBUG_MEMORY */ @@ -248,15 +250,15 @@ * Test that the hardware lock is held by the caller, returning otherwise. * * \param dev DRM device. - * \param filp file pointer of the caller. + * \param file_priv DRM file private pointer of the caller. */ -#define LOCK_TEST_WITH_RETURN( dev, filp ) \ +#define LOCK_TEST_WITH_RETURN( dev, file_priv ) \ do { \ if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || \ - dev->lock.filp != filp ) { \ + dev->lock.file_priv != file_priv ) { \ DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\ __FUNCTION__, _DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ),\ - dev->lock.filp, filp ); \ + dev->lock.file_priv, file_priv ); \ return -EINVAL; \ } \ } while (0) @@ -277,11 +279,11 @@ do { \ * Ioctl function type. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private pointer. * \param cmd command. * \param arg argument. 
*/ -typedef int drm_ioctl_t(struct inode *inode, struct file *filp, +typedef int drm_ioctl_t(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd, @@ -323,7 +325,7 @@ struct drm_buf { __volatile__ int waiting; /**< On kernel DMA queue */ __volatile__ int pending; /**< On hardware DMA queue */ wait_queue_head_t dma_wait; /**< Processes waiting */ - struct file *filp; /**< Pointer to holding file descr */ + struct drm_file *file_priv; /**< Private of holding file descr */ int context; /**< Kernel queue for this buffer */ int while_locked; /**< Dispatch this buffer while locked */ enum { @@ -419,6 +421,7 @@ struct drm_file { struct list_head user_objects; struct drm_open_hash refd_object_hash[_DRM_NO_REF_TYPES]; + struct file *filp; void *driver_priv; }; @@ -446,7 +449,8 @@ struct drm_queue { */ struct drm_lock_data { struct drm_hw_lock *hw_lock; /**< Hardware lock */ - struct file *filp; /**< File descr of lock holder (0=kernel) */ + /** Private of lock holder's file (NULL=kernel) */ + struct drm_file *file_priv; wait_queue_head_t lock_queue; /**< Queue of blocked processes */ unsigned long lock_time; /**< Time of last lock in jiffies */ spinlock_t spinlock; @@ -603,7 +607,7 @@ struct drm_driver { int (*load) (struct drm_device *, unsigned long flags); int (*firstopen) (struct drm_device *); int (*open) (struct drm_device *, struct drm_file *); - void (*preclose) (struct drm_device *, struct file * filp); + void (*preclose) (struct drm_device *, struct drm_file *file_priv); void (*postclose) (struct drm_device *, struct drm_file *); void (*lastclose) (struct drm_device *); int (*unload) (struct drm_device *); @@ -637,11 +641,12 @@ struct drm_driver { void (*irq_preinstall) (struct drm_device * dev); void (*irq_postinstall) (struct drm_device * dev); void (*irq_uninstall) (struct drm_device * dev); - void (*reclaim_buffers) (struct drm_device *dev, struct file * filp); + void (*reclaim_buffers) (struct drm_device *dev, + struct drm_file *file_priv); void (*reclaim_buffers_locked) (struct drm_device *dev, - struct file * filp); + struct drm_file *file_priv); void (*reclaim_buffers_idlelocked) (struct drm_device *dev, - struct file * filp); + struct drm_file *file_priv); unsigned long (*get_map_ofs) (struct drm_map * map); unsigned long (*get_reg_ofs) (struct drm_device * dev); void (*set_version) (struct drm_device * dev, struct drm_set_version * sv); @@ -939,69 +944,70 @@ extern void drm_init_memctl(size_t low_threshold, size_t unit_size); /* Misc. 
IOCTL support (drm_ioctl.h) */ -extern int drm_irq_by_busid(struct inode *inode, struct file *filp, +extern int drm_irq_by_busid(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); -extern int drm_getunique(struct inode *inode, struct file *filp, +extern int drm_getunique(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); -extern int drm_setunique(struct inode *inode, struct file *filp, +extern int drm_setunique(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); -extern int drm_getmap(struct inode *inode, struct file *filp, +extern int drm_getmap(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); -extern int drm_getclient(struct inode *inode, struct file *filp, +extern int drm_getclient(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); -extern int drm_getstats(struct inode *inode, struct file *filp, +extern int drm_getstats(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); -extern int drm_setversion(struct inode *inode, struct file *filp, +extern int drm_setversion(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); -extern int drm_noop(struct inode *inode, struct file *filp, +extern int drm_noop(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); /* Context IOCTL support (drm_context.h) */ -extern int drm_resctx(struct inode *inode, struct file *filp, +extern int drm_resctx(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); -extern int drm_addctx(struct inode *inode, struct file *filp, +extern int drm_addctx(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); -extern int drm_modctx(struct inode *inode, struct file *filp, +extern int drm_modctx(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); -extern int drm_getctx(struct inode *inode, struct file *filp, +extern int drm_getctx(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); -extern int drm_switchctx(struct inode *inode, struct file *filp, +extern int drm_switchctx(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); -extern int drm_newctx(struct inode *inode, struct file *filp, +extern int drm_newctx(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); -extern int drm_rmctx(struct inode *inode, struct file *filp, +extern int drm_rmctx(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); extern int drm_ctxbitmap_init(struct drm_device *dev); extern void drm_ctxbitmap_cleanup(struct drm_device *dev); extern void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle); -extern int drm_setsareactx(struct inode *inode, struct file *filp, +extern int drm_setsareactx(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); -extern int drm_getsareactx(struct inode *inode, struct file *filp, +extern int drm_getsareactx(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); /* Drawable IOCTL support (drm_drawable.h) */ -extern int drm_adddraw(struct inode *inode, struct file *filp, +extern int drm_adddraw(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); -extern int drm_rmdraw(struct inode *inode, struct file *filp, 
+extern int drm_rmdraw(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); -extern int drm_update_drawable_info(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); +extern int drm_update_drawable_info(struct inode *inode, + struct drm_file *file_priv, + unsigned int cmd, unsigned long arg); extern struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev, drm_drawable_t id); extern void drm_drawable_free_all(struct drm_device *dev); /* Authentication IOCTL support (drm_auth.h) */ -extern int drm_getmagic(struct inode *inode, struct file *filp, +extern int drm_getmagic(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); -extern int drm_authmagic(struct inode *inode, struct file *filp, +extern int drm_authmagic(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); /* Locking IOCTL support (drm_lock.h) */ -extern int drm_lock(struct inode *inode, struct file *filp, +extern int drm_lock(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); -extern int drm_unlock(struct inode *inode, struct file *filp, +extern int drm_unlock(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); extern int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context); extern int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context); @@ -1013,8 +1019,7 @@ extern void drm_idlelock_release(struct drm_lock_data *lock_data); * DMA quiscent + idle. DMA quiescent usually requires the hardware lock. */ -extern int drm_i_have_hw_lock(struct file *filp); -extern int drm_kernel_take_hw_lock(struct file *filp); +extern int drm_i_have_hw_lock(struct drm_file *file_priv); /* Buffer management support (drm_bufs.h) */ extern int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request); @@ -1023,21 +1028,21 @@ extern int drm_addbufs_fb (struct drm_device *dev, struct drm_buf_desc * request extern int drm_addmap(struct drm_device *dev, unsigned int offset, unsigned int size, enum drm_map_type type, enum drm_map_flags flags, drm_local_map_t ** map_ptr); -extern int drm_addmap_ioctl(struct inode *inode, struct file *filp, +extern int drm_addmap_ioctl(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); extern int drm_rmmap(struct drm_device *dev, drm_local_map_t *map); extern int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map); -extern int drm_rmmap_ioctl(struct inode *inode, struct file *filp, +extern int drm_rmmap_ioctl(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); -extern int drm_addbufs(struct inode *inode, struct file *filp, +extern int drm_addbufs(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); -extern int drm_infobufs(struct inode *inode, struct file *filp, +extern int drm_infobufs(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); -extern int drm_markbufs(struct inode *inode, struct file *filp, +extern int drm_markbufs(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); -extern int drm_freebufs(struct inode *inode, struct file *filp, +extern int drm_freebufs(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); -extern int drm_mapbufs(struct inode *inode, struct file *filp, +extern int drm_mapbufs(struct inode *inode, struct drm_file *file_priv, unsigned 
int cmd, unsigned long arg); extern int drm_order(unsigned long size); extern unsigned long drm_get_resource_start(struct drm_device *dev, @@ -1052,10 +1057,11 @@ extern struct drm_map_list *drm_find_matching_map(struct drm_device *dev, extern int drm_dma_setup(struct drm_device *dev); extern void drm_dma_takedown(struct drm_device *dev); extern void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf); -extern void drm_core_reclaim_buffers(struct drm_device *dev, struct file *filp); +extern void drm_core_reclaim_buffers(struct drm_device *dev, + struct drm_file *filp); /* IRQ support (drm_irq.h) */ -extern int drm_control(struct inode *inode, struct file *filp, +extern int drm_control(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS); extern int drm_irq_uninstall(struct drm_device *dev); @@ -1063,7 +1069,7 @@ extern void drm_driver_irq_preinstall(struct drm_device *dev); extern void drm_driver_irq_postinstall(struct drm_device *dev); extern void drm_driver_irq_uninstall(struct drm_device *dev); -extern int drm_wait_vblank(struct inode *inode, struct file *filp, +extern int drm_wait_vblank(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq); extern void drm_vbl_send_signals(struct drm_device *dev); @@ -1072,28 +1078,31 @@ extern void drm_locked_tasklet(struct drm_device *dev, void(*func)(struct drm_de /* AGP/GART support (drm_agpsupport.h) */ extern struct drm_agp_head *drm_agp_init(struct drm_device *dev); extern int drm_agp_acquire(struct drm_device *dev); -extern int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); +extern int drm_agp_acquire_ioctl(struct inode *inode, + struct drm_file *file_priv, + unsigned int cmd, unsigned long arg); extern int drm_agp_release(struct drm_device *dev); -extern int drm_agp_release_ioctl(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); +extern int drm_agp_release_ioctl(struct inode *inode, + struct drm_file *file_priv, + unsigned int cmd, unsigned long arg); extern int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode); -extern int drm_agp_enable_ioctl(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); +extern int drm_agp_enable_ioctl(struct inode *inode, + struct drm_file *file_priv, + unsigned int cmd, unsigned long arg); extern int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info); -extern int drm_agp_info_ioctl(struct inode *inode, struct file *filp, +extern int drm_agp_info_ioctl(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); extern int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request); -extern int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp, +extern int drm_agp_alloc_ioctl(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); extern int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request); -extern int drm_agp_free_ioctl(struct inode *inode, struct file *filp, +extern int drm_agp_free_ioctl(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); extern int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request); -extern int drm_agp_unbind_ioctl(struct inode *inode, struct file *filp, +extern int drm_agp_unbind_ioctl(struct inode *inode, 
struct drm_file *file_priv, unsigned int cmd, unsigned long arg); extern int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request); -extern int drm_agp_bind_ioctl(struct inode *inode, struct file *filp, +extern int drm_agp_bind_ioctl(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11) extern DRM_AGP_MEM *drm_agp_allocate_memory(size_t pages, u32 type); @@ -1128,10 +1137,10 @@ extern int drm_proc_cleanup(int minor, /* Scatter Gather Support (drm_scatter.h) */ extern void drm_sg_cleanup(struct drm_sg_mem * entry); -extern int drm_sg_alloc_ioctl(struct inode *inode, struct file *filp, +extern int drm_sg_alloc_ioctl(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); extern int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request); -extern int drm_sg_free(struct inode *inode, struct file *filp, +extern int drm_sg_free(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); /* ATI PCIGART support (ati_pcigart.h) */ diff --git a/linux-core/drm_agpsupport.c b/linux-core/drm_agpsupport.c index 57c88638..ab7b8c90 100644 --- a/linux-core/drm_agpsupport.c +++ b/linux-core/drm_agpsupport.c @@ -40,7 +40,7 @@ * Get AGP information. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg pointer to a (output) drm_agp_info structure. * \return zero on success or a negative number on failure. @@ -70,10 +70,9 @@ int drm_agp_info(struct drm_device * dev, struct drm_agp_info *info) } EXPORT_SYMBOL(drm_agp_info); -int drm_agp_info_ioctl(struct inode *inode, struct file *filp, +int drm_agp_info_ioctl(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; struct drm_device *dev = priv->head->dev; struct drm_agp_info info; int err; @@ -123,7 +122,7 @@ EXPORT_SYMBOL(drm_agp_acquire); * Acquire the AGP device (ioctl). * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument. * \return zero on success or a negative number on failure. @@ -131,12 +130,10 @@ EXPORT_SYMBOL(drm_agp_acquire); * Verifies the AGP device hasn't been acquired before and calls * \c agp_backend_acquire. 
  */
-int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp,
+int drm_agp_acquire_ioctl(struct inode *inode, struct drm_file *file_priv,
 			  unsigned int cmd, unsigned long arg)
 {
-	struct drm_file *priv = filp->private_data;
-
-	return drm_agp_acquire( (struct drm_device *) priv->head->dev );
+	return drm_agp_acquire( (struct drm_device *) file_priv->head->dev );
 }
 
 /**
@@ -162,12 +159,11 @@ int drm_agp_release(struct drm_device *dev)
 }
 EXPORT_SYMBOL(drm_agp_release);
 
-int drm_agp_release_ioctl(struct inode *inode, struct file *filp,
+int drm_agp_release_ioctl(struct inode *inode, struct drm_file *file_priv,
 			  unsigned int cmd, unsigned long arg)
 {
-	struct drm_file *priv = filp->private_data;
-	struct drm_device *dev = priv->head->dev;
-
+	struct drm_device *dev = file_priv->head->dev;
+
 	return drm_agp_release(dev);
 }
 
@@ -198,11 +194,10 @@ int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode)
 }
 EXPORT_SYMBOL(drm_agp_enable);
 
-int drm_agp_enable_ioctl(struct inode *inode, struct file *filp,
+int drm_agp_enable_ioctl(struct inode *inode, struct drm_file *file_priv,
 			 unsigned int cmd, unsigned long arg)
 {
-	struct drm_file *priv = filp->private_data;
-	struct drm_device *dev = priv->head->dev;
+	struct drm_device *dev = file_priv->head->dev;
 	struct drm_agp_mode mode;
 
@@ -216,7 +211,7 @@ int drm_agp_enable_ioctl(struct inode *inode, struct file *filp,
  * Allocate AGP memory.
  *
  * \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv file private pointer.
 * \param cmd command.
 * \param arg pointer to a drm_agp_buffer structure.
 * \return zero on success or a negative number on failure.
@@ -259,11 +254,10 @@ int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
 EXPORT_SYMBOL(drm_agp_alloc);
 
-int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp,
+int drm_agp_alloc_ioctl(struct inode *inode, struct drm_file *file_priv,
 			unsigned int cmd, unsigned long arg)
 {
-	struct drm_file *priv = filp->private_data;
-	struct drm_device *dev = priv->head->dev;
+	struct drm_device *dev = file_priv->head->dev;
 	struct drm_agp_buffer request;
 	struct drm_agp_buffer __user *argp = (void __user *)arg;
 	int err;
@@ -315,7 +309,7 @@ static struct drm_agp_mem *drm_agp_lookup_entry(struct drm_device * dev,
 * Unbind AGP memory from the GATT (ioctl).
 *
 * \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_agp_binding structure.
 * \return zero on success or a negative number on failure.
@@ -342,11 +336,10 @@ int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request)
 EXPORT_SYMBOL(drm_agp_unbind);
 
-int drm_agp_unbind_ioctl(struct inode *inode, struct file *filp,
+int drm_agp_unbind_ioctl(struct inode *inode, struct drm_file *file_priv,
 			 unsigned int cmd, unsigned long arg)
 {
-	struct drm_file *priv = filp->private_data;
-	struct drm_device *dev = priv->head->dev;
+	struct drm_device *dev = file_priv->head->dev;
 	struct drm_agp_binding request;
 
 	if (copy_from_user
@@ -361,7 +354,7 @@ int drm_agp_unbind_ioctl(struct inode *inode, struct file *filp,
 * Bind AGP memory into the GATT (ioctl)
 *
 * \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_agp_binding structure.
 * \return zero on success or a negative number on failure.
@@ -393,11 +386,10 @@ int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request)
 EXPORT_SYMBOL(drm_agp_bind);
 
-int drm_agp_bind_ioctl(struct inode *inode, struct file *filp,
+int drm_agp_bind_ioctl(struct inode *inode, struct drm_file *file_priv,
 		       unsigned int cmd, unsigned long arg)
 {
-	struct drm_file *priv = filp->private_data;
-	struct drm_device *dev = priv->head->dev;
+	struct drm_device *dev = file_priv->head->dev;
 	struct drm_agp_binding request;
 
 	if (copy_from_user
@@ -412,7 +404,7 @@ int drm_agp_bind_ioctl(struct inode *inode, struct file *filp,
 * Free AGP memory (ioctl).
 *
 * \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_agp_buffer structure.
 * \return zero on success or a negative number on failure.
@@ -443,11 +435,10 @@
 EXPORT_SYMBOL(drm_agp_free);
 
-int drm_agp_free_ioctl(struct inode *inode, struct file *filp,
+int drm_agp_free_ioctl(struct inode *inode, struct drm_file *file_priv,
 		       unsigned int cmd, unsigned long arg)
 {
-	struct drm_file *priv = filp->private_data;
-	struct drm_device *dev = priv->head->dev;
+	struct drm_device *dev = file_priv->head->dev;
 	struct drm_agp_buffer request;
 
 	if (copy_from_user
diff --git a/linux-core/drm_auth.c b/linux-core/drm_auth.c
index 4c48d872..f10a57b1 100644
--- a/linux-core/drm_auth.c
+++ b/linux-core/drm_auth.c
@@ -127,27 +127,26 @@ static int drm_remove_magic(struct drm_device * dev, drm_magic_t magic)
 * Get a unique magic number (ioctl).
 *
 * \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a resulting drm_auth structure.
 * \return zero on success, or a negative number on failure.
 *
 * If there is a magic number in drm_file::magic then use it, otherwise
 * searches an unique non-zero magic number and add it associating it with \p
- * filp.
+ * file_priv.
 */
-int drm_getmagic(struct inode *inode, struct file *filp,
+int drm_getmagic(struct inode *inode, struct drm_file *file_priv,
 		 unsigned int cmd, unsigned long arg)
 {
 	static drm_magic_t sequence = 0;
 	static DEFINE_SPINLOCK(lock);
-	struct drm_file *priv = filp->private_data;
-	struct drm_device *dev = priv->head->dev;
+	struct drm_device *dev = file_priv->head->dev;
 	struct drm_auth auth;
 
 	/* Find unique magic */
-	if (priv->magic) {
-		auth.magic = priv->magic;
+	if (file_priv->magic) {
+		auth.magic = file_priv->magic;
 	} else {
 		do {
 			spin_lock(&lock);
@@ -156,8 +155,8 @@ int drm_getmagic(struct inode *inode, struct file *filp,
 			auth.magic = sequence++;
 			spin_unlock(&lock);
 		} while (drm_find_file(dev, auth.magic));
-		priv->magic = auth.magic;
-		drm_add_magic(dev, priv, auth.magic);
+		file_priv->magic = auth.magic;
+		drm_add_magic(dev, file_priv, auth.magic);
 	}
 
 	DRM_DEBUG("%u\n", auth.magic);
@@ -170,18 +169,17 @@ int drm_getmagic(struct inode *inode, struct file *filp,
 * Authenticate with a magic.
 *
 * \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_auth structure.
 * \return zero if authentication successed, or a negative number otherwise.
 *
- * Checks if \p filp is associated with the magic number passed in \arg.
+ * Checks if \p file_priv is associated with the magic number passed in \arg.
 */
-int drm_authmagic(struct inode *inode, struct file *filp,
+int drm_authmagic(struct inode *inode, struct drm_file *file_priv,
 		  unsigned int cmd, unsigned long arg)
 {
-	struct drm_file *priv = filp->private_data;
-	struct drm_device *dev = priv->head->dev;
+	struct drm_device *dev = file_priv->head->dev;
 	struct drm_auth auth;
 	struct drm_file *file;
diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c
index 374be04e..671c6232 100644
--- a/linux-core/drm_bo.c
+++ b/linux-core/drm_bo.c
@@ -505,7 +505,8 @@ void drm_bo_usage_deref_locked(struct drm_buffer_object ** bo)
 	}
 }
 
-static void drm_bo_base_deref_locked(struct drm_file * priv, struct drm_user_object * uo)
+static void drm_bo_base_deref_locked(struct drm_file * file_priv,
+				     struct drm_user_object * uo)
 {
 	struct drm_buffer_object *bo = drm_user_object_entry(uo, struct drm_buffer_object, base);
@@ -535,13 +536,13 @@ static void drm_bo_usage_deref_unlocked(struct drm_buffer_object ** bo)
 * and deregister fence object usage.
 */
 
-int drm_fence_buffer_objects(struct drm_file * priv,
+int drm_fence_buffer_objects(struct drm_file * file_priv,
 			     struct list_head *list,
 			     uint32_t fence_flags,
 			     struct drm_fence_object * fence,
 			     struct drm_fence_object ** used_fence)
 {
-	struct drm_device *dev = priv->head->dev;
+	struct drm_device *dev = file_priv->head->dev;
 	struct drm_buffer_manager *bm = &dev->bm;
 	struct drm_buffer_object *entry;
@@ -921,21 +922,21 @@ static int drm_bo_new_mask(struct drm_buffer_object * bo,
 * Call dev->struct_mutex locked.
 */
 
-struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file * priv,
+struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
 						   uint32_t handle, int check_owner)
 {
 	struct drm_user_object *uo;
 	struct drm_buffer_object *bo;
 
-	uo = drm_lookup_user_object(priv, handle);
+	uo = drm_lookup_user_object(file_priv, handle);
 
 	if (!uo || (uo->type != drm_buffer_type)) {
 		DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
 		return NULL;
 	}
 
-	if (check_owner && priv != uo->owner) {
-		if (!drm_lookup_ref_object(priv, uo, _DRM_REF_USE))
+	if (check_owner && file_priv != uo->owner) {
+		if (!drm_lookup_ref_object(file_priv, uo, _DRM_REF_USE))
 			return NULL;
 	}
 
@@ -1102,17 +1103,17 @@ static void drm_bo_fill_rep_arg(struct drm_buffer_object * bo,
 * unregistered.
 */
-static int drm_buffer_object_map(struct drm_file * priv, uint32_t handle,
+static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle,
 				 uint32_t map_flags, unsigned hint,
 				 struct drm_bo_info_rep *rep)
 {
 	struct drm_buffer_object *bo;
-	struct drm_device *dev = priv->head->dev;
+	struct drm_device *dev = file_priv->head->dev;
 	int ret = 0;
 	int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
 
 	mutex_lock(&dev->struct_mutex);
-	bo = drm_lookup_buffer_object(priv, handle, 1);
+	bo = drm_lookup_buffer_object(file_priv, handle, 1);
 	mutex_unlock(&dev->struct_mutex);
 
 	if (!bo)
@@ -1169,7 +1170,7 @@ static int drm_buffer_object_map(struct drm_file * priv, uint32_t handle,
 	}
 
 	mutex_lock(&dev->struct_mutex);
-	ret = drm_add_ref_object(priv, &bo->base, _DRM_REF_TYPE1);
+	ret = drm_add_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
 	mutex_unlock(&dev->struct_mutex);
 	if (ret) {
 		if (atomic_add_negative(-1, &bo->mapped))
@@ -1183,28 +1184,28 @@ static int drm_buffer_object_map(struct drm_file * priv, uint32_t handle,
 	return ret;
 }
 
-static int drm_buffer_object_unmap(struct drm_file * priv, uint32_t handle)
+static int drm_buffer_object_unmap(struct drm_file *file_priv, uint32_t handle)
 {
-	struct drm_device *dev = priv->head->dev;
+	struct drm_device *dev = file_priv->head->dev;
 	struct drm_buffer_object *bo;
 	struct drm_ref_object *ro;
 	int ret = 0;
 
 	mutex_lock(&dev->struct_mutex);
 
-	bo = drm_lookup_buffer_object(priv, handle, 1);
+	bo = drm_lookup_buffer_object(file_priv, handle, 1);
 	if (!bo) {
 		ret = -EINVAL;
 		goto out;
 	}
 
-	ro = drm_lookup_ref_object(priv, &bo->base, _DRM_REF_TYPE1);
+	ro = drm_lookup_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
 	if (!ro) {
 		ret = -EINVAL;
 		goto out;
 	}
 
-	drm_remove_ref_object(priv, ro);
+	drm_remove_ref_object(file_priv, ro);
 	drm_bo_usage_deref_locked(&bo);
       out:
 	mutex_unlock(&dev->struct_mutex);
@@ -1215,7 +1216,7 @@ static int drm_buffer_object_unmap(struct drm_file * priv, uint32_t handle)
 * Call struct-sem locked.
 */
-static void drm_buffer_user_object_unmap(struct drm_file * priv,
+static void drm_buffer_user_object_unmap(struct drm_file *file_priv,
 					 struct drm_user_object * uo,
 					 enum drm_ref_type action)
 {
@@ -1489,19 +1490,19 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo,
 	return 0;
 }
 
-static int drm_bo_handle_validate(struct drm_file * priv,
+static int drm_bo_handle_validate(struct drm_file *file_priv,
 				  uint32_t handle,
 				  uint32_t fence_class,
 				  uint64_t flags, uint64_t mask, uint32_t hint,
 				  struct drm_bo_info_rep *rep)
 {
-	struct drm_device *dev = priv->head->dev;
+	struct drm_device *dev = file_priv->head->dev;
 	struct drm_buffer_object *bo;
 	int ret;
 	int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
 
 	mutex_lock(&dev->struct_mutex);
-	bo = drm_lookup_buffer_object(priv, handle, 1);
+	bo = drm_lookup_buffer_object(file_priv, handle, 1);
 	mutex_unlock(&dev->struct_mutex);
 	if (!bo) {
 		return -EINVAL;
@@ -1532,14 +1533,14 @@ static int drm_bo_handle_validate(struct drm_file * priv,
 	return ret;
 }
 
-static int drm_bo_handle_info(struct drm_file *priv, uint32_t handle,
+static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle,
 			      struct drm_bo_info_rep *rep)
 {
-	struct drm_device *dev = priv->head->dev;
+	struct drm_device *dev = file_priv->head->dev;
 	struct drm_buffer_object *bo;
 
 	mutex_lock(&dev->struct_mutex);
-	bo = drm_lookup_buffer_object(priv, handle, 1);
+	bo = drm_lookup_buffer_object(file_priv, handle, 1);
 	mutex_unlock(&dev->struct_mutex);
 
 	if (!bo) {
@@ -1554,17 +1555,17 @@ static int drm_bo_handle_info(struct drm_file *priv, uint32_t handle,
 	return 0;
 }
 
-static int drm_bo_handle_wait(struct drm_file *priv, uint32_t handle,
+static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle,
 			      uint32_t hint, struct drm_bo_info_rep *rep)
 {
-	struct drm_device *dev = priv->head->dev;
+	struct drm_device *dev = file_priv->head->dev;
 	struct drm_buffer_object *bo;
 	int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
 	int ret;
 
 	mutex_lock(&dev->struct_mutex);
-	bo = drm_lookup_buffer_object(priv, handle, 1);
+	bo = drm_lookup_buffer_object(file_priv, handle, 1);
 	mutex_unlock(&dev->struct_mutex);
 
 	if (!bo) {
@@ -1672,14 +1673,15 @@ int drm_buffer_object_create(struct drm_device *dev,
 	return ret;
 }
 
-static int drm_bo_add_user_object(struct drm_file * priv, struct drm_buffer_object * bo,
+static int drm_bo_add_user_object(struct drm_file *file_priv,
+				  struct drm_buffer_object *bo,
 				  int shareable)
 {
-	struct drm_device *dev = priv->head->dev;
+	struct drm_device *dev = file_priv->head->dev;
 	int ret;
 
 	mutex_lock(&dev->struct_mutex);
-	ret = drm_add_user_object(priv, &bo->base, shareable);
+	ret = drm_add_user_object(file_priv, &bo->base, shareable);
 	if (ret)
 		goto out;
@@ -1693,9 +1695,9 @@ static int drm_bo_add_user_object(struct drm_file * priv, struct drm_buffer_obje
 	return ret;
 }
 
-static int drm_bo_lock_test(struct drm_device * dev, struct file *filp)
+static int drm_bo_lock_test(struct drm_device * dev, struct drm_file *file_priv)
 {
-	LOCK_TEST_WITH_RETURN(dev, filp);
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
 	return 0;
 }
@@ -1724,10 +1726,10 @@ int drm_bo_op_ioctl(DRM_IOCTL_ARGS)
 	ret = 0;
 	switch (req->op) {
 	case drm_bo_validate:
-		ret = drm_bo_lock_test(dev, filp);
+		ret = drm_bo_lock_test(dev, file_priv);
 		if (ret)
 			break;
-		ret = drm_bo_handle_validate(priv, req->bo_req.handle,
+		ret = drm_bo_handle_validate(file_priv, req->bo_req.handle,
 					     req->bo_req.fence_class,
 					     req->bo_req.flags,
 					     req->bo_req.mask,
@@ -1779,18 +1781,18 @@ int drm_bo_create_ioctl(DRM_IOCTL_ARGS)
DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - ret = drm_bo_lock_test(dev, filp); + ret = drm_bo_lock_test(dev, file_priv); if (ret) goto out; - ret = drm_buffer_object_create(priv->head->dev, + ret = drm_buffer_object_create(file_priv->head->dev, req->size, req->type, req->mask, req->hint, req->page_alignment, req->buffer_start, &entry); if (ret) goto out; - ret = drm_bo_add_user_object(priv, entry, + ret = drm_bo_add_user_object(file_priv, entry, req->mask & DRM_BO_FLAG_SHAREABLE); if (ret) { drm_bo_usage_deref_unlocked(&entry); @@ -1822,12 +1824,12 @@ int drm_bo_destroy_ioctl(DRM_IOCTL_ARGS) DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); mutex_lock(&dev->struct_mutex); - uo = drm_lookup_user_object(priv, arg.handle); - if (!uo || (uo->type != drm_buffer_type) || uo->owner != priv) { + uo = drm_lookup_user_object(file_priv, arg.handle); + if (!uo || (uo->type != drm_buffer_type) || uo->owner != file_priv) { mutex_unlock(&dev->struct_mutex); return -EINVAL; } - ret = drm_remove_user_object(priv, uo); + ret = drm_remove_user_object(file_priv, uo); mutex_unlock(&dev->struct_mutex); return ret; @@ -1847,7 +1849,7 @@ int drm_bo_map_ioctl(DRM_IOCTL_ARGS) DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - ret = drm_buffer_object_map(priv, req->handle, req->mask, + ret = drm_buffer_object_map(file_priv, req->handle, req->mask, req->hint, rep); if (ret) return ret; @@ -1868,7 +1870,7 @@ int drm_bo_unmap_ioctl(DRM_IOCTL_ARGS) DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - ret = drm_buffer_object_unmap(priv, arg.handle); + ret = drm_buffer_object_unmap(file_priv, arg.handle); return ret; } @@ -1889,12 +1891,12 @@ int drm_bo_reference_ioctl(DRM_IOCTL_ARGS) DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - ret = drm_user_object_ref(priv, req->handle, + ret = drm_user_object_ref(file_priv, req->handle, drm_buffer_type, &uo); if (ret) return ret; - ret = drm_bo_handle_info(priv, req->handle, rep); + ret = drm_bo_handle_info(file_priv, req->handle, rep); if (ret) return ret; @@ -1915,7 +1917,7 @@ int drm_bo_unreference_ioctl(DRM_IOCTL_ARGS) DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - ret = drm_user_object_unref(priv, arg.handle, drm_buffer_type); + ret = drm_user_object_unref(file_priv, arg.handle, drm_buffer_type); return ret; } @@ -1934,7 +1936,7 @@ int drm_bo_info_ioctl(DRM_IOCTL_ARGS) DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - ret = drm_bo_handle_info(priv, req->handle, rep); + ret = drm_bo_handle_info(file_priv, req->handle, rep); if (ret) return ret; DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); @@ -1955,7 +1957,7 @@ int drm_bo_wait_idle_ioctl(DRM_IOCTL_ARGS) DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - ret = drm_bo_handle_wait(priv, req->handle, + ret = drm_bo_handle_wait(file_priv, req->handle, req->hint, rep); if (ret) return ret; @@ -2407,7 +2409,7 @@ int drm_mm_takedown_ioctl(DRM_IOCTL_ARGS) DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); mutex_lock(&dev->bm.init_mutex); mutex_lock(&dev->struct_mutex); ret = -EINVAL; @@ -2448,7 +2450,7 @@ int drm_mm_lock_ioctl(DRM_IOCTL_ARGS) DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); mutex_lock(&dev->bm.init_mutex); mutex_lock(&dev->struct_mutex); ret = drm_bo_lock_mm(dev, arg.mem_type); @@ 
-2474,7 +2476,7 @@ int drm_mm_unlock_ioctl(DRM_IOCTL_ARGS) } DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); mutex_lock(&dev->bm.init_mutex); mutex_lock(&dev->struct_mutex); ret = 0; diff --git a/linux-core/drm_bufs.c b/linux-core/drm_bufs.c index c1e23b5c..a571b817 100644 --- a/linux-core/drm_bufs.c +++ b/linux-core/drm_bufs.c @@ -92,7 +92,7 @@ static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash, * Ioctl to specify a range of memory that is available for mapping by a non-root process. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg pointer to a drm_map structure. * \return zero on success or a negative value on error. @@ -326,19 +326,15 @@ int drm_addmap(struct drm_device *dev, unsigned int offset, EXPORT_SYMBOL(drm_addmap); -int drm_addmap_ioctl(struct inode *inode, struct file *filp, +int drm_addmap_ioctl(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_map map; struct drm_map_list *maplist; struct drm_map __user *argp = (void __user *)arg; int err; - if (!(filp->f_mode & 3)) - return -EACCES; /* Require read/write */ - if (copy_from_user(&map, argp, sizeof(map))) { return -EFAULT; } @@ -366,7 +362,7 @@ int drm_addmap_ioctl(struct inode *inode, struct file *filp, * isn't in use. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg pointer to a struct drm_map structure. * \return zero on success or a negative value on error. @@ -455,11 +451,10 @@ EXPORT_SYMBOL(drm_rmmap); * gets used by drivers that the server doesn't need to care about. This seems * unlikely. 
*/ -int drm_rmmap_ioctl(struct inode *inode, struct file *filp, +int drm_rmmap_ioctl(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_map request; drm_local_map_t *map = NULL; struct drm_map_list *r_list; @@ -667,7 +662,7 @@ int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request) buf->waiting = 0; buf->pending = 0; init_waitqueue_head(&buf->dma_wait); - buf->filp = NULL; + buf->file_priv = NULL; buf->dev_priv_size = dev->driver->dev_priv_size; buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS); @@ -878,7 +873,7 @@ int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request) buf->waiting = 0; buf->pending = 0; init_waitqueue_head(&buf->dma_wait); - buf->filp = NULL; + buf->file_priv = NULL; buf->dev_priv_size = dev->driver->dev_priv_size; buf->dev_private = drm_alloc(buf->dev_priv_size, @@ -1056,7 +1051,7 @@ static int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc * request) buf->waiting = 0; buf->pending = 0; init_waitqueue_head(&buf->dma_wait); - buf->filp = NULL; + buf->file_priv = NULL; buf->dev_priv_size = dev->driver->dev_priv_size; buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS); @@ -1217,7 +1212,7 @@ int drm_addbufs_fb(struct drm_device *dev, struct drm_buf_desc *request) buf->waiting = 0; buf->pending = 0; init_waitqueue_head(&buf->dma_wait); - buf->filp = NULL; + buf->file_priv = NULL; buf->dev_priv_size = dev->driver->dev_priv_size; buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS); @@ -1282,7 +1277,7 @@ EXPORT_SYMBOL(drm_addbufs_fb); * Add buffers for DMA transfers (ioctl). * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg pointer to a struct drm_buf_desc request. * \return zero on success or a negative number on failure. @@ -1292,12 +1287,11 @@ EXPORT_SYMBOL(drm_addbufs_fb); * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent * PCI memory respectively. */ -int drm_addbufs(struct inode *inode, struct file *filp, +int drm_addbufs(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { struct drm_buf_desc request; - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; int ret; if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) @@ -1336,7 +1330,7 @@ int drm_addbufs(struct inode *inode, struct file *filp, * large buffers can be used for image transfer). * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg pointer to a drm_buf_info structure. * \return zero on success or a negative number on failure. @@ -1345,11 +1339,10 @@ int drm_addbufs(struct inode *inode, struct file *filp, * lock, preventing of allocating more buffers after this call. Information * about each requested buffer is then copied into user space. 
*/ -int drm_infobufs(struct inode *inode, struct file *filp, +int drm_infobufs(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_device_dma *dma = dev->dma; struct drm_buf_info request; struct drm_buf_info __user *argp = (void __user *)arg; @@ -1423,7 +1416,7 @@ int drm_infobufs(struct inode *inode, struct file *filp, * Specifies a low and high water mark for buffer allocation * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg a pointer to a drm_buf_desc structure. * \return zero on success or a negative number on failure. @@ -1433,11 +1426,10 @@ int drm_infobufs(struct inode *inode, struct file *filp, * * \note This ioctl is deprecated and mostly never used. */ -int drm_markbufs(struct inode *inode, struct file *filp, +int drm_markbufs(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_device_dma *dma = dev->dma; struct drm_buf_desc request; int order; @@ -1475,7 +1467,7 @@ int drm_markbufs(struct inode *inode, struct file *filp, * Unreserve the buffers in list, previously reserved using drmDMA. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg pointer to a drm_buf_free structure. * \return zero on success or a negative number on failure. @@ -1483,11 +1475,10 @@ int drm_markbufs(struct inode *inode, struct file *filp, * Calls free_buffer() for each used buffer. * This function is primarily used for debugging. */ -int drm_freebufs(struct inode *inode, struct file *filp, +int drm_freebufs(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_device_dma *dma = dev->dma; struct drm_buf_free request; int i; @@ -1514,7 +1505,7 @@ int drm_freebufs(struct inode *inode, struct file *filp, return -EINVAL; } buf = dma->buflist[idx]; - if (buf->filp != filp) { + if (buf->file_priv != file_priv) { DRM_ERROR("Process %d freeing buffer not owned\n", current->pid); return -EINVAL; @@ -1529,7 +1520,7 @@ int drm_freebufs(struct inode *inode, struct file *filp, * Maps all of the DMA buffers into client-virtual space (ioctl). * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg pointer to a drm_buf_map structure. * \return zero on success or a negative number on failure. @@ -1539,11 +1530,10 @@ int drm_freebufs(struct inode *inode, struct file *filp, * offset equal to 0, which drm_mmap() interpretes as PCI buffers and calls * drm_mmap_dma(). 
*/ -int drm_mapbufs(struct inode *inode, struct file *filp, +int drm_mapbufs(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_device_dma *dma = dev->dma; struct drm_buf_map __user *argp = (void __user *)arg; int retcode = 0; @@ -1584,14 +1574,14 @@ int drm_mapbufs(struct inode *inode, struct file *filp, goto done; } down_write(¤t->mm->mmap_sem); - virtual = do_mmap(filp, 0, map->size, + virtual = do_mmap(file_priv->filp, 0, map->size, PROT_READ | PROT_WRITE, MAP_SHARED, token); up_write(¤t->mm->mmap_sem); } else { down_write(¤t->mm->mmap_sem); - virtual = do_mmap(filp, 0, dma->byte_count, + virtual = do_mmap(file_priv->filp, 0, dma->byte_count, PROT_READ | PROT_WRITE, MAP_SHARED, 0); up_write(¤t->mm->mmap_sem); diff --git a/linux-core/drm_context.c b/linux-core/drm_context.c index a0b1a7ec..76e13f65 100644 --- a/linux-core/drm_context.c +++ b/linux-core/drm_context.c @@ -132,7 +132,7 @@ void drm_ctxbitmap_cleanup(struct drm_device *dev) * Get per-context SAREA. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument pointing to a drm_ctx_priv_map structure. * \return zero on success or a negative number on failure. @@ -140,11 +140,10 @@ void drm_ctxbitmap_cleanup(struct drm_device *dev) * Gets the map from drm_device::ctx_idr with the handle specified and * returns its handle. */ -int drm_getsareactx(struct inode *inode, struct file *filp, +int drm_getsareactx(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_ctx_priv_map __user *argp = (void __user *)arg; struct drm_ctx_priv_map request; struct drm_map *map; @@ -183,7 +182,7 @@ int drm_getsareactx(struct inode *inode, struct file *filp, * Set per-context SAREA. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument pointing to a drm_ctx_priv_map structure. * \return zero on success or a negative number on failure. @@ -191,11 +190,10 @@ int drm_getsareactx(struct inode *inode, struct file *filp, * Searches the mapping specified in \p arg and update the entry in * drm_device::ctx_idr with it. */ -int drm_setsareactx(struct inode *inode, struct file *filp, +int drm_setsareactx(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_ctx_priv_map request; struct drm_map *map = NULL; struct drm_map_list *r_list = NULL; @@ -293,12 +291,12 @@ static int drm_context_switch_complete(struct drm_device *dev, int new) * Reserve contexts. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument pointing to a drm_ctx_res structure. * \return zero on success or a negative number on failure. 
*/ -int drm_resctx(struct inode *inode, struct file *filp, +int drm_resctx(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { struct drm_ctx_res res; @@ -328,18 +326,17 @@ int drm_resctx(struct inode *inode, struct file *filp, * Add context. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument pointing to a drm_ctx structure. * \return zero on success or a negative number on failure. * * Get a new handle for the context and copy to userspace. */ -int drm_addctx(struct inode *inode, struct file *filp, +int drm_addctx(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_ctx_list *ctx_entry; struct drm_ctx __user *argp = (void __user *)arg; struct drm_ctx ctx; @@ -375,7 +372,7 @@ int drm_addctx(struct inode *inode, struct file *filp, INIT_LIST_HEAD(&ctx_entry->head); ctx_entry->handle = ctx.handle; - ctx_entry->tag = priv; + ctx_entry->tag = file_priv; mutex_lock(&dev->ctxlist_mutex); list_add(&ctx_entry->head, &dev->ctxlist); @@ -387,7 +384,7 @@ int drm_addctx(struct inode *inode, struct file *filp, return 0; } -int drm_modctx(struct inode *inode, struct file *filp, +int drm_modctx(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { /* This does nothing */ @@ -398,12 +395,12 @@ int drm_modctx(struct inode *inode, struct file *filp, * Get context. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument pointing to a drm_ctx structure. * \return zero on success or a negative number on failure. */ -int drm_getctx(struct inode *inode, struct file *filp, +int drm_getctx(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { struct drm_ctx __user *argp = (void __user *)arg; @@ -424,18 +421,17 @@ int drm_getctx(struct inode *inode, struct file *filp, * Switch context. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument pointing to a drm_ctx structure. * \return zero on success or a negative number on failure. * * Calls context_switch(). */ -int drm_switchctx(struct inode *inode, struct file *filp, +int drm_switchctx(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_ctx ctx; if (copy_from_user(&ctx, (struct drm_ctx __user *) arg, sizeof(ctx))) @@ -449,18 +445,17 @@ int drm_switchctx(struct inode *inode, struct file *filp, * New context. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument pointing to a drm_ctx structure. * \return zero on success or a negative number on failure. * * Calls context_switch_complete(). 
*/ -int drm_newctx(struct inode *inode, struct file *filp, +int drm_newctx(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_ctx ctx; if (copy_from_user(&ctx, (struct drm_ctx __user *) arg, sizeof(ctx))) @@ -476,18 +471,17 @@ int drm_newctx(struct inode *inode, struct file *filp, * Remove context. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument pointing to a drm_ctx structure. * \return zero on success or a negative number on failure. * * If not the special kernel context, calls ctxbitmap_free() to free the specified context. */ -int drm_rmctx(struct inode *inode, struct file *filp, +int drm_rmctx(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_ctx ctx; if (copy_from_user(&ctx, (struct drm_ctx __user *) arg, sizeof(ctx))) @@ -495,7 +489,7 @@ int drm_rmctx(struct inode *inode, struct file *filp, DRM_DEBUG("%d\n", ctx.handle); if (ctx.handle == DRM_KERNEL_CONTEXT + 1) { - priv->remove_auth_on_close = 1; + file_priv->remove_auth_on_close = 1; } if (ctx.handle != DRM_KERNEL_CONTEXT) { if (dev->driver->context_dtor) diff --git a/linux-core/drm_dma.c b/linux-core/drm_dma.c index d2a88d52..7cc44193 100644 --- a/linux-core/drm_dma.c +++ b/linux-core/drm_dma.c @@ -136,7 +136,7 @@ void drm_free_buffer(struct drm_device * dev, struct drm_buf * buf) buf->waiting = 0; buf->pending = 0; - buf->filp = NULL; + buf->file_priv = NULL; buf->used = 0; if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) @@ -148,11 +148,12 @@ void drm_free_buffer(struct drm_device * dev, struct drm_buf * buf) /** * Reclaim the buffers. * - * \param filp file pointer. + * \param file_priv DRM file private. * - * Frees each buffer associated with \p filp not already on the hardware. + * Frees each buffer associated with \p file_priv not already on the hardware. 
*/ -void drm_core_reclaim_buffers(struct drm_device *dev, struct file *filp) +void drm_core_reclaim_buffers(struct drm_device *dev, + struct drm_file *file_priv) { struct drm_device_dma *dma = dev->dma; int i; @@ -160,7 +161,7 @@ void drm_core_reclaim_buffers(struct drm_device *dev, struct file *filp) if (!dma) return; for (i = 0; i < dma->buf_count; i++) { - if (dma->buflist[i]->filp == filp) { + if (dma->buflist[i]->file_priv == file_priv) { switch (dma->buflist[i]->list) { case DRM_LIST_NONE: drm_free_buffer(dev, dma->buflist[i]); diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index 84efbfe7..92b07729 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -51,7 +51,7 @@ static void drm_cleanup(struct drm_device * dev); int drm_fb_loaded = 0; -static int drm_version(struct inode *inode, struct file *filp, +static int drm_version(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); /** Ioctl table */ @@ -276,7 +276,7 @@ int drm_lastclose(struct drm_device * dev) if (dev->lock.hw_lock) { dev->sigdata.lock = dev->lock.hw_lock = NULL; /* SHM removed */ - dev->lock.filp = NULL; + dev->lock.file_priv = NULL; wake_up_interruptible(&dev->lock.lock_queue); } dev->dev_mapping = NULL; @@ -538,18 +538,17 @@ module_exit(drm_core_exit); * Get version information * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument, pointing to a drm_version structure. * \return zero on success or negative number on failure. * * Fills in the version information in \p arg. */ -static int drm_version(struct inode *inode, struct file *filp, +static int drm_version(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_version __user *argp = (void __user *)arg; struct drm_version version; int len; @@ -573,7 +572,7 @@ static int drm_version(struct inode *inode, struct file *filp, * Called whenever a process performs an ioctl on /dev/drm. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument. * \return zero on success or negative number on failure. 
@@ -584,8 +583,8 @@ static int drm_version(struct inode *inode, struct file *filp, int drm_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_file *file_priv = filp->private_data; + struct drm_device *dev = file_priv->head->dev; struct drm_ioctl_desc *ioctl; drm_ioctl_t *func; unsigned int nr = DRM_IOCTL_NR(cmd); @@ -593,11 +592,11 @@ int drm_ioctl(struct inode *inode, struct file *filp, atomic_inc(&dev->ioctl_count); atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]); - ++priv->ioctl_count; + ++file_priv->ioctl_count; DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n", - current->pid, cmd, nr, (long)old_encode_dev(priv->head->device), - priv->authenticated); + current->pid, cmd, nr, (long)old_encode_dev(file_priv->head->device), + file_priv->authenticated); if ((nr >= DRM_CORE_IOCTL_COUNT) && ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END))) @@ -619,11 +618,11 @@ int drm_ioctl(struct inode *inode, struct file *filp, DRM_DEBUG("no function\n"); retcode = -EINVAL; } else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) || - ((ioctl->flags & DRM_AUTH) && !priv->authenticated) || - ((ioctl->flags & DRM_MASTER) && !priv->master)) { + ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) || + ((ioctl->flags & DRM_MASTER) && !file_priv->master)) { retcode = -EACCES; } else { - retcode = func(inode, filp, cmd, arg); + retcode = func(inode, file_priv, cmd, arg); } err_i1: atomic_dec(&dev->ioctl_count); diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c index 9b2fa405..3a3035e1 100644 --- a/linux-core/drm_fence.c +++ b/linux-core/drm_fence.c @@ -582,12 +582,12 @@ int drm_fence_create_ioctl(DRM_IOCTL_ARGS) DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); if (arg.flags & DRM_FENCE_FLAG_EMIT) - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); ret = drm_fence_object_create(dev, arg.class, arg.type, arg.flags, &fence); if (ret) return ret; - ret = drm_fence_add_user_object(priv, fence, + ret = drm_fence_add_user_object(file_priv, fence, arg.flags & DRM_FENCE_FLAG_SHAREABLE); if (ret) { @@ -630,12 +630,12 @@ int drm_fence_destroy_ioctl(DRM_IOCTL_ARGS) DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); mutex_lock(&dev->struct_mutex); - uo = drm_lookup_user_object(priv, arg.handle); - if (!uo || (uo->type != drm_fence_type) || uo->owner != priv) { + uo = drm_lookup_user_object(file_priv, arg.handle); + if (!uo || (uo->type != drm_fence_type) || uo->owner != file_priv) { mutex_unlock(&dev->struct_mutex); return -EINVAL; } - ret = drm_remove_user_object(priv, uo); + ret = drm_remove_user_object(file_priv, uo); mutex_unlock(&dev->struct_mutex); return ret; } @@ -658,10 +658,10 @@ int drm_fence_reference_ioctl(DRM_IOCTL_ARGS) } DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - ret = drm_user_object_ref(priv, arg.handle, drm_fence_type, &uo); + ret = drm_user_object_ref(file_priv, arg.handle, drm_fence_type, &uo); if (ret) return ret; - fence = drm_lookup_fence_object(priv, arg.handle); + fence = drm_lookup_fence_object(file_priv, arg.handle); read_lock_irqsave(&fm->lock, flags); arg.class = fence->class; @@ -689,7 +689,7 @@ int drm_fence_unreference_ioctl(DRM_IOCTL_ARGS) } DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - return drm_user_object_unref(priv, arg.handle, drm_fence_type); + return drm_user_object_unref(file_priv, arg.handle, 
drm_fence_type); } int drm_fence_signaled_ioctl(DRM_IOCTL_ARGS) @@ -709,7 +709,7 @@ int drm_fence_signaled_ioctl(DRM_IOCTL_ARGS) DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - fence = drm_lookup_fence_object(priv, arg.handle); + fence = drm_lookup_fence_object(file_priv, arg.handle); if (!fence) return -EINVAL; @@ -741,7 +741,7 @@ int drm_fence_flush_ioctl(DRM_IOCTL_ARGS) DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - fence = drm_lookup_fence_object(priv, arg.handle); + fence = drm_lookup_fence_object(file_priv, arg.handle); if (!fence) return -EINVAL; ret = drm_fence_object_flush(fence, arg.type); @@ -775,7 +775,7 @@ int drm_fence_wait_ioctl(DRM_IOCTL_ARGS) DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - fence = drm_lookup_fence_object(priv, arg.handle); + fence = drm_lookup_fence_object(file_priv, arg.handle); if (!fence) return -EINVAL; ret = drm_fence_object_wait(fence, @@ -811,8 +811,8 @@ int drm_fence_emit_ioctl(DRM_IOCTL_ARGS) DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - LOCK_TEST_WITH_RETURN(dev, filp); - fence = drm_lookup_fence_object(priv, arg.handle); + LOCK_TEST_WITH_RETURN(dev, file_priv); + fence = drm_lookup_fence_object(file_priv, arg.handle); if (!fence) return -EINVAL; ret = drm_fence_object_emit(fence, arg.flags, arg.class, @@ -850,12 +850,12 @@ int drm_fence_buffers_ioctl(DRM_IOCTL_ARGS) DRM_ERROR("Buffer object manager is not initialized\n"); return -EINVAL; } - LOCK_TEST_WITH_RETURN(dev, filp); - ret = drm_fence_buffer_objects(priv, NULL, arg.flags, + LOCK_TEST_WITH_RETURN(dev, file_priv); + ret = drm_fence_buffer_objects(file_priv, NULL, arg.flags, NULL, &fence); if (ret) return ret; - ret = drm_fence_add_user_object(priv, fence, + ret = drm_fence_add_user_object(file_priv, fence, arg.flags & DRM_FENCE_FLAG_SHAREABLE); if (ret) diff --git a/linux-core/drm_fops.c b/linux-core/drm_fops.c index d542d4e3..0162f113 100644 --- a/linux-core/drm_fops.c +++ b/linux-core/drm_fops.c @@ -252,6 +252,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp, memset(priv, 0, sizeof(*priv)); filp->private_data = priv; + priv->filp = filp; priv->uid = current->euid; priv->pid = current->pid; priv->minor = minor; @@ -376,7 +377,7 @@ static void drm_object_release(struct file *filp) { * Release file. * * \param inode device inode - * \param filp file pointer. + * \param file_priv DRM file private. * \return zero on success or a negative number on failure. 
* * If the hardware lock is held then free it, and take it again for the kernel @@ -386,29 +387,28 @@ static void drm_object_release(struct file *filp) { */ int drm_release(struct inode *inode, struct file *filp) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev; + struct drm_file *file_priv = filp->private_data; + struct drm_device *dev = file_priv->head->dev; int retcode = 0; lock_kernel(); - dev = priv->head->dev; DRM_DEBUG("open_count = %d\n", dev->open_count); if (dev->driver->preclose) - dev->driver->preclose(dev, filp); + dev->driver->preclose(dev, file_priv); /* ======================================================== * Begin inline drm_release */ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n", - current->pid, (long)old_encode_dev(priv->head->device), + current->pid, (long)old_encode_dev(dev), dev->open_count); if (dev->driver->reclaim_buffers_locked && dev->lock.hw_lock) { - if (drm_i_have_hw_lock(filp)) { - dev->driver->reclaim_buffers_locked(dev, filp); + if (drm_i_have_hw_lock(file_priv)) { + dev->driver->reclaim_buffers_locked(dev, file_priv); } else { unsigned long _end=jiffies + 3*DRM_HZ; int locked = 0; @@ -434,7 +434,7 @@ int drm_release(struct inode *inode, struct file *filp) "\tI will go on reclaiming the buffers anyway.\n"); } - dev->driver->reclaim_buffers_locked(dev, filp); + dev->driver->reclaim_buffers_locked(dev, file_priv); drm_idlelock_release(&dev->lock); } } @@ -442,12 +442,12 @@ int drm_release(struct inode *inode, struct file *filp) if (dev->driver->reclaim_buffers_idlelocked && dev->lock.hw_lock) { drm_idlelock_take(&dev->lock); - dev->driver->reclaim_buffers_idlelocked(dev, filp); + dev->driver->reclaim_buffers_idlelocked(dev, file_priv); drm_idlelock_release(&dev->lock); } - if (drm_i_have_hw_lock(filp)) { + if (drm_i_have_hw_lock(file_priv)) { DRM_DEBUG("File %p released, freeing lock for context %d\n", filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); @@ -458,7 +458,7 @@ int drm_release(struct inode *inode, struct file *filp) if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) && !dev->driver->reclaim_buffers_locked) { - dev->driver->reclaim_buffers(dev, filp); + dev->driver->reclaim_buffers(dev, file_priv); } drm_fasync(-1, filp, 0); @@ -469,7 +469,7 @@ int drm_release(struct inode *inode, struct file *filp) struct drm_ctx_list *pos, *n; list_for_each_entry_safe(pos, n, &dev->ctxlist, head) { - if (pos->tag == priv && + if (pos->tag == file_priv && pos->handle != DRM_KERNEL_CONTEXT) { if (dev->driver->context_dtor) dev->driver->context_dtor(dev, @@ -487,18 +487,18 @@ int drm_release(struct inode *inode, struct file *filp) mutex_lock(&dev->struct_mutex); drm_object_release(filp); - if (priv->remove_auth_on_close == 1) { + if (file_priv->remove_auth_on_close == 1) { struct drm_file *temp; list_for_each_entry(temp, &dev->filelist, lhead) temp->authenticated = 0; } - list_del(&priv->lhead); + list_del(&file_priv->lhead); mutex_unlock(&dev->struct_mutex); if (dev->driver->postclose) - dev->driver->postclose(dev, priv); - drm_free(priv, sizeof(*priv), DRM_MEM_FILES); + dev->driver->postclose(dev, file_priv); + drm_free(file_priv, sizeof(*file_priv), DRM_MEM_FILES); /* ======================================================== * End inline drm_release diff --git a/linux-core/drm_ioc32.c b/linux-core/drm_ioc32.c index b1162785..558376de 100644 --- a/linux-core/drm_ioc32.c +++ b/linux-core/drm_ioc32.c @@ -1040,7 +1040,7 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = { * Called whenever a 32-bit process running under a 64-bit kernel * 
performs an ioctl on /dev/drm. * - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument. * \return zero on success or negative number on failure. diff --git a/linux-core/drm_ioctl.c b/linux-core/drm_ioctl.c index a2c3952c..6f0ef149 100644 --- a/linux-core/drm_ioctl.c +++ b/linux-core/drm_ioctl.c @@ -42,18 +42,17 @@ * Get the bus id. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument, pointing to a drm_unique structure. * \return zero on success or a negative number on failure. * * Copies the bus id from drm_device::unique into user space. */ -int drm_getunique(struct inode *inode, struct file *filp, +int drm_getunique(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_unique __user *argp = (void __user *)arg; struct drm_unique u; @@ -73,7 +72,7 @@ int drm_getunique(struct inode *inode, struct file *filp, * Set the bus id. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument, pointing to a drm_unique structure. * \return zero on success or a negative number on failure. @@ -83,11 +82,10 @@ int drm_getunique(struct inode *inode, struct file *filp, * in interface version 1.1 and will return EBUSY when setversion has requested * version 1.1 or greater. */ -int drm_setunique(struct inode *inode, struct file *filp, +int drm_setunique(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_unique u; int domain, bus, slot, func, ret; @@ -167,7 +165,7 @@ static int drm_set_busid(struct drm_device * dev) * Get a mapping information. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument, pointing to a drm_map structure. * @@ -176,11 +174,10 @@ static int drm_set_busid(struct drm_device * dev) * Searches for the mapping with the specified offset and copies its information * into userspace */ -int drm_getmap(struct inode *inode, struct file *filp, +int drm_getmap(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_map __user *argp = (void __user *)arg; struct drm_map map; struct drm_map_list *r_list = NULL; @@ -228,7 +225,7 @@ int drm_getmap(struct inode *inode, struct file *filp, * Get client information. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument, pointing to a drm_client structure. 
* @@ -237,11 +234,10 @@ int drm_getmap(struct inode *inode, struct file *filp, * Searches for the client with the specified index and copies its information * into userspace */ -int drm_getclient(struct inode *inode, struct file *filp, +int drm_getclient(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_client __user *argp = (struct drm_client __user *)arg; struct drm_client client; struct drm_file *pt; @@ -280,17 +276,16 @@ int drm_getclient(struct inode *inode, struct file *filp, * Get statistics information. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument, pointing to a drm_stats structure. * * \return zero on success or a negative number on failure. */ -int drm_getstats(struct inode *inode, struct file *filp, +int drm_getstats(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_stats stats; int i; @@ -320,7 +315,7 @@ int drm_getstats(struct inode *inode, struct file *filp, * Setversion ioctl. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument, pointing to a drm_lock structure. * \return zero on success or negative number on failure. @@ -372,7 +367,7 @@ int drm_setversion(DRM_IOCTL_ARGS) } /** No-op ioctl. */ -int drm_noop(struct inode *inode, struct file *filp, unsigned int cmd, +int drm_noop(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { DRM_DEBUG("\n"); diff --git a/linux-core/drm_irq.c b/linux-core/drm_irq.c index 140ceca6..36df557b 100644 --- a/linux-core/drm_irq.c +++ b/linux-core/drm_irq.c @@ -41,7 +41,7 @@ * Get interrupt from bus id. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument, pointing to a drm_irq_busid structure. * \return zero on success or a negative number on failure. @@ -50,11 +50,10 @@ * This IOCTL is deprecated, and will now return EINVAL for any busid not equal * to that of the device that this DRM instance attached to. */ -int drm_irq_by_busid(struct inode *inode, struct file *filp, +int drm_irq_by_busid(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_irq_busid __user *argp = (void __user *)arg; struct drm_irq_busid p; @@ -185,18 +184,17 @@ EXPORT_SYMBOL(drm_irq_uninstall); * IRQ control ioctl. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument, pointing to a drm_control structure. * \return zero on success or a negative number on failure. * * Calls irq_install() or irq_uninstall() according to \p arg. 
*/ -int drm_control(struct inode *inode, struct file *filp, +int drm_control(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_control ctl; /* if we haven't irq we fallback for compatibility reasons - this used to be a separate function in drm_dma.h */ @@ -225,7 +223,7 @@ int drm_control(struct inode *inode, struct file *filp, * Wait for VBLANK. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param data user argument, pointing to a drm_wait_vblank structure. * \return zero on success or a negative number on failure. @@ -242,8 +240,7 @@ int drm_control(struct inode *inode, struct file *filp, */ int drm_wait_vblank(DRM_IOCTL_ARGS) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; union drm_wait_vblank __user *argp = (void __user *)data; union drm_wait_vblank vblwait; struct timeval now; diff --git a/linux-core/drm_lock.c b/linux-core/drm_lock.c index f3685ce0..54e34e14 100644 --- a/linux-core/drm_lock.c +++ b/linux-core/drm_lock.c @@ -41,23 +41,22 @@ static int drm_notifier(void *priv); * Lock ioctl. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument, pointing to a drm_lock structure. * \return zero on success or negative number on failure. * * Add the current task to the lock wait queue, and attempt to take to lock. */ -int drm_lock(struct inode *inode, struct file *filp, +int drm_lock(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; DECLARE_WAITQUEUE(entry, current); struct drm_lock lock; int ret = 0; - ++priv->lock_count; + ++file_priv->lock_count; if (copy_from_user(&lock, (struct drm_lock __user *) arg, sizeof(lock))) return -EFAULT; @@ -88,7 +87,7 @@ int drm_lock(struct inode *inode, struct file *filp, break; } if (drm_lock_take(&dev->lock, lock.context)) { - dev->lock.filp = filp; + dev->lock.file_priv = file_priv; dev->lock.lock_time = jiffies; atomic_inc(&dev->counts[_DRM_STAT_LOCKS]); break; /* Got lock */ @@ -142,18 +141,17 @@ int drm_lock(struct inode *inode, struct file *filp, * Unlock ioctl. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument, pointing to a drm_lock structure. * \return zero on success or negative number on failure. * * Transfer and free the lock. 
*/ -int drm_unlock(struct inode *inode, struct file *filp, +int drm_unlock(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_lock lock; unsigned long irqflags; @@ -258,7 +256,7 @@ static int drm_lock_transfer(struct drm_lock_data *lock_data, unsigned int old, new, prev; volatile unsigned int *lock = &lock_data->hw_lock->lock; - lock_data->filp = NULL; + lock_data->file_priv = NULL; do { old = *lock; new = context | _DRM_LOCK_HELD; @@ -391,13 +389,13 @@ void drm_idlelock_release(struct drm_lock_data *lock_data) EXPORT_SYMBOL(drm_idlelock_release); -int drm_i_have_hw_lock(struct file *filp) +int drm_i_have_hw_lock(struct drm_file *file_priv) { DRM_DEVICE; - return (priv->lock_count && dev->lock.hw_lock && + return (file_priv->lock_count && dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) && - dev->lock.filp == filp); + dev->lock.file_priv == file_priv); } EXPORT_SYMBOL(drm_i_have_hw_lock); diff --git a/linux-core/drm_os_linux.h b/linux-core/drm_os_linux.h index 3d2ad779..3f143833 100644 --- a/linux-core/drm_os_linux.h +++ b/linux-core/drm_os_linux.h @@ -6,10 +6,8 @@ #include /* For task queue support */ #include -/** File pointer type */ -#define DRMFILE struct file * /** Ioctl arguments */ -#define DRM_IOCTL_ARGS struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data +#define DRM_IOCTL_ARGS struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long data /** Current process ID */ #define DRM_CURRENTPID current->pid #define DRM_SUSER(p) capable(CAP_SYS_ADMIN) @@ -51,8 +49,7 @@ /** Read/write memory barrier */ #define DRM_MEMORYBARRIER() mb() /** DRM device local declaration */ -#define DRM_DEVICE struct drm_file *priv = filp->private_data; \ - struct drm_device *dev = priv->head->dev +#define DRM_DEVICE struct drm_device *dev = file_priv->head->dev /** IRQ handler arguments and return type and values */ #define DRM_IRQ_ARGS int irq, void *arg @@ -116,8 +113,6 @@ static __inline__ int mtrr_del(int reg, unsigned long base, unsigned long size) #define DRM_GET_USER_UNCHECKED(val, uaddr) \ __get_user(val, uaddr) -#define DRM_GET_PRIV_WITH_RETURN(_priv, _filp) _priv = _filp->private_data - #define DRM_HZ HZ #define DRM_WAIT_ON( ret, queue, timeout, condition ) \ diff --git a/linux-core/drm_scatter.c b/linux-core/drm_scatter.c index 7c13610d..58696347 100644 --- a/linux-core/drm_scatter.c +++ b/linux-core/drm_scatter.c @@ -187,10 +187,10 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request) } EXPORT_SYMBOL(drm_sg_alloc); -int drm_sg_alloc_ioctl(struct inode *inode, struct file *filp, +int drm_sg_alloc_ioctl(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; + struct drm_device *dev = file_priv->head->dev; struct drm_scatter_gather __user *argp = (void __user *)arg; struct drm_scatter_gather request; int ret; @@ -198,11 +198,11 @@ int drm_sg_alloc_ioctl(struct inode *inode, struct file *filp, if (copy_from_user(&request, argp, sizeof(request))) return -EFAULT; - ret = drm_sg_alloc(priv->head->dev, &request); + ret = drm_sg_alloc(dev, &request); if ( ret ) return ret; if (copy_to_user(argp, &request, sizeof(request))) { - drm_sg_cleanup(priv->head->dev->sg); + drm_sg_cleanup(dev->sg); return -EFAULT; } @@ -211,11 +211,10 @@ int 
drm_sg_alloc_ioctl(struct inode *inode, struct file *filp, } -int drm_sg_free(struct inode *inode, struct file *filp, +int drm_sg_free(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_scatter_gather request; struct drm_sg_mem *entry; diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c index 265a59d8..c4e790ef 100644 --- a/linux-core/drm_vm.c +++ b/linux-core/drm_vm.c @@ -477,7 +477,7 @@ static void drm_vm_close(struct vm_area_struct *vma) /** * mmap DMA memory. * - * \param filp file pointer. + * \param file_priv DRM file private. * \param vma virtual memory area. * \return zero on success or a negative number on failure. * @@ -543,7 +543,7 @@ EXPORT_SYMBOL(drm_core_get_reg_ofs); /** * mmap DMA memory. * - * \param filp file pointer. + * \param file_priv DRM file private. * \param vma virtual memory area. * \return zero on success or a negative number on failure. * @@ -865,7 +865,7 @@ static struct vm_operations_struct drm_bo_vm_ops = { * mmap buffer object memory. * * \param vma virtual memory area. - * \param filp file pointer. + * \param file_priv DRM file private. * \param map The buffer object drm map. * \return zero on success or a negative number on failure. */ diff --git a/linux-core/i810_dma.c b/linux-core/i810_dma.c index 4b43647e..1e74d792 100644 --- a/linux-core/i810_dma.c +++ b/linux-core/i810_dma.c @@ -139,10 +139,9 @@ static const struct file_operations i810_buffer_fops = { .fasync = drm_fasync, }; -static int i810_map_buffer(struct drm_buf * buf, struct file *filp) +static int i810_map_buffer(struct drm_buf * buf, struct drm_file *file_priv) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; drm_i810_buf_priv_t *buf_priv = buf->dev_private; drm_i810_private_t *dev_priv = dev->dev_private; const struct file_operations *old_fops; @@ -152,14 +151,14 @@ static int i810_map_buffer(struct drm_buf * buf, struct file *filp) return -EINVAL; down_write(¤t->mm->mmap_sem); - old_fops = filp->f_op; - filp->f_op = &i810_buffer_fops; + old_fops = file_priv->filp->f_op; + file_priv->filp->f_op = &i810_buffer_fops; dev_priv->mmap_buffer = buf; - buf_priv->virtual = (void *)do_mmap(filp, 0, buf->total, + buf_priv->virtual = (void *)do_mmap(file_priv->filp, 0, buf->total, PROT_READ | PROT_WRITE, MAP_SHARED, buf->bus_address); dev_priv->mmap_buffer = NULL; - filp->f_op = old_fops; + file_priv->filp->f_op = old_fops; if (IS_ERR(buf_priv->virtual)) { /* Real error */ DRM_ERROR("mmap error\n"); @@ -192,7 +191,7 @@ static int i810_unmap_buffer(struct drm_buf * buf) } static int i810_dma_get_buffer(struct drm_device * dev, drm_i810_dma_t * d, - struct file *filp) + struct drm_file *file_priv) { struct drm_buf *buf; drm_i810_buf_priv_t *buf_priv; @@ -205,13 +204,13 @@ static int i810_dma_get_buffer(struct drm_device * dev, drm_i810_dma_t * d, return retcode; } - retcode = i810_map_buffer(buf, filp); + retcode = i810_map_buffer(buf, file_priv); if (retcode) { i810_freelist_put(dev, buf); DRM_ERROR("mapbuf failed, retcode %d\n", retcode); return retcode; } - buf->filp = filp; + buf->file_priv = file_priv; buf_priv = buf->dev_private; d->granted = 1; d->request_idx = buf->idx; @@ -492,11 +491,10 @@ static int i810_dma_init_compat(drm_i810_init_t * init, unsigned long arg) return 0; } -static int i810_dma_init(struct inode 
*inode, struct file *filp, +static int i810_dma_init(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; drm_i810_private_t *dev_priv; drm_i810_init_t init; int retcode = 0; @@ -987,7 +985,8 @@ static int i810_flush_queue(struct drm_device * dev) } /* Must be called with the lock held */ -static void i810_reclaim_buffers(struct drm_device *dev, struct file *filp) +static void i810_reclaim_buffers(struct drm_device *dev, + struct drm_file *file_priv) { struct drm_device_dma *dma = dev->dma; int i; @@ -1005,7 +1004,7 @@ static void i810_reclaim_buffers(struct drm_device *dev, struct file *filp) struct drm_buf *buf = dma->buflist[i]; drm_i810_buf_priv_t *buf_priv = buf->dev_private; - if (buf->filp == filp && buf_priv) { + if (buf->file_priv == file_priv && buf_priv) { int used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_FREE); @@ -1017,23 +1016,21 @@ static void i810_reclaim_buffers(struct drm_device *dev, struct file *filp) } } -static int i810_flush_ioctl(struct inode *inode, struct file *filp, +static int i810_flush_ioctl(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); i810_flush_queue(dev); return 0; } -static int i810_dma_vertex(struct inode *inode, struct file *filp, +static int i810_dma_vertex(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_device_dma *dma = dev->dma; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; u32 *hw_status = dev_priv->hw_status_page; @@ -1045,7 +1042,7 @@ static int i810_dma_vertex(struct inode *inode, struct file *filp, (&vertex, (drm_i810_vertex_t __user *) arg, sizeof(vertex))) return -EFAULT; - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); DRM_DEBUG("i810 dma vertex, idx %d used %d discard %d\n", vertex.idx, vertex.used, vertex.discard); @@ -1065,18 +1062,17 @@ static int i810_dma_vertex(struct inode *inode, struct file *filp, return 0; } -static int i810_clear_bufs(struct inode *inode, struct file *filp, +static int i810_clear_bufs(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; drm_i810_clear_t clear; if (copy_from_user (&clear, (drm_i810_clear_t __user *) arg, sizeof(clear))) return -EFAULT; - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); /* GH: Someone's doing nasty things... 
*/ if (!dev->dev_private) { @@ -1088,25 +1084,24 @@ static int i810_clear_bufs(struct inode *inode, struct file *filp, return 0; } -static int i810_swap_bufs(struct inode *inode, struct file *filp, +static int i810_swap_bufs(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; DRM_DEBUG("i810_swap_bufs\n"); - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); i810_dma_dispatch_swap(dev); return 0; } -static int i810_getage(struct inode *inode, struct file *filp, unsigned int cmd, +static int i810_getage(struct inode *inode, struct drm_file *file_priv, + unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; u32 *hw_status = dev_priv->hw_status_page; drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) @@ -1116,11 +1111,10 @@ static int i810_getage(struct inode *inode, struct file *filp, unsigned int cmd, return 0; } -static int i810_getbuf(struct inode *inode, struct file *filp, unsigned int cmd, - unsigned long arg) +static int i810_getbuf(struct inode *inode, struct drm_file *file_priv, + unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; int retcode = 0; drm_i810_dma_t d; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; @@ -1131,11 +1125,11 @@ static int i810_getbuf(struct inode *inode, struct file *filp, unsigned int cmd, if (copy_from_user(&d, (drm_i810_dma_t __user *) arg, sizeof(d))) return -EFAULT; - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); d.granted = 0; - retcode = i810_dma_get_buffer(dev, &d, filp); + retcode = i810_dma_get_buffer(dev, &d, file_priv); DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n", current->pid, retcode, d.granted); @@ -1147,15 +1141,15 @@ static int i810_getbuf(struct inode *inode, struct file *filp, unsigned int cmd, return retcode; } -static int i810_copybuf(struct inode *inode, - struct file *filp, unsigned int cmd, unsigned long arg) +static int i810_copybuf(struct inode *inode, struct drm_file *file_priv, + unsigned int cmd, unsigned long arg) { /* Never copy - 2.4.x doesn't need it */ return 0; } -static int i810_docopy(struct inode *inode, struct file *filp, unsigned int cmd, - unsigned long arg) +static int i810_docopy(struct inode *inode, struct drm_file *file_priv, + unsigned int cmd, unsigned long arg) { /* Never copy - 2.4.x doesn't need it */ return 0; @@ -1221,11 +1215,10 @@ static void i810_dma_dispatch_mc(struct drm_device * dev, struct drm_buf * buf, ADVANCE_LP_RING(); } -static int i810_dma_mc(struct inode *inode, struct file *filp, +static int i810_dma_mc(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_device_dma *dma = dev->dma; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; u32 *hw_status = dev_priv->hw_status_page; @@ -1236,7 +1229,7 @@ static int i810_dma_mc(struct inode *inode, struct file *filp, if (copy_from_user(&mc, (drm_i810_mc_t __user *) arg, 
sizeof(mc))) return -EFAULT; - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); if (mc.idx >= dma->buf_count || mc.idx < 0) return -EINVAL; @@ -1252,21 +1245,19 @@ static int i810_dma_mc(struct inode *inode, struct file *filp, return 0; } -static int i810_rstatus(struct inode *inode, struct file *filp, +static int i810_rstatus(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; return (int)(((u32 *) (dev_priv->hw_status_page))[4]); } -static int i810_ov0_info(struct inode *inode, struct file *filp, +static int i810_ov0_info(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; drm_i810_overlay_t data; @@ -1278,25 +1269,23 @@ static int i810_ov0_info(struct inode *inode, struct file *filp, return 0; } -static int i810_fstatus(struct inode *inode, struct file *filp, +static int i810_fstatus(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); return I810_READ(0x30008); } -static int i810_ov0_flip(struct inode *inode, struct file *filp, +static int i810_ov0_flip(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); //Tell the overlay to update I810_WRITE(0x30000, dev_priv->overlay_physical | 0x80000000); @@ -1327,16 +1316,15 @@ static int i810_do_cleanup_pageflip(struct drm_device * dev) return 0; } -static int i810_flip_bufs(struct inode *inode, struct file *filp, +static int i810_flip_bufs(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; drm_i810_private_t *dev_priv = dev->dev_private; DRM_DEBUG("%s\n", __FUNCTION__); - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); if (!dev_priv->page_flipping) i810_do_init_pageflip(dev); @@ -1362,7 +1350,7 @@ void i810_driver_lastclose(struct drm_device * dev) i810_dma_cleanup(dev); } -void i810_driver_preclose(struct drm_device * dev, DRMFILE filp) +void i810_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) { if (dev->dev_private) { drm_i810_private_t *dev_priv = dev->dev_private; @@ -1372,9 +1360,10 @@ void i810_driver_preclose(struct drm_device * dev, DRMFILE filp) } } -void i810_driver_reclaim_buffers_locked(struct drm_device * dev, struct file *filp) +void i810_driver_reclaim_buffers_locked(struct drm_device * dev, + struct drm_file *file_priv) { - i810_reclaim_buffers(dev, filp); + 
i810_reclaim_buffers(dev, file_priv); } int i810_driver_dma_quiescent(struct drm_device * dev) diff --git a/linux-core/i810_drv.h b/linux-core/i810_drv.h index 3627d774..c525e165 100644 --- a/linux-core/i810_drv.h +++ b/linux-core/i810_drv.h @@ -117,12 +117,13 @@ typedef struct drm_i810_private { /* i810_dma.c */ extern int i810_driver_dma_quiescent(struct drm_device * dev); extern void i810_driver_reclaim_buffers_locked(struct drm_device * dev, - struct file *filp); + struct drm_file *file_priv); extern int i810_driver_load(struct drm_device *, unsigned long flags); extern void i810_driver_lastclose(struct drm_device * dev); -extern void i810_driver_preclose(struct drm_device * dev, DRMFILE filp); +extern void i810_driver_preclose(struct drm_device * dev, + struct drm_file *file_priv); extern void i810_driver_reclaim_buffers_locked(struct drm_device * dev, - struct file *filp); + struct drm_file *file_priv); extern int i810_driver_device_is_agp(struct drm_device * dev); extern struct drm_ioctl_desc i810_ioctls[]; diff --git a/linux-core/sis_mm.c b/linux-core/sis_mm.c index f9c7a7e2..0e9ed65d 100644 --- a/linux-core/sis_mm.c +++ b/linux-core/sis_mm.c @@ -122,7 +122,7 @@ static int sis_fb_init(DRM_IOCTL_ARGS) return 0; } -static int sis_drm_alloc(struct drm_device * dev, struct drm_file * priv, +static int sis_drm_alloc(struct drm_device * dev, struct drm_file *file_priv, unsigned long data, int pool) { drm_sis_private_t *dev_priv = dev->dev_private; @@ -144,7 +144,7 @@ static int sis_drm_alloc(struct drm_device * dev, struct drm_file * priv, mem.size = (mem.size + SIS_MM_ALIGN_MASK) >> SIS_MM_ALIGN_SHIFT; item = drm_sman_alloc(&dev_priv->sman, pool, mem.size, 0, - (unsigned long)priv); + (unsigned long)file_priv); mutex_unlock(&dev->struct_mutex); if (item) { @@ -190,7 +190,7 @@ static int sis_drm_free(DRM_IOCTL_ARGS) static int sis_fb_alloc(DRM_IOCTL_ARGS) { DRM_DEVICE; - return sis_drm_alloc(dev, priv, data, VIDEO_TYPE); + return sis_drm_alloc(dev, file_priv, data, VIDEO_TYPE); } static int sis_ioctl_agp_init(DRM_IOCTL_ARGS) @@ -225,7 +225,7 @@ static int sis_ioctl_agp_alloc(DRM_IOCTL_ARGS) { DRM_DEVICE; - return sis_drm_alloc(dev, priv, data, AGP_TYPE); + return sis_drm_alloc(dev, file_priv, data, AGP_TYPE); } static drm_local_map_t *sis_reg_init(struct drm_device *dev) @@ -314,13 +314,13 @@ void sis_lastclose(struct drm_device *dev) mutex_unlock(&dev->struct_mutex); } -void sis_reclaim_buffers_locked(struct drm_device * dev, struct file *filp) +void sis_reclaim_buffers_locked(struct drm_device * dev, + struct drm_file *file_priv) { drm_sis_private_t *dev_priv = dev->dev_private; - struct drm_file *priv = filp->private_data; mutex_lock(&dev->struct_mutex); - if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)priv)) { + if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)file_priv)) { mutex_unlock(&dev->struct_mutex); return; } @@ -329,7 +329,7 @@ void sis_reclaim_buffers_locked(struct drm_device * dev, struct file *filp) dev->driver->dma_quiescent(dev); } - drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)priv); + drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)file_priv); mutex_unlock(&dev->struct_mutex); return; } diff --git a/linux-core/via_dmablit.c b/linux-core/via_dmablit.c index 5e73bd1a..10289a89 100644 --- a/linux-core/via_dmablit.c +++ b/linux-core/via_dmablit.c @@ -805,7 +805,7 @@ via_dma_blit_sync( DRM_IOCTL_ARGS ) err = via_dmablit_sync(dev, sync.sync_handle, sync.engine); - if (-EINTR) == err + if (-EINTR == err) err = -EAGAIN; return err; diff 
--git a/linux-core/via_mm.c b/linux-core/via_mm.c index 7cb8651d..411c3d52 100644 --- a/linux-core/via_mm.c +++ b/linux-core/via_mm.c @@ -151,7 +151,7 @@ int via_mem_alloc(DRM_IOCTL_ARGS) tmpSize = (mem.size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT; item = drm_sman_alloc(&dev_priv->sman, mem.type, tmpSize, 0, - (unsigned long)priv); + (unsigned long)file_priv); mutex_unlock(&dev->struct_mutex); if (item) { mem.offset = ((mem.type == VIA_MEM_VIDEO) ? @@ -190,13 +190,13 @@ int via_mem_free(DRM_IOCTL_ARGS) } -void via_reclaim_buffers_locked(struct drm_device * dev, struct file *filp) +void via_reclaim_buffers_locked(struct drm_device * dev, + struct drm_file *file_priv) { drm_via_private_t *dev_priv = dev->dev_private; - struct drm_file *priv = filp->private_data; mutex_lock(&dev->struct_mutex); - if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)priv)) { + if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)file_priv)) { mutex_unlock(&dev->struct_mutex); return; } @@ -205,7 +205,7 @@ void via_reclaim_buffers_locked(struct drm_device * dev, struct file *filp) dev->driver->dma_quiescent(dev); } - drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)priv); + drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)file_priv); mutex_unlock(&dev->struct_mutex); return; } diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c index 81e6981d..05336d35 100644 --- a/shared-core/i915_dma.c +++ b/shared-core/i915_dma.c @@ -654,7 +654,7 @@ static int i915_flush_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); return i915_quiescent(dev); } @@ -679,7 +679,7 @@ static int i915_batchbuffer(DRM_IOCTL_ARGS) DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n", batch.start, batch.used, batch.num_cliprects); - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); if (batch.num_cliprects && DRM_VERIFYAREA_READ(batch.cliprects, batch.num_cliprects * @@ -707,7 +707,7 @@ static int i915_cmdbuffer(DRM_IOCTL_ARGS) DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n", cmdbuf.buf, cmdbuf.sz, cmdbuf.num_cliprects); - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); if (cmdbuf.num_cliprects && DRM_VERIFYAREA_READ(cmdbuf.cliprects, @@ -756,7 +756,7 @@ static int i915_flip_bufs(DRM_IOCTL_ARGS) DRM_DEBUG("%s\n", __FUNCTION__); - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); DRM_COPY_FROM_USER_IOCTL(param, (drm_i915_flip_t __user *) data, sizeof(param)); @@ -965,11 +965,11 @@ void i915_driver_lastclose(struct drm_device * dev) i915_dma_cleanup(dev); } -void i915_driver_preclose(struct drm_device * dev, DRMFILE filp) +void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) { if (dev->dev_private) { drm_i915_private_t *dev_priv = dev->dev_private; - i915_mem_release(dev, filp, dev_priv->agp_heap); + i915_mem_release(dev, file_priv, dev_priv->agp_heap); } } diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h index 1a2220a5..73b3d187 100644 --- a/shared-core/i915_drv.h +++ b/shared-core/i915_drv.h @@ -81,7 +81,7 @@ struct mem_block { struct mem_block *prev; int start; int size; - DRMFILE filp; /* 0: free, -1: heap, other: real files */ + struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */ }; typedef struct _drm_i915_vbl_swap { @@ -152,7 +152,8 @@ extern int i915_max_ioctl; extern void i915_kernel_lost_context(struct drm_device * dev); extern int i915_driver_load(struct drm_device *, unsigned long flags); 
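(For illustration only: the ownership model the i915 hunks above switch to boils down to tagging each heap block with the drm_file that allocated it and sweeping the heap at preclose. A minimal standalone C sketch of that idea follows; the helper and field names here are illustrative, not the driver's exact code.)

	struct drm_file;                     /* opaque per-open handle */

	struct mem_block {
		struct mem_block *next, *prev;
		int start, size;
		struct drm_file *file_priv;  /* NULL: free, (void *)-1: heap anchor, else owner */
	};

	/* Sweep the heap when a client closes: every block it still owns becomes
	 * free again.  Merging neighbouring free blocks happens in a second pass,
	 * as in i915_mem_release(). */
	static void release_blocks_of(struct mem_block *heap, struct drm_file *file_priv)
	{
		struct mem_block *p;

		for (p = heap->next; p != heap; p = p->next)
			if (p->file_priv == file_priv)
				p->file_priv = NULL;
	}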
extern void i915_driver_lastclose(struct drm_device * dev); -extern void i915_driver_preclose(struct drm_device * dev, DRMFILE filp); +extern void i915_driver_preclose(struct drm_device *dev, + struct drm_file *file_priv); extern int i915_driver_device_is_agp(struct drm_device * dev); extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); @@ -185,7 +186,8 @@ extern int i915_mem_init_heap(DRM_IOCTL_ARGS); extern int i915_mem_destroy_heap(DRM_IOCTL_ARGS); extern void i915_mem_takedown(struct mem_block **heap); extern void i915_mem_release(struct drm_device * dev, - DRMFILE filp, struct mem_block *heap); + struct drm_file *file_priv, + struct mem_block *heap); #ifdef I915_HAVE_FENCE /* i915_fence.c */ diff --git a/shared-core/i915_irq.c b/shared-core/i915_irq.c index f4775b75..da61997e 100644 --- a/shared-core/i915_irq.c +++ b/shared-core/i915_irq.c @@ -456,7 +456,7 @@ int i915_irq_emit(DRM_IOCTL_ARGS) drm_i915_irq_emit_t emit; int result; - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); @@ -623,7 +623,7 @@ int i915_vblank_swap(DRM_IOCTL_ARGS) if ((curseq - swap.sequence) <= (1<<23)) { struct drm_drawable_info *drw; - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); DRM_SPINLOCK_IRQSAVE(&dev->drw_lock, irqflags); diff --git a/shared-core/i915_mem.c b/shared-core/i915_mem.c index 381562d8..e2e7018d 100644 --- a/shared-core/i915_mem.c +++ b/shared-core/i915_mem.c @@ -89,7 +89,7 @@ static void mark_block(struct drm_device * dev, struct mem_block *p, int in_use) */ static struct mem_block *split_block(struct mem_block *p, int start, int size, - DRMFILE filp) + struct drm_file *file_priv) { /* Maybe cut off the start of an existing block */ if (start > p->start) { @@ -99,7 +99,7 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size, goto out; newblock->start = start; newblock->size = p->size - (start - p->start); - newblock->filp = NULL; + newblock->file_priv = NULL; newblock->next = p->next; newblock->prev = p; p->next->prev = newblock; @@ -116,7 +116,7 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size, goto out; newblock->start = start + size; newblock->size = p->size - size; - newblock->filp = NULL; + newblock->file_priv = NULL; newblock->next = p->next; newblock->prev = p; p->next->prev = newblock; @@ -126,20 +126,20 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size, out: /* Our block is in the middle */ - p->filp = filp; + p->file_priv = file_priv; return p; } static struct mem_block *alloc_block(struct mem_block *heap, int size, - int align2, DRMFILE filp) + int align2, struct drm_file *file_priv) { struct mem_block *p; int mask = (1 << align2) - 1; for (p = heap->next; p != heap; p = p->next) { int start = (p->start + mask) & ~mask; - if (p->filp == NULL && start + size <= p->start + p->size) - return split_block(p, start, size, filp); + if (p->file_priv == NULL && start + size <= p->start + p->size) + return split_block(p, start, size, file_priv); } return NULL; @@ -158,12 +158,12 @@ static struct mem_block *find_block(struct mem_block *heap, int start) static void free_block(struct mem_block *p) { - p->filp = NULL; + p->file_priv = NULL; - /* Assumes a single contiguous range. Needs a special filp in + /* Assumes a single contiguous range. Needs a special file_priv in * 'heap' to stop it being subsumed. 
*/ - if (p->next->filp == NULL) { + if (p->next->file_priv == NULL) { struct mem_block *q = p->next; p->size += q->size; p->next = q->next; @@ -171,7 +171,7 @@ static void free_block(struct mem_block *p) drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS); } - if (p->prev->filp == NULL) { + if (p->prev->file_priv == NULL) { struct mem_block *q = p->prev; q->size += p->size; q->next = p->next; @@ -197,18 +197,19 @@ static int init_heap(struct mem_block **heap, int start, int size) blocks->start = start; blocks->size = size; - blocks->filp = NULL; + blocks->file_priv = NULL; blocks->next = blocks->prev = *heap; memset(*heap, 0, sizeof(**heap)); - (*heap)->filp = (DRMFILE) - 1; + (*heap)->file_priv = (struct drm_file *) - 1; (*heap)->next = (*heap)->prev = blocks; return 0; } /* Free all blocks associated with the releasing file. */ -void i915_mem_release(struct drm_device * dev, DRMFILE filp, struct mem_block *heap) +void i915_mem_release(struct drm_device * dev, struct drm_file *file_priv, + struct mem_block *heap) { struct mem_block *p; @@ -216,17 +217,17 @@ void i915_mem_release(struct drm_device * dev, DRMFILE filp, struct mem_block *h return; for (p = heap->next; p != heap; p = p->next) { - if (p->filp == filp) { - p->filp = NULL; + if (p->file_priv == file_priv) { + p->file_priv = NULL; mark_block(dev, p, 0); } } - /* Assumes a single contiguous range. Needs a special filp in + /* Assumes a single contiguous range. Needs a special file_priv in * 'heap' to stop it being subsumed. */ for (p = heap->next; p != heap; p = p->next) { - while (p->filp == NULL && p->next->filp == NULL) { + while (p->file_priv == NULL && p->next->file_priv == NULL) { struct mem_block *q = p->next; p->size += q->size; p->next = q->next; @@ -292,7 +293,7 @@ int i915_mem_alloc(DRM_IOCTL_ARGS) if (alloc.alignment < 12) alloc.alignment = 12; - block = alloc_block(*heap, alloc.size, alloc.alignment, filp); + block = alloc_block(*heap, alloc.size, alloc.alignment, file_priv); if (!block) return -ENOMEM; @@ -330,7 +331,7 @@ int i915_mem_free(DRM_IOCTL_ARGS) if (!block) return -EFAULT; - if (block->filp != filp) + if (block->file_priv != file_priv) return -EPERM; mark_block(dev, block, 0); diff --git a/shared-core/mach64_dma.c b/shared-core/mach64_dma.c index fec73076..25877824 100644 --- a/shared-core/mach64_dma.c +++ b/shared-core/mach64_dma.c @@ -1165,7 +1165,7 @@ int mach64_dma_init(DRM_IOCTL_ARGS) DRM_DEBUG("%s\n", __FUNCTION__); - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); DRM_COPY_FROM_USER_IOCTL(init, (drm_mach64_init_t *) data, sizeof(init)); @@ -1187,7 +1187,7 @@ int mach64_dma_idle(DRM_IOCTL_ARGS) DRM_DEBUG("%s\n", __FUNCTION__); - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); return mach64_do_dma_idle(dev_priv); } @@ -1199,7 +1199,7 @@ int mach64_dma_flush(DRM_IOCTL_ARGS) DRM_DEBUG("%s\n", __FUNCTION__); - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); return mach64_do_dma_flush(dev_priv); } @@ -1211,7 +1211,7 @@ int mach64_engine_reset(DRM_IOCTL_ARGS) DRM_DEBUG("%s\n", __FUNCTION__); - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); return mach64_do_engine_reset(dev_priv); } @@ -1461,7 +1461,8 @@ int mach64_freelist_put(drm_mach64_private_t * dev_priv, struct drm_buf * copy_b /** \name DMA buffer request and submission IOCTL handler */ /*@{*/ -static int mach64_dma_get_buffers(DRMFILE filp, struct drm_device * dev, +static int mach64_dma_get_buffers(struct drm_device * dev, + struct drm_file *file_priv, 
struct drm_dma * d) { int i; @@ -1478,7 +1479,7 @@ static int mach64_dma_get_buffers(DRMFILE filp, struct drm_device * dev, return -EAGAIN; #endif - buf->filp = filp; + buf->file_priv = file_priv; if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx, sizeof(buf->idx))) @@ -1499,7 +1500,7 @@ int mach64_dma_buffers(DRM_IOCTL_ARGS) struct drm_dma d; int ret = 0; - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); DRM_COPY_FROM_USER_IOCTL(d, (struct drm_dma *) data, sizeof(d)); @@ -1522,7 +1523,7 @@ int mach64_dma_buffers(DRM_IOCTL_ARGS) d.granted_count = 0; if (d.request_count) { - ret = mach64_dma_get_buffers(filp, dev, &d); + ret = mach64_dma_get_buffers(dev, file_priv, &d); } DRM_COPY_TO_USER_IOCTL((struct drm_dma *) data, d, sizeof(d)); diff --git a/shared-core/mach64_drv.h b/shared-core/mach64_drv.h index 5d83c861..aa9afcab 100644 --- a/shared-core/mach64_drv.h +++ b/shared-core/mach64_drv.h @@ -842,7 +842,7 @@ do { \ } while(0) /* FIXME: use a private set of smaller buffers for state emits, clears, and swaps? */ -#define DMAGETPTR( filp, dev_priv, n ) \ +#define DMAGETPTR( file_priv, dev_priv, n ) \ do { \ if ( MACH64_VERBOSE ) { \ DRM_INFO( "DMAGETPTR( %d ) in %s\n", \ @@ -859,7 +859,7 @@ do { \ __FUNCTION__ ); \ return -EFAULT; \ } \ - _buf->filp = filp; \ + _buf->file_priv = file_priv; \ _outcount = 0; \ \ _buf_wptr = GETBUFPTR( _buf ); \ diff --git a/shared-core/mach64_state.c b/shared-core/mach64_state.c index 397faaaa..c89573e7 100644 --- a/shared-core/mach64_state.c +++ b/shared-core/mach64_state.c @@ -85,7 +85,8 @@ static void mach64_print_dirty(const char *msg, unsigned int flags) /* This function returns 0 on success, 1 for no intersection, and * negative for an error */ -static int mach64_emit_cliprect(DRMFILE filp, drm_mach64_private_t * dev_priv, +static int mach64_emit_cliprect(struct drm_file *file_priv, + drm_mach64_private_t * dev_priv, struct drm_clip_rect * box) { u32 sc_left_right, sc_top_bottom; @@ -120,7 +121,7 @@ static int mach64_emit_cliprect(DRMFILE filp, drm_mach64_private_t * dev_priv, if (scissor.y1 >= scissor.y2) return 1; - DMAGETPTR(filp, dev_priv, 2); /* returns on failure to get buffer */ + DMAGETPTR(file_priv, dev_priv, 2); /* returns on failure to get buffer */ sc_left_right = ((scissor.x1 << 0) | (scissor.x2 << 16)); sc_top_bottom = ((scissor.y1 << 0) | (scissor.y2 << 16)); @@ -133,7 +134,7 @@ static int mach64_emit_cliprect(DRMFILE filp, drm_mach64_private_t * dev_priv, return 0; } -static __inline__ int mach64_emit_state(DRMFILE filp, +static __inline__ int mach64_emit_state(struct drm_file *file_priv, drm_mach64_private_t * dev_priv) { drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv; @@ -148,7 +149,7 @@ static __inline__ int mach64_emit_state(DRMFILE filp, DRM_DEBUG("%s: dirty=0x%08x\n", __FUNCTION__, dirty); } - DMAGETPTR(filp, dev_priv, 17); /* returns on failure to get buffer */ + DMAGETPTR(file_priv, dev_priv, 17); /* returns on failure to get buffer */ if (dirty & MACH64_UPLOAD_MISC) { DMAOUTREG(MACH64_DP_MIX, regs->dp_mix); @@ -212,7 +213,8 @@ static __inline__ int mach64_emit_state(DRMFILE filp, * DMA command dispatch functions */ -static int mach64_dma_dispatch_clear(DRMFILE filp, struct drm_device * dev, +static int mach64_dma_dispatch_clear(struct drm_device * dev, + struct drm_file *file_priv, unsigned int flags, int cx, int cy, int cw, int ch, unsigned int clear_color, @@ -254,7 +256,7 @@ static int mach64_dma_dispatch_clear(DRMFILE filp, struct drm_device * dev, if (!nbox) return 0; - DMAGETPTR(filp, 
dev_priv, nbox * 31); /* returns on failure to get buffer */ + DMAGETPTR(file_priv, dev_priv, nbox * 31); /* returns on failure to get buffer */ for (i = 0; i < nbox; i++) { int x = pbox[i].x1; @@ -355,7 +357,8 @@ static int mach64_dma_dispatch_clear(DRMFILE filp, struct drm_device * dev, return 0; } -static int mach64_dma_dispatch_swap(DRMFILE filp, struct drm_device * dev) +static int mach64_dma_dispatch_swap(struct drm_device * dev, + struct drm_file *file_priv) { drm_mach64_private_t *dev_priv = dev->dev_private; drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv; @@ -380,7 +383,7 @@ static int mach64_dma_dispatch_swap(DRMFILE filp, struct drm_device * dev) if (!nbox) return 0; - DMAGETPTR(filp, dev_priv, 13 + nbox * 4); /* returns on failure to get buffer */ + DMAGETPTR(file_priv, dev_priv, 13 + nbox * 4); /* returns on failure to get buffer */ DMAOUTREG(MACH64_Z_CNTL, 0); DMAOUTREG(MACH64_SCALE_3D_CNTL, 0); @@ -545,7 +548,8 @@ static __inline__ int copy_from_user_vertex(u32 *to, } } -static int mach64_dma_dispatch_vertex(DRMFILE filp, struct drm_device * dev, +static int mach64_dma_dispatch_vertex(struct drm_device * dev, + struct drm_file *file_priv, drm_mach64_vertex_t * vertex) { drm_mach64_private_t *dev_priv = dev->dev_private; @@ -583,7 +587,7 @@ static int mach64_dma_dispatch_vertex(DRMFILE filp, struct drm_device * dev, DMASETPTR(copy_buf); if (sarea_priv->dirty & ~MACH64_UPLOAD_CLIPRECTS) { - ret = mach64_emit_state(filp, dev_priv); + ret = mach64_emit_state(file_priv, dev_priv); if (ret < 0) return ret; } @@ -591,7 +595,7 @@ static int mach64_dma_dispatch_vertex(DRMFILE filp, struct drm_device * dev, do { /* Emit the next cliprect */ if (i < sarea_priv->nbox) { - ret = mach64_emit_cliprect(filp, dev_priv, + ret = mach64_emit_cliprect(file_priv, dev_priv, &sarea_priv->boxes[i]); if (ret < 0) { /* failed to get buffer */ @@ -640,7 +644,8 @@ static __inline__ int copy_from_user_blit(u32 *to, return 0; } -static int mach64_dma_dispatch_blit(DRMFILE filp, struct drm_device * dev, +static int mach64_dma_dispatch_blit(struct drm_device * dev, + struct drm_file *file_priv, drm_mach64_blit_t * blit) { drm_mach64_private_t *dev_priv = dev->dev_private; @@ -763,7 +768,7 @@ int mach64_dma_clear(DRM_IOCTL_ARGS) DRM_DEBUG("%s: pid=%d\n", __FUNCTION__, DRM_CURRENTPID); - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); DRM_COPY_FROM_USER_IOCTL(clear, (drm_mach64_clear_t *) data, sizeof(clear)); @@ -771,7 +776,7 @@ int mach64_dma_clear(DRM_IOCTL_ARGS) if (sarea_priv->nbox > MACH64_NR_SAREA_CLIPRECTS) sarea_priv->nbox = MACH64_NR_SAREA_CLIPRECTS; - ret = mach64_dma_dispatch_clear(filp, dev, clear.flags, + ret = mach64_dma_dispatch_clear(dev, file_priv, clear.flags, clear.x, clear.y, clear.w, clear.h, clear.clear_color, clear.clear_depth); @@ -790,12 +795,12 @@ int mach64_dma_swap(DRM_IOCTL_ARGS) DRM_DEBUG("%s: pid=%d\n", __FUNCTION__, DRM_CURRENTPID); - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); if (sarea_priv->nbox > MACH64_NR_SAREA_CLIPRECTS) sarea_priv->nbox = MACH64_NR_SAREA_CLIPRECTS; - ret = mach64_dma_dispatch_swap(filp, dev); + ret = mach64_dma_dispatch_swap(dev, file_priv); /* Make sure we restore the 3D state next time. 
*/ @@ -810,7 +815,7 @@ int mach64_dma_vertex(DRM_IOCTL_ARGS) drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_mach64_vertex_t vertex; - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); @@ -838,7 +843,7 @@ int mach64_dma_vertex(DRM_IOCTL_ARGS) if (sarea_priv->nbox > MACH64_NR_SAREA_CLIPRECTS) sarea_priv->nbox = MACH64_NR_SAREA_CLIPRECTS; - return mach64_dma_dispatch_vertex(filp, dev, &vertex); + return mach64_dma_dispatch_vertex(dev, file_priv, &vertex); } int mach64_dma_blit(DRM_IOCTL_ARGS) @@ -849,12 +854,12 @@ int mach64_dma_blit(DRM_IOCTL_ARGS) drm_mach64_blit_t blit; int ret; - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); DRM_COPY_FROM_USER_IOCTL(blit, (drm_mach64_blit_t *) data, sizeof(blit)); - ret = mach64_dma_dispatch_blit(filp, dev, &blit); + ret = mach64_dma_dispatch_blit(dev, file_priv, &blit); /* Make sure we restore the 3D state next time. */ @@ -884,7 +889,7 @@ int mach64_get_param(DRM_IOCTL_ARGS) switch (param.param) { case MACH64_PARAM_FRAMES_QUEUED: /* Needs lock since it calls mach64_ring_tick() */ - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); value = mach64_do_get_frames_queued(dev_priv); break; case MACH64_PARAM_IRQ_NR: diff --git a/shared-core/mga_dma.c b/shared-core/mga_dma.c index cbcb6380..429ffa54 100644 --- a/shared-core/mga_dma.c +++ b/shared-core/mga_dma.c @@ -1016,7 +1016,7 @@ int mga_dma_init(DRM_IOCTL_ARGS) drm_mga_init_t init; int err; - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); DRM_COPY_FROM_USER_IOCTL(init, (drm_mga_init_t __user *) data, sizeof(init)); @@ -1045,7 +1045,7 @@ int mga_dma_flush(DRM_IOCTL_ARGS) drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; struct drm_lock lock; - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); DRM_COPY_FROM_USER_IOCTL(lock, (struct drm_lock __user *) data, sizeof(lock)); @@ -1080,7 +1080,7 @@ int mga_dma_reset(DRM_IOCTL_ARGS) DRM_DEVICE; drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); return mga_do_dma_reset(dev_priv); } @@ -1089,7 +1089,8 @@ int mga_dma_reset(DRM_IOCTL_ARGS) * DMA buffer management */ -static int mga_dma_get_buffers(DRMFILE filp, struct drm_device * dev, struct drm_dma * d) +static int mga_dma_get_buffers(struct drm_device * dev, + struct drm_file *file_priv, struct drm_dma * d) { struct drm_buf *buf; int i; @@ -1099,7 +1100,7 @@ static int mga_dma_get_buffers(DRMFILE filp, struct drm_device * dev, struct drm if (!buf) return -EAGAIN; - buf->filp = filp; + buf->file_priv = file_priv; if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx, sizeof(buf->idx))) @@ -1122,7 +1123,7 @@ int mga_dma_buffers(DRM_IOCTL_ARGS) struct drm_dma d; int ret = 0; - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); DRM_COPY_FROM_USER_IOCTL(d, argp, sizeof(d)); @@ -1147,7 +1148,7 @@ int mga_dma_buffers(DRM_IOCTL_ARGS) d.granted_count = 0; if (d.request_count) { - ret = mga_dma_get_buffers(filp, dev, &d); + ret = mga_dma_get_buffers(dev, file_priv, &d); } DRM_COPY_TO_USER_IOCTL(argp, d, sizeof(d)); diff --git a/shared-core/mga_state.c b/shared-core/mga_state.c index f77883db..196d7d16 100644 --- a/shared-core/mga_state.c +++ b/shared-core/mga_state.c @@ -872,7 +872,7 @@ static int mga_dma_clear(DRM_IOCTL_ARGS) drm_mga_sarea_t *sarea_priv = 
dev_priv->sarea_priv; drm_mga_clear_t clear; - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); DRM_COPY_FROM_USER_IOCTL(clear, (drm_mga_clear_t __user *) data, sizeof(clear)); @@ -897,7 +897,7 @@ static int mga_dma_swap(DRM_IOCTL_ARGS) drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS) sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS; @@ -922,7 +922,7 @@ static int mga_dma_vertex(DRM_IOCTL_ARGS) drm_mga_buf_priv_t *buf_priv; drm_mga_vertex_t vertex; - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); DRM_COPY_FROM_USER_IOCTL(vertex, (drm_mga_vertex_t __user *) data, @@ -962,7 +962,7 @@ static int mga_dma_indices(DRM_IOCTL_ARGS) drm_mga_buf_priv_t *buf_priv; drm_mga_indices_t indices; - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); DRM_COPY_FROM_USER_IOCTL(indices, (drm_mga_indices_t __user *) data, @@ -1003,7 +1003,7 @@ static int mga_dma_iload(DRM_IOCTL_ARGS) drm_mga_iload_t iload; DRM_DEBUG("\n"); - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); DRM_COPY_FROM_USER_IOCTL(iload, (drm_mga_iload_t __user *) data, sizeof(iload)); @@ -1045,7 +1045,7 @@ static int mga_dma_blit(DRM_IOCTL_ARGS) drm_mga_blit_t blit; DRM_DEBUG("\n"); - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); DRM_COPY_FROM_USER_IOCTL(blit, (drm_mga_blit_t __user *) data, sizeof(blit)); diff --git a/shared-core/nouveau_drv.h b/shared-core/nouveau_drv.h index 9e11f9b7..7ecfadd2 100644 --- a/shared-core/nouveau_drv.h +++ b/shared-core/nouveau_drv.h @@ -47,7 +47,7 @@ struct mem_block { struct mem_block *prev; uint64_t start; uint64_t size; - DRMFILE filp; /* 0: free, -1: heap, other: real files */ + struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */ int flags; drm_local_map_t *map; drm_handle_t map_handle; @@ -95,7 +95,7 @@ struct nouveau_gpuobj_ref { struct nouveau_fifo { /* owner of this fifo */ - DRMFILE filp; + struct drm_file *file_priv; /* mapping of the fifo itself */ drm_local_map_t *map; /* mapping of the regs controling the fifo */ @@ -263,7 +263,8 @@ struct drm_nouveau_private { }; /* nouveau_state.c */ -extern void nouveau_preclose(struct drm_device * dev, DRMFILE filp); +extern void nouveau_preclose(struct drm_device * dev, + struct drm_file *file_priv); extern int nouveau_load(struct drm_device *dev, unsigned long flags); extern int nouveau_firstopen(struct drm_device *dev); extern void nouveau_lastclose(struct drm_device *dev); @@ -278,20 +279,25 @@ extern int nouveau_mem_init_heap(struct mem_block **, uint64_t start, uint64_t size); extern struct mem_block *nouveau_mem_alloc_block(struct mem_block *, uint64_t size, int align2, - DRMFILE); + struct drm_file *file_priv); extern void nouveau_mem_takedown(struct mem_block **heap); extern void nouveau_mem_free_block(struct mem_block *); extern uint64_t nouveau_mem_fb_amount(struct drm_device *dev); -extern void nouveau_mem_release(DRMFILE filp, struct mem_block *heap); +extern void nouveau_mem_release(struct drm_file *file_priv, + struct mem_block *heap); extern int nouveau_ioctl_mem_alloc(DRM_IOCTL_ARGS); extern int nouveau_ioctl_mem_free(DRM_IOCTL_ARGS); -extern struct mem_block* nouveau_mem_alloc(struct drm_device *dev, int alignment, uint64_t size, int flags, DRMFILE filp); +extern struct mem_block* nouveau_mem_alloc(struct drm_device *dev, + 
int alignment, uint64_t size, + int flags, + struct drm_file *file_priv); extern void nouveau_mem_free(struct drm_device* dev, struct mem_block*); extern int nouveau_mem_init(struct drm_device *dev); extern void nouveau_mem_close(struct drm_device *dev); /* nouveau_notifier.c */ -extern int nouveau_notifier_init_channel(struct drm_device *, int channel, DRMFILE); +extern int nouveau_notifier_init_channel(struct drm_device *, int channel, + struct drm_file *file_priv); extern void nouveau_notifier_takedown_channel(struct drm_device *, int channel); extern int nouveau_notifier_alloc(struct drm_device *, int channel, uint32_t handle, int cout, uint32_t *offset); @@ -301,8 +307,10 @@ extern int nouveau_ioctl_notifier_alloc(DRM_IOCTL_ARGS); extern int nouveau_fifo_init(struct drm_device *dev); extern int nouveau_fifo_number(struct drm_device *dev); extern int nouveau_fifo_ctx_size(struct drm_device *dev); -extern void nouveau_fifo_cleanup(struct drm_device *dev, DRMFILE filp); -extern int nouveau_fifo_owner(struct drm_device *dev, DRMFILE filp, int channel); +extern void nouveau_fifo_cleanup(struct drm_device *dev, + struct drm_file *file_priv); +extern int nouveau_fifo_owner(struct drm_device *dev, + struct drm_file *file_priv, int channel); extern void nouveau_fifo_free(struct drm_device *dev, int channel); /* nouveau_object.c */ @@ -473,7 +481,7 @@ extern int nv04_timer_init(struct drm_device *dev); extern uint64_t nv04_timer_read(struct drm_device *dev); extern void nv04_timer_takedown(struct drm_device *dev); -extern long nouveau_compat_ioctl(struct file *filp, unsigned int cmd, +extern long nouveau_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg); #if defined(__powerpc__) diff --git a/shared-core/nouveau_fifo.c b/shared-core/nouveau_fifo.c index e3a6674d..8731c6a1 100644 --- a/shared-core/nouveau_fifo.c +++ b/shared-core/nouveau_fifo.c @@ -204,7 +204,7 @@ nouveau_fifo_cmdbuf_alloc(struct drm_device *dev, int channel) cb = nouveau_mem_alloc(dev, 0, config->cmdbuf.size, config->cmdbuf.location | NOUVEAU_MEM_MAPPED, - (DRMFILE)-2); + (struct drm_file *)-2); if (!cb) { DRM_ERROR("Couldn't allocate DMA command buffer.\n"); return -ENOMEM; @@ -264,7 +264,8 @@ nouveau_fifo_cmdbuf_alloc(struct drm_device *dev, int channel) } /* allocates and initializes a fifo for user space consumption */ -int nouveau_fifo_alloc(struct drm_device *dev, int *chan_ret, DRMFILE filp, +int nouveau_fifo_alloc(struct drm_device *dev, int *chan_ret, + struct drm_file *file_priv, uint32_t vram_handle, uint32_t tt_handle) { int ret; @@ -298,7 +299,7 @@ int nouveau_fifo_alloc(struct drm_device *dev, int *chan_ret, DRMFILE filp, return -ENOMEM; dev_priv->fifo_alloc_count++; chan = dev_priv->fifos[channel]; - chan->filp = filp; + chan->file_priv = file_priv; DRM_INFO("Allocating FIFO number %d\n", channel); @@ -317,7 +318,7 @@ int nouveau_fifo_alloc(struct drm_device *dev, int *chan_ret, DRMFILE filp, } /* Allocate space for per-channel fixed notifier memory */ - ret = nouveau_notifier_init_channel(dev, channel, filp); + ret = nouveau_notifier_init_channel(dev, channel, file_priv); if (ret) { nouveau_fifo_free(dev, channel); return ret; @@ -441,20 +442,22 @@ void nouveau_fifo_free(struct drm_device *dev, int channel) drm_free(chan, sizeof(*chan), DRM_MEM_DRIVER); } -/* cleanups all the fifos from filp */ -void nouveau_fifo_cleanup(struct drm_device *dev, DRMFILE filp) +/* cleanups all the fifos from file_priv */ +void nouveau_fifo_cleanup(struct drm_device *dev, struct drm_file *file_priv) { int i; 
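(Rough sketch of the per-client channel bookkeeping the nouveau_fifo.c hunks around this point convert; the array size and function names below are illustrative, not the driver's. Each channel records the drm_file that created it, close-time cleanup frees every channel with a matching owner, and ioctls are refused for channels owned by another client.)

	#define SKETCH_NR_FIFOS 128                 /* illustrative, not the driver's limit */

	struct drm_file;

	struct fifo_sketch {
		struct drm_file *file_priv;         /* owner of this channel */
		/* ... command buffer, notifier block, register mapping ... */
	};

	static struct fifo_sketch *fifos[SKETCH_NR_FIFOS];

	/* On preclose: drop every channel that belongs to the closing client. */
	static void fifo_cleanup_for(struct drm_file *file_priv)
	{
		int i;

		for (i = 0; i < SKETCH_NR_FIFOS; i++)
			if (fifos[i] && fifos[i]->file_priv == file_priv)
				fifos[i] = NULL;    /* stands in for nouveau_fifo_free() */
	}

	/* Ownership check done before an ioctl may touch a channel. */
	static int fifo_owned_by(struct drm_file *file_priv, int channel)
	{
		return fifos[channel] && fifos[channel]->file_priv == file_priv;
	}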
struct drm_nouveau_private *dev_priv = dev->dev_private; - DRM_DEBUG("clearing FIFO enables from filp\n"); + DRM_DEBUG("clearing FIFO enables from file_priv\n"); for(i=0;i<nouveau_fifo_number(dev);i++) - if (dev_priv->fifos[i] && dev_priv->fifos[i]->filp==filp) + if (dev_priv->fifos[i] && + dev_priv->fifos[i]->file_priv==file_priv) nouveau_fifo_free(dev,i); } int -nouveau_fifo_owner(struct drm_device *dev, DRMFILE filp, int channel) +nouveau_fifo_owner(struct drm_device *dev, struct drm_file *file_priv, + int channel) { struct drm_nouveau_private *dev_priv = dev->dev_private; @@ -462,7 +465,7 @@ nouveau_fifo_owner(struct drm_device *dev, DRMFILE filp, int channel) return 0; if (dev_priv->fifos[channel] == NULL) return 0; - return (dev_priv->fifos[channel]->filp == filp); + return (dev_priv->fifos[channel]->file_priv == file_priv); } /*********************************** @@ -485,7 +488,7 @@ static int nouveau_ioctl_fifo_alloc(DRM_IOCTL_ARGS) if (init.fb_ctxdma_handle == ~0 || init.tt_ctxdma_handle == ~0) return -EINVAL; - res = nouveau_fifo_alloc(dev, &init.channel, filp, + res = nouveau_fifo_alloc(dev, &init.channel, file_priv, init.fb_ctxdma_handle, init.tt_ctxdma_handle); if (res) diff --git a/shared-core/nouveau_mem.c b/shared-core/nouveau_mem.c index 143378ff..6a4818c5 100644 --- a/shared-core/nouveau_mem.c +++ b/shared-core/nouveau_mem.c @@ -36,7 +36,7 @@ #include "nouveau_drv.h" static struct mem_block *split_block(struct mem_block *p, uint64_t start, uint64_t size, - DRMFILE filp) + struct drm_file *file_priv) { /* Maybe cut off the start of an existing block */ if (start > p->start) { @@ -46,7 +46,7 @@ static struct mem_block *split_block(struct mem_block *p, uint64_t start, uint64 goto out; newblock->start = start; newblock->size = p->size - (start - p->start); - newblock->filp = NULL; + newblock->file_priv = NULL; newblock->next = p->next; newblock->prev = p; p->next->prev = newblock; @@ -63,7 +63,7 @@ static struct mem_block *split_block(struct mem_block *p, uint64_t start, uint64 goto out; newblock->start = start + size; newblock->size = p->size - size; - newblock->filp = NULL; + newblock->file_priv = NULL; newblock->next = p->next; newblock->prev = p; p->next->prev = newblock; @@ -73,12 +73,14 @@ static struct mem_block *split_block(struct mem_block *p, uint64_t start, uint64 out: /* Our block is in the middle */ - p->filp = filp; + p->file_priv = file_priv; return p; } -struct mem_block *nouveau_mem_alloc_block(struct mem_block *heap, uint64_t size, - int align2, DRMFILE filp) +struct mem_block *nouveau_mem_alloc_block(struct mem_block *heap, + uint64_t size, + int align2, + struct drm_file *file_priv) { struct mem_block *p; uint64_t mask = (1 << align2) - 1; @@ -88,8 +90,8 @@ struct mem_block *nouveau_mem_alloc_block(struct mem_block *heap, uint64_t size, list_for_each(p, heap) { uint64_t start = (p->start + mask) & ~mask; - if (p->filp == 0 && start + size <= p->start + p->size) - return split_block(p, start, size, filp); + if (p->file_priv == 0 && start + size <= p->start + p->size) + return split_block(p, start, size, file_priv); } return NULL; @@ -108,12 +110,12 @@ static struct mem_block *find_block(struct mem_block *heap, uint64_t start) void nouveau_mem_free_block(struct mem_block *p) { - p->filp = NULL; + p->file_priv = NULL; - /* Assumes a single contiguous range. Needs a special filp in + /* Assumes a single contiguous range. Needs a special file_priv in * 'heap' to stop it being subsumed.
*/ - if (p->next->filp == 0) { + if (p->next->file_priv == 0) { struct mem_block *q = p->next; p->size += q->size; p->next = q->next; @@ -121,7 +123,7 @@ void nouveau_mem_free_block(struct mem_block *p) drm_free(q, sizeof(*q), DRM_MEM_BUFS); } - if (p->prev->filp == 0) { + if (p->prev->file_priv == 0) { struct mem_block *q = p->prev; q->size += p->size; q->next = p->next; @@ -148,19 +150,19 @@ int nouveau_mem_init_heap(struct mem_block **heap, uint64_t start, blocks->start = start; blocks->size = size; - blocks->filp = NULL; + blocks->file_priv = NULL; blocks->next = blocks->prev = *heap; memset(*heap, 0, sizeof(**heap)); - (*heap)->filp = (DRMFILE) - 1; + (*heap)->file_priv = (struct drm_file *) - 1; (*heap)->next = (*heap)->prev = blocks; return 0; } /* - * Free all blocks associated with the releasing filp + * Free all blocks associated with the releasing file_priv */ -void nouveau_mem_release(DRMFILE filp, struct mem_block *heap) +void nouveau_mem_release(struct drm_file *file_priv, struct mem_block *heap) { struct mem_block *p; @@ -168,15 +170,16 @@ void nouveau_mem_release(DRMFILE filp, struct mem_block *heap) return; list_for_each(p, heap) { - if (p->filp == filp) - p->filp = NULL; + if (p->file_priv == file_priv) + p->file_priv = NULL; } - /* Assumes a single contiguous range. Needs a special filp in + /* Assumes a single contiguous range. Needs a special file_priv in * 'heap' to stop it being subsumed. */ list_for_each(p, heap) { - while ((p->filp == 0) && (p->next->filp == 0) && (p->next!=heap)) { + while ((p->file_priv == 0) && (p->next->file_priv == 0) && + (p->next!=heap)) { struct mem_block *q = p->next; p->size += q->size; p->next = q->next; @@ -424,7 +427,9 @@ int nouveau_mem_init(struct drm_device *dev) return 0; } -struct mem_block* nouveau_mem_alloc(struct drm_device *dev, int alignment, uint64_t size, int flags, DRMFILE filp) +struct mem_block* nouveau_mem_alloc(struct drm_device *dev, int alignment, + uint64_t size, int flags, + struct drm_file *file_priv) { struct mem_block *block; int type; @@ -453,13 +458,14 @@ struct mem_block* nouveau_mem_alloc(struct drm_device *dev, int alignment, uint6 #define NOUVEAU_MEM_ALLOC_AGP {\ type=NOUVEAU_MEM_AGP;\ block = nouveau_mem_alloc_block(dev_priv->agp_heap, size,\ - alignment, filp);\ + alignment, file_priv); \ if (block) goto alloc_ok;\ } #define NOUVEAU_MEM_ALLOC_PCI {\ type = NOUVEAU_MEM_PCI;\ - block = nouveau_mem_alloc_block(dev_priv->pci_heap, size, alignment, filp);\ + block = nouveau_mem_alloc_block(dev_priv->pci_heap, size, \ + alignment, file_priv); \ if ( block ) goto alloc_ok;\ } @@ -467,11 +473,12 @@ struct mem_block* nouveau_mem_alloc(struct drm_device *dev, int alignment, uint6 type=NOUVEAU_MEM_FB;\ if (!(flags&NOUVEAU_MEM_MAPPED)) {\ block = nouveau_mem_alloc_block(dev_priv->fb_nomap_heap,\ - size, alignment, filp); \ + size, alignment, \ + file_priv); \ if (block) goto alloc_ok;\ }\ block = nouveau_mem_alloc_block(dev_priv->fb_heap, size,\ - alignment, filp);\ + alignment, file_priv);\ if (block) goto alloc_ok;\ } @@ -556,7 +563,8 @@ int nouveau_ioctl_mem_alloc(DRM_IOCTL_ARGS) (struct drm_nouveau_mem_alloc_t __user *) data, sizeof(alloc)); - block=nouveau_mem_alloc(dev, alloc.alignment, alloc.size, alloc.flags, filp); + block=nouveau_mem_alloc(dev, alloc.alignment, alloc.size, alloc.flags, + file_priv); if (!block) return -ENOMEM; alloc.map_handle=block->map_handle; @@ -589,7 +597,7 @@ int nouveau_ioctl_mem_free(DRM_IOCTL_ARGS) block = find_block(dev_priv->pci_heap, memfree.offset); if (!block) return -EFAULT; 
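(The range allocator the nouveau_mem.c hunks above touch, like the i915 one earlier, is a doubly linked list of extents: allocation is first-fit with power-of-two alignment and splits a free block, freeing clears the owner and merges with free neighbours. Below is a compact userspace sketch of the free/merge step, under the same single-contiguous-range assumption the driver comment states; it mirrors nouveau_mem_free_block() but is not the driver code itself.)

	#include <stdint.h>
	#include <stdlib.h>

	struct drm_file;

	struct mem_block {
		struct mem_block *next, *prev;
		uint64_t start, size;
		struct drm_file *file_priv;         /* NULL while the extent is free */
	};

	/* Free an extent and absorb free neighbours.  The list is circular with a
	 * sentinel 'heap' block that is never marked free, so the merge cannot run
	 * off the ends of the range. */
	static void sketch_free_block(struct mem_block *p)
	{
		p->file_priv = NULL;

		if (p->next->file_priv == NULL) {   /* merge the free block after us */
			struct mem_block *q = p->next;
			p->size += q->size;
			p->next = q->next;
			p->next->prev = p;
			free(q);
		}
		if (p->prev->file_priv == NULL) {   /* let the free block before us absorb p */
			struct mem_block *q = p->prev;
			q->size += p->size;
			q->next = p->next;
			q->next->prev = q;
			free(p);
		}
	}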
- if (block->filp != filp) + if (block->file_priv != file_priv) return -EPERM; nouveau_mem_free(dev, block); diff --git a/shared-core/nouveau_notifier.c b/shared-core/nouveau_notifier.c index 425e471c..6a78bb23 100644 --- a/shared-core/nouveau_notifier.c +++ b/shared-core/nouveau_notifier.c @@ -30,7 +30,8 @@ #include "nouveau_drv.h" int -nouveau_notifier_init_channel(struct drm_device *dev, int channel, DRMFILE filp) +nouveau_notifier_init_channel(struct drm_device *dev, int channel, + struct drm_file *file_priv) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo *chan = dev_priv->fifos[channel]; @@ -44,7 +45,8 @@ nouveau_notifier_init_channel(struct drm_device *dev, int channel, DRMFILE filp) flags = NOUVEAU_MEM_FB; flags |= NOUVEAU_MEM_MAPPED; - chan->notifier_block = nouveau_mem_alloc(dev, 0, PAGE_SIZE, flags,filp); + chan->notifier_block = nouveau_mem_alloc(dev, 0, PAGE_SIZE, flags, + file_priv); if (!chan->notifier_block) return -ENOMEM; @@ -87,7 +89,8 @@ nouveau_notifier_alloc(struct drm_device *dev, int channel, uint32_t handle, return -EINVAL; } - mem = nouveau_mem_alloc_block(chan->notifier_heap, 32, 0, chan->filp); + mem = nouveau_mem_alloc_block(chan->notifier_heap, 32, 0, + chan->file_priv); if (!mem) { DRM_ERROR("Channel %d notifier block full\n", channel); return -ENOMEM; @@ -135,7 +138,7 @@ nouveau_ioctl_notifier_alloc(DRM_IOCTL_ARGS) (struct drm_nouveau_notifier_alloc __user*)data, sizeof(na)); - if (!nouveau_fifo_owner(dev, filp, na.channel)) { + if (!nouveau_fifo_owner(dev, file_priv, na.channel)) { DRM_ERROR("pid %d doesn't own channel %d\n", DRM_CURRENTPID, na.channel); return -EPERM; diff --git a/shared-core/nouveau_object.c b/shared-core/nouveau_object.c index 30d515f0..f11cc115 100644 --- a/shared-core/nouveau_object.c +++ b/shared-core/nouveau_object.c @@ -241,7 +241,7 @@ nouveau_gpuobj_new(struct drm_device *dev, int channel, int size, int align, /* Allocate a chunk of the PRAMIN aperture */ gpuobj->im_pramin = nouveau_mem_alloc_block(pramin, size, drm_order(align), - (DRMFILE)-2); + (struct drm_file *)-2); if (!gpuobj->im_pramin) { nouveau_gpuobj_del(dev, &gpuobj); return -ENOMEM; @@ -1035,7 +1035,7 @@ int nouveau_ioctl_grobj_alloc(DRM_IOCTL_ARGS) (struct drm_nouveau_grobj_alloc_t __user*)data, sizeof(init)); - if (!nouveau_fifo_owner(dev, filp, init.channel)) { + if (!nouveau_fifo_owner(dev, file_priv, init.channel)) { DRM_ERROR("pid %d doesn't own channel %d\n", DRM_CURRENTPID, init.channel); return -EINVAL; diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c index b6459957..aea6bcf5 100644 --- a/shared-core/nouveau_state.c +++ b/shared-core/nouveau_state.c @@ -353,15 +353,16 @@ static void nouveau_card_takedown(struct drm_device *dev) } } -/* here a client dies, release the stuff that was allocated for its filp */ -void nouveau_preclose(struct drm_device *dev, DRMFILE filp) +/* here a client dies, release the stuff that was allocated for its + * file_priv */ +void nouveau_preclose(struct drm_device *dev, struct drm_file *file_priv) { struct drm_nouveau_private *dev_priv = dev->dev_private; - nouveau_fifo_cleanup(dev, filp); - nouveau_mem_release(filp,dev_priv->fb_heap); - nouveau_mem_release(filp,dev_priv->agp_heap); - nouveau_mem_release(filp,dev_priv->pci_heap); + nouveau_fifo_cleanup(dev, file_priv); + nouveau_mem_release(file_priv,dev_priv->fb_heap); + nouveau_mem_release(file_priv,dev_priv->agp_heap); + nouveau_mem_release(file_priv,dev_priv->pci_heap); } /* first module load, setup the mmio/fb mapping */ diff 
--git a/shared-core/nv50_instmem.c b/shared-core/nv50_instmem.c index ad77f441..c26b1db5 100644 --- a/shared-core/nv50_instmem.c +++ b/shared-core/nv50_instmem.c @@ -179,7 +179,7 @@ nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, uin gpuobj->im_backing = nouveau_mem_alloc(dev, NV50_INSTMEM_PAGE_SIZE, *sz, NOUVEAU_MEM_FB, - (DRMFILE)-2); + (struct drm_file *)-2); if (!gpuobj->im_backing) { DRM_ERROR("Couldn't allocate vram to back PRAMIN pages\n"); return -ENOMEM; diff --git a/shared-core/r128_cce.c b/shared-core/r128_cce.c index 51b290b4..f91e9031 100644 --- a/shared-core/r128_cce.c +++ b/shared-core/r128_cce.c @@ -630,7 +630,7 @@ int r128_cce_init(DRM_IOCTL_ARGS) DRM_DEBUG("\n"); - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); DRM_COPY_FROM_USER_IOCTL(init, (drm_r128_init_t __user *) data, sizeof(init)); @@ -651,7 +651,7 @@ int r128_cce_start(DRM_IOCTL_ARGS) drm_r128_private_t *dev_priv = dev->dev_private; DRM_DEBUG("\n"); - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); if (dev_priv->cce_running || dev_priv->cce_mode == R128_PM4_NONPM4) { DRM_DEBUG("%s while CCE running\n", __FUNCTION__); @@ -674,7 +674,7 @@ int r128_cce_stop(DRM_IOCTL_ARGS) int ret; DRM_DEBUG("\n"); - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); DRM_COPY_FROM_USER_IOCTL(stop, (drm_r128_cce_stop_t __user *) data, sizeof(stop)); @@ -715,7 +715,7 @@ int r128_cce_reset(DRM_IOCTL_ARGS) drm_r128_private_t *dev_priv = dev->dev_private; DRM_DEBUG("\n"); - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); if (!dev_priv) { DRM_DEBUG("%s called before init done\n", __FUNCTION__); @@ -736,7 +736,7 @@ int r128_cce_idle(DRM_IOCTL_ARGS) drm_r128_private_t *dev_priv = dev->dev_private; DRM_DEBUG("\n"); - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); if (dev_priv->cce_running) { r128_do_cce_flush(dev_priv); @@ -750,7 +750,7 @@ int r128_engine_reset(DRM_IOCTL_ARGS) DRM_DEVICE; DRM_DEBUG("\n"); - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); return r128_do_engine_reset(dev); } @@ -826,7 +826,7 @@ static struct drm_buf *r128_freelist_get(struct drm_device * dev) for (i = 0; i < dma->buf_count; i++) { buf = dma->buflist[i]; buf_priv = buf->dev_private; - if (buf->filp == 0) + if (buf->file_priv == 0) return buf; } @@ -884,7 +884,9 @@ int r128_wait_ring(drm_r128_private_t * dev_priv, int n) return -EBUSY; } -static int r128_cce_get_buffers(DRMFILE filp, struct drm_device * dev, struct drm_dma * d) +static int r128_cce_get_buffers(struct drm_device * dev, + struct drm_file *file_priv, + struct drm_dma * d) { int i; struct drm_buf *buf; @@ -894,7 +896,7 @@ static int r128_cce_get_buffers(DRMFILE filp, struct drm_device * dev, struct dr if (!buf) return -EAGAIN; - buf->filp = filp; + buf->file_priv = file_priv; if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx, sizeof(buf->idx))) @@ -916,7 +918,7 @@ int r128_cce_buffers(DRM_IOCTL_ARGS) struct drm_dma __user *argp = (void __user *)data; struct drm_dma d; - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); DRM_COPY_FROM_USER_IOCTL(d, argp, sizeof(d)); @@ -939,7 +941,7 @@ int r128_cce_buffers(DRM_IOCTL_ARGS) d.granted_count = 0; if (d.request_count) { - ret = r128_cce_get_buffers(filp, dev, &d); + ret = r128_cce_get_buffers(dev, file_priv, &d); } DRM_COPY_TO_USER_IOCTL(argp, d, sizeof(d)); diff --git a/shared-core/r128_drv.h b/shared-core/r128_drv.h index 
077b2763..6eb59e33 100644 --- a/shared-core/r128_drv.h +++ b/shared-core/r128_drv.h @@ -156,7 +156,8 @@ extern void r128_driver_irq_preinstall(struct drm_device * dev); extern void r128_driver_irq_postinstall(struct drm_device * dev); extern void r128_driver_irq_uninstall(struct drm_device * dev); extern void r128_driver_lastclose(struct drm_device * dev); -extern void r128_driver_preclose(struct drm_device * dev, DRMFILE filp); +extern void r128_driver_preclose(struct drm_device * dev, + struct drm_file *file_priv); extern long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); diff --git a/shared-core/r128_state.c b/shared-core/r128_state.c index 6b19c4d3..4c244377 100644 --- a/shared-core/r128_state.c +++ b/shared-core/r128_state.c @@ -776,8 +776,9 @@ static void r128_cce_dispatch_indices(struct drm_device * dev, sarea_priv->nbox = 0; } -static int r128_cce_dispatch_blit(DRMFILE filp, - struct drm_device * dev, drm_r128_blit_t * blit) +static int r128_cce_dispatch_blit(struct drm_device * dev, + struct drm_file *file_priv, + drm_r128_blit_t * blit) { drm_r128_private_t *dev_priv = dev->dev_private; struct drm_device_dma *dma = dev->dma; @@ -829,9 +830,9 @@ static int r128_cce_dispatch_blit(DRMFILE filp, buf = dma->buflist[blit->idx]; buf_priv = buf->dev_private; - if (buf->filp != filp) { + if (buf->file_priv != file_priv) { DRM_ERROR("process %d using buffer owned by %p\n", - DRM_CURRENTPID, buf->filp); + DRM_CURRENTPID, buf->file_priv); return -EINVAL; } if (buf->pending) { @@ -1249,7 +1250,7 @@ static int r128_cce_clear(DRM_IOCTL_ARGS) drm_r128_clear_t clear; DRM_DEBUG("\n"); - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); DRM_COPY_FROM_USER_IOCTL(clear, (drm_r128_clear_t __user *) data, sizeof(clear)); @@ -1315,7 +1316,7 @@ static int r128_cce_flip(DRM_IOCTL_ARGS) drm_r128_private_t *dev_priv = dev->dev_private; DRM_DEBUG("%s\n", __FUNCTION__); - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); RING_SPACE_TEST_WITH_RETURN(dev_priv); @@ -1335,7 +1336,7 @@ static int r128_cce_swap(DRM_IOCTL_ARGS) drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; DRM_DEBUG("%s\n", __FUNCTION__); - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); RING_SPACE_TEST_WITH_RETURN(dev_priv); @@ -1359,7 +1360,7 @@ static int r128_cce_vertex(DRM_IOCTL_ARGS) drm_r128_buf_priv_t *buf_priv; drm_r128_vertex_t vertex; - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); @@ -1389,9 +1390,9 @@ static int r128_cce_vertex(DRM_IOCTL_ARGS) buf = dma->buflist[vertex.idx]; buf_priv = buf->dev_private; - if (buf->filp != filp) { + if (buf->file_priv != file_priv) { DRM_ERROR("process %d using buffer owned by %p\n", - DRM_CURRENTPID, buf->filp); + DRM_CURRENTPID, buf->file_priv); return -EINVAL; } if (buf->pending) { @@ -1419,7 +1420,7 @@ static int r128_cce_indices(DRM_IOCTL_ARGS) drm_r128_indices_t elts; int count; - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); @@ -1448,9 +1449,9 @@ static int r128_cce_indices(DRM_IOCTL_ARGS) buf = dma->buflist[elts.idx]; buf_priv = buf->dev_private; - if (buf->filp != filp) { + if (buf->file_priv != file_priv) { DRM_ERROR("process %d using buffer owned by %p\n", - DRM_CURRENTPID, buf->filp); + DRM_CURRENTPID, buf->file_priv); return -EINVAL; } if (buf->pending) { @@ -1488,7 
+1489,7 @@ static int r128_cce_blit(DRM_IOCTL_ARGS) drm_r128_blit_t blit; int ret; - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); DRM_COPY_FROM_USER_IOCTL(blit, (drm_r128_blit_t __user *) data, sizeof(blit)); @@ -1504,7 +1505,7 @@ static int r128_cce_blit(DRM_IOCTL_ARGS) RING_SPACE_TEST_WITH_RETURN(dev_priv); VB_AGE_TEST_WITH_RETURN(dev_priv); - ret = r128_cce_dispatch_blit(filp, dev, &blit); + ret = r128_cce_dispatch_blit(dev, file_priv, &blit); COMMIT_RING(); return ret; @@ -1517,7 +1518,7 @@ static int r128_cce_depth(DRM_IOCTL_ARGS) drm_r128_depth_t depth; int ret; - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); DRM_COPY_FROM_USER_IOCTL(depth, (drm_r128_depth_t __user *) data, sizeof(depth)); @@ -1551,7 +1552,7 @@ static int r128_cce_stipple(DRM_IOCTL_ARGS) drm_r128_stipple_t stipple; u32 mask[32]; - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); DRM_COPY_FROM_USER_IOCTL(stipple, (drm_r128_stipple_t __user *) data, sizeof(stipple)); @@ -1579,7 +1580,7 @@ static int r128_cce_indirect(DRM_IOCTL_ARGS) RING_LOCALS; #endif - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); @@ -1601,9 +1602,9 @@ static int r128_cce_indirect(DRM_IOCTL_ARGS) buf = dma->buflist[indirect.idx]; buf_priv = buf->dev_private; - if (buf->filp != filp) { + if (buf->file_priv != file_priv) { DRM_ERROR("process %d using buffer owned by %p\n", - DRM_CURRENTPID, buf->filp); + DRM_CURRENTPID, buf->file_priv); return -EINVAL; } if (buf->pending) { @@ -1675,7 +1676,7 @@ static int r128_getparam(DRM_IOCTL_ARGS) return 0; } -void r128_driver_preclose(struct drm_device * dev, DRMFILE filp) +void r128_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) { if (dev->dev_private) { drm_r128_private_t *dev_priv = dev->dev_private; diff --git a/shared-core/r300_cmdbuf.c b/shared-core/r300_cmdbuf.c index 8fee22e9..fe46c2d2 100644 --- a/shared-core/r300_cmdbuf.c +++ b/shared-core/r300_cmdbuf.c @@ -779,8 +779,7 @@ static int r300_scratch(drm_radeon_private_t *dev_priv, * Called by the ioctl handler function radeon_cp_cmdbuf. 
*/ int r300_do_cp_cmdbuf(struct drm_device *dev, - DRMFILE filp, - struct drm_file *filp_priv, + struct drm_file *file_priv, drm_radeon_kcmd_buffer_t *cmdbuf) { drm_radeon_private_t *dev_priv = dev->dev_private; @@ -883,9 +882,10 @@ int r300_do_cp_cmdbuf(struct drm_device *dev, } buf = dma->buflist[idx]; - if (buf->filp != filp || buf->pending) { + if (buf->file_priv != file_priv || buf->pending) { DRM_ERROR("bad buffer %p %p %d\n", - buf->filp, filp, buf->pending); + buf->file_priv, file_priv, + buf->pending); ret = -EINVAL; goto cleanup; } diff --git a/shared-core/radeon_cp.c b/shared-core/radeon_cp.c index 723d41c3..dd87f009 100644 --- a/shared-core/radeon_cp.c +++ b/shared-core/radeon_cp.c @@ -1843,7 +1843,7 @@ int radeon_cp_init(DRM_IOCTL_ARGS) DRM_DEVICE; drm_radeon_init_t init; - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); DRM_COPY_FROM_USER_IOCTL(init, (drm_radeon_init_t __user *) data, sizeof(init)); @@ -1869,7 +1869,7 @@ int radeon_cp_start(DRM_IOCTL_ARGS) drm_radeon_private_t *dev_priv = dev->dev_private; DRM_DEBUG("\n"); - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); if (dev_priv->cp_running) { DRM_DEBUG("%s while CP running\n", __FUNCTION__); @@ -1897,7 +1897,7 @@ int radeon_cp_stop(DRM_IOCTL_ARGS) int ret; DRM_DEBUG("\n"); - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); DRM_COPY_FROM_USER_IOCTL(stop, (drm_radeon_cp_stop_t __user *) data, sizeof(stop)); @@ -1989,7 +1989,7 @@ int radeon_cp_reset(DRM_IOCTL_ARGS) drm_radeon_private_t *dev_priv = dev->dev_private; DRM_DEBUG("\n"); - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); if (!dev_priv) { DRM_DEBUG("%s called before init done\n", __FUNCTION__); @@ -2010,7 +2010,7 @@ int radeon_cp_idle(DRM_IOCTL_ARGS) drm_radeon_private_t *dev_priv = dev->dev_private; DRM_DEBUG("\n"); - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); return radeon_do_cp_idle(dev_priv); } @@ -2029,7 +2029,7 @@ int radeon_engine_reset(DRM_IOCTL_ARGS) DRM_DEVICE; DRM_DEBUG("\n"); - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); return radeon_do_engine_reset(dev); } @@ -2086,8 +2086,9 @@ struct drm_buf *radeon_freelist_get(struct drm_device * dev) for (i = start; i < dma->buf_count; i++) { buf = dma->buflist[i]; buf_priv = buf->dev_private; - if (buf->filp == 0 || (buf->pending && - buf_priv->age <= done_age)) { + if (buf->file_priv == NULL || (buf->pending && + buf_priv->age <= + done_age)) { dev_priv->stats.requested_bufs++; buf->pending = 0; return buf; @@ -2126,8 +2127,9 @@ struct drm_buf *radeon_freelist_get(struct drm_device * dev) for (i = start; i < dma->buf_count; i++) { buf = dma->buflist[i]; buf_priv = buf->dev_private; - if (buf->filp == 0 || (buf->pending && - buf_priv->age <= done_age)) { + if (buf->file_priv == 0 || (buf->pending && + buf_priv->age <= + done_age)) { dev_priv->stats.requested_bufs++; buf->pending = 0; return buf; @@ -2190,7 +2192,8 @@ int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n) return -EBUSY; } -static int radeon_cp_get_buffers(DRMFILE filp, struct drm_device * dev, +static int radeon_cp_get_buffers(struct drm_device *dev, + struct drm_file *file_priv, struct drm_dma * d) { int i; @@ -2201,7 +2204,7 @@ static int radeon_cp_get_buffers(DRMFILE filp, struct drm_device * dev, if (!buf) return -EBUSY; /* NOTE: broken client */ - buf->filp = filp; + buf->file_priv = file_priv; if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx, sizeof(buf->idx))) 
@@ -2223,7 +2226,7 @@ int radeon_cp_buffers(DRM_IOCTL_ARGS) struct drm_dma __user *argp = (void __user *)data; struct drm_dma d; - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); DRM_COPY_FROM_USER_IOCTL(d, argp, sizeof(d)); @@ -2246,7 +2249,7 @@ int radeon_cp_buffers(DRM_IOCTL_ARGS) d.granted_count = 0; if (d.request_count) { - ret = radeon_cp_get_buffers(filp, dev, &d); + ret = radeon_cp_get_buffers(dev, file_priv, &d); } DRM_COPY_TO_USER_IOCTL(argp, d, sizeof(d)); diff --git a/shared-core/radeon_drv.h b/shared-core/radeon_drv.h index 2dca1e70..631fe007 100644 --- a/shared-core/radeon_drv.h +++ b/shared-core/radeon_drv.h @@ -195,7 +195,7 @@ struct mem_block { struct mem_block *prev; int start; int size; - DRMFILE filp; /* 0: free, -1: heap, other: real files */ + struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */ }; struct radeon_surface { @@ -210,7 +210,7 @@ struct radeon_virt_surface { u32 lower; u32 upper; u32 flags; - DRMFILE filp; + struct drm_file *file_priv; }; typedef struct drm_radeon_private { @@ -356,7 +356,8 @@ extern int radeon_mem_alloc(DRM_IOCTL_ARGS); extern int radeon_mem_free(DRM_IOCTL_ARGS); extern int radeon_mem_init_heap(DRM_IOCTL_ARGS); extern void radeon_mem_takedown(struct mem_block **heap); -extern void radeon_mem_release(DRMFILE filp, struct mem_block *heap); +extern void radeon_mem_release(struct drm_file *file_priv, + struct mem_block *heap); /* radeon_irq.c */ extern int radeon_irq_emit(DRM_IOCTL_ARGS); @@ -377,18 +378,21 @@ extern int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value); extern int radeon_driver_load(struct drm_device *dev, unsigned long flags); extern int radeon_driver_unload(struct drm_device *dev); extern int radeon_driver_firstopen(struct drm_device *dev); -extern void radeon_driver_preclose(struct drm_device * dev, DRMFILE filp); -extern void radeon_driver_postclose(struct drm_device * dev, struct drm_file * filp); +extern void radeon_driver_preclose(struct drm_device * dev, + struct drm_file *file_priv); +extern void radeon_driver_postclose(struct drm_device * dev, + struct drm_file *file_priv); extern void radeon_driver_lastclose(struct drm_device * dev); -extern int radeon_driver_open(struct drm_device * dev, struct drm_file * filp_priv); +extern int radeon_driver_open(struct drm_device * dev, + struct drm_file * file_priv); extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); /* r300_cmdbuf.c */ extern void r300_init_reg_flags(void); -extern int r300_do_cp_cmdbuf(struct drm_device *dev, DRMFILE filp, - struct drm_file* filp_priv, +extern int r300_do_cp_cmdbuf(struct drm_device *dev, + struct drm_file *file_priv, drm_radeon_kcmd_buffer_t* cmdbuf); /* Flags for stats.boxes diff --git a/shared-core/radeon_irq.c b/shared-core/radeon_irq.c index 8266d11a..140f9668 100644 --- a/shared-core/radeon_irq.c +++ b/shared-core/radeon_irq.c @@ -204,7 +204,7 @@ int radeon_irq_emit(DRM_IOCTL_ARGS) drm_radeon_irq_emit_t emit; int result; - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); diff --git a/shared-core/radeon_mem.c b/shared-core/radeon_mem.c index 63d4b8c9..82d454ff 100644 --- a/shared-core/radeon_mem.c +++ b/shared-core/radeon_mem.c @@ -39,7 +39,7 @@ */ static struct mem_block *split_block(struct mem_block *p, int start, int size, - DRMFILE filp) + struct drm_file *file_priv) { /* Maybe cut off the start of an existing block */ if (start > 
p->start) { @@ -49,7 +49,7 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size, goto out; newblock->start = start; newblock->size = p->size - (start - p->start); - newblock->filp = NULL; + newblock->file_priv = NULL; newblock->next = p->next; newblock->prev = p; p->next->prev = newblock; @@ -66,7 +66,7 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size, goto out; newblock->start = start + size; newblock->size = p->size - size; - newblock->filp = NULL; + newblock->file_priv = NULL; newblock->next = p->next; newblock->prev = p; p->next->prev = newblock; @@ -76,20 +76,20 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size, out: /* Our block is in the middle */ - p->filp = filp; + p->file_priv = file_priv; return p; } static struct mem_block *alloc_block(struct mem_block *heap, int size, - int align2, DRMFILE filp) + int align2, struct drm_file *file_priv) { struct mem_block *p; int mask = (1 << align2) - 1; list_for_each(p, heap) { int start = (p->start + mask) & ~mask; - if (p->filp == 0 && start + size <= p->start + p->size) - return split_block(p, start, size, filp); + if (p->file_priv == 0 && start + size <= p->start + p->size) + return split_block(p, start, size, file_priv); } return NULL; @@ -108,12 +108,12 @@ static struct mem_block *find_block(struct mem_block *heap, int start) static void free_block(struct mem_block *p) { - p->filp = NULL; + p->file_priv = NULL; - /* Assumes a single contiguous range. Needs a special filp in + /* Assumes a single contiguous range. Needs a special file_priv in * 'heap' to stop it being subsumed. */ - if (p->next->filp == 0) { + if (p->next->file_priv == 0) { struct mem_block *q = p->next; p->size += q->size; p->next = q->next; @@ -121,7 +121,7 @@ static void free_block(struct mem_block *p) drm_free(q, sizeof(*q), DRM_MEM_BUFS); } - if (p->prev->filp == 0) { + if (p->prev->file_priv == 0) { struct mem_block *q = p->prev; q->size += p->size; q->next = p->next; @@ -147,18 +147,18 @@ static int init_heap(struct mem_block **heap, int start, int size) blocks->start = start; blocks->size = size; - blocks->filp = NULL; + blocks->file_priv = NULL; blocks->next = blocks->prev = *heap; memset(*heap, 0, sizeof(**heap)); - (*heap)->filp = (DRMFILE) - 1; + (*heap)->file_priv = (struct drm_file *) - 1; (*heap)->next = (*heap)->prev = blocks; return 0; } /* Free all blocks associated with the releasing file. */ -void radeon_mem_release(DRMFILE filp, struct mem_block *heap) +void radeon_mem_release(struct drm_file *file_priv, struct mem_block *heap) { struct mem_block *p; @@ -166,15 +166,15 @@ void radeon_mem_release(DRMFILE filp, struct mem_block *heap) return; list_for_each(p, heap) { - if (p->filp == filp) - p->filp = NULL; + if (p->file_priv == file_priv) + p->file_priv = NULL; } - /* Assumes a single contiguous range. Needs a special filp in + /* Assumes a single contiguous range. Needs a special file_priv in * 'heap' to stop it being subsumed. 
*/ list_for_each(p, heap) { - while (p->filp == 0 && p->next->filp == 0) { + while (p->file_priv == 0 && p->next->file_priv == 0) { struct mem_block *q = p->next; p->size += q->size; p->next = q->next; @@ -242,7 +242,7 @@ int radeon_mem_alloc(DRM_IOCTL_ARGS) if (alloc.alignment < 12) alloc.alignment = 12; - block = alloc_block(*heap, alloc.size, alloc.alignment, filp); + block = alloc_block(*heap, alloc.size, alloc.alignment, file_priv); if (!block) return -ENOMEM; @@ -278,7 +278,7 @@ int radeon_mem_free(DRM_IOCTL_ARGS) if (!block) return -EFAULT; - if (block->filp != filp) + if (block->file_priv != file_priv) return -EPERM; free_block(block); diff --git a/shared-core/radeon_state.c b/shared-core/radeon_state.c index e351656d..3ca49d6a 100644 --- a/shared-core/radeon_state.c +++ b/shared-core/radeon_state.c @@ -39,7 +39,7 @@ static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t * dev_priv, - struct drm_file * filp_priv, + struct drm_file *file_priv, u32 * offset) { u64 off = *offset; @@ -71,7 +71,7 @@ static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t * * magic offset we get from SETPARAM or calculated from fb_location */ if (off < (dev_priv->fb_size + dev_priv->gart_size)) { - radeon_priv = filp_priv->driver_priv; + radeon_priv = file_priv->driver_priv; off += radeon_priv->radeon_fb_delta; } @@ -90,13 +90,13 @@ static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t * static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t * dev_priv, - struct drm_file * filp_priv, + struct drm_file *file_priv, int id, u32 *data) { switch (id) { case RADEON_EMIT_PP_MISC: - if (radeon_check_and_fixup_offset(dev_priv, filp_priv, + if (radeon_check_and_fixup_offset(dev_priv, file_priv, &data[(RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4])) { DRM_ERROR("Invalid depth buffer offset\n"); return -EINVAL; @@ -104,7 +104,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t * break; case RADEON_EMIT_PP_CNTL: - if (radeon_check_and_fixup_offset(dev_priv, filp_priv, + if (radeon_check_and_fixup_offset(dev_priv, file_priv, &data[(RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4])) { DRM_ERROR("Invalid colour buffer offset\n"); return -EINVAL; @@ -117,7 +117,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t * case R200_EMIT_PP_TXOFFSET_3: case R200_EMIT_PP_TXOFFSET_4: case R200_EMIT_PP_TXOFFSET_5: - if (radeon_check_and_fixup_offset(dev_priv, filp_priv, + if (radeon_check_and_fixup_offset(dev_priv, file_priv, &data[0])) { DRM_ERROR("Invalid R200 texture offset\n"); return -EINVAL; @@ -127,7 +127,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t * case RADEON_EMIT_PP_TXFILTER_0: case RADEON_EMIT_PP_TXFILTER_1: case RADEON_EMIT_PP_TXFILTER_2: - if (radeon_check_and_fixup_offset(dev_priv, filp_priv, + if (radeon_check_and_fixup_offset(dev_priv, file_priv, &data[(RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4])) { DRM_ERROR("Invalid R100 texture offset\n"); return -EINVAL; @@ -143,7 +143,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t * int i; for (i = 0; i < 5; i++) { if (radeon_check_and_fixup_offset(dev_priv, - filp_priv, + file_priv, &data[i])) { DRM_ERROR ("Invalid R200 cubic texture offset\n"); @@ -159,7 +159,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t * int i; for (i = 0; i < 5; i++) { if (radeon_check_and_fixup_offset(dev_priv, - filp_priv, + file_priv, &data[i])) { DRM_ERROR ("Invalid R100 
cubic texture offset\n"); @@ -264,7 +264,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t * static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * dev_priv, - struct drm_file *filp_priv, + struct drm_file *file_priv, drm_radeon_kcmd_buffer_t * cmdbuf, unsigned int *cmdsz) @@ -326,7 +326,8 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * i = 2; while ((k < narrays) && (i < (count + 2))) { i++; /* skip attribute field */ - if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &cmd[i])) { + if (radeon_check_and_fixup_offset(dev_priv, file_priv, + &cmd[i])) { DRM_ERROR ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n", k, i); @@ -337,7 +338,9 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * if (k == narrays) break; /* have one more to process, they come in pairs */ - if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &cmd[i])) { + if (radeon_check_and_fixup_offset(dev_priv, + file_priv, &cmd[i])) + { DRM_ERROR ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n", k, i); @@ -360,7 +363,7 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * DRM_ERROR("Invalid 3d packet for r200-class chip\n"); return -EINVAL; } - if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &cmd[1])) { + if (radeon_check_and_fixup_offset(dev_priv, file_priv, &cmd[1])) { DRM_ERROR("Invalid rndr_gen_indx offset\n"); return -EINVAL; } @@ -375,7 +378,7 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]); return -EINVAL; } - if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &cmd[2])) { + if (radeon_check_and_fixup_offset(dev_priv, file_priv, &cmd[2])) { DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]); return -EINVAL; } @@ -389,7 +392,7 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { offset = cmd[2] << 10; if (radeon_check_and_fixup_offset - (dev_priv, filp_priv, &offset)) { + (dev_priv, file_priv, &offset)) { DRM_ERROR("Invalid first packet offset\n"); return -EINVAL; } @@ -400,7 +403,7 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * (cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { offset = cmd[3] << 10; if (radeon_check_and_fixup_offset - (dev_priv, filp_priv, &offset)) { + (dev_priv, file_priv, &offset)) { DRM_ERROR("Invalid second packet offset\n"); return -EINVAL; } @@ -439,7 +442,7 @@ static __inline__ void radeon_emit_clip_rect(drm_radeon_private_t * dev_priv, /* Emit 1.1 state */ static int radeon_emit_state(drm_radeon_private_t * dev_priv, - struct drm_file * filp_priv, + struct drm_file *file_priv, drm_radeon_context_regs_t * ctx, drm_radeon_texture_regs_t * tex, unsigned int dirty) @@ -448,13 +451,13 @@ static int radeon_emit_state(drm_radeon_private_t * dev_priv, DRM_DEBUG("dirty=0x%08x\n", dirty); if (dirty & RADEON_UPLOAD_CONTEXT) { - if (radeon_check_and_fixup_offset(dev_priv, filp_priv, + if (radeon_check_and_fixup_offset(dev_priv, file_priv, &ctx->rb3d_depthoffset)) { DRM_ERROR("Invalid depth buffer offset\n"); return -EINVAL; } - if (radeon_check_and_fixup_offset(dev_priv, filp_priv, + if (radeon_check_and_fixup_offset(dev_priv, file_priv, &ctx->rb3d_coloroffset)) { DRM_ERROR("Invalid depth buffer offset\n"); return -EINVAL; @@ -543,7 +546,7 @@ static int radeon_emit_state(drm_radeon_private_t * dev_priv, } if (dirty & RADEON_UPLOAD_TEX0) { - if 
(radeon_check_and_fixup_offset(dev_priv, filp_priv, + if (radeon_check_and_fixup_offset(dev_priv, file_priv, &tex[0].pp_txoffset)) { DRM_ERROR("Invalid texture offset for unit 0\n"); return -EINVAL; @@ -563,7 +566,7 @@ static int radeon_emit_state(drm_radeon_private_t * dev_priv, } if (dirty & RADEON_UPLOAD_TEX1) { - if (radeon_check_and_fixup_offset(dev_priv, filp_priv, + if (radeon_check_and_fixup_offset(dev_priv, file_priv, &tex[1].pp_txoffset)) { DRM_ERROR("Invalid texture offset for unit 1\n"); return -EINVAL; @@ -583,7 +586,7 @@ static int radeon_emit_state(drm_radeon_private_t * dev_priv, } if (dirty & RADEON_UPLOAD_TEX2) { - if (radeon_check_and_fixup_offset(dev_priv, filp_priv, + if (radeon_check_and_fixup_offset(dev_priv, file_priv, &tex[2].pp_txoffset)) { DRM_ERROR("Invalid texture offset for unit 2\n"); return -EINVAL; @@ -608,7 +611,7 @@ static int radeon_emit_state(drm_radeon_private_t * dev_priv, /* Emit 1.2 state */ static int radeon_emit_state2(drm_radeon_private_t * dev_priv, - struct drm_file * filp_priv, + struct drm_file *file_priv, drm_radeon_state_t * state) { RING_LOCALS; @@ -621,7 +624,7 @@ static int radeon_emit_state2(drm_radeon_private_t * dev_priv, ADVANCE_RING(); } - return radeon_emit_state(dev_priv, filp_priv, &state->context, + return radeon_emit_state(dev_priv, file_priv, &state->context, state->tex, state->dirty); } @@ -1646,13 +1649,12 @@ static void radeon_cp_dispatch_indices(struct drm_device * dev, #define RADEON_MAX_TEXTURE_SIZE RADEON_BUFFER_SIZE -static int radeon_cp_dispatch_texture(DRMFILE filp, - struct drm_device * dev, +static int radeon_cp_dispatch_texture(struct drm_device * dev, + struct drm_file *file_priv, drm_radeon_texture_t * tex, drm_radeon_tex_image_t * image) { drm_radeon_private_t *dev_priv = dev->dev_private; - struct drm_file *filp_priv; struct drm_buf *buf; u32 format; u32 *buffer; @@ -1664,9 +1666,7 @@ static int radeon_cp_dispatch_texture(DRMFILE filp, u32 offset; RING_LOCALS; - DRM_GET_PRIV_WITH_RETURN(filp_priv, filp); - - if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &tex->offset)) { + if (radeon_check_and_fixup_offset(dev_priv, file_priv, &tex->offset)) { DRM_ERROR("Invalid destination offset\n"); return -EINVAL; } @@ -1841,7 +1841,7 @@ static int radeon_cp_dispatch_texture(DRMFILE filp, } #undef RADEON_COPY_MT - buf->filp = filp; + buf->file_priv = file_priv; buf->used = size; offset = dev_priv->gart_buffers_offset + buf->offset; BEGIN_RING(9); @@ -1929,7 +1929,8 @@ static void radeon_apply_surface_regs(int surf_index, * not always be available. 
*/ static int alloc_surface(drm_radeon_surface_alloc_t *new, - drm_radeon_private_t *dev_priv, DRMFILE filp) + drm_radeon_private_t *dev_priv, + struct drm_file *file_priv) { struct radeon_virt_surface *s; int i; @@ -1959,7 +1960,7 @@ static int alloc_surface(drm_radeon_surface_alloc_t *new, /* find a virtual surface */ for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++) - if (dev_priv->virt_surfaces[i].filp == 0) + if (dev_priv->virt_surfaces[i].file_priv == 0) break; if (i == 2 * RADEON_MAX_SURFACES) { return -1; @@ -1977,7 +1978,7 @@ static int alloc_surface(drm_radeon_surface_alloc_t *new, s->lower = new_lower; s->upper = new_upper; s->flags = new->flags; - s->filp = filp; + s->file_priv = file_priv; dev_priv->surfaces[i].refcount++; dev_priv->surfaces[i].lower = s->lower; radeon_apply_surface_regs(s->surface_index, dev_priv); @@ -1993,7 +1994,7 @@ static int alloc_surface(drm_radeon_surface_alloc_t *new, s->lower = new_lower; s->upper = new_upper; s->flags = new->flags; - s->filp = filp; + s->file_priv = file_priv; dev_priv->surfaces[i].refcount++; dev_priv->surfaces[i].upper = s->upper; radeon_apply_surface_regs(s->surface_index, dev_priv); @@ -2009,7 +2010,7 @@ static int alloc_surface(drm_radeon_surface_alloc_t *new, s->lower = new_lower; s->upper = new_upper; s->flags = new->flags; - s->filp = filp; + s->file_priv = file_priv; dev_priv->surfaces[i].refcount = 1; dev_priv->surfaces[i].lower = s->lower; dev_priv->surfaces[i].upper = s->upper; @@ -2023,7 +2024,8 @@ static int alloc_surface(drm_radeon_surface_alloc_t *new, return -1; } -static int free_surface(DRMFILE filp, drm_radeon_private_t * dev_priv, +static int free_surface(struct drm_file *file_priv, + drm_radeon_private_t * dev_priv, int lower) { struct radeon_virt_surface *s; @@ -2031,8 +2033,9 @@ static int free_surface(DRMFILE filp, drm_radeon_private_t * dev_priv, /* find the virtual surface */ for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++) { s = &(dev_priv->virt_surfaces[i]); - if (s->filp) { - if ((lower == s->lower) && (filp == s->filp)) { + if (s->file_priv) { + if ((lower == s->lower) && (file_priv == s->file_priv)) + { if (dev_priv->surfaces[s->surface_index]. lower == s->lower) dev_priv->surfaces[s->surface_index]. @@ -2048,7 +2051,7 @@ static int free_surface(DRMFILE filp, drm_radeon_private_t * dev_priv, refcount == 0) dev_priv->surfaces[s->surface_index]. 
flags = 0; - s->filp = NULL; + s->file_priv = NULL; radeon_apply_surface_regs(s->surface_index, dev_priv); return 0; @@ -2058,13 +2061,13 @@ static int free_surface(DRMFILE filp, drm_radeon_private_t * dev_priv, return 1; } -static void radeon_surfaces_release(DRMFILE filp, +static void radeon_surfaces_release(struct drm_file *file_priv, drm_radeon_private_t * dev_priv) { int i; for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++) { - if (dev_priv->virt_surfaces[i].filp == filp) - free_surface(filp, dev_priv, + if (dev_priv->virt_surfaces[i].file_priv == file_priv) + free_surface(file_priv, dev_priv, dev_priv->virt_surfaces[i].lower); } } @@ -2087,7 +2090,7 @@ static int radeon_surface_alloc(DRM_IOCTL_ARGS) (drm_radeon_surface_alloc_t __user *) data, sizeof(alloc)); - if (alloc_surface(&alloc, dev_priv, filp) == -1) + if (alloc_surface(&alloc, dev_priv, file_priv) == -1) return -EINVAL; else return 0; @@ -2107,7 +2110,7 @@ static int radeon_surface_free(DRM_IOCTL_ARGS) DRM_COPY_FROM_USER_IOCTL(memfree, (drm_radeon_surface_free_t __user *) data, sizeof(memfree)); - if (free_surface(filp, dev_priv, memfree.address)) + if (free_surface(file_priv, dev_priv, memfree.address)) return -EINVAL; else return 0; @@ -2122,7 +2125,7 @@ static int radeon_cp_clear(DRM_IOCTL_ARGS) drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS]; DRM_DEBUG("\n"); - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); DRM_COPY_FROM_USER_IOCTL(clear, (drm_radeon_clear_t __user *) data, sizeof(clear)); @@ -2178,7 +2181,7 @@ static int radeon_cp_flip(DRM_IOCTL_ARGS) drm_radeon_private_t *dev_priv = dev->dev_private; DRM_DEBUG("\n"); - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); RING_SPACE_TEST_WITH_RETURN(dev_priv); @@ -2198,7 +2201,7 @@ static int radeon_cp_swap(DRM_IOCTL_ARGS) drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; DRM_DEBUG("\n"); - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); RING_SPACE_TEST_WITH_RETURN(dev_priv); @@ -2216,14 +2219,13 @@ static int radeon_cp_vertex(DRM_IOCTL_ARGS) { DRM_DEVICE; drm_radeon_private_t *dev_priv = dev->dev_private; - struct drm_file *filp_priv; drm_radeon_sarea_t *sarea_priv; struct drm_device_dma *dma = dev->dma; struct drm_buf *buf; drm_radeon_vertex_t vertex; drm_radeon_tcl_prim_t prim; - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); @@ -2232,8 +2234,6 @@ static int radeon_cp_vertex(DRM_IOCTL_ARGS) sarea_priv = dev_priv->sarea_priv; - DRM_GET_PRIV_WITH_RETURN(filp_priv, filp); - DRM_COPY_FROM_USER_IOCTL(vertex, (drm_radeon_vertex_t __user *) data, sizeof(vertex)); @@ -2255,9 +2255,9 @@ static int radeon_cp_vertex(DRM_IOCTL_ARGS) buf = dma->buflist[vertex.idx]; - if (buf->filp != filp) { + if (buf->file_priv != file_priv) { DRM_ERROR("process %d using buffer owned by %p\n", - DRM_CURRENTPID, buf->filp); + DRM_CURRENTPID, buf->file_priv); return -EINVAL; } if (buf->pending) { @@ -2271,7 +2271,7 @@ static int radeon_cp_vertex(DRM_IOCTL_ARGS) buf->used = vertex.count; /* not used? 
*/ if (sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS) { - if (radeon_emit_state(dev_priv, filp_priv, + if (radeon_emit_state(dev_priv, file_priv, &sarea_priv->context_state, sarea_priv->tex_state, sarea_priv->dirty)) { @@ -2306,7 +2306,6 @@ static int radeon_cp_indices(DRM_IOCTL_ARGS) { DRM_DEVICE; drm_radeon_private_t *dev_priv = dev->dev_private; - struct drm_file *filp_priv; drm_radeon_sarea_t *sarea_priv; struct drm_device_dma *dma = dev->dma; struct drm_buf *buf; @@ -2314,7 +2313,7 @@ static int radeon_cp_indices(DRM_IOCTL_ARGS) drm_radeon_tcl_prim_t prim; int count; - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); @@ -2322,8 +2321,6 @@ static int radeon_cp_indices(DRM_IOCTL_ARGS) } sarea_priv = dev_priv->sarea_priv; - DRM_GET_PRIV_WITH_RETURN(filp_priv, filp); - DRM_COPY_FROM_USER_IOCTL(elts, (drm_radeon_indices_t __user *) data, sizeof(elts)); @@ -2345,9 +2342,9 @@ static int radeon_cp_indices(DRM_IOCTL_ARGS) buf = dma->buflist[elts.idx]; - if (buf->filp != filp) { + if (buf->file_priv != file_priv) { DRM_ERROR("process %d using buffer owned by %p\n", - DRM_CURRENTPID, buf->filp); + DRM_CURRENTPID, buf->file_priv); return -EINVAL; } if (buf->pending) { @@ -2370,7 +2367,7 @@ static int radeon_cp_indices(DRM_IOCTL_ARGS) buf->used = elts.end; if (sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS) { - if (radeon_emit_state(dev_priv, filp_priv, + if (radeon_emit_state(dev_priv, file_priv, &sarea_priv->context_state, sarea_priv->tex_state, sarea_priv->dirty)) { @@ -2410,7 +2407,7 @@ static int radeon_cp_texture(DRM_IOCTL_ARGS) drm_radeon_tex_image_t image; int ret; - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); DRM_COPY_FROM_USER_IOCTL(tex, (drm_radeon_texture_t __user *) data, sizeof(tex)); @@ -2428,7 +2425,7 @@ static int radeon_cp_texture(DRM_IOCTL_ARGS) RING_SPACE_TEST_WITH_RETURN(dev_priv); VB_AGE_TEST_WITH_RETURN(dev_priv); - ret = radeon_cp_dispatch_texture(filp, dev, &tex, &image); + ret = radeon_cp_dispatch_texture(dev, file_priv, &tex, &image); COMMIT_RING(); return ret; @@ -2441,7 +2438,7 @@ static int radeon_cp_stipple(DRM_IOCTL_ARGS) drm_radeon_stipple_t stipple; u32 mask[32]; - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); DRM_COPY_FROM_USER_IOCTL(stipple, (drm_radeon_stipple_t __user *) data, sizeof(stipple)); @@ -2466,7 +2463,7 @@ static int radeon_cp_indirect(DRM_IOCTL_ARGS) drm_radeon_indirect_t indirect; RING_LOCALS; - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); @@ -2488,9 +2485,9 @@ static int radeon_cp_indirect(DRM_IOCTL_ARGS) buf = dma->buflist[indirect.idx]; - if (buf->filp != filp) { + if (buf->file_priv != file_priv) { DRM_ERROR("process %d using buffer owned by %p\n", - DRM_CURRENTPID, buf->filp); + DRM_CURRENTPID, buf->file_priv); return -EINVAL; } if (buf->pending) { @@ -2535,7 +2532,6 @@ static int radeon_cp_vertex2(DRM_IOCTL_ARGS) { DRM_DEVICE; drm_radeon_private_t *dev_priv = dev->dev_private; - struct drm_file *filp_priv; drm_radeon_sarea_t *sarea_priv; struct drm_device_dma *dma = dev->dma; struct drm_buf *buf; @@ -2543,7 +2539,7 @@ static int radeon_cp_vertex2(DRM_IOCTL_ARGS) int i; unsigned char laststate; - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); @@ -2552,8 +2548,6 @@ 
static int radeon_cp_vertex2(DRM_IOCTL_ARGS) sarea_priv = dev_priv->sarea_priv; - DRM_GET_PRIV_WITH_RETURN(filp_priv, filp); - DRM_COPY_FROM_USER_IOCTL(vertex, (drm_radeon_vertex2_t __user *) data, sizeof(vertex)); @@ -2571,9 +2565,9 @@ static int radeon_cp_vertex2(DRM_IOCTL_ARGS) buf = dma->buflist[vertex.idx]; - if (buf->filp != filp) { + if (buf->file_priv != file_priv) { DRM_ERROR("process %d using buffer owned by %p\n", - DRM_CURRENTPID, buf->filp); + DRM_CURRENTPID, buf->file_priv); return -EINVAL; } @@ -2600,7 +2594,7 @@ static int radeon_cp_vertex2(DRM_IOCTL_ARGS) sizeof(state))) return -EFAULT; - if (radeon_emit_state2(dev_priv, filp_priv, &state)) { + if (radeon_emit_state2(dev_priv, file_priv, &state)) { DRM_ERROR("radeon_emit_state2 failed\n"); return -EINVAL; } @@ -2638,7 +2632,7 @@ static int radeon_cp_vertex2(DRM_IOCTL_ARGS) } static int radeon_emit_packets(drm_radeon_private_t * dev_priv, - struct drm_file * filp_priv, + struct drm_file *file_priv, drm_radeon_cmd_header_t header, drm_radeon_kcmd_buffer_t *cmdbuf) { @@ -2658,7 +2652,7 @@ static int radeon_emit_packets(drm_radeon_private_t * dev_priv, return -EINVAL; } - if (radeon_check_and_fixup_packets(dev_priv, filp_priv, id, data)) { + if (radeon_check_and_fixup_packets(dev_priv, file_priv, id, data)) { DRM_ERROR("Packet verification failed\n"); return -EINVAL; } @@ -2764,7 +2758,7 @@ static __inline__ int radeon_emit_veclinear(drm_radeon_private_t *dev_priv, } static int radeon_emit_packet3(struct drm_device * dev, - struct drm_file * filp_priv, + struct drm_file *file_priv, drm_radeon_kcmd_buffer_t *cmdbuf) { drm_radeon_private_t *dev_priv = dev->dev_private; @@ -2774,7 +2768,7 @@ static int radeon_emit_packet3(struct drm_device * dev, DRM_DEBUG("\n"); - if ((ret = radeon_check_and_fixup_packet3(dev_priv, filp_priv, + if ((ret = radeon_check_and_fixup_packet3(dev_priv, file_priv, cmdbuf, &cmdsz))) { DRM_ERROR("Packet verification failed\n"); return ret; @@ -2790,7 +2784,7 @@ static int radeon_emit_packet3(struct drm_device * dev, } static int radeon_emit_packet3_cliprect(struct drm_device *dev, - struct drm_file *filp_priv, + struct drm_file *file_priv, drm_radeon_kcmd_buffer_t *cmdbuf, int orig_nbox) { @@ -2804,7 +2798,7 @@ static int radeon_emit_packet3_cliprect(struct drm_device *dev, DRM_DEBUG("\n"); - if ((ret = radeon_check_and_fixup_packet3(dev_priv, filp_priv, + if ((ret = radeon_check_and_fixup_packet3(dev_priv, file_priv, cmdbuf, &cmdsz))) { DRM_ERROR("Packet verification failed\n"); return ret; @@ -2884,7 +2878,6 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS) { DRM_DEVICE; drm_radeon_private_t *dev_priv = dev->dev_private; - struct drm_file *filp_priv; struct drm_device_dma *dma = dev->dma; struct drm_buf *buf = NULL; int idx; @@ -2893,15 +2886,13 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS) int orig_nbox, orig_bufsz; char *kbuf = NULL; - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); return -EINVAL; } - DRM_GET_PRIV_WITH_RETURN(filp_priv, filp); - DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_radeon_kcmd_buffer_t __user *) data, sizeof(cmdbuf)); @@ -2934,7 +2925,7 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS) if (dev_priv->microcode_version == UCODE_R300) { int temp; - temp = r300_do_cp_cmdbuf(dev, filp, filp_priv, &cmdbuf); + temp = r300_do_cp_cmdbuf(dev, file_priv, &cmdbuf); if (orig_bufsz != 0) drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER); @@ -2953,7 +2944,7 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS) 
case RADEON_CMD_PACKET: DRM_DEBUG("RADEON_CMD_PACKET\n"); if (radeon_emit_packets - (dev_priv, filp_priv, header, &cmdbuf)) { + (dev_priv, file_priv, header, &cmdbuf)) { DRM_ERROR("radeon_emit_packets failed\n"); goto err; } @@ -2985,9 +2976,10 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS) } buf = dma->buflist[idx]; - if (buf->filp != filp || buf->pending) { + if (buf->file_priv != file_priv || buf->pending) { DRM_ERROR("bad buffer %p %p %d\n", - buf->filp, filp, buf->pending); + buf->file_priv, file_priv, + buf->pending); goto err; } @@ -2996,7 +2988,7 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS) case RADEON_CMD_PACKET3: DRM_DEBUG("RADEON_CMD_PACKET3\n"); - if (radeon_emit_packet3(dev, filp_priv, &cmdbuf)) { + if (radeon_emit_packet3(dev, file_priv, &cmdbuf)) { DRM_ERROR("radeon_emit_packet3 failed\n"); goto err; } @@ -3005,7 +2997,7 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS) case RADEON_CMD_PACKET3_CLIP: DRM_DEBUG("RADEON_CMD_PACKET3_CLIP\n"); if (radeon_emit_packet3_cliprect - (dev, filp_priv, &cmdbuf, orig_nbox)) { + (dev, file_priv, &cmdbuf, orig_nbox)) { DRM_ERROR("radeon_emit_packet3_clip failed\n"); goto err; } @@ -3151,7 +3143,6 @@ static int radeon_cp_setparam(DRM_IOCTL_ARGS) { DRM_DEVICE; drm_radeon_private_t *dev_priv = dev->dev_private; - struct drm_file *filp_priv; drm_radeon_setparam_t sp; struct drm_radeon_driver_file_fields *radeon_priv; @@ -3160,14 +3151,12 @@ static int radeon_cp_setparam(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_GET_PRIV_WITH_RETURN(filp_priv, filp); - DRM_COPY_FROM_USER_IOCTL(sp, (drm_radeon_setparam_t __user *) data, sizeof(sp)); switch (sp.param) { case RADEON_SETPARAM_FB_LOCATION: - radeon_priv = filp_priv->driver_priv; + radeon_priv = file_priv->driver_priv; radeon_priv->radeon_fb_delta = dev_priv->fb_location - sp.value; break; case RADEON_SETPARAM_SWITCH_TILING: @@ -3213,14 +3202,15 @@ static int radeon_cp_setparam(DRM_IOCTL_ARGS) * * DRM infrastructure takes care of reclaiming dma buffers. 
*/ -void radeon_driver_preclose(struct drm_device * dev, DRMFILE filp) +void radeon_driver_preclose(struct drm_device * dev, + struct drm_file *file_priv) { if (dev->dev_private) { drm_radeon_private_t *dev_priv = dev->dev_private; dev_priv->page_flipping = 0; - radeon_mem_release(filp, dev_priv->gart_heap); - radeon_mem_release(filp, dev_priv->fb_heap); - radeon_surfaces_release(filp, dev_priv); + radeon_mem_release(file_priv, dev_priv->gart_heap); + radeon_mem_release(file_priv, dev_priv->fb_heap); + radeon_surfaces_release(file_priv, dev_priv); } } @@ -3237,7 +3227,7 @@ void radeon_driver_lastclose(struct drm_device * dev) radeon_do_release(dev); } -int radeon_driver_open(struct drm_device * dev, struct drm_file * filp_priv) +int radeon_driver_open(struct drm_device * dev, struct drm_file *file_priv) { drm_radeon_private_t *dev_priv = dev->dev_private; struct drm_radeon_driver_file_fields *radeon_priv; @@ -3250,7 +3240,7 @@ int radeon_driver_open(struct drm_device * dev, struct drm_file * filp_priv) if (!radeon_priv) return -ENOMEM; - filp_priv->driver_priv = radeon_priv; + file_priv->driver_priv = radeon_priv; if (dev_priv) radeon_priv->radeon_fb_delta = dev_priv->fb_location; @@ -3259,10 +3249,10 @@ int radeon_driver_open(struct drm_device * dev, struct drm_file * filp_priv) return 0; } -void radeon_driver_postclose(struct drm_device * dev, struct drm_file * filp_priv) +void radeon_driver_postclose(struct drm_device * dev, struct drm_file *file_priv) { struct drm_radeon_driver_file_fields *radeon_priv = - filp_priv->driver_priv; + file_priv->driver_priv; drm_free(radeon_priv, sizeof(*radeon_priv), DRM_MEM_FILES); } diff --git a/shared-core/savage_bci.c b/shared-core/savage_bci.c index 1835d758..c2dee6f9 100644 --- a/shared-core/savage_bci.c +++ b/shared-core/savage_bci.c @@ -932,7 +932,7 @@ static int savage_bci_init(DRM_IOCTL_ARGS) DRM_DEVICE; drm_savage_init_t init; - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); DRM_COPY_FROM_USER_IOCTL(init, (drm_savage_init_t __user *)data, sizeof(init)); @@ -955,7 +955,7 @@ static int savage_bci_event_emit(DRM_IOCTL_ARGS) DRM_DEBUG("\n"); - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); DRM_COPY_FROM_USER_IOCTL(event, (drm_savage_event_emit_t __user *)data, sizeof(event)); @@ -1006,7 +1006,9 @@ static int savage_bci_event_wait(DRM_IOCTL_ARGS) * DMA buffer management */ -static int savage_bci_get_buffers(DRMFILE filp, struct drm_device *dev, struct drm_dma *d) +static int savage_bci_get_buffers(struct drm_device *dev, + struct drm_file *file_priv, + struct drm_dma *d) { struct drm_buf *buf; int i; @@ -1016,7 +1018,7 @@ static int savage_bci_get_buffers(DRMFILE filp, struct drm_device *dev, struct d if (!buf) return -EAGAIN; - buf->filp = filp; + buf->file_priv = file_priv; if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx, sizeof(buf->idx))) @@ -1037,7 +1039,7 @@ int savage_bci_buffers(DRM_IOCTL_ARGS) struct drm_dma d; int ret = 0; - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); DRM_COPY_FROM_USER_IOCTL(d, (struct drm_dma __user *)data, sizeof(d)); @@ -1060,7 +1062,7 @@ int savage_bci_buffers(DRM_IOCTL_ARGS) d.granted_count = 0; if (d.request_count) { - ret = savage_bci_get_buffers(filp, dev, &d); + ret = savage_bci_get_buffers(dev, file_priv, &d); } DRM_COPY_TO_USER_IOCTL((struct drm_dma __user *)data, d, sizeof(d)); @@ -1068,7 +1070,7 @@ int savage_bci_buffers(DRM_IOCTL_ARGS) return ret; } -void savage_reclaim_buffers(struct drm_device *dev, DRMFILE filp) 
+void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv) { struct drm_device_dma *dma = dev->dma; drm_savage_private_t *dev_priv = dev->dev_private; @@ -1087,7 +1089,7 @@ void savage_reclaim_buffers(struct drm_device *dev, DRMFILE filp) struct drm_buf *buf = dma->buflist[i]; drm_savage_buf_priv_t *buf_priv = buf->dev_private; - if (buf->filp == filp && buf_priv && + if (buf->file_priv == file_priv && buf_priv && buf_priv->next == NULL && buf_priv->prev == NULL) { uint16_t event; DRM_DEBUG("reclaimed from client\n"); @@ -1097,7 +1099,7 @@ void savage_reclaim_buffers(struct drm_device *dev, DRMFILE filp) } } - drm_core_reclaim_buffers(dev, filp); + drm_core_reclaim_buffers(dev, file_priv); } struct drm_ioctl_desc savage_ioctls[] = { diff --git a/shared-core/savage_drv.h b/shared-core/savage_drv.h index e9e2231f..3208cfcc 100644 --- a/shared-core/savage_drv.h +++ b/shared-core/savage_drv.h @@ -212,7 +212,8 @@ extern int savage_driver_load(struct drm_device *dev, unsigned long chipset); extern int savage_driver_firstopen(struct drm_device *dev); extern void savage_driver_lastclose(struct drm_device *dev); extern int savage_driver_unload(struct drm_device *dev); -extern void savage_reclaim_buffers(struct drm_device *dev, DRMFILE filp); +extern void savage_reclaim_buffers(struct drm_device *dev, + struct drm_file *file_priv); /* state functions */ extern void savage_emit_clip_rect_s3d(drm_savage_private_t *dev_priv, diff --git a/shared-core/savage_state.c b/shared-core/savage_state.c index 753fe7d3..f5b9888c 100644 --- a/shared-core/savage_state.c +++ b/shared-core/savage_state.c @@ -968,7 +968,7 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS) DRM_DEBUG("\n"); - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_savage_cmdbuf_t __user *)data, sizeof(cmdbuf)); diff --git a/shared-core/sis_drv.h b/shared-core/sis_drv.h index 57d60133..a4a88fe1 100644 --- a/shared-core/sis_drv.h +++ b/shared-core/sis_drv.h @@ -67,7 +67,8 @@ typedef struct drm_sis_private { } drm_sis_private_t; extern int sis_idle(struct drm_device *dev); -extern void sis_reclaim_buffers_locked(struct drm_device *dev, struct file *filp); +extern void sis_reclaim_buffers_locked(struct drm_device *dev, + struct drm_file *file_priv); extern void sis_lastclose(struct drm_device *dev); #else diff --git a/shared-core/via_dma.c b/shared-core/via_dma.c index 895c78bf..7fe6d019 100644 --- a/shared-core/via_dma.c +++ b/shared-core/via_dma.c @@ -320,7 +320,7 @@ static int via_flush_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); return via_driver_dma_quiescent(dev); } @@ -331,7 +331,7 @@ static int via_cmdbuffer(DRM_IOCTL_ARGS) drm_via_cmdbuffer_t cmdbuf; int ret; - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_via_cmdbuffer_t __user *) data, sizeof(cmdbuf)); @@ -376,7 +376,7 @@ static int via_pci_cmdbuffer(DRM_IOCTL_ARGS) drm_via_cmdbuffer_t cmdbuf; int ret; - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_via_cmdbuffer_t __user *) data, sizeof(cmdbuf)); @@ -654,7 +654,7 @@ static int via_cmdbuf_size(DRM_IOCTL_ARGS) drm_via_private_t *dev_priv; DRM_DEBUG("via cmdbuf_size\n"); - LOCK_TEST_WITH_RETURN( dev, filp ); + LOCK_TEST_WITH_RETURN(dev, file_priv); dev_priv = (drm_via_private_t *) dev->dev_private; diff --git a/shared-core/via_drv.h b/shared-core/via_drv.h index 
05935c81..b7997942 100644 --- a/shared-core/via_drv.h +++ b/shared-core/via_drv.h @@ -181,7 +181,8 @@ extern void via_cleanup_futex(drm_via_private_t *dev_priv); extern void via_release_futex(drm_via_private_t *dev_priv, int context); #ifdef VIA_HAVE_CORE_MM -extern void via_reclaim_buffers_locked(struct drm_device *dev, struct file *filp); +extern void via_reclaim_buffers_locked(struct drm_device *dev, + struct drm_file *file_priv); extern void via_lastclose(struct drm_device *dev); #else extern int via_init_context(struct drm_device * dev, int context); From 5b38e134163cc375e91424c4688cc9328c6e9082 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Thu, 19 Jul 2007 17:11:11 -0700 Subject: [PATCH 165/437] Replace DRM_IOCTL_ARGS with (dev, data, file_priv) and remove DRM_DEVICE. The data is now in kernel space, copied in/out as appropriate according to the This results in DRM_COPY_{TO,FROM}_USER going away, and error paths to deal with those failures. This also means that XFree86 4.2.0 support for i810 DRM is lost. --- bsd-core/drmP.h | 134 ++++++------- bsd-core/drm_agpsupport.c | 24 +-- bsd-core/drm_auth.c | 71 +++---- bsd-core/drm_bufs.c | 147 ++++++-------- bsd-core/drm_context.c | 117 +++++------ bsd-core/drm_dma.c | 6 +- bsd-core/drm_drawable.c | 17 +- bsd-core/drm_drv.c | 130 +++++++------ bsd-core/drm_ioctl.c | 124 +++++------- bsd-core/drm_irq.c | 66 +++---- bsd-core/drm_lock.c | 33 ++-- bsd-core/drm_scatter.c | 29 +-- bsd-core/drm_vm.c | 2 +- linux-core/drmP.h | 202 +++++++++---------- linux-core/drm_agpsupport.c | 105 +++------- linux-core/drm_auth.c | 36 ++-- linux-core/drm_bo.c | 181 +++++++---------- linux-core/drm_bufs.c | 161 ++++++---------- linux-core/drm_context.c | 149 +++++--------- linux-core/drm_drawable.c | 61 +++--- linux-core/drm_drv.c | 207 ++++++++++---------- linux-core/drm_fence.c | 151 ++++++--------- linux-core/drm_fops.c | 4 +- linux-core/drm_ioctl.c | 169 +++++++--------- linux-core/drm_irq.c | 87 ++++----- linux-core/drm_lock.c | 56 +++--- linux-core/drm_memory_debug.c | 2 +- linux-core/drm_memory_debug.h | 2 +- linux-core/drm_objects.h | 53 ++--- linux-core/drm_os_linux.h | 12 -- linux-core/drm_scatter.c | 37 +--- linux-core/ffb_context.c | 22 +-- linux-core/ffb_drv.c | 8 +- linux-core/ffb_drv.h | 2 +- linux-core/i810_dma.c | 246 +++++++----------------- linux-core/i810_drm.h | 23 --- linux-core/sis_mm.c | 90 ++++----- linux-core/via_dmablit.c | 22 +-- linux-core/via_mm.c | 67 +++---- shared-core/i915_dma.c | 174 ++++++++--------- shared-core/i915_drv.h | 27 ++- shared-core/i915_irq.c | 106 +++++----- shared-core/i915_mem.c | 58 +++--- shared-core/mach64_dma.c | 51 +++-- shared-core/mach64_drv.h | 30 ++- shared-core/mach64_state.c | 83 ++++---- shared-core/mga_dma.c | 86 ++++----- shared-core/mga_drv.h | 15 +- shared-core/mga_state.c | 139 +++++--------- shared-core/nouveau_drv.h | 14 +- shared-core/nouveau_fifo.c | 59 +++--- shared-core/nouveau_mem.c | 43 ++--- shared-core/nouveau_notifier.c | 19 +- shared-core/nouveau_object.c | 27 ++- shared-core/nouveau_state.c | 56 +++--- shared-core/r128_cce.c | 62 +++--- shared-core/r128_drv.h | 16 +- shared-core/r128_state.c | 208 +++++++++----------- shared-core/radeon_cp.c | 67 +++---- shared-core/radeon_drv.h | 28 +-- shared-core/radeon_irq.c | 20 +- shared-core/radeon_mem.c | 44 ++--- shared-core/radeon_state.c | 342 ++++++++++++++------------------- shared-core/savage_bci.c | 68 +++---- shared-core/savage_drv.h | 4 +- shared-core/savage_state.c | 107 ++++++----- shared-core/sis_mm.c | 162 +++++++--------- 
shared-core/via_dma.c | 99 ++++------ shared-core/via_drv.h | 18 +- shared-core/via_irq.c | 34 ++-- shared-core/via_map.c | 12 +- shared-core/via_mm.c | 49 ++--- shared-core/via_video.c | 20 +- 73 files changed, 2219 insertions(+), 3183 deletions(-) diff --git a/bsd-core/drmP.h b/bsd-core/drmP.h index 84baf5f0..73342d09 100644 --- a/bsd-core/drmP.h +++ b/bsd-core/drmP.h @@ -230,12 +230,15 @@ enum { #define DRM_AGP_MEM struct agp_memory_info #if defined(__FreeBSD__) -#define DRM_DEVICE \ - drm_device_t *dev = kdev->si_drv1 -#define DRM_IOCTL_ARGS struct cdev *kdev, u_long cmd, caddr_t data, \ - int flags, DRM_STRUCTPROC *p, \ - struct drm_file *file_priv +#define drm_get_device_from_kdev(_kdev) (_kdev->si_drv1) +#elif defined(__NetBSD__) +#define drm_get_device_from_kdev(_kdev) device_lookup(&drm_cd, minor(_kdev)) +#elif defined(__OpenBSD__) +#define drm_get_device_from_kdev(_kdev) device_lookup(&drm_cd, \ + minor(_kdev)))->dv_cfdata->cf_driver->cd_devs[minor(_kdev)] +#endif +#if defined(__FreeBSD__) #define PAGE_ALIGN(addr) round_page(addr) /* DRM_SUSER returns true if the user is superuser */ #if __FreeBSD_version >= 700000 @@ -249,18 +252,6 @@ enum { #else /* __FreeBSD__ */ -#if defined(__NetBSD__) -#define DRM_DEVICE \ - drm_device_t *dev = device_lookup(&drm_cd, minor(kdev)) -#elif defined(__OpenBSD__) -#define DRM_DEVICE \ - drm_device_t *dev = (device_lookup(&drm_cd, \ - minor(kdev)))->dv_cfdata->cf_driver->cd_devs[minor(kdev)] -#endif /* __OpenBSD__ */ -#define DRM_IOCTL_ARGS dev_t kdev, u_long cmd, caddr_t data, \ - int flags, DRM_STRUCTPROC *p, \ - struct drm_file *file_priv - #define CDEV_MAJOR 34 #define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK) /* DRM_SUSER returns true if the user is superuser */ @@ -348,14 +339,6 @@ typedef vaddr_t vm_offset_t; (!uvm_useracc((caddr_t)uaddr, size, VM_PROT_READ)) #endif /* !__FreeBSD__ */ -#define DRM_COPY_TO_USER_IOCTL(user, kern, size) \ - if ( IOCPARM_LEN(cmd) != size) \ - return EINVAL; \ - *user = kern; -#define DRM_COPY_FROM_USER_IOCTL(kern, user, size) \ - if ( IOCPARM_LEN(cmd) != size) \ - return EINVAL; \ - kern = *user; #define DRM_COPY_TO_USER(user, kern, size) \ copyout(kern, user, size) #define DRM_COPY_FROM_USER(kern, user, size) \ @@ -439,9 +422,16 @@ typedef struct drm_pci_id_list #define DRM_MASTER 0x2 #define DRM_ROOT_ONLY 0x4 typedef struct drm_ioctl_desc { - int (*func)(DRM_IOCTL_ARGS); + unsigned long cmd; + int (*func)(drm_device_t *dev, void *data, struct drm_file *file_priv); int flags; } drm_ioctl_desc_t; +/** + * Creates a driver or general drm_ioctl_desc array entry for the given + * ioctl, for use by drm_ioctl(). 
+ */ +#define DRM_IOCTL_DEF(ioctl, func, flags) \ + [DRM_IOCTL_NR(ioctl)] = {ioctl, func, flags} typedef struct drm_magic_entry { drm_magic_t magic; @@ -636,7 +626,7 @@ struct drm_driver_info { int (*unload)(struct drm_device *); void (*reclaim_buffers_locked)(struct drm_device *, struct drm_file *file_priv); - int (*dma_ioctl)(DRM_IOCTL_ARGS); + int (*dma_ioctl)(drm_device_t *dev, void *data, struct drm_file *file_priv); void (*dma_ready)(struct drm_device *); int (*dma_quiescent)(struct drm_device *); int (*dma_flush_block_and_flush)(struct drm_device *, int context, @@ -933,72 +923,72 @@ int drm_ati_pcigart_cleanup(drm_device_t *dev, drm_ati_pcigart_info *gart_info); /* Locking IOCTL support (drm_drv.c) */ -int drm_lock(DRM_IOCTL_ARGS); -int drm_unlock(DRM_IOCTL_ARGS); -int drm_version(DRM_IOCTL_ARGS); -int drm_setversion(DRM_IOCTL_ARGS); +int drm_lock(drm_device_t *dev, void *data, struct drm_file *file_priv); +int drm_unlock(drm_device_t *dev, void *data, struct drm_file *file_priv); +int drm_version(drm_device_t *dev, void *data, struct drm_file *file_priv); +int drm_setversion(drm_device_t *dev, void *data, struct drm_file *file_priv); /* Misc. IOCTL support (drm_ioctl.c) */ -int drm_irq_by_busid(DRM_IOCTL_ARGS); -int drm_getunique(DRM_IOCTL_ARGS); -int drm_setunique(DRM_IOCTL_ARGS); -int drm_getmap(DRM_IOCTL_ARGS); -int drm_getclient(DRM_IOCTL_ARGS); -int drm_getstats(DRM_IOCTL_ARGS); -int drm_noop(DRM_IOCTL_ARGS); +int drm_irq_by_busid(drm_device_t *dev, void *data, struct drm_file *file_priv); +int drm_getunique(drm_device_t *dev, void *data, struct drm_file *file_priv); +int drm_setunique(drm_device_t *dev, void *data, struct drm_file *file_priv); +int drm_getmap(drm_device_t *dev, void *data, struct drm_file *file_priv); +int drm_getclient(drm_device_t *dev, void *data, struct drm_file *file_priv); +int drm_getstats(drm_device_t *dev, void *data, struct drm_file *file_priv); +int drm_noop(drm_device_t *dev, void *data, struct drm_file *file_priv); /* Context IOCTL support (drm_context.c) */ -int drm_resctx(DRM_IOCTL_ARGS); -int drm_addctx(DRM_IOCTL_ARGS); -int drm_modctx(DRM_IOCTL_ARGS); -int drm_getctx(DRM_IOCTL_ARGS); -int drm_switchctx(DRM_IOCTL_ARGS); -int drm_newctx(DRM_IOCTL_ARGS); -int drm_rmctx(DRM_IOCTL_ARGS); -int drm_setsareactx(DRM_IOCTL_ARGS); -int drm_getsareactx(DRM_IOCTL_ARGS); +int drm_resctx(drm_device_t *dev, void *data, struct drm_file *file_priv); +int drm_addctx(drm_device_t *dev, void *data, struct drm_file *file_priv); +int drm_modctx(drm_device_t *dev, void *data, struct drm_file *file_priv); +int drm_getctx(drm_device_t *dev, void *data, struct drm_file *file_priv); +int drm_switchctx(drm_device_t *dev, void *data, struct drm_file *file_priv); +int drm_newctx(drm_device_t *dev, void *data, struct drm_file *file_priv); +int drm_rmctx(drm_device_t *dev, void *data, struct drm_file *file_priv); +int drm_setsareactx(drm_device_t *dev, void *data, struct drm_file *file_priv); +int drm_getsareactx(drm_device_t *dev, void *data, struct drm_file *file_priv); /* Drawable IOCTL support (drm_drawable.c) */ -int drm_adddraw(DRM_IOCTL_ARGS); -int drm_rmdraw(DRM_IOCTL_ARGS); -int drm_update_draw(DRM_IOCTL_ARGS); +int drm_adddraw(drm_device_t *dev, void *data, struct drm_file *file_priv); +int drm_rmdraw(drm_device_t *dev, void *data, struct drm_file *file_priv); +int drm_update_draw(drm_device_t *dev, void *data, struct drm_file *file_priv); struct drm_drawable_info *drm_get_drawable_info(drm_device_t *dev, int handle); /* Authentication IOCTL support (drm_auth.c) 
*/ -int drm_getmagic(DRM_IOCTL_ARGS); -int drm_authmagic(DRM_IOCTL_ARGS); +int drm_getmagic(drm_device_t *dev, void *data, struct drm_file *file_priv); +int drm_authmagic(drm_device_t *dev, void *data, struct drm_file *file_priv); /* Buffer management support (drm_bufs.c) */ -int drm_addmap_ioctl(DRM_IOCTL_ARGS); -int drm_rmmap_ioctl(DRM_IOCTL_ARGS); -int drm_addbufs_ioctl(DRM_IOCTL_ARGS); -int drm_infobufs(DRM_IOCTL_ARGS); -int drm_markbufs(DRM_IOCTL_ARGS); -int drm_freebufs(DRM_IOCTL_ARGS); -int drm_mapbufs(DRM_IOCTL_ARGS); +int drm_addmap_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv); +int drm_rmmap_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv); +int drm_addbufs_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv); +int drm_infobufs(drm_device_t *dev, void *data, struct drm_file *file_priv); +int drm_markbufs(drm_device_t *dev, void *data, struct drm_file *file_priv); +int drm_freebufs(drm_device_t *dev, void *data, struct drm_file *file_priv); +int drm_mapbufs(drm_device_t *dev, void *data, struct drm_file *file_priv); /* DMA support (drm_dma.c) */ -int drm_dma(DRM_IOCTL_ARGS); +int drm_dma(drm_device_t *dev, void *data, struct drm_file *file_priv); /* IRQ support (drm_irq.c) */ -int drm_control(DRM_IOCTL_ARGS); -int drm_wait_vblank(DRM_IOCTL_ARGS); +int drm_control(drm_device_t *dev, void *data, struct drm_file *file_priv); +int drm_wait_vblank(drm_device_t *dev, void *data, struct drm_file *file_priv); void drm_locked_tasklet(drm_device_t *dev, void (*tasklet)(drm_device_t *dev)); /* AGP/GART support (drm_agpsupport.c) */ -int drm_agp_acquire_ioctl(DRM_IOCTL_ARGS); -int drm_agp_release_ioctl(DRM_IOCTL_ARGS); -int drm_agp_enable_ioctl(DRM_IOCTL_ARGS); -int drm_agp_info_ioctl(DRM_IOCTL_ARGS); -int drm_agp_alloc_ioctl(DRM_IOCTL_ARGS); -int drm_agp_free_ioctl(DRM_IOCTL_ARGS); -int drm_agp_unbind_ioctl(DRM_IOCTL_ARGS); -int drm_agp_bind_ioctl(DRM_IOCTL_ARGS); +int drm_agp_acquire_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv); +int drm_agp_release_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv); +int drm_agp_enable_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv); +int drm_agp_info_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv); +int drm_agp_alloc_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv); +int drm_agp_free_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv); +int drm_agp_unbind_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv); +int drm_agp_bind_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv); /* Scatter Gather Support (drm_scatter.c) */ -int drm_sg_alloc_ioctl(DRM_IOCTL_ARGS); -int drm_sg_free(DRM_IOCTL_ARGS); +int drm_sg_alloc_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv); +int drm_sg_free(drm_device_t *dev, void *data, struct drm_file *file_priv); /* consistent PCI memory functions (drm_pci.c) */ drm_dma_handle_t *drm_pci_alloc(drm_device_t *dev, size_t size, size_t align, diff --git a/bsd-core/drm_agpsupport.c b/bsd-core/drm_agpsupport.c index 28239d1b..e8e162de 100644 --- a/bsd-core/drm_agpsupport.c +++ b/bsd-core/drm_agpsupport.c @@ -125,11 +125,10 @@ int drm_agp_info(drm_device_t * dev, drm_agp_info_t *info) return 0; } -int drm_agp_info_ioctl(DRM_IOCTL_ARGS) +int drm_agp_info_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv) { int err; drm_agp_info_t info; - DRM_DEVICE; err = drm_agp_info(dev, &info); if (err != 0) @@ -139,9 +138,8 @@ int 
drm_agp_info_ioctl(DRM_IOCTL_ARGS) return 0; } -int drm_agp_acquire_ioctl(DRM_IOCTL_ARGS) +int drm_agp_acquire_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; return drm_agp_acquire(dev); } @@ -161,9 +159,8 @@ int drm_agp_acquire(drm_device_t *dev) return 0; } -int drm_agp_release_ioctl(DRM_IOCTL_ARGS) +int drm_agp_release_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; return drm_agp_release(dev); } @@ -190,10 +187,9 @@ int drm_agp_enable(drm_device_t *dev, drm_agp_mode_t mode) return 0; } -int drm_agp_enable_ioctl(DRM_IOCTL_ARGS) +int drm_agp_enable_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv) { drm_agp_mode_t mode; - DRM_DEVICE; mode = *(drm_agp_mode_t *) data; @@ -243,9 +239,8 @@ int drm_agp_alloc(drm_device_t *dev, drm_agp_buffer_t *request) return 0; } -int drm_agp_alloc_ioctl(DRM_IOCTL_ARGS) +int drm_agp_alloc_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_agp_buffer_t request; int retcode; @@ -292,9 +287,8 @@ int drm_agp_unbind(drm_device_t *dev, drm_agp_binding_t *request) return retcode; } -int drm_agp_unbind_ioctl(DRM_IOCTL_ARGS) +int drm_agp_unbind_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_agp_binding_t request; int retcode; @@ -333,9 +327,8 @@ int drm_agp_bind(drm_device_t *dev, drm_agp_binding_t *request) return retcode; } -int drm_agp_bind_ioctl(DRM_IOCTL_ARGS) +int drm_agp_bind_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_agp_binding_t request; int retcode; @@ -378,9 +371,8 @@ int drm_agp_free(drm_device_t *dev, drm_agp_buffer_t *request) } -int drm_agp_free_ioctl(DRM_IOCTL_ARGS) +int drm_agp_free_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_agp_buffer_t request; int retcode; diff --git a/bsd-core/drm_auth.c b/bsd-core/drm_auth.c index 556bf891..964f9a42 100644 --- a/bsd-core/drm_auth.c +++ b/bsd-core/drm_auth.c @@ -40,21 +40,16 @@ static int drm_hash_magic(drm_magic_t magic) static drm_file_t *drm_find_file(drm_device_t *dev, drm_magic_t magic) { - drm_file_t *retval = NULL; drm_magic_entry_t *pt; - int hash; + int hash = drm_hash_magic(magic); - hash = drm_hash_magic(magic); - - DRM_LOCK(); for (pt = dev->magiclist[hash].head; pt; pt = pt->next) { if (pt->magic == magic) { - retval = pt->priv; - break; + return pt->priv; } } - DRM_UNLOCK(); - return retval; + + return NULL; } static int drm_add_magic(drm_device_t *dev, drm_file_t *priv, drm_magic_t magic) @@ -115,58 +110,50 @@ static int drm_remove_magic(drm_device_t *dev, drm_magic_t magic) return EINVAL; } -int drm_getmagic(DRM_IOCTL_ARGS) +int drm_getmagic(drm_device_t *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; static drm_magic_t sequence = 0; - drm_auth_t auth; - drm_file_t *priv; - - DRM_LOCK(); - priv = drm_find_file_by_proc(dev, p); - DRM_UNLOCK(); - if (priv == NULL) { - DRM_ERROR("can't find authenticator\n"); - return EINVAL; - } + drm_auth_t *auth = data; /* Find unique magic */ - if (priv->magic) { - auth.magic = priv->magic; + if (file_priv->magic) { + auth->magic = file_priv->magic; } else { + DRM_LOCK(); do { int old = sequence; - auth.magic = old+1; + auth->magic = old+1; - if (!atomic_cmpset_int(&sequence, old, auth.magic)) + if (!atomic_cmpset_int(&sequence, old, auth->magic)) continue; - } while (drm_find_file(dev, auth.magic)); - priv->magic = auth.magic; - drm_add_magic(dev, priv, auth.magic); + } while (drm_find_file(dev, auth->magic)); + 
file_priv->magic = auth->magic; + DRM_UNLOCK(); + drm_add_magic(dev, file_priv, auth->magic); } - DRM_DEBUG("%u\n", auth.magic); - - DRM_COPY_TO_USER_IOCTL((drm_auth_t *)data, auth, sizeof(auth)); + DRM_DEBUG("%u\n", auth->magic); return 0; } -int drm_authmagic(DRM_IOCTL_ARGS) +int drm_authmagic(drm_device_t *dev, void *data, struct drm_file *file_priv) { - drm_auth_t auth; - drm_file_t *file; - DRM_DEVICE; + drm_auth_t *auth = data; + drm_file_t *priv; - DRM_COPY_FROM_USER_IOCTL(auth, (drm_auth_t *)data, sizeof(auth)); + DRM_DEBUG("%u\n", auth->magic); - DRM_DEBUG("%u\n", auth.magic); - - if ((file = drm_find_file(dev, auth.magic))) { - file->authenticated = 1; - drm_remove_magic(dev, auth.magic); + DRM_LOCK(); + priv = drm_find_file(dev, auth->magic); + if (priv != NULL) { + priv->authenticated = 1; + drm_remove_magic(dev, auth->magic); + DRM_UNLOCK(); return 0; + } else { + DRM_UNLOCK(); + return EINVAL; } - return EINVAL; } diff --git a/bsd-core/drm_bufs.c b/bsd-core/drm_bufs.c index bc019741..a0a3fc73 100644 --- a/bsd-core/drm_bufs.c +++ b/bsd-core/drm_bufs.c @@ -250,39 +250,35 @@ done: return 0; } -int drm_addmap_ioctl(DRM_IOCTL_ARGS) +int drm_addmap_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv) { - drm_map_t request; + drm_map_t *request = data; drm_local_map_t *map; int err; - DRM_DEVICE; if (!(dev->flags & (FREAD|FWRITE))) return EACCES; /* Require read/write */ - DRM_COPY_FROM_USER_IOCTL(request, (drm_map_t *)data, sizeof(drm_map_t)); - - if (!DRM_SUSER(p) && request.type != _DRM_AGP) + if (!DRM_SUSER(DRM_CURPROC) && request->type != _DRM_AGP) return EACCES; DRM_LOCK(); - err = drm_addmap(dev, request.offset, request.size, request.type, - request.flags, &map); + err = drm_addmap(dev, request->offset, request->size, request->type, + request->flags, &map); DRM_UNLOCK(); if (err != 0) return err; - request.offset = map->offset; - request.size = map->size; - request.type = map->type; - request.flags = map->flags; - request.mtrr = map->mtrr; - request.handle = map->handle; + request->offset = map->offset; + request->size = map->size; + request->type = map->type; + request->flags = map->flags; + request->mtrr = map->mtrr; + request->handle = map->handle; - if (request.type != _DRM_SHM) { - request.handle = (void *)request.offset; + if (request->type != _DRM_SHM) { + request->handle = (void *)request->offset; } - DRM_COPY_TO_USER_IOCTL((drm_map_t *)data, request, sizeof(drm_map_t)); return 0; } @@ -333,17 +329,14 @@ void drm_rmmap(drm_device_t *dev, drm_local_map_t *map) * isn't in use. 
*/ -int drm_rmmap_ioctl(DRM_IOCTL_ARGS) +int drm_rmmap_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_local_map_t *map; - drm_map_t request; - - DRM_COPY_FROM_USER_IOCTL( request, (drm_map_t *)data, sizeof(request) ); + drm_map_t *request = data; DRM_LOCK(); TAILQ_FOREACH(map, &dev->maplist, link) { - if (map->handle == request.handle && + if (map->handle == request->handle && map->flags & _DRM_REMOVABLE) break; } @@ -873,39 +866,29 @@ int drm_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request) return ret; } -int drm_addbufs_ioctl(DRM_IOCTL_ARGS) +int drm_addbufs_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - drm_buf_desc_t request; + drm_buf_desc_t *request = data; int err; - DRM_COPY_FROM_USER_IOCTL(request, (drm_buf_desc_t *)data, - sizeof(request)); - - if (request.flags & _DRM_AGP_BUFFER) - err = drm_addbufs_agp(dev, &request); - else if (request.flags & _DRM_SG_BUFFER) - err = drm_addbufs_sg(dev, &request); + if (request->flags & _DRM_AGP_BUFFER) + err = drm_addbufs_agp(dev, request); + else if (request->flags & _DRM_SG_BUFFER) + err = drm_addbufs_sg(dev, request); else - err = drm_addbufs_pci(dev, &request); - - DRM_COPY_TO_USER_IOCTL((drm_buf_desc_t *)data, request, - sizeof(request)); + err = drm_addbufs_pci(dev, request); return err; } -int drm_infobufs(DRM_IOCTL_ARGS) +int drm_infobufs(drm_device_t *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_device_dma_t *dma = dev->dma; - drm_buf_info_t request; + drm_buf_info_t *request = data; int i; int count; int retcode = 0; - DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_info_t *)data, sizeof(request) ); - DRM_SPINLOCK(&dev->dma_lock); ++dev->buf_use; /* Can't allocate more after this call */ DRM_SPINUNLOCK(&dev->dma_lock); @@ -916,7 +899,7 @@ int drm_infobufs(DRM_IOCTL_ARGS) DRM_DEBUG( "count = %d\n", count ); - if ( request.count >= count ) { + if ( request->count >= count ) { for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) { if ( dma->bufs[i].buf_count ) { drm_buf_desc_t from; @@ -926,7 +909,7 @@ int drm_infobufs(DRM_IOCTL_ARGS) from.low_mark = dma->bufs[i].freelist.low_mark; from.high_mark = dma->bufs[i].freelist.high_mark; - if (DRM_COPY_TO_USER(&request.list[count], &from, + if (DRM_COPY_TO_USER(&request->list[count], &from, sizeof(drm_buf_desc_t)) != 0) { retcode = EFAULT; break; @@ -942,62 +925,54 @@ int drm_infobufs(DRM_IOCTL_ARGS) } } } - request.count = count; - - DRM_COPY_TO_USER_IOCTL( (drm_buf_info_t *)data, request, sizeof(request) ); + request->count = count; return retcode; } -int drm_markbufs(DRM_IOCTL_ARGS) +int drm_markbufs(drm_device_t *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_device_dma_t *dma = dev->dma; - drm_buf_desc_t request; + drm_buf_desc_t *request = data; int order; - DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_desc_t *)data, sizeof(request) ); - DRM_DEBUG( "%d, %d, %d\n", - request.size, request.low_mark, request.high_mark ); + request->size, request->low_mark, request->high_mark ); - order = drm_order(request.size); + order = drm_order(request->size); if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER || - request.low_mark < 0 || request.high_mark < 0) { + request->low_mark < 0 || request->high_mark < 0) { return EINVAL; } DRM_SPINLOCK(&dev->dma_lock); - if (request.low_mark > dma->bufs[order].buf_count || - request.high_mark > dma->bufs[order].buf_count) { + if (request->low_mark > dma->bufs[order].buf_count || + request->high_mark > dma->bufs[order].buf_count) { return EINVAL; 
} - dma->bufs[order].freelist.low_mark = request.low_mark; - dma->bufs[order].freelist.high_mark = request.high_mark; + dma->bufs[order].freelist.low_mark = request->low_mark; + dma->bufs[order].freelist.high_mark = request->high_mark; DRM_SPINUNLOCK(&dev->dma_lock); return 0; } -int drm_freebufs(DRM_IOCTL_ARGS) +int drm_freebufs(drm_device_t *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_device_dma_t *dma = dev->dma; - drm_buf_free_t request; + drm_buf_free_t *request = data; int i; int idx; drm_buf_t *buf; int retcode = 0; - DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_free_t *)data, sizeof(request) ); - - DRM_DEBUG( "%d\n", request.count ); + DRM_DEBUG( "%d\n", request->count ); DRM_SPINLOCK(&dev->dma_lock); - for ( i = 0 ; i < request.count ; i++ ) { - if (DRM_COPY_FROM_USER(&idx, &request.list[i], sizeof(idx))) { + for ( i = 0 ; i < request->count ; i++ ) { + if (DRM_COPY_FROM_USER(&idx, &request->list[i], sizeof(idx))) { retcode = EFAULT; break; } @@ -1021,9 +996,8 @@ int drm_freebufs(DRM_IOCTL_ARGS) return retcode; } -int drm_mapbufs(DRM_IOCTL_ARGS) +int drm_mapbufs(drm_device_t *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_device_dma_t *dma = dev->dma; int retcode = 0; const int zero = 0; @@ -1040,27 +1014,25 @@ int drm_mapbufs(DRM_IOCTL_ARGS) vaddr_t vaddr; #endif /* __NetBSD__ || __OpenBSD__ */ - drm_buf_map_t request; + drm_buf_map_t *request = data; int i; - DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_map_t *)data, sizeof(request) ); - #if defined(__NetBSD__) || defined(__OpenBSD__) if (!vfinddev(kdev, VCHR, &vn)) return 0; /* FIXME: Shouldn't this be EINVAL or something? */ #endif /* __NetBSD__ || __OpenBSD */ #if defined(__FreeBSD__) && __FreeBSD_version >= 500000 - vms = p->td_proc->p_vmspace; + vms = DRM_CURPROC->td_proc->p_vmspace; #else - vms = p->p_vmspace; + vms = DRM_CURPROC->p_vmspace; #endif DRM_SPINLOCK(&dev->dma_lock); dev->buf_use++; /* Can't allocate more after this call */ DRM_SPINUNLOCK(&dev->dma_lock); - if (request.count < dma->buf_count) + if (request->count < dma->buf_count) goto done; if ((dev->driver.use_agp && (dma->flags & _DRM_DMA_USE_AGP)) || @@ -1082,10 +1054,11 @@ int drm_mapbufs(DRM_IOCTL_ARGS) vaddr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ); #if __FreeBSD_version >= 600023 retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE, - VM_PROT_ALL, MAP_SHARED, OBJT_DEVICE, kdev, foff ); + VM_PROT_ALL, MAP_SHARED, OBJT_DEVICE, dev->devnode, foff); #else retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE, - VM_PROT_ALL, MAP_SHARED, SLIST_FIRST(&kdev->si_hlist), foff ); + VM_PROT_ALL, MAP_SHARED, SLIST_FIRST(&dev->devnode->si_hlist), + foff); #endif #elif defined(__NetBSD__) || defined(__OpenBSD__) vaddr = round_page((vaddr_t)vms->vm_daddr + MAXDSIZ); @@ -1096,26 +1069,26 @@ int drm_mapbufs(DRM_IOCTL_ARGS) if (retcode) goto done; - request.virtual = (void *)vaddr; + request->virtual = (void *)vaddr; for ( i = 0 ; i < dma->buf_count ; i++ ) { - if (DRM_COPY_TO_USER(&request.list[i].idx, - &dma->buflist[i]->idx, sizeof(request.list[0].idx))) { + if (DRM_COPY_TO_USER(&request->list[i].idx, + &dma->buflist[i]->idx, sizeof(request->list[0].idx))) { retcode = EFAULT; goto done; } - if (DRM_COPY_TO_USER(&request.list[i].total, - &dma->buflist[i]->total, sizeof(request.list[0].total))) { + if (DRM_COPY_TO_USER(&request->list[i].total, + &dma->buflist[i]->total, sizeof(request->list[0].total))) { retcode = EFAULT; goto done; } - if (DRM_COPY_TO_USER(&request.list[i].used, &zero, + if 
(DRM_COPY_TO_USER(&request->list[i].used, &zero, sizeof(zero))) { retcode = EFAULT; goto done; } address = vaddr + dma->buflist[i]->offset; /* *** */ - if (DRM_COPY_TO_USER(&request.list[i].address, &address, + if (DRM_COPY_TO_USER(&request->list[i].address, &address, sizeof(address))) { retcode = EFAULT; goto done; @@ -1123,11 +1096,9 @@ int drm_mapbufs(DRM_IOCTL_ARGS) } done: - request.count = dma->buf_count; + request->count = dma->buf_count; - DRM_DEBUG( "%d buffers, retcode = %d\n", request.count, retcode ); - - DRM_COPY_TO_USER_IOCTL((drm_buf_map_t *)data, request, sizeof(request)); + DRM_DEBUG( "%d buffers, retcode = %d\n", request->count, retcode ); return retcode; } diff --git a/bsd-core/drm_context.c b/bsd-core/drm_context.c index e8df7df5..e34e8759 100644 --- a/bsd-core/drm_context.c +++ b/bsd-core/drm_context.c @@ -136,48 +136,39 @@ void drm_ctxbitmap_cleanup(drm_device_t *dev) * Per Context SAREA Support */ -int drm_getsareactx( DRM_IOCTL_ARGS ) +int drm_getsareactx( drm_device_t *dev, void *data, struct drm_file *file_priv ) { - DRM_DEVICE; - drm_ctx_priv_map_t request; + drm_ctx_priv_map_t *request = data; drm_local_map_t *map; - DRM_COPY_FROM_USER_IOCTL( request, (drm_ctx_priv_map_t *)data, - sizeof(request) ); - DRM_LOCK(); - if (dev->max_context < 0 || request.ctx_id >= (unsigned) dev->max_context) { + if (dev->max_context < 0 || + request->ctx_id >= (unsigned) dev->max_context) { DRM_UNLOCK(); return EINVAL; } - map = dev->context_sareas[request.ctx_id]; + map = dev->context_sareas[request->ctx_id]; DRM_UNLOCK(); - request.handle = map->handle; - - DRM_COPY_TO_USER_IOCTL( (drm_ctx_priv_map_t *)data, request, sizeof(request) ); + request->handle = map->handle; return 0; } -int drm_setsareactx( DRM_IOCTL_ARGS ) +int drm_setsareactx(drm_device_t *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - drm_ctx_priv_map_t request; + drm_ctx_priv_map_t *request = data; drm_local_map_t *map = NULL; - DRM_COPY_FROM_USER_IOCTL( request, (drm_ctx_priv_map_t *)data, - sizeof(request) ); - DRM_LOCK(); TAILQ_FOREACH(map, &dev->maplist, link) { - if (map->handle == request.handle) { + if (map->handle == request->handle) { if (dev->max_context < 0) goto bad; - if (request.ctx_id >= (unsigned) dev->max_context) + if (request->ctx_id >= (unsigned) dev->max_context) goto bad; - dev->context_sareas[request.ctx_id] = map; + dev->context_sareas[request->ctx_id] = map; DRM_UNLOCK(); return 0; } @@ -225,120 +216,98 @@ int drm_context_switch_complete(drm_device_t *dev, int new) return 0; } -int drm_resctx(DRM_IOCTL_ARGS) +int drm_resctx(drm_device_t *dev, void *data, struct drm_file *file_priv) { - drm_ctx_res_t res; + drm_ctx_res_t *res = data; drm_ctx_t ctx; int i; - DRM_COPY_FROM_USER_IOCTL( res, (drm_ctx_res_t *)data, sizeof(res) ); - - if ( res.count >= DRM_RESERVED_CONTEXTS ) { + if ( res->count >= DRM_RESERVED_CONTEXTS ) { bzero(&ctx, sizeof(ctx)); for ( i = 0 ; i < DRM_RESERVED_CONTEXTS ; i++ ) { ctx.handle = i; - if ( DRM_COPY_TO_USER( &res.contexts[i], + if ( DRM_COPY_TO_USER( &res->contexts[i], &ctx, sizeof(ctx) ) ) return EFAULT; } } - res.count = DRM_RESERVED_CONTEXTS; - - DRM_COPY_TO_USER_IOCTL( (drm_ctx_res_t *)data, res, sizeof(res) ); + res->count = DRM_RESERVED_CONTEXTS; return 0; } -int drm_addctx(DRM_IOCTL_ARGS) +int drm_addctx(drm_device_t *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - drm_ctx_t ctx; + drm_ctx_t *ctx = data; - DRM_COPY_FROM_USER_IOCTL( ctx, (drm_ctx_t *)data, sizeof(ctx) ); - - ctx.handle = drm_ctxbitmap_next(dev); - if ( ctx.handle 
== DRM_KERNEL_CONTEXT ) { + ctx->handle = drm_ctxbitmap_next(dev); + if ( ctx->handle == DRM_KERNEL_CONTEXT ) { /* Skip kernel's context and get a new one. */ - ctx.handle = drm_ctxbitmap_next(dev); + ctx->handle = drm_ctxbitmap_next(dev); } - DRM_DEBUG( "%d\n", ctx.handle ); - if ( ctx.handle == -1 ) { + DRM_DEBUG( "%d\n", ctx->handle ); + if ( ctx->handle == -1 ) { DRM_DEBUG( "Not enough free contexts.\n" ); /* Should this return -EBUSY instead? */ return ENOMEM; } - if (dev->driver.context_ctor && ctx.handle != DRM_KERNEL_CONTEXT) { + if (dev->driver.context_ctor && ctx->handle != DRM_KERNEL_CONTEXT) { DRM_LOCK(); - dev->driver.context_ctor(dev, ctx.handle); + dev->driver.context_ctor(dev, ctx->handle); DRM_UNLOCK(); } - DRM_COPY_TO_USER_IOCTL( (drm_ctx_t *)data, ctx, sizeof(ctx) ); - return 0; } -int drm_modctx(DRM_IOCTL_ARGS) +int drm_modctx(drm_device_t *dev, void *data, struct drm_file *file_priv) { /* This does nothing */ return 0; } -int drm_getctx(DRM_IOCTL_ARGS) +int drm_getctx(drm_device_t *dev, void *data, struct drm_file *file_priv) { - drm_ctx_t ctx; - - DRM_COPY_FROM_USER_IOCTL( ctx, (drm_ctx_t *)data, sizeof(ctx) ); + drm_ctx_t *ctx = data; /* This is 0, because we don't handle any context flags */ - ctx.flags = 0; - - DRM_COPY_TO_USER_IOCTL( (drm_ctx_t *)data, ctx, sizeof(ctx) ); + ctx->flags = 0; return 0; } -int drm_switchctx(DRM_IOCTL_ARGS) +int drm_switchctx(drm_device_t *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - drm_ctx_t ctx; + drm_ctx_t *ctx = data; - DRM_COPY_FROM_USER_IOCTL( ctx, (drm_ctx_t *)data, sizeof(ctx) ); - - DRM_DEBUG( "%d\n", ctx.handle ); - return drm_context_switch(dev, dev->last_context, ctx.handle); + DRM_DEBUG( "%d\n", ctx->handle ); + return drm_context_switch(dev, dev->last_context, ctx->handle); } -int drm_newctx(DRM_IOCTL_ARGS) +int drm_newctx(drm_device_t *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - drm_ctx_t ctx; + drm_ctx_t *ctx = data; - DRM_COPY_FROM_USER_IOCTL( ctx, (drm_ctx_t *)data, sizeof(ctx) ); - - DRM_DEBUG( "%d\n", ctx.handle ); - drm_context_switch_complete(dev, ctx.handle); + DRM_DEBUG( "%d\n", ctx->handle ); + drm_context_switch_complete(dev, ctx->handle); return 0; } -int drm_rmctx(DRM_IOCTL_ARGS) +int drm_rmctx(drm_device_t *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - drm_ctx_t ctx; + drm_ctx_t *ctx = data; - DRM_COPY_FROM_USER_IOCTL( ctx, (drm_ctx_t *)data, sizeof(ctx) ); - - DRM_DEBUG( "%d\n", ctx.handle ); - if ( ctx.handle != DRM_KERNEL_CONTEXT ) { + DRM_DEBUG( "%d\n", ctx->handle ); + if ( ctx->handle != DRM_KERNEL_CONTEXT ) { if (dev->driver.context_dtor) { DRM_LOCK(); - dev->driver.context_dtor(dev, ctx.handle); + dev->driver.context_dtor(dev, ctx->handle); DRM_UNLOCK(); } - drm_ctxbitmap_free(dev, ctx.handle); + drm_ctxbitmap_free(dev, ctx->handle); } return 0; diff --git a/bsd-core/drm_dma.c b/bsd-core/drm_dma.c index fc219039..fc1e1250 100644 --- a/bsd-core/drm_dma.c +++ b/bsd-core/drm_dma.c @@ -117,13 +117,11 @@ void drm_reclaim_buffers(drm_device_t *dev, struct drm_file *file_priv) } /* Call into the driver-specific DMA handler */ -int drm_dma(DRM_IOCTL_ARGS) +int drm_dma(drm_device_t *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; if (dev->driver.dma_ioctl) { - return -dev->driver.dma_ioctl(kdev, cmd, data, flags, p, - file_priv); + return -dev->driver.dma_ioctl(dev, data, file_priv); } else { DRM_DEBUG("DMA ioctl on driver with no dma handler\n"); return EINVAL; diff --git a/bsd-core/drm_drawable.c b/bsd-core/drm_drawable.c index 
b81d0a75..14a29407 100644 --- a/bsd-core/drm_drawable.c +++ b/bsd-core/drm_drawable.c @@ -64,10 +64,9 @@ drm_get_drawable_info(drm_device_t *dev, int handle) return &result->info; } -int drm_adddraw(DRM_IOCTL_ARGS) +int drm_adddraw(drm_device_t *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - drm_draw_t draw; + drm_draw_t *draw = data; struct bsd_drm_drawable_info *info; info = drm_calloc(1, sizeof(struct bsd_drm_drawable_info), @@ -78,19 +77,16 @@ int drm_adddraw(DRM_IOCTL_ARGS) info->handle = alloc_unr(dev->drw_unrhdr); DRM_SPINLOCK(&dev->drw_lock); RB_INSERT(drawable_tree, &dev->drw_head, info); - draw.handle = info->handle; + draw->handle = info->handle; DRM_SPINUNLOCK(&dev->drw_lock); - DRM_DEBUG("%d\n", draw.handle); - - DRM_COPY_TO_USER_IOCTL((drm_draw_t *)data, draw, sizeof(draw)); + DRM_DEBUG("%d\n", draw->handle); return 0; } -int drm_rmdraw(DRM_IOCTL_ARGS) +int drm_rmdraw(drm_device_t *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_draw_t *draw = (drm_draw_t *)data; struct drm_drawable_info *info; @@ -110,9 +106,8 @@ int drm_rmdraw(DRM_IOCTL_ARGS) } } -int drm_update_draw(DRM_IOCTL_ARGS) +int drm_update_draw(drm_device_t *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; struct drm_drawable_info *info; struct drm_update_draw *update = (struct drm_update_draw *)data; int ret; diff --git a/bsd-core/drm_drv.c b/bsd-core/drm_drv.c index 0b33ba34..a978f50f 100644 --- a/bsd-core/drm_drv.c +++ b/bsd-core/drm_drv.c @@ -65,64 +65,64 @@ MODULE_DEPEND(drm, mem, 1, 1, 1); #endif /* __NetBSD__ || __OpenBSD__ */ static drm_ioctl_desc_t drm_ioctls[256] = { - [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { drm_version, 0 }, - [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique, 0 }, - [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { drm_getmagic, 0 }, - [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP)] = { drm_getmap, 0 }, - [DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT)] = { drm_getclient, 0 }, - [DRM_IOCTL_NR(DRM_IOCTL_GET_STATS)] = { drm_getstats, 0 }, - [DRM_IOCTL_NR(DRM_IOCTL_SET_VERSION)] = { drm_setversion, DRM_MASTER|DRM_ROOT_ONLY }, + DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0), + DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0), + DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0), + DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0), + DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0), + DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0), + DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER|DRM_ROOT_ONLY), - [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY }, - [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY }, - [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY }, - [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY }, + DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY }, - [DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)] = { drm_rmmap_ioctl, DRM_AUTH }, + DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, 
drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH), - [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = { drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY }, - [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = { drm_getsareactx, DRM_AUTH }, + DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH), - [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { drm_addctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY }, - [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY }, - [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY }, - [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { drm_getctx, DRM_AUTH }, - [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY }, - [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY }, - [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { drm_resctx, DRM_AUTH }, + DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH), - [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY }, - [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY }, + DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { drm_lock, DRM_AUTH }, - [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { drm_unlock, DRM_AUTH }, - [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_noop, DRM_AUTH }, + DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH), - [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { drm_addbufs_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY }, - [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { drm_markbufs, DRM_AUTH|DRM_MASTER }, - [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { drm_infobufs, DRM_AUTH }, - [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { drm_mapbufs, DRM_AUTH }, - [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { drm_freebufs, DRM_AUTH }, - [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { drm_dma, DRM_AUTH }, + DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER), + DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_dma, DRM_AUTH), - [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = { drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY }, + DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY }, - [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY }, - [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { drm_agp_enable_ioctl, 
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY }, - [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { drm_agp_info_ioctl, DRM_AUTH }, - [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = { drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY }, - [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY }, - [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY }, - [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY }, + DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC)] = { drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY }, - [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = { drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY }, + DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = { drm_wait_vblank, 0 }, - [DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW)] = { drm_update_draw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY }, + DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0), + DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_draw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), }; #ifdef __FreeBSD__ @@ -647,14 +647,11 @@ static void drm_unload(drm_device_t *dev) } -int drm_version(DRM_IOCTL_ARGS) +int drm_version(drm_device_t *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - drm_version_t version; + drm_version_t *version = data; int len; - DRM_COPY_FROM_USER_IOCTL( version, (drm_version_t *)data, sizeof(version) ); - #define DRM_COPY( name, value ) \ len = strlen( value ); \ if ( len > name##_len ) len = name##_len; \ @@ -664,15 +661,13 @@ int drm_version(DRM_IOCTL_ARGS) return EFAULT; \ } - version.version_major = dev->driver.major; - version.version_minor = dev->driver.minor; - version.version_patchlevel = dev->driver.patchlevel; + version->version_major = dev->driver.major; + version->version_minor = dev->driver.minor; + version->version_patchlevel = dev->driver.patchlevel; - DRM_COPY(version.name, dev->driver.name); - DRM_COPY(version.date, dev->driver.date); - DRM_COPY(version.desc, dev->driver.desc); - - DRM_COPY_TO_USER_IOCTL( (drm_version_t *)data, version, sizeof(version) ); + DRM_COPY(version->name, dev->driver.name); + DRM_COPY(version->date, dev->driver.date); + DRM_COPY(version->desc, dev->driver.desc); return 0; } @@ -704,8 +699,8 @@ int drm_open(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p) int drm_close(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p) { + drm_device_t *dev = drm_get_device_from_kdev(kdev); drm_file_t *file_priv; - DRM_DEVICE; int retcode = 0; DRM_DEBUG( "open_count = %d\n", dev->open_count ); @@ -823,10 +818,17 @@ int drm_close(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p) int drm_ioctl(struct cdev 
*kdev, u_long cmd, caddr_t data, int flags, DRM_STRUCTPROC *p) { - DRM_DEVICE; +#ifdef __FreeBSD__ + drm_device_t *dev = kdev->si_drv1; +#elif defined(__NetBSD__) + drm_device_t *dev = device_lookup(&drm_cd, minor(kdev)); +#else + drm_device_t *dev = device_lookup(&drm_cd, + minor(kdev)))->dv_cfdata->cf_driver->cd_devs[minor(kdev)]; +#endif int retcode = 0; drm_ioctl_desc_t *ioctl; - int (*func)(DRM_IOCTL_ARGS); + int (*func)(drm_device_t *dev, void *data, struct drm_file *file_priv); int nr = DRM_IOCTL_NR(cmd); int is_driver_ioctl = 0; drm_file_t *file_priv; @@ -912,7 +914,7 @@ int drm_ioctl(struct cdev *kdev, u_long cmd, caddr_t data, int flags, if (is_driver_ioctl) DRM_LOCK(); - retcode = func(kdev, cmd, data, flags, p, file_priv); + retcode = func(dev, data, file_priv); if (is_driver_ioctl) { DRM_UNLOCK(); /* Driver ioctls in shared code follow the linux convention of diff --git a/bsd-core/drm_ioctl.c b/bsd-core/drm_ioctl.c index e450066c..ebdb2140 100644 --- a/bsd-core/drm_ioctl.c +++ b/bsd-core/drm_ioctl.c @@ -39,20 +39,15 @@ * before setunique has been called. The format for the bus-specific part of * the unique is not defined for any other bus. */ -int drm_getunique(DRM_IOCTL_ARGS) +int drm_getunique(drm_device_t *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - drm_unique_t u; + drm_unique_t *u = data; - DRM_COPY_FROM_USER_IOCTL( u, (drm_unique_t *)data, sizeof(u) ); - - if (u.unique_len >= dev->unique_len) { - if (DRM_COPY_TO_USER(u.unique, dev->unique, dev->unique_len)) + if (u->unique_len >= dev->unique_len) { + if (DRM_COPY_TO_USER(u->unique, dev->unique, dev->unique_len)) return EFAULT; } - u.unique_len = dev->unique_len; - - DRM_COPY_TO_USER_IOCTL( (drm_unique_t *)data, u, sizeof(u) ); + u->unique_len = dev->unique_len; return 0; } @@ -60,28 +55,25 @@ int drm_getunique(DRM_IOCTL_ARGS) /* Deprecated in DRM version 1.1, and will return EBUSY when setversion has * requested version 1.1 or greater. */ -int drm_setunique(DRM_IOCTL_ARGS) +int drm_setunique(drm_device_t *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - drm_unique_t u; + drm_unique_t *u = data; int domain, bus, slot, func, ret; char *busid; - DRM_COPY_FROM_USER_IOCTL( u, (drm_unique_t *)data, sizeof(u) ); - /* Check and copy in the submitted Bus ID */ - if (!u.unique_len || u.unique_len > 1024) + if (!u->unique_len || u->unique_len > 1024) return EINVAL; - busid = malloc(u.unique_len + 1, M_DRM, M_WAITOK); + busid = malloc(u->unique_len + 1, M_DRM, M_WAITOK); if (busid == NULL) return ENOMEM; - if (DRM_COPY_FROM_USER(busid, u.unique, u.unique_len)) { + if (DRM_COPY_FROM_USER(busid, u->unique, u->unique_len)) { free(busid, M_DRM); return EFAULT; } - busid[u.unique_len] = '\0'; + busid[u->unique_len] = '\0'; /* Return error if the busid submitted doesn't match the device's actual * busid. 
@@ -109,7 +101,7 @@ int drm_setunique(DRM_IOCTL_ARGS) return EBUSY; } - dev->unique_len = u.unique_len; + dev->unique_len = u->unique_len; dev->unique = busid; DRM_UNLOCK(); @@ -143,17 +135,14 @@ drm_set_busid(drm_device_t *dev) return 0; } -int drm_getmap(DRM_IOCTL_ARGS) +int drm_getmap(drm_device_t *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - drm_map_t map; + drm_map_t *map = data; drm_local_map_t *mapinlist; int idx; int i = 0; - DRM_COPY_FROM_USER_IOCTL( map, (drm_map_t *)data, sizeof(map) ); - - idx = map.offset; + idx = map->offset; DRM_LOCK(); if (idx < 0) { @@ -163,12 +152,12 @@ int drm_getmap(DRM_IOCTL_ARGS) TAILQ_FOREACH(mapinlist, &dev->maplist, link) { if (i==idx) { - map.offset = mapinlist->offset; - map.size = mapinlist->size; - map.type = mapinlist->type; - map.flags = mapinlist->flags; - map.handle = mapinlist->handle; - map.mtrr = mapinlist->mtrr; + map->offset = mapinlist->offset; + map->size = mapinlist->size; + map->type = mapinlist->type; + map->flags = mapinlist->flags; + map->handle = mapinlist->handle; + map->mtrr = mapinlist->mtrr; break; } i++; @@ -179,34 +168,27 @@ int drm_getmap(DRM_IOCTL_ARGS) if (mapinlist == NULL) return EINVAL; - DRM_COPY_TO_USER_IOCTL( (drm_map_t *)data, map, sizeof(map) ); - return 0; } -int drm_getclient(DRM_IOCTL_ARGS) +int drm_getclient(drm_device_t *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - drm_client_t client; + drm_client_t *client = data; drm_file_t *pt; int idx; int i = 0; - DRM_COPY_FROM_USER_IOCTL( client, (drm_client_t *)data, sizeof(client) ); - - idx = client.idx; + idx = client->idx; DRM_LOCK(); TAILQ_FOREACH(pt, &dev->files, link) { if (i==idx) { - client.auth = pt->authenticated; - client.pid = pt->pid; - client.uid = pt->uid; - client.magic = pt->magic; - client.iocs = pt->ioctl_count; + client->auth = pt->authenticated; + client->pid = pt->pid; + client->uid = pt->uid; + client->magic = pt->magic; + client->iocs = pt->ioctl_count; DRM_UNLOCK(); - - *(drm_client_t *)data = client; return 0; } i++; @@ -216,10 +198,9 @@ int drm_getclient(DRM_IOCTL_ARGS) return EINVAL; } -int drm_getstats(DRM_IOCTL_ARGS) +int drm_getstats(drm_device_t *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - drm_stats_t stats; + drm_stats_t *stats = data; int i; memset(&stats, 0, sizeof(stats)); @@ -228,49 +209,44 @@ int drm_getstats(DRM_IOCTL_ARGS) for (i = 0; i < dev->counters; i++) { if (dev->types[i] == _DRM_STAT_LOCK) - stats.data[i].value + stats->data[i].value = (dev->lock.hw_lock ? 
dev->lock.hw_lock->lock : 0); else - stats.data[i].value = atomic_read(&dev->counts[i]); - stats.data[i].type = dev->types[i]; + stats->data[i].value = atomic_read(&dev->counts[i]); + stats->data[i].type = dev->types[i]; } - stats.count = dev->counters; + stats->count = dev->counters; DRM_UNLOCK(); - DRM_COPY_TO_USER_IOCTL( (drm_stats_t *)data, stats, sizeof(stats) ); - return 0; } #define DRM_IF_MAJOR 1 #define DRM_IF_MINOR 2 -int drm_setversion(DRM_IOCTL_ARGS) +int drm_setversion(drm_device_t *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - drm_set_version_t sv; + drm_set_version_t *sv = data; drm_set_version_t retv; int if_version; - DRM_COPY_FROM_USER_IOCTL(sv, (drm_set_version_t *)data, sizeof(sv)); - retv.drm_di_major = DRM_IF_MAJOR; retv.drm_di_minor = DRM_IF_MINOR; retv.drm_dd_major = dev->driver.major; retv.drm_dd_minor = dev->driver.minor; - DRM_COPY_TO_USER_IOCTL((drm_set_version_t *)data, retv, sizeof(sv)); - - if (sv.drm_di_major != -1) { - if (sv.drm_di_major != DRM_IF_MAJOR || - sv.drm_di_minor < 0 || sv.drm_di_minor > DRM_IF_MINOR) + if (sv->drm_di_major != -1) { + if (sv->drm_di_major != DRM_IF_MAJOR || + sv->drm_di_minor < 0 || sv->drm_di_minor > DRM_IF_MINOR) { return EINVAL; - if_version = DRM_IF_VERSION(sv.drm_di_major, sv.drm_dd_minor); + } + if_version = DRM_IF_VERSION(sv->drm_di_major, + sv->drm_dd_minor); dev->if_version = DRM_MAX(if_version, dev->if_version); - if (sv.drm_di_minor >= 1) { + if (sv->drm_di_minor >= 1) { /* * Version 1.1 includes tying of DRM to specific device */ @@ -278,16 +254,20 @@ int drm_setversion(DRM_IOCTL_ARGS) } } - if (sv.drm_dd_major != -1) { - if (sv.drm_dd_major != dev->driver.major || - sv.drm_dd_minor < 0 || sv.drm_dd_minor > dev->driver.minor) + if (sv->drm_dd_major != -1) { + if (sv->drm_dd_major != dev->driver.major || + sv->drm_dd_minor < 0 || + sv->drm_dd_minor > dev->driver.minor) + { return EINVAL; + } } + return 0; } -int drm_noop(DRM_IOCTL_ARGS) +int drm_noop(drm_device_t *dev, void *data, struct drm_file *file_priv) { DRM_DEBUG("\n"); return 0; diff --git a/bsd-core/drm_irq.c b/bsd-core/drm_irq.c index 95c84ab5..1ab532fe 100644 --- a/bsd-core/drm_irq.c +++ b/bsd-core/drm_irq.c @@ -33,25 +33,20 @@ static void drm_locked_task(void *context, int pending __unused); -int drm_irq_by_busid(DRM_IOCTL_ARGS) +int drm_irq_by_busid(drm_device_t *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - drm_irq_busid_t irq; + drm_irq_busid_t *irq = data; - DRM_COPY_FROM_USER_IOCTL(irq, (drm_irq_busid_t *)data, sizeof(irq)); - - if ((irq.busnum >> 8) != dev->pci_domain || - (irq.busnum & 0xff) != dev->pci_bus || - irq.devnum != dev->pci_slot || - irq.funcnum != dev->pci_func) + if ((irq->busnum >> 8) != dev->pci_domain || + (irq->busnum & 0xff) != dev->pci_bus || + irq->devnum != dev->pci_slot || + irq->funcnum != dev->pci_func) return EINVAL; - irq.irq = dev->irq; + irq->irq = dev->irq; DRM_DEBUG("%d:%d:%d => IRQ %d\n", - irq.busnum, irq.devnum, irq.funcnum, irq.irq); - - DRM_COPY_TO_USER_IOCTL( (drm_irq_busid_t *)data, irq, sizeof(irq) ); + irq->busnum, irq->devnum, irq->funcnum, irq->irq); return 0; } @@ -182,15 +177,12 @@ int drm_irq_uninstall(drm_device_t *dev) return 0; } -int drm_control(DRM_IOCTL_ARGS) +int drm_control(drm_device_t *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - drm_control_t ctl; + drm_control_t *ctl = data; int err; - DRM_COPY_FROM_USER_IOCTL( ctl, (drm_control_t *) data, sizeof(ctl) ); - - switch ( ctl.func ) { + switch ( ctl->func ) { case DRM_INST_HANDLER: /* Handle 
drivers whose DRM used to require IRQ setup but the * no longer does. @@ -198,7 +190,7 @@ int drm_control(DRM_IOCTL_ARGS) if (!dev->driver.use_irq) return 0; if (dev->if_version < DRM_IF_VERSION(1, 2) && - ctl.irq != dev->irq) + ctl->irq != dev->irq) return EINVAL; return drm_irq_install(dev); case DRM_UNINST_HANDLER: @@ -213,25 +205,21 @@ int drm_control(DRM_IOCTL_ARGS) } } -int drm_wait_vblank(DRM_IOCTL_ARGS) +int drm_wait_vblank(drm_device_t *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - drm_wait_vblank_t vblwait; + drm_wait_vblank_t *vblwait = data; struct timeval now; - int ret; + int ret, flags; if (!dev->irq_enabled) return EINVAL; - DRM_COPY_FROM_USER_IOCTL( vblwait, (drm_wait_vblank_t *)data, - sizeof(vblwait) ); - - if (vblwait.request.type & _DRM_VBLANK_RELATIVE) { - vblwait.request.sequence += atomic_read(&dev->vbl_received); - vblwait.request.type &= ~_DRM_VBLANK_RELATIVE; + if (vblwait->request.type & _DRM_VBLANK_RELATIVE) { + vblwait->request.sequence += atomic_read(&dev->vbl_received); + vblwait->request.type &= ~_DRM_VBLANK_RELATIVE; } - flags = vblwait.request.type & _DRM_VBLANK_FLAGS_MASK; + flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK; if (flags & _DRM_VBLANK_SIGNAL) { #if 0 /* disabled */ drm_vbl_sig_t *vbl_sig = malloc(sizeof(drm_vbl_sig_t), M_DRM, @@ -239,11 +227,11 @@ int drm_wait_vblank(DRM_IOCTL_ARGS) if (vbl_sig == NULL) return ENOMEM; - vbl_sig->sequence = vblwait.request.sequence; - vbl_sig->signo = vblwait.request.signal; + vbl_sig->sequence = vblwait->request.sequence; + vbl_sig->signo = vblwait->request.signal; vbl_sig->pid = DRM_CURRENTPID; - vblwait.reply.sequence = atomic_read(&dev->vbl_received); + vblwait->reply.sequence = atomic_read(&dev->vbl_received); DRM_SPINLOCK(&dev->irq_lock); TAILQ_INSERT_HEAD(&dev->vbl_sig_list, vbl_sig, link); @@ -253,17 +241,15 @@ int drm_wait_vblank(DRM_IOCTL_ARGS) ret = EINVAL; } else { DRM_LOCK(); - ret = -dev->driver.vblank_wait(dev, &vblwait.request.sequence); + ret = -dev->driver.vblank_wait(dev, + &vblwait->request.sequence); DRM_UNLOCK(); microtime(&now); - vblwait.reply.tval_sec = now.tv_sec; - vblwait.reply.tval_usec = now.tv_usec; + vblwait->reply.tval_sec = now.tv_sec; + vblwait->reply.tval_usec = now.tv_usec; } - DRM_COPY_TO_USER_IOCTL( (drm_wait_vblank_t *)data, vblwait, - sizeof(vblwait) ); - return ret; } diff --git a/bsd-core/drm_lock.c b/bsd-core/drm_lock.c index 54b64806..5acb13d3 100644 --- a/bsd-core/drm_lock.c +++ b/bsd-core/drm_lock.c @@ -95,29 +95,27 @@ int drm_lock_free(drm_device_t *dev, return 0; } -int drm_lock(DRM_IOCTL_ARGS) +int drm_lock(drm_device_t *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - drm_lock_t lock; + drm_lock_t *lock = data; int ret = 0; - DRM_COPY_FROM_USER_IOCTL(lock, (drm_lock_t *)data, sizeof(lock)); - - if (lock.context == DRM_KERNEL_CONTEXT) { + if (lock->context == DRM_KERNEL_CONTEXT) { DRM_ERROR("Process %d using kernel context %d\n", - DRM_CURRENTPID, lock.context); + DRM_CURRENTPID, lock->context); return EINVAL; } DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n", - lock.context, DRM_CURRENTPID, dev->lock.hw_lock->lock, lock.flags); + lock->context, DRM_CURRENTPID, dev->lock.hw_lock->lock, + lock->flags); - if (dev->driver.use_dma_queue && lock.context < 0) + if (dev->driver.use_dma_queue && lock->context < 0) return EINVAL; DRM_LOCK(); for (;;) { - if (drm_lock_take(&dev->lock.hw_lock->lock, lock.context)) { + if (drm_lock_take(&dev->lock.hw_lock->lock, lock->context)) { dev->lock.file_priv = file_priv; 
dev->lock.lock_time = jiffies; atomic_inc(&dev->counts[_DRM_STAT_LOCKS]); @@ -136,7 +134,7 @@ int drm_lock(DRM_IOCTL_ARGS) break; } DRM_UNLOCK(); - DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock"); + DRM_DEBUG("%d %s\n", lock->context, ret ? "interrupted" : "has lock"); if (ret != 0) return ret; @@ -144,22 +142,19 @@ int drm_lock(DRM_IOCTL_ARGS) /* XXX: Add signal blocking here */ if (dev->driver.dma_quiescent != NULL && - (lock.flags & _DRM_LOCK_QUIESCENT)) + (lock->flags & _DRM_LOCK_QUIESCENT)) dev->driver.dma_quiescent(dev); return 0; } -int drm_unlock(DRM_IOCTL_ARGS) +int drm_unlock(drm_device_t *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - drm_lock_t lock; + drm_lock_t *lock = data; - DRM_COPY_FROM_USER_IOCTL(lock, (drm_lock_t *)data, sizeof(lock)); - - if (lock.context == DRM_KERNEL_CONTEXT) { + if (lock->context == DRM_KERNEL_CONTEXT) { DRM_ERROR("Process %d using kernel context %d\n", - DRM_CURRENTPID, lock.context); + DRM_CURRENTPID, lock->context); return EINVAL; } diff --git a/bsd-core/drm_scatter.c b/bsd-core/drm_scatter.c index 3c0be4a0..91c3c6c5 100644 --- a/bsd-core/drm_scatter.c +++ b/bsd-core/drm_scatter.c @@ -93,43 +93,28 @@ int drm_sg_alloc(drm_device_t * dev, drm_scatter_gather_t * request) return 0; } -int drm_sg_alloc_ioctl(DRM_IOCTL_ARGS) +int drm_sg_alloc_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - drm_scatter_gather_t request; + drm_scatter_gather_t *request = data; int ret; DRM_DEBUG( "%s\n", __FUNCTION__ ); - - DRM_COPY_FROM_USER_IOCTL(request, (drm_scatter_gather_t *)data, - sizeof(request) ); - - ret = drm_sg_alloc(dev, &request); - if ( ret ) return ret; - - DRM_COPY_TO_USER_IOCTL( (drm_scatter_gather_t *)data, - request, - sizeof(request) ); - - return 0; + ret = drm_sg_alloc(dev, request); + return ret; } -int drm_sg_free(DRM_IOCTL_ARGS) +int drm_sg_free(drm_device_t *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - drm_scatter_gather_t request; + drm_scatter_gather_t *request = data; drm_sg_mem_t *entry; - DRM_COPY_FROM_USER_IOCTL( request, (drm_scatter_gather_t *)data, - sizeof(request) ); - DRM_LOCK(); entry = dev->sg; dev->sg = NULL; DRM_UNLOCK(); - if ( !entry || entry->handle != request.handle ) + if ( !entry || entry->handle != request->handle ) return EINVAL; DRM_DEBUG( "sg free virtual = 0x%lx\n", entry->handle ); diff --git a/bsd-core/drm_vm.c b/bsd-core/drm_vm.c index d8561699..af1dbaa8 100644 --- a/bsd-core/drm_vm.c +++ b/bsd-core/drm_vm.c @@ -33,7 +33,7 @@ int drm_mmap(dev_t kdev, vm_offset_t offset, int prot) paddr_t drm_mmap(dev_t kdev, off_t offset, int prot) #endif { - DRM_DEVICE; + drm_device_t *dev = drm_get_device_from_kdev(kdev); drm_local_map_t *map; drm_file_t *priv; drm_map_type_t type; diff --git a/linux-core/drmP.h b/linux-core/drmP.h index f4367955..2b7e0a44 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -275,16 +275,19 @@ do { \ return -EFAULT; \ } +struct drm_device; +struct drm_file; + /** * Ioctl function type. * - * \param inode device inode. + * \param dev DRM device structure + * \param data pointer to kernel-space stored data, copied in and out according + * to ioctl description. * \param file_priv DRM file private pointer. - * \param cmd command. - * \param arg argument. 
*/ -typedef int drm_ioctl_t(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); +typedef int drm_ioctl_t(struct drm_device *dev, void *data, + struct drm_file *file_priv); typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd, unsigned long arg); @@ -294,9 +297,16 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd, #define DRM_ROOT_ONLY 0x4 struct drm_ioctl_desc { + unsigned int cmd; drm_ioctl_t *func; int flags; }; +/** + * Creates a driver or general drm_ioctl_desc array entry for the given + * ioctl, for use by drm_ioctl(). + */ +#define DRM_IOCTL_DEF(ioctl, func, flags) \ + [DRM_IOCTL_NR(ioctl)] = {ioctl, func, flags} struct drm_magic_entry { struct list_head head; @@ -602,7 +612,6 @@ struct ati_pcigart_info { * in this family */ -struct drm_device; struct drm_driver { int (*load) (struct drm_device *, unsigned long flags); int (*firstopen) (struct drm_device *); @@ -611,7 +620,7 @@ struct drm_driver { void (*postclose) (struct drm_device *, struct drm_file *); void (*lastclose) (struct drm_device *); int (*unload) (struct drm_device *); - int (*dma_ioctl) (DRM_IOCTL_ARGS); + int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv); void (*dma_ready) (struct drm_device *); int (*dma_quiescent) (struct drm_device *); int (*context_ctor) (struct drm_device * dev, int context); @@ -944,71 +953,70 @@ extern void drm_init_memctl(size_t low_threshold, size_t unit_size); /* Misc. IOCTL support (drm_ioctl.h) */ -extern int drm_irq_by_busid(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); -extern int drm_getunique(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); -extern int drm_setunique(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); -extern int drm_getmap(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); -extern int drm_getclient(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); -extern int drm_getstats(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); -extern int drm_setversion(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); -extern int drm_noop(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); +extern int drm_irq_by_busid(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_getunique(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_setunique(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_getmap(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_getclient(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_getstats(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_setversion(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_noop(struct drm_device *dev, void *data, + struct drm_file *file_priv); /* Context IOCTL support (drm_context.h) */ -extern int drm_resctx(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); -extern int drm_addctx(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); -extern int drm_modctx(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long 
arg); -extern int drm_getctx(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); -extern int drm_switchctx(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); -extern int drm_newctx(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); -extern int drm_rmctx(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); +extern int drm_resctx(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_addctx(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_modctx(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_getctx(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_switchctx(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_newctx(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_rmctx(struct drm_device *dev, void *data, + struct drm_file *file_priv); extern int drm_ctxbitmap_init(struct drm_device *dev); extern void drm_ctxbitmap_cleanup(struct drm_device *dev); extern void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle); -extern int drm_setsareactx(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); -extern int drm_getsareactx(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); +extern int drm_setsareactx(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_getsareactx(struct drm_device *dev, void *data, + struct drm_file *file_priv); /* Drawable IOCTL support (drm_drawable.h) */ -extern int drm_adddraw(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); -extern int drm_rmdraw(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); -extern int drm_update_drawable_info(struct inode *inode, - struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); +extern int drm_adddraw(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_rmdraw(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_update_drawable_info(struct drm_device *dev, void *data, + struct drm_file *file_priv); extern struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev, drm_drawable_t id); extern void drm_drawable_free_all(struct drm_device *dev); /* Authentication IOCTL support (drm_auth.h) */ -extern int drm_getmagic(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); -extern int drm_authmagic(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); +extern int drm_getmagic(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_authmagic(struct drm_device *dev, void *data, + struct drm_file *file_priv); /* Locking IOCTL support (drm_lock.h) */ -extern int drm_lock(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); -extern int drm_unlock(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); +extern int drm_lock(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_unlock(struct drm_device *dev, void *data, + struct drm_file *file_priv); extern int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context); extern int 
drm_lock_free(struct drm_lock_data *lock_data, unsigned int context); extern void drm_idlelock_take(struct drm_lock_data *lock_data); @@ -1019,7 +1027,8 @@ extern void drm_idlelock_release(struct drm_lock_data *lock_data); * DMA quiscent + idle. DMA quiescent usually requires the hardware lock. */ -extern int drm_i_have_hw_lock(struct drm_file *file_priv); +extern int drm_i_have_hw_lock(struct drm_device *dev, + struct drm_file *file_priv); /* Buffer management support (drm_bufs.h) */ extern int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request); @@ -1028,22 +1037,22 @@ extern int drm_addbufs_fb (struct drm_device *dev, struct drm_buf_desc * request extern int drm_addmap(struct drm_device *dev, unsigned int offset, unsigned int size, enum drm_map_type type, enum drm_map_flags flags, drm_local_map_t ** map_ptr); -extern int drm_addmap_ioctl(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); +extern int drm_addmap_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); extern int drm_rmmap(struct drm_device *dev, drm_local_map_t *map); extern int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map); -extern int drm_rmmap_ioctl(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); -extern int drm_addbufs(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); -extern int drm_infobufs(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); -extern int drm_markbufs(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); -extern int drm_freebufs(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); -extern int drm_mapbufs(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); +extern int drm_rmmap_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_addbufs(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_infobufs(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_markbufs(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_freebufs(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_mapbufs(struct drm_device *dev, void *data, + struct drm_file *file_priv); extern int drm_order(unsigned long size); extern unsigned long drm_get_resource_start(struct drm_device *dev, unsigned int resource); @@ -1061,16 +1070,16 @@ extern void drm_core_reclaim_buffers(struct drm_device *dev, struct drm_file *filp); /* IRQ support (drm_irq.h) */ -extern int drm_control(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); +extern int drm_control(struct drm_device *dev, void *data, + struct drm_file *file_priv); extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS); extern int drm_irq_uninstall(struct drm_device *dev); extern void drm_driver_irq_preinstall(struct drm_device *dev); extern void drm_driver_irq_postinstall(struct drm_device *dev); extern void drm_driver_irq_uninstall(struct drm_device *dev); -extern int drm_wait_vblank(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); +extern int drm_wait_vblank(struct drm_device *dev, void *data, + struct drm_file *file_priv); extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq); extern void drm_vbl_send_signals(struct drm_device *dev); 
extern void drm_locked_tasklet(struct drm_device *dev, void(*func)(struct drm_device*)); @@ -1078,32 +1087,29 @@ extern void drm_locked_tasklet(struct drm_device *dev, void(*func)(struct drm_de /* AGP/GART support (drm_agpsupport.h) */ extern struct drm_agp_head *drm_agp_init(struct drm_device *dev); extern int drm_agp_acquire(struct drm_device *dev); -extern int drm_agp_acquire_ioctl(struct inode *inode, - struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); +extern int drm_agp_acquire_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); extern int drm_agp_release(struct drm_device *dev); -extern int drm_agp_release_ioctl(struct inode *inode, - struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); +extern int drm_agp_release_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); extern int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode); -extern int drm_agp_enable_ioctl(struct inode *inode, - struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); +extern int drm_agp_enable_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); extern int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info); -extern int drm_agp_info_ioctl(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); +extern int drm_agp_info_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); extern int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request); -extern int drm_agp_alloc_ioctl(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); +extern int drm_agp_alloc_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); extern int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request); -extern int drm_agp_free_ioctl(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); +extern int drm_agp_free_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); extern int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request); -extern int drm_agp_unbind_ioctl(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); +extern int drm_agp_unbind_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); extern int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request); -extern int drm_agp_bind_ioctl(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); +extern int drm_agp_bind_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11) extern DRM_AGP_MEM *drm_agp_allocate_memory(size_t pages, u32 type); #else @@ -1137,11 +1143,11 @@ extern int drm_proc_cleanup(int minor, /* Scatter Gather Support (drm_scatter.h) */ extern void drm_sg_cleanup(struct drm_sg_mem * entry); -extern int drm_sg_alloc_ioctl(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); +extern int drm_sg_alloc_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); extern int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request); -extern int drm_sg_free(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); +extern int drm_sg_free(struct drm_device *dev, void *data, + struct drm_file *file_priv); /* ATI PCIGART support (ati_pcigart.h) */ extern int drm_ati_pcigart_init(struct drm_device *dev, 
struct ati_pcigart_info *gart_info); diff --git a/linux-core/drm_agpsupport.c b/linux-core/drm_agpsupport.c index ab7b8c90..df54360d 100644 --- a/linux-core/drm_agpsupport.c +++ b/linux-core/drm_agpsupport.c @@ -70,19 +70,16 @@ int drm_agp_info(struct drm_device * dev, struct drm_agp_info *info) } EXPORT_SYMBOL(drm_agp_info); -int drm_agp_info_ioctl(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_agp_info_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = priv->head->dev; - struct drm_agp_info info; + struct drm_agp_info *info = data; int err; - err = drm_agp_info(dev, &info); + err = drm_agp_info(dev, info); if (err) return err; - - if (copy_to_user((struct drm_agp_info __user *) arg, &info, sizeof(info))) - return -EFAULT; + return 0; } @@ -130,8 +127,8 @@ EXPORT_SYMBOL(drm_agp_acquire); * Verifies the AGP device hasn't been acquired before and calls * \c agp_backend_acquire. */ -int drm_agp_acquire_ioctl(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_agp_acquire_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { return drm_agp_acquire( (struct drm_device *) file_priv->head->dev ); } @@ -159,11 +156,9 @@ int drm_agp_release(struct drm_device *dev) } EXPORT_SYMBOL(drm_agp_release); -int drm_agp_release_ioctl(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_agp_release_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - return drm_agp_release(dev); } @@ -194,17 +189,12 @@ int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode) } EXPORT_SYMBOL(drm_agp_enable); -int drm_agp_enable_ioctl(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_agp_enable_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - struct drm_agp_mode mode; + struct drm_agp_mode *mode = data; - - if (copy_from_user(&mode, (struct drm_agp_mode __user *) arg, sizeof(mode))) - return -EFAULT; - - return drm_agp_enable(dev, mode); + return drm_agp_enable(dev, *mode); } /** @@ -254,34 +244,12 @@ int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request) EXPORT_SYMBOL(drm_agp_alloc); -int drm_agp_alloc_ioctl(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_agp_alloc_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - struct drm_agp_buffer request; - struct drm_agp_buffer __user *argp = (void __user *)arg; - int err; + struct drm_agp_buffer *request = data; - if (copy_from_user(&request, argp, sizeof(request))) - return -EFAULT; - - err = drm_agp_alloc(dev, &request); - if (err) - return err; - - if (copy_to_user(argp, &request, sizeof(request))) { - struct drm_agp_mem *entry; - list_for_each_entry(entry, &dev->agp->memory, head) { - if (entry->handle == request.handle) - break; - } - list_del(&entry->head); - drm_free_agp(entry->memory, entry->pages); - drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS); - return -EFAULT; - } - - return 0; + return drm_agp_alloc(dev, request); } /** @@ -336,17 +304,12 @@ int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request) EXPORT_SYMBOL(drm_agp_unbind); -int drm_agp_unbind_ioctl(struct inode *inode, struct drm_file 
*file_priv, - unsigned int cmd, unsigned long arg) +int drm_agp_unbind_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - struct drm_agp_binding request; + struct drm_agp_binding *request = data; - if (copy_from_user - (&request, (struct drm_agp_binding __user *) arg, sizeof(request))) - return -EFAULT; - - return drm_agp_unbind(dev, &request); + return drm_agp_unbind(dev, request); } @@ -386,17 +349,12 @@ int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request) EXPORT_SYMBOL(drm_agp_bind); -int drm_agp_bind_ioctl(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_agp_bind_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - struct drm_agp_binding request; + struct drm_agp_binding *request = data; - if (copy_from_user - (&request, (struct drm_agp_binding __user *) arg, sizeof(request))) - return -EFAULT; - - return drm_agp_bind(dev, &request); + return drm_agp_bind(dev, request); } @@ -435,17 +393,12 @@ EXPORT_SYMBOL(drm_agp_free); -int drm_agp_free_ioctl(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_agp_free_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - struct drm_agp_buffer request; + struct drm_agp_buffer *request = data; - if (copy_from_user - (&request, (struct drm_agp_buffer __user *) arg, sizeof(request))) - return -EFAULT; - - return drm_agp_free(dev, &request); + return drm_agp_free(dev, request); } diff --git a/linux-core/drm_auth.c b/linux-core/drm_auth.c index f10a57b1..e35e8b6d 100644 --- a/linux-core/drm_auth.c +++ b/linux-core/drm_auth.c @@ -136,32 +136,29 @@ static int drm_remove_magic(struct drm_device * dev, drm_magic_t magic) * searches an unique non-zero magic number and add it associating it with \p * file_priv. */ -int drm_getmagic(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv) { static drm_magic_t sequence = 0; static DEFINE_SPINLOCK(lock); - struct drm_device *dev = file_priv->head->dev; - struct drm_auth auth; + struct drm_auth *auth = data; /* Find unique magic */ if (file_priv->magic) { - auth.magic = file_priv->magic; + auth->magic = file_priv->magic; } else { do { spin_lock(&lock); if (!sequence) ++sequence; /* reserve 0 */ - auth.magic = sequence++; + auth->magic = sequence++; spin_unlock(&lock); - } while (drm_find_file(dev, auth.magic)); - file_priv->magic = auth.magic; - drm_add_magic(dev, file_priv, auth.magic); + } while (drm_find_file(dev, auth->magic)); + file_priv->magic = auth->magic; + drm_add_magic(dev, file_priv, auth->magic); } - DRM_DEBUG("%u\n", auth.magic); - if (copy_to_user((struct drm_auth __user *) arg, &auth, sizeof(auth))) - return -EFAULT; + DRM_DEBUG("%u\n", auth->magic); + return 0; } @@ -176,19 +173,16 @@ int drm_getmagic(struct inode *inode, struct drm_file *file_priv, * * Checks if \p file_priv is associated with the magic number passed in \arg. 
*/ -int drm_authmagic(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_authmagic(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - struct drm_auth auth; + struct drm_auth *auth = data; struct drm_file *file; - if (copy_from_user(&auth, (struct drm_auth __user *) arg, sizeof(auth))) - return -EFAULT; - DRM_DEBUG("%u\n", auth.magic); - if ((file = drm_find_file(dev, auth.magic))) { + DRM_DEBUG("%u\n", auth->magic); + if ((file = drm_find_file(dev, auth->magic))) { file->authenticated = 1; - drm_remove_magic(dev, auth.magic); + drm_remove_magic(dev, auth->magic); return 0; } return -EINVAL; diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 671c6232..75d89e46 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1701,13 +1701,14 @@ static int drm_bo_lock_test(struct drm_device * dev, struct drm_file *file_priv) return 0; } -int drm_bo_op_ioctl(DRM_IOCTL_ARGS) +int drm_bo_op_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - struct drm_bo_op_arg arg; - struct drm_bo_op_req *req = &arg.d.req; + struct drm_bo_op_arg curarg; + struct drm_bo_op_arg *arg = data; + struct drm_bo_op_req *req = &arg->d.req; struct drm_bo_info_rep rep; - unsigned long next; + unsigned long next = 0; + void __user *curuserarg = NULL; int ret; if (!dev->bm.initialized) { @@ -1716,10 +1717,16 @@ int drm_bo_op_ioctl(DRM_IOCTL_ARGS) } do { - DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + if (next != 0) { + curuserarg = (void __user *)next; + if (copy_from_user(&curarg, curuserarg, + sizeof(arg)) != 0) + return -EFAULT; + arg = &curarg; + } - if (arg.handled) { - data = arg.next; + if (arg->handled) { + next = arg->next; continue; } @@ -1747,7 +1754,7 @@ int drm_bo_op_ioctl(DRM_IOCTL_ARGS) default: ret = -EINVAL; } - next = arg.next; + next = arg->next; /* * A signal interrupted us. Make sure the ioctl is restartable. 
@@ -1756,21 +1763,23 @@ int drm_bo_op_ioctl(DRM_IOCTL_ARGS) if (ret == -EAGAIN) return -EAGAIN; - arg.handled = 1; - arg.d.rep.ret = ret; - arg.d.rep.bo_info = rep; - DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); - data = next; - } while (data); + arg->handled = 1; + arg->d.rep.ret = ret; + arg->d.rep.bo_info = rep; + if (arg != data) { + if (copy_to_user(curuserarg, &curarg, + sizeof(arg)) != 0) + return -EFAULT; + } + } while (next != 0); return 0; } -int drm_bo_create_ioctl(DRM_IOCTL_ARGS) +int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - struct drm_bo_create_arg arg; - struct drm_bo_create_req *req = &arg.d.req; - struct drm_bo_info_rep *rep = &arg.d.rep; + struct drm_bo_create_arg *arg = data; + struct drm_bo_create_req *req = &arg->d.req; + struct drm_bo_info_rep *rep = &arg->d.rep; struct drm_buffer_object *entry; int ret = 0; @@ -1779,8 +1788,6 @@ int drm_bo_create_ioctl(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - ret = drm_bo_lock_test(dev, file_priv); if (ret) goto out; @@ -1803,16 +1810,14 @@ int drm_bo_create_ioctl(DRM_IOCTL_ARGS) drm_bo_fill_rep_arg(entry, rep); mutex_unlock(&entry->mutex); - DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); out: return ret; } -int drm_bo_destroy_ioctl(DRM_IOCTL_ARGS) +int drm_bo_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - struct drm_bo_handle_arg arg; + struct drm_bo_handle_arg *arg = data; struct drm_user_object *uo; int ret = 0; @@ -1821,10 +1826,8 @@ int drm_bo_destroy_ioctl(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - mutex_lock(&dev->struct_mutex); - uo = drm_lookup_user_object(file_priv, arg.handle); + uo = drm_lookup_user_object(file_priv, arg->handle); if (!uo || (uo->type != drm_buffer_type) || uo->owner != file_priv) { mutex_unlock(&dev->struct_mutex); return -EINVAL; @@ -1835,52 +1838,44 @@ int drm_bo_destroy_ioctl(DRM_IOCTL_ARGS) return ret; } -int drm_bo_map_ioctl(DRM_IOCTL_ARGS) +int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - struct drm_bo_map_wait_idle_arg arg; - struct drm_bo_info_req *req = &arg.d.req; - struct drm_bo_info_rep *rep = &arg.d.rep; + struct drm_bo_map_wait_idle_arg *arg = data; + struct drm_bo_info_req *req = &arg->d.req; + struct drm_bo_info_rep *rep = &arg->d.rep; int ret; if (!dev->bm.initialized) { DRM_ERROR("Buffer object manager is not initialized.\n"); return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - ret = drm_buffer_object_map(file_priv, req->handle, req->mask, req->hint, rep); if (ret) return ret; - DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); return 0; } -int drm_bo_unmap_ioctl(DRM_IOCTL_ARGS) +int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - struct drm_bo_handle_arg arg; + struct drm_bo_handle_arg *arg = data; int ret; if (!dev->bm.initialized) { DRM_ERROR("Buffer object manager is not initialized.\n"); return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - - ret = drm_buffer_object_unmap(file_priv, arg.handle); + ret = drm_buffer_object_unmap(file_priv, arg->handle); return ret; } -int drm_bo_reference_ioctl(DRM_IOCTL_ARGS) +int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - struct 
drm_bo_reference_info_arg arg; - struct drm_bo_handle_arg *req = &arg.d.req; - struct drm_bo_info_rep *rep = &arg.d.rep; + struct drm_bo_reference_info_arg *arg = data; + struct drm_bo_handle_arg *req = &arg->d.req; + struct drm_bo_info_rep *rep = &arg->d.rep; struct drm_user_object *uo; int ret; @@ -1889,8 +1884,6 @@ int drm_bo_reference_ioctl(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - ret = drm_user_object_ref(file_priv, req->handle, drm_buffer_type, &uo); if (ret) @@ -1900,14 +1893,12 @@ int drm_bo_reference_ioctl(DRM_IOCTL_ARGS) if (ret) return ret; - DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); return 0; } -int drm_bo_unreference_ioctl(DRM_IOCTL_ARGS) +int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - struct drm_bo_handle_arg arg; + struct drm_bo_handle_arg *arg = data; int ret = 0; if (!dev->bm.initialized) { @@ -1915,18 +1906,15 @@ int drm_bo_unreference_ioctl(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - - ret = drm_user_object_unref(file_priv, arg.handle, drm_buffer_type); + ret = drm_user_object_unref(file_priv, arg->handle, drm_buffer_type); return ret; } -int drm_bo_info_ioctl(DRM_IOCTL_ARGS) +int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - struct drm_bo_reference_info_arg arg; - struct drm_bo_handle_arg *req = &arg.d.req; - struct drm_bo_info_rep *rep = &arg.d.rep; + struct drm_bo_reference_info_arg *arg = data; + struct drm_bo_handle_arg *req = &arg->d.req; + struct drm_bo_info_rep *rep = &arg->d.rep; int ret; if (!dev->bm.initialized) { @@ -1934,35 +1922,29 @@ int drm_bo_info_ioctl(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - ret = drm_bo_handle_info(file_priv, req->handle, rep); if (ret) return ret; - DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); + return 0; } -int drm_bo_wait_idle_ioctl(DRM_IOCTL_ARGS) +int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - struct drm_bo_map_wait_idle_arg arg; - struct drm_bo_info_req *req = &arg.d.req; - struct drm_bo_info_rep *rep = &arg.d.rep; + struct drm_bo_map_wait_idle_arg *arg = data; + struct drm_bo_info_req *req = &arg->d.req; + struct drm_bo_info_rep *rep = &arg->d.rep; int ret; if (!dev->bm.initialized) { DRM_ERROR("Buffer object manager is not initialized.\n"); return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - ret = drm_bo_handle_wait(file_priv, req->handle, req->hint, rep); if (ret) return ret; - DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); return 0; } @@ -2337,10 +2319,9 @@ int drm_bo_driver_init(struct drm_device * dev) EXPORT_SYMBOL(drm_bo_driver_init); -int drm_mm_init_ioctl(DRM_IOCTL_ARGS) +int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - struct drm_mm_init_arg arg; + struct drm_mm_init_arg *arg = data; struct drm_buffer_manager *bm = &dev->bm; struct drm_bo_driver *driver = dev->driver->bo_driver; int ret; @@ -2350,24 +2331,23 @@ int drm_mm_init_ioctl(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); ret = -EINVAL; - if (arg.magic != DRM_BO_INIT_MAGIC) { + if (arg->magic != DRM_BO_INIT_MAGIC) { DRM_ERROR("You are using an old libdrm that is not compatible with\n" "\tthe kernel DRM module. 
Please upgrade your libdrm.\n"); return -EINVAL; } - if (arg.major != DRM_BO_INIT_MAJOR) { + if (arg->major != DRM_BO_INIT_MAJOR) { DRM_ERROR("libdrm and kernel DRM buffer object interface major\n" "\tversion don't match. Got %d, expected %d,\n", - arg.major, DRM_BO_INIT_MAJOR); + arg->major, DRM_BO_INIT_MAJOR); return -EINVAL; } - if (arg.minor > DRM_BO_INIT_MINOR) { + if (arg->minor > DRM_BO_INIT_MINOR) { DRM_ERROR("libdrm expects a newer DRM buffer object interface.\n" "\tlibdrm buffer object interface version is %d.%d.\n" "\tkernel DRM buffer object interface version is %d.%d\n", - arg.major, arg.minor, DRM_BO_INIT_MAJOR, DRM_BO_INIT_MINOR); + arg->major, arg->minor, DRM_BO_INIT_MAJOR, DRM_BO_INIT_MINOR); return -EINVAL; } @@ -2377,12 +2357,12 @@ int drm_mm_init_ioctl(DRM_IOCTL_ARGS) DRM_ERROR("DRM memory manager was not initialized.\n"); goto out; } - if (arg.mem_type == 0) { + if (arg->mem_type == 0) { DRM_ERROR("System memory buffers already initialized.\n"); goto out; } - ret = drm_bo_init_mm(dev, arg.mem_type, - arg.p_offset, arg.p_size); + ret = drm_bo_init_mm(dev, arg->mem_type, + arg->p_offset, arg->p_size); out: mutex_unlock(&dev->struct_mutex); @@ -2390,14 +2370,12 @@ out: if (ret) return ret; - DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); return 0; } -int drm_mm_takedown_ioctl(DRM_IOCTL_ARGS) +int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - struct drm_mm_type_arg arg; + struct drm_mm_type_arg *arg = data; struct drm_buffer_manager *bm = &dev->bm; struct drm_bo_driver *driver = dev->driver->bo_driver; int ret; @@ -2407,8 +2385,6 @@ int drm_mm_takedown_ioctl(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - LOCK_TEST_WITH_RETURN(dev, file_priv); mutex_lock(&dev->bm.init_mutex); mutex_lock(&dev->struct_mutex); @@ -2417,14 +2393,14 @@ int drm_mm_takedown_ioctl(DRM_IOCTL_ARGS) DRM_ERROR("DRM memory manager was not initialized\n"); goto out; } - if (arg.mem_type == 0) { + if (arg->mem_type == 0) { DRM_ERROR("No takedown for System memory buffers.\n"); goto out; } ret = 0; - if (drm_bo_clean_mm(dev, arg.mem_type)) { + if (drm_bo_clean_mm(dev, arg->mem_type)) { DRM_ERROR("Memory manager type %d not clean. 
" - "Delaying takedown\n", arg.mem_type); + "Delaying takedown\n", arg->mem_type); } out: mutex_unlock(&dev->struct_mutex); @@ -2432,14 +2408,12 @@ out: if (ret) return ret; - DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); return 0; } -int drm_mm_lock_ioctl(DRM_IOCTL_ARGS) +int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - struct drm_mm_type_arg arg; + struct drm_mm_type_arg *arg = data; struct drm_bo_driver *driver = dev->driver->bo_driver; int ret; @@ -2448,25 +2422,20 @@ int drm_mm_lock_ioctl(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - LOCK_TEST_WITH_RETURN(dev, file_priv); mutex_lock(&dev->bm.init_mutex); mutex_lock(&dev->struct_mutex); - ret = drm_bo_lock_mm(dev, arg.mem_type); + ret = drm_bo_lock_mm(dev, arg->mem_type); mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->bm.init_mutex); if (ret) return ret; - DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); return 0; } -int drm_mm_unlock_ioctl(DRM_IOCTL_ARGS) +int drm_mm_unlock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - struct drm_mm_type_arg arg; struct drm_bo_driver *driver = dev->driver->bo_driver; int ret; @@ -2475,7 +2444,6 @@ int drm_mm_unlock_ioctl(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); LOCK_TEST_WITH_RETURN(dev, file_priv); mutex_lock(&dev->bm.init_mutex); mutex_lock(&dev->struct_mutex); @@ -2486,7 +2454,6 @@ int drm_mm_unlock_ioctl(DRM_IOCTL_ARGS) if (ret) return ret; - DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); return 0; } diff --git a/linux-core/drm_bufs.c b/linux-core/drm_bufs.c index a571b817..e8864df0 100644 --- a/linux-core/drm_bufs.c +++ b/linux-core/drm_bufs.c @@ -326,33 +326,24 @@ int drm_addmap(struct drm_device *dev, unsigned int offset, EXPORT_SYMBOL(drm_addmap); -int drm_addmap_ioctl(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_addmap_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - struct drm_map map; + struct drm_map *map = data; struct drm_map_list *maplist; - struct drm_map __user *argp = (void __user *)arg; int err; - if (copy_from_user(&map, argp, sizeof(map))) { - return -EFAULT; - } - - if (!(capable(CAP_SYS_ADMIN) || map.type == _DRM_AGP)) + if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP)) return -EPERM; - err = drm_addmap_core(dev, map.offset, map.size, map.type, map.flags, - &maplist); + err = drm_addmap_core(dev, map->offset, map->size, map->type, + map->flags, &maplist); if (err) return err; - if (copy_to_user(argp, maplist->map, sizeof(struct drm_map))) - return -EFAULT; - /* avoid a warning on 64-bit, this casting isn't very nice, but the API is set so too late */ - if (put_user((void *)(unsigned long)maplist->user_token, &argp->handle)) + if (put_user((void *)(unsigned long)maplist->user_token, &map->handle)) return -EFAULT; return 0; } @@ -451,23 +442,18 @@ EXPORT_SYMBOL(drm_rmmap); * gets used by drivers that the server doesn't need to care about. This seems * unlikely. 
*/ -int drm_rmmap_ioctl(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_rmmap_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - struct drm_map request; + struct drm_map *request = data; drm_local_map_t *map = NULL; struct drm_map_list *r_list; int ret; - if (copy_from_user(&request, (struct drm_map __user *) arg, sizeof(request))) { - return -EFAULT; - } - mutex_lock(&dev->struct_mutex); list_for_each_entry(r_list, &dev->maplist, head) { if (r_list->map && - r_list->user_token == (unsigned long)request.handle && + r_list->user_token == (unsigned long)request->handle && r_list->map->flags & _DRM_REMOVABLE) { map = r_list->map; break; @@ -1287,38 +1273,27 @@ EXPORT_SYMBOL(drm_addbufs_fb); * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent * PCI memory respectively. */ -int drm_addbufs(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_addbufs(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_buf_desc request; - struct drm_device *dev = file_priv->head->dev; + struct drm_buf_desc *request = data; int ret; if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) return -EINVAL; - if (copy_from_user(&request, (struct drm_buf_desc __user *) arg, - sizeof(request))) - return -EFAULT; - #if __OS_HAS_AGP - if (request.flags & _DRM_AGP_BUFFER) - ret = drm_addbufs_agp(dev, &request); + if (request->flags & _DRM_AGP_BUFFER) + ret = drm_addbufs_agp(dev, request); else #endif - if (request.flags & _DRM_SG_BUFFER) - ret = drm_addbufs_sg(dev, &request); - else if (request.flags & _DRM_FB_BUFFER) - ret = drm_addbufs_fb(dev, &request); + if (request->flags & _DRM_SG_BUFFER) + ret = drm_addbufs_sg(dev, request); + else if (request->flags & _DRM_FB_BUFFER) + ret = drm_addbufs_fb(dev, request); else - ret = drm_addbufs_pci(dev, &request); + ret = drm_addbufs_pci(dev, request); - if (ret == 0) { - if (copy_to_user((void __user *) arg, &request, - sizeof(request))) { - ret = -EFAULT; - } - } return ret; } @@ -1339,13 +1314,11 @@ int drm_addbufs(struct inode *inode, struct drm_file *file_priv, * lock, preventing of allocating more buffers after this call. Information * about each requested buffer is then copied into user space. 
*/ -int drm_infobufs(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_infobufs(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; struct drm_device_dma *dma = dev->dma; - struct drm_buf_info request; - struct drm_buf_info __user *argp = (void __user *)arg; + struct drm_buf_info *request = data; int i; int count; @@ -1363,9 +1336,6 @@ int drm_infobufs(struct inode *inode, struct drm_file *file_priv, ++dev->buf_use; /* Can't allocate more after this call */ spin_unlock(&dev->count_lock); - if (copy_from_user(&request, argp, sizeof(request))) - return -EFAULT; - for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) { if (dma->bufs[i].buf_count) ++count; @@ -1373,11 +1343,11 @@ int drm_infobufs(struct inode *inode, struct drm_file *file_priv, DRM_DEBUG("count = %d\n", count); - if (request.count >= count) { + if (request->count >= count) { for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) { if (dma->bufs[i].buf_count) { struct drm_buf_desc __user *to = - &request.list[count]; + &request->list[count]; struct drm_buf_entry *from = &dma->bufs[i]; struct drm_freelist *list = &dma->bufs[i].freelist; if (copy_to_user(&to->count, @@ -1404,10 +1374,7 @@ int drm_infobufs(struct inode *inode, struct drm_file *file_priv, } } } - request.count = count; - - if (copy_to_user(argp, &request, sizeof(request))) - return -EFAULT; + request->count = count; return 0; } @@ -1426,12 +1393,11 @@ int drm_infobufs(struct inode *inode, struct drm_file *file_priv, * * \note This ioctl is deprecated and mostly never used. */ -int drm_markbufs(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_markbufs(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; struct drm_device_dma *dma = dev->dma; - struct drm_buf_desc request; + struct drm_buf_desc *request = data; int order; struct drm_buf_entry *entry; @@ -1441,24 +1407,20 @@ int drm_markbufs(struct inode *inode, struct drm_file *file_priv, if (!dma) return -EINVAL; - if (copy_from_user(&request, - (struct drm_buf_desc __user *) arg, sizeof(request))) - return -EFAULT; - DRM_DEBUG("%d, %d, %d\n", - request.size, request.low_mark, request.high_mark); - order = drm_order(request.size); + request->size, request->low_mark, request->high_mark); + order = drm_order(request->size); if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL; entry = &dma->bufs[order]; - if (request.low_mark < 0 || request.low_mark > entry->buf_count) + if (request->low_mark < 0 || request->low_mark > entry->buf_count) return -EINVAL; - if (request.high_mark < 0 || request.high_mark > entry->buf_count) + if (request->high_mark < 0 || request->high_mark > entry->buf_count) return -EINVAL; - entry->freelist.low_mark = request.low_mark; - entry->freelist.high_mark = request.high_mark; + entry->freelist.low_mark = request->low_mark; + entry->freelist.high_mark = request->high_mark; return 0; } @@ -1475,12 +1437,11 @@ int drm_markbufs(struct inode *inode, struct drm_file *file_priv, * Calls free_buffer() for each used buffer. * This function is primarily used for debugging. 
*/ -int drm_freebufs(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_freebufs(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; struct drm_device_dma *dma = dev->dma; - struct drm_buf_free request; + struct drm_buf_free *request = data; int i; int idx; struct drm_buf *buf; @@ -1491,13 +1452,9 @@ int drm_freebufs(struct inode *inode, struct drm_file *file_priv, if (!dma) return -EINVAL; - if (copy_from_user(&request, - (struct drm_buf_free __user *) arg, sizeof(request))) - return -EFAULT; - - DRM_DEBUG("%d\n", request.count); - for (i = 0; i < request.count; i++) { - if (copy_from_user(&idx, &request.list[i], sizeof(idx))) + DRM_DEBUG("%d\n", request->count); + for (i = 0; i < request->count; i++) { + if (copy_from_user(&idx, &request->list[i], sizeof(idx))) return -EFAULT; if (idx < 0 || idx >= dma->buf_count) { DRM_ERROR("Index %d (of %d max)\n", @@ -1530,17 +1487,15 @@ int drm_freebufs(struct inode *inode, struct drm_file *file_priv, * offset equal to 0, which drm_mmap() interpretes as PCI buffers and calls * drm_mmap_dma(). */ -int drm_mapbufs(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_mapbufs(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; struct drm_device_dma *dma = dev->dma; - struct drm_buf_map __user *argp = (void __user *)arg; int retcode = 0; const int zero = 0; unsigned long virtual; unsigned long address; - struct drm_buf_map request; + struct drm_buf_map *request = data; int i; if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) @@ -1557,10 +1512,7 @@ int drm_mapbufs(struct inode *inode, struct drm_file *file_priv, dev->buf_use++; /* Can't allocate more after this call */ spin_unlock(&dev->count_lock); - if (copy_from_user(&request, argp, sizeof(request))) - return -EFAULT; - - if (request.count >= dma->buf_count) { + if (request->count >= dma->buf_count) { if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) || (drm_core_check_feature(dev, DRIVER_SG) && (dma->flags & _DRM_DMA_USE_SG)) @@ -1591,28 +1543,28 @@ int drm_mapbufs(struct inode *inode, struct drm_file *file_priv, retcode = (signed long)virtual; goto done; } - request.virtual = (void __user *)virtual; + request->virtual = (void __user *)virtual; for (i = 0; i < dma->buf_count; i++) { - if (copy_to_user(&request.list[i].idx, + if (copy_to_user(&request->list[i].idx, &dma->buflist[i]->idx, - sizeof(request.list[0].idx))) { + sizeof(request->list[0].idx))) { retcode = -EFAULT; goto done; } - if (copy_to_user(&request.list[i].total, + if (copy_to_user(&request->list[i].total, &dma->buflist[i]->total, - sizeof(request.list[0].total))) { + sizeof(request->list[0].total))) { retcode = -EFAULT; goto done; } - if (copy_to_user(&request.list[i].used, + if (copy_to_user(&request->list[i].used, &zero, sizeof(zero))) { retcode = -EFAULT; goto done; } address = virtual + dma->buflist[i]->offset; /* *** */ - if (copy_to_user(&request.list[i].address, + if (copy_to_user(&request->list[i].address, &address, sizeof(address))) { retcode = -EFAULT; goto done; @@ -1620,11 +1572,8 @@ int drm_mapbufs(struct inode *inode, struct drm_file *file_priv, } } done: - request.count = dma->buf_count; - DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode); - - if (copy_to_user(argp, &request, sizeof(request))) - return -EFAULT; + request->count = dma->buf_count; + DRM_DEBUG("%d buffers, retcode 
= %d\n", request->count, retcode); return retcode; } diff --git a/linux-core/drm_context.c b/linux-core/drm_context.c index 76e13f65..7854e89c 100644 --- a/linux-core/drm_context.c +++ b/linux-core/drm_context.c @@ -140,21 +140,16 @@ void drm_ctxbitmap_cleanup(struct drm_device *dev) * Gets the map from drm_device::ctx_idr with the handle specified and * returns its handle. */ -int drm_getsareactx(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_getsareactx(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - struct drm_ctx_priv_map __user *argp = (void __user *)arg; - struct drm_ctx_priv_map request; + struct drm_ctx_priv_map *request = data; struct drm_map *map; struct drm_map_list *_entry; - if (copy_from_user(&request, argp, sizeof(request))) - return -EFAULT; - mutex_lock(&dev->struct_mutex); - map = idr_find(&dev->ctx_idr, request.ctx_id); + map = idr_find(&dev->ctx_idr, request->ctx_id); if (!map) { mutex_unlock(&dev->struct_mutex); return -EINVAL; @@ -162,19 +157,17 @@ int drm_getsareactx(struct inode *inode, struct drm_file *file_priv, mutex_unlock(&dev->struct_mutex); - request.handle = NULL; + request->handle = NULL; list_for_each_entry(_entry, &dev->maplist, head) { if (_entry->map == map) { - request.handle = + request->handle = (void *)(unsigned long)_entry->user_token; break; } } - if (request.handle == NULL) + if (request->handle == NULL) return -EINVAL; - if (copy_to_user(argp, &request, sizeof(request))) - return -EFAULT; return 0; } @@ -190,22 +183,17 @@ int drm_getsareactx(struct inode *inode, struct drm_file *file_priv, * Searches the mapping specified in \p arg and update the entry in * drm_device::ctx_idr with it. */ -int drm_setsareactx(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_setsareactx(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - struct drm_ctx_priv_map request; + struct drm_ctx_priv_map *request = data; struct drm_map *map = NULL; struct drm_map_list *r_list = NULL; - if (copy_from_user(&request, - (struct drm_ctx_priv_map __user *) arg, sizeof(request))) - return -EFAULT; - mutex_lock(&dev->struct_mutex); list_for_each_entry(r_list, &dev->maplist, head) { if (r_list->map - && r_list->user_token == (unsigned long) request.handle) + && r_list->user_token == (unsigned long) request->handle) goto found; } bad: @@ -217,7 +205,7 @@ int drm_setsareactx(struct inode *inode, struct drm_file *file_priv, if (!map) goto bad; - if (IS_ERR(idr_replace(&dev->ctx_idr, map, request.ctx_id))) + if (IS_ERR(idr_replace(&dev->ctx_idr, map, request->ctx_id))) goto bad; mutex_unlock(&dev->struct_mutex); @@ -296,29 +284,23 @@ static int drm_context_switch_complete(struct drm_device *dev, int new) * \param arg user argument pointing to a drm_ctx_res structure. * \return zero on success or a negative number on failure. 
*/ -int drm_resctx(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_resctx(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_ctx_res res; - struct drm_ctx_res __user *argp = (void __user *)arg; + struct drm_ctx_res *res = data; struct drm_ctx ctx; int i; - if (copy_from_user(&res, argp, sizeof(res))) - return -EFAULT; - - if (res.count >= DRM_RESERVED_CONTEXTS) { + if (res->count >= DRM_RESERVED_CONTEXTS) { memset(&ctx, 0, sizeof(ctx)); for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) { ctx.handle = i; - if (copy_to_user(&res.contexts[i], &ctx, sizeof(ctx))) + if (copy_to_user(&res->contexts[i], &ctx, sizeof(ctx))) return -EFAULT; } } - res.count = DRM_RESERVED_CONTEXTS; + res->count = DRM_RESERVED_CONTEXTS; - if (copy_to_user(argp, &res, sizeof(res))) - return -EFAULT; return 0; } @@ -333,32 +315,27 @@ int drm_resctx(struct inode *inode, struct drm_file *file_priv, * * Get a new handle for the context and copy to userspace. */ -int drm_addctx(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_addctx(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; struct drm_ctx_list *ctx_entry; - struct drm_ctx __user *argp = (void __user *)arg; - struct drm_ctx ctx; + struct drm_ctx *ctx = data; - if (copy_from_user(&ctx, argp, sizeof(ctx))) - return -EFAULT; - - ctx.handle = drm_ctxbitmap_next(dev); - if (ctx.handle == DRM_KERNEL_CONTEXT) { + ctx->handle = drm_ctxbitmap_next(dev); + if (ctx->handle == DRM_KERNEL_CONTEXT) { /* Skip kernel's context and get a new one. */ - ctx.handle = drm_ctxbitmap_next(dev); + ctx->handle = drm_ctxbitmap_next(dev); } - DRM_DEBUG("%d\n", ctx.handle); - if (ctx.handle == -1) { + DRM_DEBUG("%d\n", ctx->handle); + if (ctx->handle == -1) { DRM_DEBUG("Not enough free contexts.\n"); /* Should this return -EBUSY instead? */ return -ENOMEM; } - if (ctx.handle != DRM_KERNEL_CONTEXT) { + if (ctx->handle != DRM_KERNEL_CONTEXT) { if (dev->driver->context_ctor) - if (!dev->driver->context_ctor(dev, ctx.handle)) { + if (!dev->driver->context_ctor(dev, ctx->handle)) { DRM_DEBUG("Running out of ctxs or memory.\n"); return -ENOMEM; } @@ -371,7 +348,7 @@ int drm_addctx(struct inode *inode, struct drm_file *file_priv, } INIT_LIST_HEAD(&ctx_entry->head); - ctx_entry->handle = ctx.handle; + ctx_entry->handle = ctx->handle; ctx_entry->tag = file_priv; mutex_lock(&dev->ctxlist_mutex); @@ -379,13 +356,10 @@ int drm_addctx(struct inode *inode, struct drm_file *file_priv, ++dev->ctx_count; mutex_unlock(&dev->ctxlist_mutex); - if (copy_to_user(argp, &ctx, sizeof(ctx))) - return -EFAULT; return 0; } -int drm_modctx(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_modctx(struct drm_device *dev, void *data, struct drm_file *file_priv) { /* This does nothing */ return 0; @@ -400,20 +374,13 @@ int drm_modctx(struct inode *inode, struct drm_file *file_priv, * \param arg user argument pointing to a drm_ctx structure. * \return zero on success or a negative number on failure. 
*/ -int drm_getctx(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv) { - struct drm_ctx __user *argp = (void __user *)arg; - struct drm_ctx ctx; - - if (copy_from_user(&ctx, argp, sizeof(ctx))) - return -EFAULT; + struct drm_ctx *ctx = data; /* This is 0, because we don't handle any context flags */ - ctx.flags = 0; + ctx->flags = 0; - if (copy_to_user(argp, &ctx, sizeof(ctx))) - return -EFAULT; return 0; } @@ -428,17 +395,13 @@ int drm_getctx(struct inode *inode, struct drm_file *file_priv, * * Calls context_switch(). */ -int drm_switchctx(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_switchctx(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - struct drm_ctx ctx; + struct drm_ctx *ctx = data; - if (copy_from_user(&ctx, (struct drm_ctx __user *) arg, sizeof(ctx))) - return -EFAULT; - - DRM_DEBUG("%d\n", ctx.handle); - return drm_context_switch(dev, dev->last_context, ctx.handle); + DRM_DEBUG("%d\n", ctx->handle); + return drm_context_switch(dev, dev->last_context, ctx->handle); } /** @@ -452,17 +415,13 @@ int drm_switchctx(struct inode *inode, struct drm_file *file_priv, * * Calls context_switch_complete(). */ -int drm_newctx(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_newctx(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - struct drm_ctx ctx; + struct drm_ctx *ctx = data; - if (copy_from_user(&ctx, (struct drm_ctx __user *) arg, sizeof(ctx))) - return -EFAULT; - - DRM_DEBUG("%d\n", ctx.handle); - drm_context_switch_complete(dev, ctx.handle); + DRM_DEBUG("%d\n", ctx->handle); + drm_context_switch_complete(dev, ctx->handle); return 0; } @@ -478,23 +437,19 @@ int drm_newctx(struct inode *inode, struct drm_file *file_priv, * * If not the special kernel context, calls ctxbitmap_free() to free the specified context. 
*/ -int drm_rmctx(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_rmctx(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - struct drm_ctx ctx; + struct drm_ctx *ctx = data; - if (copy_from_user(&ctx, (struct drm_ctx __user *) arg, sizeof(ctx))) - return -EFAULT; - - DRM_DEBUG("%d\n", ctx.handle); - if (ctx.handle == DRM_KERNEL_CONTEXT + 1) { + DRM_DEBUG("%d\n", ctx->handle); + if (ctx->handle == DRM_KERNEL_CONTEXT + 1) { file_priv->remove_auth_on_close = 1; } - if (ctx.handle != DRM_KERNEL_CONTEXT) { + if (ctx->handle != DRM_KERNEL_CONTEXT) { if (dev->driver->context_dtor) - dev->driver->context_dtor(dev, ctx.handle); - drm_ctxbitmap_free(dev, ctx.handle); + dev->driver->context_dtor(dev, ctx->handle); + drm_ctxbitmap_free(dev, ctx->handle); } mutex_lock(&dev->ctxlist_mutex); @@ -502,7 +457,7 @@ int drm_rmctx(struct inode *inode, struct drm_file *file_priv, struct drm_ctx_list *pos, *n; list_for_each_entry_safe(pos, n, &dev->ctxlist, head) { - if (pos->handle == ctx.handle) { + if (pos->handle == ctx->handle) { list_del(&pos->head); drm_free(pos, sizeof(*pos), DRM_MEM_CTXLIST); --dev->ctx_count; diff --git a/linux-core/drm_drawable.c b/linux-core/drm_drawable.c index 2787c9a3..1839c576 100644 --- a/linux-core/drm_drawable.c +++ b/linux-core/drm_drawable.c @@ -40,11 +40,10 @@ /** * Allocate drawable ID and memory to store information about it. */ -int drm_adddraw(DRM_IOCTL_ARGS) +int drm_adddraw(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; unsigned long irqflags; - struct drm_draw draw; + struct drm_draw *draw = data; int new_id = 0; int ret; @@ -63,11 +62,9 @@ again: spin_unlock_irqrestore(&dev->drw_lock, irqflags); - draw.handle = new_id; + draw->handle = new_id; - DRM_DEBUG("%d\n", draw.handle); - - DRM_COPY_TO_USER_IOCTL((struct drm_draw __user *)data, draw, sizeof(draw)); + DRM_DEBUG("%d\n", draw->handle); return 0; } @@ -75,69 +72,61 @@ again: /** * Free drawable ID and memory to store information about it. 
*/ -int drm_rmdraw(DRM_IOCTL_ARGS) +int drm_rmdraw(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - struct drm_draw draw; + struct drm_draw *draw = data; unsigned long irqflags; - DRM_COPY_FROM_USER_IOCTL(draw, (struct drm_draw __user *) data, - sizeof(draw)); - spin_lock_irqsave(&dev->drw_lock, irqflags); - drm_free(drm_get_drawable_info(dev, draw.handle), + drm_free(drm_get_drawable_info(dev, draw->handle), sizeof(struct drm_drawable_info), DRM_MEM_BUFS); - idr_remove(&dev->drw_idr, draw.handle); + idr_remove(&dev->drw_idr, draw->handle); spin_unlock_irqrestore(&dev->drw_lock, irqflags); - DRM_DEBUG("%d\n", draw.handle); + DRM_DEBUG("%d\n", draw->handle); return 0; } -int drm_update_drawable_info(DRM_IOCTL_ARGS) +int drm_update_drawable_info(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - struct drm_update_draw update; + struct drm_update_draw *update = data; unsigned long irqflags; struct drm_clip_rect *rects; struct drm_drawable_info *info; int err; - DRM_COPY_FROM_USER_IOCTL(update, (struct drm_update_draw __user *) data, - sizeof(update)); - - info = idr_find(&dev->drw_idr, update.handle); + info = idr_find(&dev->drw_idr, update->handle); if (!info) { info = drm_calloc(1, sizeof(*info), DRM_MEM_BUFS); if (!info) return -ENOMEM; - if (IS_ERR(idr_replace(&dev->drw_idr, info, update.handle))) { - DRM_ERROR("No such drawable %d\n", update.handle); + if (IS_ERR(idr_replace(&dev->drw_idr, info, update->handle))) { + DRM_ERROR("No such drawable %d\n", update->handle); drm_free(info, sizeof(*info), DRM_MEM_BUFS); return -EINVAL; } } - switch (update.type) { + switch (update->type) { case DRM_DRAWABLE_CLIPRECTS: - if (update.num != info->num_rects) { - rects = drm_alloc(update.num * sizeof(struct drm_clip_rect), + if (update->num != info->num_rects) { + rects = drm_alloc(update->num * sizeof(struct drm_clip_rect), DRM_MEM_BUFS); } else rects = info->rects; - if (update.num && !rects) { + if (update->num && !rects) { DRM_ERROR("Failed to allocate cliprect memory\n"); err = -ENOMEM; goto error; } - if (update.num && DRM_COPY_FROM_USER(rects, + if (update->num && DRM_COPY_FROM_USER(rects, (struct drm_clip_rect __user *) - (unsigned long)update.data, - update.num * + (unsigned long)update->data, + update->num * sizeof(*rects))) { DRM_ERROR("Failed to copy cliprects from userspace\n"); err = -EFAULT; @@ -152,15 +141,15 @@ int drm_update_drawable_info(DRM_IOCTL_ARGS) } info->rects = rects; - info->num_rects = update.num; + info->num_rects = update->num; spin_unlock_irqrestore(&dev->drw_lock, irqflags); DRM_DEBUG("Updated %d cliprects for drawable %d\n", - info->num_rects, update.handle); + info->num_rects, update->handle); break; default: - DRM_ERROR("Invalid update type %d\n", update.type); + DRM_ERROR("Invalid update type %d\n", update->type); return -EINVAL; } @@ -168,7 +157,7 @@ int drm_update_drawable_info(DRM_IOCTL_ARGS) error: if (rects != info->rects) - drm_free(rects, update.num * sizeof(struct drm_clip_rect), + drm_free(rects, update->num * sizeof(struct drm_clip_rect), DRM_MEM_BUFS); return err; diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index 92b07729..85e3ba47 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -51,109 +51,102 @@ static void drm_cleanup(struct drm_device * dev); int drm_fb_loaded = 0; -static int drm_version(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); +static int drm_version(struct drm_device *dev, void *data, + struct drm_file 
*file_priv); /** Ioctl table */ static struct drm_ioctl_desc drm_ioctls[] = { - [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = {drm_version, 0}, - [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = {drm_getunique, 0}, - [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = {drm_getmagic, 0}, - [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = {drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP)] = {drm_getmap, 0}, - [DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT)] = {drm_getclient, 0}, - [DRM_IOCTL_NR(DRM_IOCTL_GET_STATS)] = {drm_getstats, 0}, - [DRM_IOCTL_NR(DRM_IOCTL_SET_VERSION)] = {drm_setversion, DRM_MASTER|DRM_ROOT_ONLY}, + DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0), + DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0), + DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0), + DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0), + DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0), + DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0), + DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER|DRM_ROOT_ONLY), - [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = {drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = {drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = {drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = {drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, + DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = {drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)] = {drm_rmmap_ioctl, DRM_AUTH}, + DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH), - [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = {drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = {drm_getsareactx, DRM_AUTH}, + DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH), - [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = {drm_addctx, DRM_AUTH|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = {drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = {drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = {drm_getctx, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = {drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = {drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = {drm_resctx, DRM_AUTH}, + DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH), - [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = 
{drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = {drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, + DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = {drm_lock, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = {drm_unlock, DRM_AUTH}, + DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH), - [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = {drm_noop, DRM_AUTH}, + DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH), - [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = {drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = {drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = {drm_infobufs, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = {drm_mapbufs, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = {drm_freebufs, DRM_AUTH}, + DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH), /* The DRM_IOCTL_DMA ioctl should be defined by the driver. */ - [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = {NULL, DRM_AUTH}, + DRM_IOCTL_DEF(DRM_IOCTL_DMA, NULL, DRM_AUTH), - [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = {drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, + DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), #if __OS_HAS_AGP - [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = {drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = {drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = {drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = {drm_agp_info_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = {drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = {drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = {drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = {drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, + DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), #endif - [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC)] = {drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = {drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, + DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, 
drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = {drm_wait_vblank, 0}, + DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0), - // [DRM_IOCTL_NR(DRM_IOCTL_BUFOBJ)] = {drm_bo_ioctl, DRM_AUTH}, + // DRM_IOCTL_DEF(DRM_IOCTL_BUFOBJ, drm_bo_ioctl, DRM_AUTH), - [DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW)] = {drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, + DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - [DRM_IOCTL_NR(DRM_IOCTL_MM_INIT)] = {drm_mm_init_ioctl, - DRM_AUTH }, - [DRM_IOCTL_NR(DRM_IOCTL_MM_TAKEDOWN)] = {drm_mm_takedown_ioctl, - DRM_AUTH }, - [DRM_IOCTL_NR(DRM_IOCTL_MM_LOCK)] = {drm_mm_lock_ioctl, - DRM_AUTH }, - [DRM_IOCTL_NR(DRM_IOCTL_MM_UNLOCK)] = {drm_mm_unlock_ioctl, - DRM_AUTH }, - - [DRM_IOCTL_NR(DRM_IOCTL_FENCE_CREATE)] = {drm_fence_create_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_IOCTL_FENCE_DESTROY)] = {drm_fence_destroy_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_IOCTL_FENCE_REFERENCE)] = {drm_fence_reference_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_IOCTL_FENCE_UNREFERENCE)] = {drm_fence_unreference_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_IOCTL_FENCE_SIGNALED)] = {drm_fence_signaled_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_IOCTL_FENCE_FLUSH)] = {drm_fence_flush_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_IOCTL_FENCE_WAIT)] = {drm_fence_wait_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_IOCTL_FENCE_EMIT)] = {drm_fence_emit_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_IOCTL_FENCE_BUFFERS)] = {drm_fence_buffers_ioctl, DRM_AUTH}, - - [DRM_IOCTL_NR(DRM_IOCTL_BO_CREATE)] = {drm_bo_create_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_IOCTL_BO_DESTROY)] = {drm_bo_destroy_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_IOCTL_BO_MAP)] = {drm_bo_map_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_IOCTL_BO_UNMAP)] = {drm_bo_unmap_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_IOCTL_BO_REFERENCE)] = {drm_bo_reference_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_IOCTL_BO_UNREFERENCE)] = {drm_bo_unreference_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_IOCTL_BO_OP)] = {drm_bo_op_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_IOCTL_BO_INFO)] = {drm_bo_info_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_IOCTL_BO_WAIT_IDLE)] = {drm_bo_wait_idle_ioctl, DRM_AUTH}, - + DRM_IOCTL_DEF(DRM_IOCTL_MM_INIT, drm_mm_init_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_MM_TAKEDOWN, drm_mm_takedown_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_MM_LOCK, drm_mm_lock_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_MM_UNLOCK, drm_mm_unlock_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_FENCE_CREATE, drm_fence_create_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_FENCE_DESTROY, drm_fence_destroy_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_FENCE_REFERENCE, drm_fence_reference_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_FENCE_UNREFERENCE, drm_fence_unreference_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_FENCE_SIGNALED, drm_fence_signaled_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_FENCE_FLUSH, drm_fence_flush_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_FENCE_WAIT, drm_fence_wait_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_FENCE_EMIT, drm_fence_emit_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_FENCE_BUFFERS, drm_fence_buffers_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_BO_CREATE, drm_bo_create_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_BO_DESTROY, drm_bo_destroy_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_BO_MAP, drm_bo_map_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_BO_UNMAP, drm_bo_unmap_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_BO_REFERENCE, drm_bo_reference_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_BO_UNREFERENCE, 
drm_bo_unreference_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_BO_OP, drm_bo_op_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_BO_INFO, drm_bo_info_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_BO_WAIT_IDLE, drm_bo_wait_idle_ioctl, DRM_AUTH), }; #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) @@ -545,26 +538,19 @@ module_exit(drm_core_exit); * * Fills in the version information in \p arg. */ -static int drm_version(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +static int drm_version(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - struct drm_version __user *argp = (void __user *)arg; - struct drm_version version; + struct drm_version *version = data; int len; - if (copy_from_user(&version, argp, sizeof(version))) - return -EFAULT; + version->version_major = dev->driver->major; + version->version_minor = dev->driver->minor; + version->version_patchlevel = dev->driver->patchlevel; + DRM_COPY(version->name, dev->driver->name); + DRM_COPY(version->date, dev->driver->date); + DRM_COPY(version->desc, dev->driver->desc); - version.version_major = dev->driver->major; - version.version_minor = dev->driver->minor; - version.version_patchlevel = dev->driver->patchlevel; - DRM_COPY(version.name, dev->driver->name); - DRM_COPY(version.date, dev->driver->date); - DRM_COPY(version.desc, dev->driver->desc); - - if (copy_to_user(argp, &version, sizeof(version))) - return -EFAULT; return 0; } @@ -579,6 +565,11 @@ static int drm_version(struct inode *inode, struct drm_file *file_priv, * * Looks up the ioctl function in the ::ioctls table, checking for root * privileges if so required, and dispatches to the respective function. + * + * Copies data in and out according to the size and direction given in cmd, + * which must match the ioctl cmd known by the kernel. The kernel uses a 512 + * byte stack buffer to store the ioctl arguments in kernel space. Should we + * ever need much larger ioctl arguments, we may need to allocate memory. */ int drm_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) @@ -589,6 +580,7 @@ int drm_ioctl(struct inode *inode, struct file *filp, drm_ioctl_t *func; unsigned int nr = DRM_IOCTL_NR(cmd); int retcode = -EINVAL; + char kdata[512]; atomic_inc(&dev->ioctl_count); atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]); @@ -606,14 +598,28 @@ int drm_ioctl(struct inode *inode, struct file *filp, ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE]; else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) ioctl = &drm_ioctls[nr]; - else + else { + retcode = -EINVAL; goto err_i1; + } + + if (ioctl->cmd != cmd) { + retcode = -EINVAL; + goto err_i1; + } func = ioctl->func; /* is there a local override?
*/ if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl) func = dev->driver->dma_ioctl; + if (cmd & IOC_IN) { + if (copy_from_user(kdata, (void __user *)arg, + _IOC_SIZE(cmd)) != 0) { + retcode = -EACCES; + goto err_i1; + } + } + if (!func) { DRM_DEBUG("no function\n"); retcode = -EINVAL; @@ -622,8 +628,15 @@ int drm_ioctl(struct inode *inode, struct file *filp, ((ioctl->flags & DRM_MASTER) && !file_priv->master)) { retcode = -EACCES; } else { - retcode = func(inode, file_priv, cmd, arg); + retcode = func(dev, kdata, file_priv); } + + if (cmd & IOC_OUT) { + if (copy_to_user((void __user *)arg, kdata, + _IOC_SIZE(cmd)) != 0) + retcode = -EACCES; + } + err_i1: atomic_dec(&dev->ioctl_count); if (retcode) diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c index 3a3035e1..c4f7da15 100644 --- a/linux-core/drm_fence.c +++ b/linux-core/drm_fence.c @@ -565,12 +565,11 @@ struct drm_fence_object *drm_lookup_fence_object(struct drm_file * priv, uint32_ return fence; } -int drm_fence_create_ioctl(DRM_IOCTL_ARGS) +int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; int ret; struct drm_fence_manager *fm = &dev->fm; - struct drm_fence_arg arg; + struct drm_fence_arg *arg = data; struct drm_fence_object *fence; unsigned long flags; ret = 0; @@ -580,15 +579,14 @@ int drm_fence_create_ioctl(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - if (arg.flags & DRM_FENCE_FLAG_EMIT) + if (arg->flags & DRM_FENCE_FLAG_EMIT) LOCK_TEST_WITH_RETURN(dev, file_priv); - ret = drm_fence_object_create(dev, arg.class, - arg.type, arg.flags, &fence); + ret = drm_fence_object_create(dev, arg->class, + arg->type, arg->flags, &fence); if (ret) return ret; ret = drm_fence_add_user_object(file_priv, fence, - arg.flags & + arg->flags & DRM_FENCE_FLAG_SHAREABLE); if (ret) { drm_fence_usage_deref_unlocked(&fence); @@ -600,25 +598,23 @@ int drm_fence_create_ioctl(DRM_IOCTL_ARGS) */ atomic_inc(&fence->usage); - arg.handle = fence->base.hash.key; + arg->handle = fence->base.hash.key; read_lock_irqsave(&fm->lock, flags); - arg.class = fence->class; - arg.type = fence->type; - arg.signaled = fence->signaled; + arg->class = fence->class; + arg->type = fence->type; + arg->signaled = fence->signaled; read_unlock_irqrestore(&fm->lock, flags); drm_fence_usage_deref_unlocked(&fence); - DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); return ret; } -int drm_fence_destroy_ioctl(DRM_IOCTL_ARGS) +int drm_fence_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; int ret; struct drm_fence_manager *fm = &dev->fm; - struct drm_fence_arg arg; + struct drm_fence_arg *arg = data; struct drm_user_object *uo; ret = 0; @@ -627,10 +623,8 @@ int drm_fence_destroy_ioctl(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - mutex_lock(&dev->struct_mutex); - uo = drm_lookup_user_object(file_priv, arg.handle); + uo = drm_lookup_user_object(file_priv, arg->handle); if (!uo || (uo->type != drm_fence_type) || uo->owner != file_priv) { mutex_unlock(&dev->struct_mutex); return -EINVAL; @@ -641,12 +635,11 @@ } -int drm_fence_reference_ioctl(DRM_IOCTL_ARGS) +int drm_fence_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; int ret; struct drm_fence_manager *fm = &dev->fm; - struct drm_fence_arg arg; + struct drm_fence_arg *arg = data; struct drm_fence_object
*fence; struct drm_user_object *uo; unsigned long flags; @@ -657,30 +650,27 @@ int drm_fence_reference_ioctl(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - ret = drm_user_object_ref(file_priv, arg.handle, drm_fence_type, &uo); + ret = drm_user_object_ref(file_priv, arg->handle, drm_fence_type, &uo); if (ret) return ret; - fence = drm_lookup_fence_object(file_priv, arg.handle); + fence = drm_lookup_fence_object(file_priv, arg->handle); read_lock_irqsave(&fm->lock, flags); - arg.class = fence->class; - arg.type = fence->type; - arg.signaled = fence->signaled; + arg->class = fence->class; + arg->type = fence->type; + arg->signaled = fence->signaled; read_unlock_irqrestore(&fm->lock, flags); drm_fence_usage_deref_unlocked(&fence); - DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); return ret; } -int drm_fence_unreference_ioctl(DRM_IOCTL_ARGS) +int drm_fence_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; int ret; struct drm_fence_manager *fm = &dev->fm; - struct drm_fence_arg arg; + struct drm_fence_arg *arg = data; ret = 0; if (!fm->initialized) { @@ -688,16 +678,14 @@ int drm_fence_unreference_ioctl(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - return drm_user_object_unref(file_priv, arg.handle, drm_fence_type); + return drm_user_object_unref(file_priv, arg->handle, drm_fence_type); } -int drm_fence_signaled_ioctl(DRM_IOCTL_ARGS) +int drm_fence_signaled_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; int ret; struct drm_fence_manager *fm = &dev->fm; - struct drm_fence_arg arg; + struct drm_fence_arg *arg = data; struct drm_fence_object *fence; unsigned long flags; ret = 0; @@ -707,29 +695,25 @@ int drm_fence_signaled_ioctl(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - - fence = drm_lookup_fence_object(file_priv, arg.handle); + fence = drm_lookup_fence_object(file_priv, arg->handle); if (!fence) return -EINVAL; read_lock_irqsave(&fm->lock, flags); - arg.class = fence->class; - arg.type = fence->type; - arg.signaled = fence->signaled; + arg->class = fence->class; + arg->type = fence->type; + arg->signaled = fence->signaled; read_unlock_irqrestore(&fm->lock, flags); drm_fence_usage_deref_unlocked(&fence); - DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); return ret; } -int drm_fence_flush_ioctl(DRM_IOCTL_ARGS) +int drm_fence_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; int ret; struct drm_fence_manager *fm = &dev->fm; - struct drm_fence_arg arg; + struct drm_fence_arg *arg = data; struct drm_fence_object *fence; unsigned long flags; ret = 0; @@ -739,31 +723,27 @@ int drm_fence_flush_ioctl(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - - fence = drm_lookup_fence_object(file_priv, arg.handle); + fence = drm_lookup_fence_object(file_priv, arg->handle); if (!fence) return -EINVAL; - ret = drm_fence_object_flush(fence, arg.type); + ret = drm_fence_object_flush(fence, arg->type); read_lock_irqsave(&fm->lock, flags); - arg.class = fence->class; - arg.type = fence->type; - arg.signaled = fence->signaled; + arg->class = fence->class; + arg->type = fence->type; + arg->signaled = fence->signaled; read_unlock_irqrestore(&fm->lock, flags); drm_fence_usage_deref_unlocked(&fence); - DRM_COPY_TO_USER_IOCTL((void __user *)data, 
arg, sizeof(arg)); return ret; } -int drm_fence_wait_ioctl(DRM_IOCTL_ARGS) +int drm_fence_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; int ret; struct drm_fence_manager *fm = &dev->fm; - struct drm_fence_arg arg; + struct drm_fence_arg *arg = data; struct drm_fence_object *fence; unsigned long flags; ret = 0; @@ -773,33 +753,29 @@ int drm_fence_wait_ioctl(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - - fence = drm_lookup_fence_object(file_priv, arg.handle); + fence = drm_lookup_fence_object(file_priv, arg->handle); if (!fence) return -EINVAL; ret = drm_fence_object_wait(fence, - arg.flags & DRM_FENCE_FLAG_WAIT_LAZY, - 0, arg.type); + arg->flags & DRM_FENCE_FLAG_WAIT_LAZY, + 0, arg->type); read_lock_irqsave(&fm->lock, flags); - arg.class = fence->class; - arg.type = fence->type; - arg.signaled = fence->signaled; + arg->class = fence->class; + arg->type = fence->type; + arg->signaled = fence->signaled; read_unlock_irqrestore(&fm->lock, flags); drm_fence_usage_deref_unlocked(&fence); - DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); return ret; } -int drm_fence_emit_ioctl(DRM_IOCTL_ARGS) +int drm_fence_emit_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; int ret; struct drm_fence_manager *fm = &dev->fm; - struct drm_fence_arg arg; + struct drm_fence_arg *arg = data; struct drm_fence_object *fence; unsigned long flags; ret = 0; @@ -809,32 +785,28 @@ int drm_fence_emit_ioctl(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - LOCK_TEST_WITH_RETURN(dev, file_priv); - fence = drm_lookup_fence_object(file_priv, arg.handle); + fence = drm_lookup_fence_object(file_priv, arg->handle); if (!fence) return -EINVAL; - ret = drm_fence_object_emit(fence, arg.flags, arg.class, - arg.type); + ret = drm_fence_object_emit(fence, arg->flags, arg->class, + arg->type); read_lock_irqsave(&fm->lock, flags); - arg.class = fence->class; - arg.type = fence->type; - arg.signaled = fence->signaled; + arg->class = fence->class; + arg->type = fence->type; + arg->signaled = fence->signaled; read_unlock_irqrestore(&fm->lock, flags); drm_fence_usage_deref_unlocked(&fence); - DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); return ret; } -int drm_fence_buffers_ioctl(DRM_IOCTL_ARGS) +int drm_fence_buffers_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; int ret; struct drm_fence_manager *fm = &dev->fm; - struct drm_fence_arg arg; + struct drm_fence_arg *arg = data; struct drm_fence_object *fence; unsigned long flags; ret = 0; @@ -844,32 +816,29 @@ int drm_fence_buffers_ioctl(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - if (!dev->bm.initialized) { DRM_ERROR("Buffer object manager is not initialized\n"); return -EINVAL; } LOCK_TEST_WITH_RETURN(dev, file_priv); - ret = drm_fence_buffer_objects(file_priv, NULL, arg.flags, + ret = drm_fence_buffer_objects(file_priv, NULL, arg->flags, NULL, &fence); if (ret) return ret; ret = drm_fence_add_user_object(file_priv, fence, - arg.flags & + arg->flags & DRM_FENCE_FLAG_SHAREABLE); if (ret) return ret; atomic_inc(&fence->usage); - arg.handle = fence->base.hash.key; + arg->handle = fence->base.hash.key; read_lock_irqsave(&fm->lock, flags); - arg.class = fence->class; - arg.type = fence->type; - arg.signaled = fence->signaled; + arg->class = fence->class; + arg->type = fence->type; 
+ arg->signaled = fence->signaled; read_unlock_irqrestore(&fm->lock, flags); drm_fence_usage_deref_unlocked(&fence); - DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); return ret; } diff --git a/linux-core/drm_fops.c b/linux-core/drm_fops.c index 0162f113..251ee5b5 100644 --- a/linux-core/drm_fops.c +++ b/linux-core/drm_fops.c @@ -407,7 +407,7 @@ int drm_release(struct inode *inode, struct file *filp) dev->open_count); if (dev->driver->reclaim_buffers_locked && dev->lock.hw_lock) { - if (drm_i_have_hw_lock(file_priv)) { + if (drm_i_have_hw_lock(dev, file_priv)) { dev->driver->reclaim_buffers_locked(dev, file_priv); } else { unsigned long _end=jiffies + 3*DRM_HZ; @@ -447,7 +447,7 @@ int drm_release(struct inode *inode, struct file *filp) } - if (drm_i_have_hw_lock(file_priv)) { + if (drm_i_have_hw_lock(dev, file_priv)) { DRM_DEBUG("File %p released, freeing lock for context %d\n", filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); diff --git a/linux-core/drm_ioctl.c b/linux-core/drm_ioctl.c index 6f0ef149..717e23c0 100644 --- a/linux-core/drm_ioctl.c +++ b/linux-core/drm_ioctl.c @@ -49,22 +49,17 @@ * * Copies the bus id from drm_device::unique into user space. */ -int drm_getunique(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_getunique(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - struct drm_unique __user *argp = (void __user *)arg; - struct drm_unique u; + struct drm_unique *u = data; - if (copy_from_user(&u, argp, sizeof(u))) - return -EFAULT; - if (u.unique_len >= dev->unique_len) { - if (copy_to_user(u.unique, dev->unique, dev->unique_len)) + if (u->unique_len >= dev->unique_len) { + if (copy_to_user(u->unique, dev->unique, dev->unique_len)) return -EFAULT; } - u.unique_len = dev->unique_len; - if (copy_to_user(argp, &u, sizeof(u))) - return -EFAULT; + u->unique_len = dev->unique_len; + return 0; } @@ -82,27 +77,23 @@ int drm_getunique(struct inode *inode, struct drm_file *file_priv, * in interface version 1.1 and will return EBUSY when setversion has requested * version 1.1 or greater. 
*/ -int drm_setunique(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_setunique(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - struct drm_unique u; + struct drm_unique *u = data; int domain, bus, slot, func, ret; if (dev->unique_len || dev->unique) return -EBUSY; - if (copy_from_user(&u, (struct drm_unique __user *) arg, sizeof(u))) - return -EFAULT; - - if (!u.unique_len || u.unique_len > 1024) + if (!u->unique_len || u->unique_len > 1024) return -EINVAL; - dev->unique_len = u.unique_len; - dev->unique = drm_alloc(u.unique_len + 1, DRM_MEM_DRIVER); + dev->unique_len = u->unique_len; + dev->unique = drm_alloc(u->unique_len + 1, DRM_MEM_DRIVER); if (!dev->unique) return -ENOMEM; - if (copy_from_user(dev->unique, u.unique, dev->unique_len)) + if (copy_from_user(dev->unique, u->unique, dev->unique_len)) return -EFAULT; dev->unique[dev->unique_len] = '\0'; @@ -174,20 +165,16 @@ static int drm_set_busid(struct drm_device * dev) * Searches for the mapping with the specified offset and copies its information * into userspace */ -int drm_getmap(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_getmap(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - struct drm_map __user *argp = (void __user *)arg; - struct drm_map map; + struct drm_map *map = data; struct drm_map_list *r_list = NULL; struct list_head *list; int idx; int i; - if (copy_from_user(&map, argp, sizeof(map))) - return -EFAULT; - idx = map.offset; + idx = map->offset; mutex_lock(&dev->struct_mutex); if (idx < 0) { @@ -208,16 +195,14 @@ int drm_getmap(struct inode *inode, struct drm_file *file_priv, return -EINVAL; } - map.offset = r_list->map->offset; - map.size = r_list->map->size; - map.type = r_list->map->type; - map.flags = r_list->map->flags; - map.handle = (void *)(unsigned long) r_list->user_token; - map.mtrr = r_list->map->mtrr; + map->offset = r_list->map->offset; + map->size = r_list->map->size; + map->type = r_list->map->type; + map->flags = r_list->map->flags; + map->handle = (void *)(unsigned long) r_list->user_token; + map->mtrr = r_list->map->mtrr; mutex_unlock(&dev->struct_mutex); - if (copy_to_user(argp, &map, sizeof(map))) - return -EFAULT; return 0; } @@ -234,19 +219,15 @@ int drm_getmap(struct inode *inode, struct drm_file *file_priv, * Searches for the client with the specified index and copies its information * into userspace */ -int drm_getclient(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_getclient(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - struct drm_client __user *argp = (struct drm_client __user *)arg; - struct drm_client client; + struct drm_client *client = data; struct drm_file *pt; int idx; int i; - if (copy_from_user(&client, argp, sizeof(client))) - return -EFAULT; - idx = client.idx; + idx = client->idx; mutex_lock(&dev->struct_mutex); if (list_empty(&dev->filelist)) { @@ -260,15 +241,13 @@ int drm_getclient(struct inode *inode, struct drm_file *file_priv, break; } - client.auth = pt->authenticated; - client.pid = pt->pid; - client.uid = pt->uid; - client.magic = pt->magic; - client.iocs = pt->ioctl_count; + client->auth = pt->authenticated; + client->pid = pt->pid; + client->uid = pt->uid; + client->magic = pt->magic; + client->iocs = 
pt->ioctl_count; mutex_unlock(&dev->struct_mutex); - if (copy_to_user(argp, &client, sizeof(client))) - return -EFAULT; + return 0; } @@ -282,32 +261,29 @@ int drm_getclient(struct inode *inode, struct drm_file *file_priv, * * \return zero on success or a negative number on failure. */ -int drm_getstats(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_getstats(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - struct drm_stats stats; + struct drm_stats *stats = data; int i; - memset(&stats, 0, sizeof(stats)); + memset(stats, 0, sizeof(*stats)); mutex_lock(&dev->struct_mutex); for (i = 0; i < dev->counters; i++) { if (dev->types[i] == _DRM_STAT_LOCK) - stats.data[i].value - = (dev->lock.hw_lock ? dev->lock.hw_lock->lock : 0); + stats->data[i].value = + (dev->lock.hw_lock ? dev->lock.hw_lock->lock : 0); else - stats.data[i].value = atomic_read(&dev->counts[i]); - stats.data[i].type = dev->types[i]; + stats->data[i].value = atomic_read(&dev->counts[i]); + stats->data[i].type = dev->types[i]; } - stats.count = dev->counters; + stats->count = dev->counters; mutex_unlock(&dev->struct_mutex); - if (copy_to_user((struct drm_stats __user *) arg, &stats, sizeof(stats))) - return -EFAULT; return 0; } @@ -322,32 +298,21 @@ int drm_getstats(struct inode *inode, struct drm_file *file_priv, * * Sets the requested interface version */ -int drm_setversion(DRM_IOCTL_ARGS) +int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - struct drm_set_version sv; - struct drm_set_version retv; - int if_version; - struct drm_set_version __user *argp = (void __user *)data; - - if (copy_from_user(&sv, argp, sizeof(sv))) - return -EFAULT; + struct drm_set_version *sv = data; + int if_version, retcode = 0; - retv.drm_di_major = DRM_IF_MAJOR; - retv.drm_di_minor = DRM_IF_MINOR; - retv.drm_dd_major = dev->driver->major; - retv.drm_dd_minor = dev->driver->minor; - - if (copy_to_user(argp, &retv, sizeof(retv))) - return -EFAULT; - - if (sv.drm_di_major != -1) { - if (sv.drm_di_major != DRM_IF_MAJOR || - sv.drm_di_minor < 0 || sv.drm_di_minor > DRM_IF_MINOR) - return -EINVAL; - if_version = DRM_IF_VERSION(sv.drm_di_major, sv.drm_di_minor); + if (sv->drm_di_major != -1) { + if (sv->drm_di_major != DRM_IF_MAJOR || + sv->drm_di_minor < 0 || sv->drm_di_minor > DRM_IF_MINOR) { + retcode = -EINVAL; + goto done; + } + if_version = DRM_IF_VERSION(sv->drm_di_major, + sv->drm_di_minor); dev->if_version = max(if_version, dev->if_version); - if (sv.drm_di_minor >= 1) { + if (sv->drm_di_minor >= 1) { /* * Version 1.1 includes tying of DRM to specific device */ @@ -355,20 +320,30 @@ int drm_setversion(DRM_IOCTL_ARGS) } } - if (sv.drm_dd_major != -1) { - if (sv.drm_dd_major != dev->driver->major || - sv.drm_dd_minor < 0 || sv.drm_dd_minor > dev->driver->minor) - return -EINVAL; + if (sv->drm_dd_major != -1) { + if (sv->drm_dd_major != dev->driver->major || + sv->drm_dd_minor < 0 || sv->drm_dd_minor > + dev->driver->minor) { + retcode = -EINVAL; + goto done; + } if (dev->driver->set_version) - dev->driver->set_version(dev, &sv); + dev->driver->set_version(dev, sv); } - return 0; + +done: + sv->drm_di_major = DRM_IF_MAJOR; + sv->drm_di_minor = DRM_IF_MINOR; + sv->drm_dd_major = dev->driver->major; + sv->drm_dd_minor = dev->driver->minor; + + return retcode; } /** No-op ioctl.
*/ -int drm_noop(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, - unsigned long arg) +int drm_noop(struct drm_device *dev, void *data, + struct drm_file *file_priv) { DRM_DEBUG("\n"); return 0; diff --git a/linux-core/drm_irq.c b/linux-core/drm_irq.c index 36df557b..fe4316e0 100644 --- a/linux-core/drm_irq.c +++ b/linux-core/drm_irq.c @@ -50,29 +50,24 @@ * This IOCTL is deprecated, and will now return EINVAL for any busid not equal * to that of the device that this DRM instance attached to. */ -int drm_irq_by_busid(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_irq_by_busid(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - struct drm_irq_busid __user *argp = (void __user *)arg; - struct drm_irq_busid p; + struct drm_irq_busid *p = data; if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) return -EINVAL; - if (copy_from_user(&p, argp, sizeof(p))) - return -EFAULT; - - if ((p.busnum >> 8) != drm_get_pci_domain(dev) || - (p.busnum & 0xff) != dev->pdev->bus->number || - p.devnum != PCI_SLOT(dev->pdev->devfn) || p.funcnum != PCI_FUNC(dev->pdev->devfn)) + if ((p->busnum >> 8) != drm_get_pci_domain(dev) || + (p->busnum & 0xff) != dev->pdev->bus->number || + p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn)) return -EINVAL; - p.irq = dev->irq; + p->irq = dev->irq; + + DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum, + p->irq); - DRM_DEBUG("%d:%d:%d => IRQ %d\n", p.busnum, p.devnum, p.funcnum, p.irq); - if (copy_to_user(argp, &p, sizeof(p))) - return -EFAULT; return 0; } @@ -191,23 +186,20 @@ EXPORT_SYMBOL(drm_irq_uninstall); * * Calls irq_install() or irq_uninstall() according to \p arg. */ -int drm_control(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_control(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - struct drm_control ctl; + struct drm_control *ctl = data; /* if we haven't irq we fallback for compatibility reasons - this used to be a separate function in drm_dma.h */ - if (copy_from_user(&ctl, (struct drm_control __user *) arg, sizeof(ctl))) - return -EFAULT; - switch (ctl.func) { + switch (ctl->func) { case DRM_INST_HANDLER: if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) return 0; if (dev->if_version < DRM_IF_VERSION(1, 2) && - ctl.irq != dev->irq) + ctl->irq != dev->irq) return -EINVAL; return drm_irq_install(dev); case DRM_UNINST_HANDLER: @@ -238,11 +230,9 @@ int drm_control(struct inode *inode, struct drm_file *file_priv, * * If a signal is not requested, then calls vblank_wait(). 
*/ -int drm_wait_vblank(DRM_IOCTL_ARGS) +int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - union drm_wait_vblank __user *argp = (void __user *)data; - union drm_wait_vblank vblwait; + union drm_wait_vblank *vblwait = data; struct timeval now; int ret = 0; unsigned int flags, seq; @@ -250,18 +240,15 @@ int drm_wait_vblank(DRM_IOCTL_ARGS) if ((!dev->irq) || (!dev->irq_enabled)) return -EINVAL; - if (copy_from_user(&vblwait, argp, sizeof(vblwait))) - return -EFAULT; - - if (vblwait.request.type & + if (vblwait->request.type & ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) { DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n", - vblwait.request.type, + vblwait->request.type, (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)); return -EINVAL; } - flags = vblwait.request.type & _DRM_VBLANK_FLAGS_MASK; + flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK; if (!drm_core_check_feature(dev, (flags & _DRM_VBLANK_SECONDARY) ? DRIVER_IRQ_VBL2 : DRIVER_IRQ_VBL)) @@ -270,10 +257,10 @@ int drm_wait_vblank(DRM_IOCTL_ARGS) seq = atomic_read((flags & _DRM_VBLANK_SECONDARY) ? &dev->vbl_received2 : &dev->vbl_received); - switch (vblwait.request.type & _DRM_VBLANK_TYPES_MASK) { + switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) { case _DRM_VBLANK_RELATIVE: - vblwait.request.sequence += seq; - vblwait.request.type &= ~_DRM_VBLANK_RELATIVE; + vblwait->request.sequence += seq; + vblwait->request.type &= ~_DRM_VBLANK_RELATIVE; case _DRM_VBLANK_ABSOLUTE: break; default: @@ -281,8 +268,8 @@ int drm_wait_vblank(DRM_IOCTL_ARGS) } if ((flags & _DRM_VBLANK_NEXTONMISS) && - (seq - vblwait.request.sequence) <= (1<<23)) { - vblwait.request.sequence = seq + 1; + (seq - vblwait->request.sequence) <= (1<<23)) { + vblwait->request.sequence = seq + 1; } if (flags & _DRM_VBLANK_SIGNAL) { @@ -298,12 +285,13 @@ int drm_wait_vblank(DRM_IOCTL_ARGS) * that case */ list_for_each_entry(vbl_sig, vbl_sigs, head) { - if (vbl_sig->sequence == vblwait.request.sequence - && vbl_sig->info.si_signo == vblwait.request.signal + if (vbl_sig->sequence == vblwait->request.sequence + && vbl_sig->info.si_signo == + vblwait->request.signal && vbl_sig->task == current) { spin_unlock_irqrestore(&dev->vbl_lock, irqflags); - vblwait.reply.sequence = seq; + vblwait->reply.sequence = seq; goto done; } } @@ -325,8 +313,8 @@ int drm_wait_vblank(DRM_IOCTL_ARGS) memset((void *)vbl_sig, 0, sizeof(*vbl_sig)); - vbl_sig->sequence = vblwait.request.sequence; - vbl_sig->info.si_signo = vblwait.request.signal; + vbl_sig->sequence = vblwait->request.sequence; + vbl_sig->info.si_signo = vblwait->request.signal; vbl_sig->task = current; spin_lock_irqsave(&dev->vbl_lock, irqflags); @@ -335,25 +323,22 @@ int drm_wait_vblank(DRM_IOCTL_ARGS) spin_unlock_irqrestore(&dev->vbl_lock, irqflags); - vblwait.reply.sequence = seq; + vblwait->reply.sequence = seq; } else { if (flags & _DRM_VBLANK_SECONDARY) { if (dev->driver->vblank_wait2) - ret = dev->driver->vblank_wait2(dev, &vblwait.request.sequence); + ret = dev->driver->vblank_wait2(dev, &vblwait->request.sequence); } else if (dev->driver->vblank_wait) ret = dev->driver->vblank_wait(dev, - &vblwait.request.sequence); + &vblwait->request.sequence); do_gettimeofday(&now); - vblwait.reply.tval_sec = now.tv_sec; - vblwait.reply.tval_usec = now.tv_usec; + vblwait->reply.tval_sec = now.tv_sec; + vblwait->reply.tval_usec = now.tv_usec; } done: - if (copy_to_user(argp, &vblwait, sizeof(vblwait))) - return -EFAULT; - return 
ret; } diff --git a/linux-core/drm_lock.c b/linux-core/drm_lock.c index 54e34e14..b8e4a5d9 100644 --- a/linux-core/drm_lock.c +++ b/linux-core/drm_lock.c @@ -48,31 +48,26 @@ static int drm_notifier(void *priv); * * Add the current task to the lock wait queue, and attempt to take to lock. */ -int drm_lock(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; DECLARE_WAITQUEUE(entry, current); - struct drm_lock lock; + struct drm_lock *lock = data; int ret = 0; ++file_priv->lock_count; - if (copy_from_user(&lock, (struct drm_lock __user *) arg, sizeof(lock))) - return -EFAULT; - - if (lock.context == DRM_KERNEL_CONTEXT) { + if (lock->context == DRM_KERNEL_CONTEXT) { DRM_ERROR("Process %d using kernel context %d\n", - current->pid, lock.context); + current->pid, lock->context); return -EINVAL; } DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n", - lock.context, current->pid, - dev->lock.hw_lock->lock, lock.flags); + lock->context, current->pid, + dev->lock.hw_lock->lock, lock->flags); if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE)) - if (lock.context < 0) + if (lock->context < 0) return -EINVAL; add_wait_queue(&dev->lock.lock_queue, &entry); @@ -86,7 +81,7 @@ int drm_lock(struct inode *inode, struct drm_file *file_priv, ret = -EINTR; break; } - if (drm_lock_take(&dev->lock, lock.context)) { + if (drm_lock_take(&dev->lock, lock->context)) { dev->lock.file_priv = file_priv; dev->lock.lock_time = jiffies; atomic_inc(&dev->counts[_DRM_STAT_LOCKS]); @@ -106,7 +101,8 @@ int drm_lock(struct inode *inode, struct drm_file *file_priv, __set_current_state(TASK_RUNNING); remove_wait_queue(&dev->lock.lock_queue, &entry); - DRM_DEBUG( "%d %s\n", lock.context, ret ? "interrupted" : "has lock" ); + DRM_DEBUG("%d %s\n", lock->context, + ret ? "interrupted" : "has lock"); if (ret) return ret; sigemptyset(&dev->sigmask); @@ -114,24 +110,26 @@ int drm_lock(struct inode *inode, struct drm_file *file_priv, sigaddset(&dev->sigmask, SIGTSTP); sigaddset(&dev->sigmask, SIGTTIN); sigaddset(&dev->sigmask, SIGTTOU); - dev->sigdata.context = lock.context; + dev->sigdata.context = lock->context; dev->sigdata.lock = dev->lock.hw_lock; block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask); - if (dev->driver->dma_ready && (lock.flags & _DRM_LOCK_READY)) + if (dev->driver->dma_ready && (lock->flags & _DRM_LOCK_READY)) dev->driver->dma_ready(dev); - if (dev->driver->dma_quiescent && (lock.flags & _DRM_LOCK_QUIESCENT)) { + if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT)) + { if (dev->driver->dma_quiescent(dev)) { - DRM_DEBUG( "%d waiting for DMA quiescent\n", lock.context); + DRM_DEBUG("%d waiting for DMA quiescent\n", + lock->context); return -EBUSY; } } if (dev->driver->kernel_context_switch && - dev->last_context != lock.context) { + dev->last_context != lock->context) { dev->driver->kernel_context_switch(dev, dev->last_context, - lock.context); + lock->context); } return 0; @@ -148,19 +146,14 @@ int drm_lock(struct inode *inode, struct drm_file *file_priv, * * Transfer and free the lock. 
*/ -int drm_unlock(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - struct drm_lock lock; + struct drm_lock *lock = data; unsigned long irqflags; - if (copy_from_user(&lock, (struct drm_lock __user *) arg, sizeof(lock))) - return -EFAULT; - - if (lock.context == DRM_KERNEL_CONTEXT) { + if (lock->context == DRM_KERNEL_CONTEXT) { DRM_ERROR("Process %d using kernel context %d\n", - current->pid, lock.context); + current->pid, lock->context); return -EINVAL; } @@ -182,7 +175,7 @@ int drm_unlock(struct inode *inode, struct drm_file *file_priv, if (dev->driver->kernel_context_switch_unlock) dev->driver->kernel_context_switch_unlock(dev); else { - if (drm_lock_free(&dev->lock,lock.context)) { + if (drm_lock_free(&dev->lock,lock->context)) { /* FIXME: Should really bail out here. */ } } @@ -389,9 +382,8 @@ void drm_idlelock_release(struct drm_lock_data *lock_data) EXPORT_SYMBOL(drm_idlelock_release); -int drm_i_have_hw_lock(struct drm_file *file_priv) +int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv) { - DRM_DEVICE; return (file_priv->lock_count && dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) && diff --git a/linux-core/drm_memory_debug.c b/linux-core/drm_memory_debug.c index c124f8f8..c196ee2b 100644 --- a/linux-core/drm_memory_debug.c +++ b/linux-core/drm_memory_debug.c @@ -291,7 +291,7 @@ void drm_free_pages(unsigned long address, int order, int area) #if __OS_HAS_AGP -DRM_AGP_MEM *drm_alloc_agp(drm_device_t *dev, int pages, u32 type) +DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type) { DRM_AGP_MEM *handle; diff --git a/linux-core/drm_memory_debug.h b/linux-core/drm_memory_debug.h index 9d0dedfb..b055ac00 100644 --- a/linux-core/drm_memory_debug.h +++ b/linux-core/drm_memory_debug.h @@ -277,7 +277,7 @@ void drm_free_pages (unsigned long address, int order, int area) { #if __OS_HAS_AGP -DRM_AGP_MEM *drm_alloc_agp (drm_device_t *dev, int pages, u32 type) { +DRM_AGP_MEM *drm_alloc_agp (struct drm_device *dev, int pages, u32 type) { DRM_AGP_MEM *handle; if (!pages) { diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index f792dc84..e5f2b69c 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -213,15 +213,24 @@ extern int drm_fence_object_create(struct drm_device *dev, uint32_t type, extern int drm_fence_add_user_object(struct drm_file * priv, struct drm_fence_object * fence, int shareable); -extern int drm_fence_create_ioctl(DRM_IOCTL_ARGS); -extern int drm_fence_destroy_ioctl(DRM_IOCTL_ARGS); -extern int drm_fence_reference_ioctl(DRM_IOCTL_ARGS); -extern int drm_fence_unreference_ioctl(DRM_IOCTL_ARGS); -extern int drm_fence_signaled_ioctl(DRM_IOCTL_ARGS); -extern int drm_fence_flush_ioctl(DRM_IOCTL_ARGS); -extern int drm_fence_wait_ioctl(DRM_IOCTL_ARGS); -extern int drm_fence_emit_ioctl(DRM_IOCTL_ARGS); -extern int drm_fence_buffers_ioctl(DRM_IOCTL_ARGS); +extern int drm_fence_create_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_fence_destroy_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_fence_reference_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_fence_unreference_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_fence_signaled_ioctl(struct drm_device *dev, void 
*data, + struct drm_file *file_priv); +extern int drm_fence_flush_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_fence_wait_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_fence_emit_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_fence_buffers_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); /************************************************** *TTMs */ @@ -437,21 +446,21 @@ struct drm_bo_driver { * buffer objects (drm_bo.c) */ -extern int drm_bo_create_ioctl(DRM_IOCTL_ARGS); -extern int drm_bo_destroy_ioctl(DRM_IOCTL_ARGS); -extern int drm_bo_map_ioctl(DRM_IOCTL_ARGS); -extern int drm_bo_unmap_ioctl(DRM_IOCTL_ARGS); -extern int drm_bo_reference_ioctl(DRM_IOCTL_ARGS); -extern int drm_bo_unreference_ioctl(DRM_IOCTL_ARGS); -extern int drm_bo_wait_idle_ioctl(DRM_IOCTL_ARGS); -extern int drm_bo_info_ioctl(DRM_IOCTL_ARGS); -extern int drm_bo_op_ioctl(DRM_IOCTL_ARGS); +extern int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_bo_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_bo_op_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -extern int drm_mm_init_ioctl(DRM_IOCTL_ARGS); -extern int drm_mm_takedown_ioctl(DRM_IOCTL_ARGS); -extern int drm_mm_lock_ioctl(DRM_IOCTL_ARGS); -extern int drm_mm_unlock_ioctl(DRM_IOCTL_ARGS); +extern int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_mm_unlock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_bo_driver_finish(struct drm_device *dev); extern int drm_bo_driver_init(struct drm_device *dev); extern int drm_bo_pci_offset(struct drm_device *dev, diff --git a/linux-core/drm_os_linux.h b/linux-core/drm_os_linux.h index 3f143833..2688479a 100644 --- a/linux-core/drm_os_linux.h +++ b/linux-core/drm_os_linux.h @@ -6,8 +6,6 @@ #include /* For task queue support */ #include -/** Ioctl arguments */ -#define DRM_IOCTL_ARGS struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long data /** Current process ID */ #define DRM_CURRENTPID current->pid #define DRM_SUSER(p) capable(CAP_SYS_ADMIN) @@ -48,8 +46,6 @@ #define DRM_WRITEMEMORYBARRIER() wmb() /** Read/write memory barrier */ #define DRM_MEMORYBARRIER() mb() -/** DRM device local declaration */ -#define DRM_DEVICE struct drm_device *dev = file_priv->head->dev /** IRQ handler arguments and return type and values */ #define DRM_IRQ_ARGS int irq, void *arg @@ -89,14 +85,6 @@ static __inline__ int mtrr_del(int reg, unsigned long base, unsigned long size) #define MTRR_TYPE_WRCOMB 
1 #endif -/** For data going into the kernel through the ioctl argument */ -#define DRM_COPY_FROM_USER_IOCTL(arg1, arg2, arg3) \ - if ( copy_from_user(&arg1, arg2, arg3) ) \ - return -EFAULT -/** For data going from the kernel through the ioctl argument */ -#define DRM_COPY_TO_USER_IOCTL(arg1, arg2, arg3) \ - if ( copy_to_user(arg1, &arg2, arg3) ) \ - return -EFAULT /** Other copying of data to kernel space */ #define DRM_COPY_FROM_USER(arg1, arg2, arg3) \ copy_from_user(arg1, arg2, arg3) diff --git a/linux-core/drm_scatter.c b/linux-core/drm_scatter.c index 58696347..3c0f672e 100644 --- a/linux-core/drm_scatter.c +++ b/linux-core/drm_scatter.c @@ -187,49 +187,28 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request) } EXPORT_SYMBOL(drm_sg_alloc); -int drm_sg_alloc_ioctl(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_sg_alloc_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - struct drm_scatter_gather __user *argp = (void __user *)arg; - struct drm_scatter_gather request; - int ret; + struct drm_scatter_gather *request = data; - if (copy_from_user(&request, argp, sizeof(request))) - return -EFAULT; - - ret = drm_sg_alloc(dev, &request); - if ( ret ) return ret; - - if (copy_to_user(argp, &request, sizeof(request))) { - drm_sg_cleanup(dev->sg); - return -EFAULT; - } - - - return 0; + return drm_sg_alloc(dev, request); } -int drm_sg_free(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_sg_free(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - struct drm_scatter_gather request; + struct drm_scatter_gather *request = data; struct drm_sg_mem *entry; if (!drm_core_check_feature(dev, DRIVER_SG)) return -EINVAL; - if (copy_from_user(&request, - (struct drm_scatter_gather __user *) arg, - sizeof(request))) - return -EFAULT; - entry = dev->sg; dev->sg = NULL; - if (!entry || entry->handle != request.handle) + if (!entry || entry->handle != request->handle) return -EINVAL; DRM_DEBUG("sg free virtual = %p\n", entry->virtual); diff --git a/linux-core/ffb_context.c b/linux-core/ffb_context.c index e6ae60c3..586c3503 100644 --- a/linux-core/ffb_context.c +++ b/linux-core/ffb_context.c @@ -13,7 +13,7 @@ #include "drmP.h" #include "ffb_drv.h" -static int ffb_alloc_queue(drm_device_t * dev, int is_2d_only) { +static int ffb_alloc_queue(struct drm_device * dev, int is_2d_only) { ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private; int i; @@ -351,7 +351,7 @@ static void FFBWait(ffb_fbcPtr ffb) } while (--limit); } -int ffb_context_switch(drm_device_t * dev, int old, int new) { +int ffb_context_switch(struct drm_device * dev, int old, int new) { ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private; #if DRM_DMA_HISTOGRAM @@ -401,7 +401,7 @@ int ffb_resctx(struct inode * inode, struct file * filp, unsigned int cmd, int ffb_addctx(struct inode * inode, struct file * filp, unsigned int cmd, unsigned long arg) { drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->dev; + struct drm_device *dev = priv->dev; drm_ctx_t ctx; int idx; @@ -421,7 +421,7 @@ int ffb_addctx(struct inode * inode, struct file * filp, unsigned int cmd, int ffb_modctx(struct inode * inode, struct file * filp, unsigned int cmd, unsigned long arg) { drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->dev; + struct drm_device *dev = priv->dev; 
ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private; struct ffb_hw_context *hwctx; drm_ctx_t ctx; @@ -449,7 +449,7 @@ int ffb_modctx(struct inode * inode, struct file * filp, unsigned int cmd, int ffb_getctx(struct inode * inode, struct file * filp, unsigned int cmd, unsigned long arg) { drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->dev; + struct drm_device *dev = priv->dev; ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private; struct ffb_hw_context *hwctx; drm_ctx_t ctx; @@ -480,7 +480,7 @@ int ffb_getctx(struct inode * inode, struct file * filp, unsigned int cmd, int ffb_switchctx(struct inode * inode, struct file * filp, unsigned int cmd, unsigned long arg) { drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->dev; + struct drm_device *dev = priv->dev; drm_ctx_t ctx; if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx))) @@ -504,7 +504,7 @@ int ffb_rmctx(struct inode * inode, struct file * filp, unsigned int cmd, unsigned long arg) { drm_ctx_t ctx; drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->dev; + struct drm_device *dev = priv->dev; ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private; int idx; @@ -523,7 +523,7 @@ int ffb_rmctx(struct inode * inode, struct file * filp, unsigned int cmd, return 0; } -static void ffb_driver_reclaim_buffers_locked(drm_device_t * dev) +static void ffb_driver_reclaim_buffers_locked(struct drm_device * dev) { ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private; int context = _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock); @@ -537,13 +537,13 @@ static void ffb_driver_reclaim_buffers_locked(drm_device_t * dev) } } -static void ffb_driver_lastclose(drm_device_t * dev) +static void ffb_driver_lastclose(struct drm_device * dev) { if (dev->dev_private) kfree(dev->dev_private); } -static void ffb_driver_unload(drm_device_t * dev) +static void ffb_driver_unload(struct drm_device * dev) { if (ffb_position != NULL) kfree(ffb_position); @@ -571,7 +571,7 @@ unsigned long ffb_driver_get_map_ofs(drm_map_t * map) return (map->offset & 0xffffffff); } -unsigned long ffb_driver_get_reg_ofs(drm_device_t * dev) +unsigned long ffb_driver_get_reg_ofs(struct drm_device * dev) { ffb_dev_priv_t *ffb_priv = (ffb_dev_priv_t *) dev->dev_private; diff --git a/linux-core/ffb_drv.c b/linux-core/ffb_drv.c index 9c88f061..f2b4cc7f 100644 --- a/linux-core/ffb_drv.c +++ b/linux-core/ffb_drv.c @@ -114,7 +114,7 @@ static void ffb_apply_upa_parent_ranges(int parent, return; } -static int ffb_init_one(drm_device_t *dev, int prom_node, int parent_node, +static int ffb_init_one(struct drm_device *dev, int prom_node, int parent_node, int instance) { struct linux_prom64_registers regs[2*PROMREG_MAX]; @@ -167,7 +167,7 @@ static int __init ffb_scan_siblings(int root, int instance) static drm_map_t *ffb_find_map(struct file *filp, unsigned long off) { drm_file_t *priv = filp->private_data; - drm_device_t *dev; + struct drm_device *dev; drm_map_list_t *r_list; struct list_head *list; drm_map_t *map; @@ -237,10 +237,10 @@ unsigned long ffb_get_unmapped_area(struct file *filp, /* This functions must be here since it references drm_numdevs) * which drm_drv.h declares. 
*/ -static int ffb_driver_firstopen(drm_device_t *dev) +static int ffb_driver_firstopen(struct drm_device *dev) { ffb_dev_priv_t *ffb_priv; - drm_device_t *temp_dev; + struct drm_device *temp_dev; int ret = 0; int i; diff --git a/linux-core/ffb_drv.h b/linux-core/ffb_drv.h index f76b0d92..bad3c94d 100644 --- a/linux-core/ffb_drv.h +++ b/linux-core/ffb_drv.h @@ -281,4 +281,4 @@ extern unsigned long ffb_get_unmapped_area(struct file *filp, unsigned long pgoff, unsigned long flags); extern unsigned long ffb_driver_get_map_ofs(drm_map_t *map) -extern unsigned long ffb_driver_get_reg_ofs(drm_device_t *dev) +extern unsigned long ffb_driver_get_reg_ofs(struct drm_device *dev) diff --git a/linux-core/i810_dma.c b/linux-core/i810_dma.c index 1e74d792..7c37b4bb 100644 --- a/linux-core/i810_dma.c +++ b/linux-core/i810_dma.c @@ -448,98 +448,29 @@ static int i810_dma_initialize(struct drm_device * dev, return 0; } -/* i810 DRM version 1.1 used a smaller init structure with different - * ordering of values than is currently used (drm >= 1.2). There is - * no defined way to detect the XFree version to correct this problem, - * however by checking using this procedure we can detect the correct - * thing to do. - * - * #1 Read the Smaller init structure from user-space - * #2 Verify the overlay_physical is a valid physical address, or NULL - * If it isn't then we have a v1.1 client. Fix up params. - * If it is, then we have a 1.2 client... get the rest of the data. - */ -static int i810_dma_init_compat(drm_i810_init_t * init, unsigned long arg) +static int i810_dma_init(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - - /* Get v1.1 init data */ - if (copy_from_user(init, (drm_i810_pre12_init_t __user *) arg, - sizeof(drm_i810_pre12_init_t))) { - return -EFAULT; - } - - if ((!init->overlay_physical) || (init->overlay_physical > 4096)) { - - /* This is a v1.2 client, just get the v1.2 init data */ - DRM_INFO("Using POST v1.2 init.\n"); - if (copy_from_user(init, (drm_i810_init_t __user *) arg, - sizeof(drm_i810_init_t))) { - return -EFAULT; - } - } else { - - /* This is a v1.1 client, fix the params */ - DRM_INFO("Using PRE v1.2 init.\n"); - init->pitch_bits = init->h; - init->pitch = init->w; - init->h = init->overlay_physical; - init->w = init->overlay_offset; - init->overlay_physical = 0; - init->overlay_offset = 0; - } - - return 0; -} - -static int i810_dma_init(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) -{ - struct drm_device *dev = file_priv->head->dev; drm_i810_private_t *dev_priv; - drm_i810_init_t init; + drm_i810_init_t *init = data; int retcode = 0; - /* Get only the init func */ - if (copy_from_user - (&init, (void __user *)arg, sizeof(drm_i810_init_func_t))) - return -EFAULT; - - switch (init.func) { - case I810_INIT_DMA: - /* This case is for backward compatibility. It - * handles XFree 4.1.0 and 4.2.0, and has to - * do some parameter checking as described below. - * It will someday go away. 
- */ - retcode = i810_dma_init_compat(&init, arg); - if (retcode) - return retcode; - - dev_priv = drm_alloc(sizeof(drm_i810_private_t), - DRM_MEM_DRIVER); - if (dev_priv == NULL) - return -ENOMEM; - retcode = i810_dma_initialize(dev, dev_priv, &init); - break; - - default: + switch (init->func) { case I810_INIT_DMA_1_4: DRM_INFO("Using v1.4 init.\n"); - if (copy_from_user(&init, (drm_i810_init_t __user *) arg, - sizeof(drm_i810_init_t))) { - return -EFAULT; - } dev_priv = drm_alloc(sizeof(drm_i810_private_t), DRM_MEM_DRIVER); if (dev_priv == NULL) return -ENOMEM; - retcode = i810_dma_initialize(dev, dev_priv, &init); + retcode = i810_dma_initialize(dev, dev_priv, init); break; case I810_CLEANUP_DMA: DRM_INFO("DMA Cleanup\n"); retcode = i810_dma_cleanup(dev); break; + default: + return -EINVAL; } return retcode; @@ -1016,45 +947,38 @@ static void i810_reclaim_buffers(struct drm_device *dev, } } -static int i810_flush_ioctl(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +static int i810_flush_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - LOCK_TEST_WITH_RETURN(dev, file_priv); i810_flush_queue(dev); return 0; } -static int i810_dma_vertex(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +static int i810_dma_vertex(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; struct drm_device_dma *dma = dev->dma; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; u32 *hw_status = dev_priv->hw_status_page; drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) dev_priv->sarea_priv; - drm_i810_vertex_t vertex; - - if (copy_from_user - (&vertex, (drm_i810_vertex_t __user *) arg, sizeof(vertex))) - return -EFAULT; + drm_i810_vertex_t *vertex = data; LOCK_TEST_WITH_RETURN(dev, file_priv); DRM_DEBUG("i810 dma vertex, idx %d used %d discard %d\n", - vertex.idx, vertex.used, vertex.discard); + vertex->idx, vertex->used, vertex->discard); - if (vertex.idx < 0 || vertex.idx > dma->buf_count) + if (vertex->idx < 0 || vertex->idx > dma->buf_count) return -EINVAL; i810_dma_dispatch_vertex(dev, - dma->buflist[vertex.idx], - vertex.discard, vertex.used); + dma->buflist[vertex->idx], + vertex->discard, vertex->used); - atomic_add(vertex.used, &dev->counts[_DRM_STAT_SECONDARY]); + atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]); atomic_inc(&dev->counts[_DRM_STAT_DMA]); sarea_priv->last_enqueue = dev_priv->counter - 1; sarea_priv->last_dispatch = (int)hw_status[5]; @@ -1062,15 +986,10 @@ static int i810_dma_vertex(struct inode *inode, struct drm_file *file_priv, return 0; } -static int i810_clear_bufs(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +static int i810_clear_bufs(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - drm_i810_clear_t clear; - - if (copy_from_user - (&clear, (drm_i810_clear_t __user *) arg, sizeof(clear))) - return -EFAULT; + drm_i810_clear_t *clear = data; LOCK_TEST_WITH_RETURN(dev, file_priv); @@ -1079,16 +998,14 @@ static int i810_clear_bufs(struct inode *inode, struct drm_file *file_priv, return -EINVAL; } - i810_dma_dispatch_clear(dev, clear.flags, - clear.clear_color, clear.clear_depth); + i810_dma_dispatch_clear(dev, clear->flags, + clear->clear_color, clear->clear_depth); return 0; } -static int i810_swap_bufs(struct inode *inode, 
struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +static int i810_swap_bufs(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - DRM_DEBUG("i810_swap_bufs\n"); LOCK_TEST_WITH_RETURN(dev, file_priv); @@ -1097,11 +1014,9 @@ static int i810_swap_bufs(struct inode *inode, struct drm_file *file_priv, return 0; } -static int i810_getage(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, - unsigned long arg) +static int i810_getage(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; u32 *hw_status = dev_priv->hw_status_page; drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) @@ -1111,45 +1026,39 @@ static int i810_getage(struct inode *inode, struct drm_file *file_priv, return 0; } -static int i810_getbuf(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +static int i810_getbuf(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; int retcode = 0; - drm_i810_dma_t d; + drm_i810_dma_t *d = data; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; u32 *hw_status = dev_priv->hw_status_page; drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) dev_priv->sarea_priv; - if (copy_from_user(&d, (drm_i810_dma_t __user *) arg, sizeof(d))) - return -EFAULT; - LOCK_TEST_WITH_RETURN(dev, file_priv); - d.granted = 0; + d->granted = 0; - retcode = i810_dma_get_buffer(dev, &d, file_priv); + retcode = i810_dma_get_buffer(dev, d, file_priv); DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n", - current->pid, retcode, d.granted); + current->pid, retcode, d->granted); - if (copy_to_user((void __user *) arg, &d, sizeof(d))) - return -EFAULT; sarea_priv->last_dispatch = (int)hw_status[5]; return retcode; } -static int i810_copybuf(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +static int i810_copybuf(struct drm_device *dev, void *data, + struct drm_file *file_priv) { /* Never copy - 2.4.x doesn't need it */ return 0; } -static int i810_docopy(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +static int i810_docopy(struct drm_device *dev, void *data, + struct drm_file *file_priv) { /* Never copy - 2.4.x doesn't need it */ return 0; @@ -1215,29 +1124,25 @@ static void i810_dma_dispatch_mc(struct drm_device * dev, struct drm_buf * buf, ADVANCE_LP_RING(); } -static int i810_dma_mc(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +static int i810_dma_mc(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; struct drm_device_dma *dma = dev->dma; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; u32 *hw_status = dev_priv->hw_status_page; drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) dev_priv->sarea_priv; - drm_i810_mc_t mc; - - if (copy_from_user(&mc, (drm_i810_mc_t __user *) arg, sizeof(mc))) - return -EFAULT; + drm_i810_mc_t *mc = data; LOCK_TEST_WITH_RETURN(dev, file_priv); - if (mc.idx >= dma->buf_count || mc.idx < 0) + if (mc->idx >= dma->buf_count || mc->idx < 0) return -EINVAL; - i810_dma_dispatch_mc(dev, dma->buflist[mc.idx], mc.used, - mc.last_render); + i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used, + mc->last_render); - atomic_add(mc.used, 
&dev->counts[_DRM_STAT_SECONDARY]); + atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]); atomic_inc(&dev->counts[_DRM_STAT_DMA]); sarea_priv->last_enqueue = dev_priv->counter - 1; sarea_priv->last_dispatch = (int)hw_status[5]; @@ -1245,44 +1150,38 @@ static int i810_dma_mc(struct inode *inode, struct drm_file *file_priv, return 0; } -static int i810_rstatus(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +static int i810_rstatus(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; return (int)(((u32 *) (dev_priv->hw_status_page))[4]); } -static int i810_ov0_info(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +static int i810_ov0_info(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; - drm_i810_overlay_t data; + drm_i810_overlay_t *ov = data; + + ov->offset = dev_priv->overlay_offset; + ov->physical = dev_priv->overlay_physical; - data.offset = dev_priv->overlay_offset; - data.physical = dev_priv->overlay_physical; - if (copy_to_user - ((drm_i810_overlay_t __user *) arg, &data, sizeof(data))) - return -EFAULT; return 0; } -static int i810_fstatus(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +static int i810_fstatus(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; LOCK_TEST_WITH_RETURN(dev, file_priv); return I810_READ(0x30008); } -static int i810_ov0_flip(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +static int i810_ov0_flip(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; LOCK_TEST_WITH_RETURN(dev, file_priv); @@ -1316,10 +1215,9 @@ static int i810_do_cleanup_pageflip(struct drm_device * dev) return 0; } -static int i810_flip_bufs(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +static int i810_flip_bufs(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; drm_i810_private_t *dev_priv = dev->dev_private; DRM_DEBUG("%s\n", __FUNCTION__); @@ -1373,21 +1271,21 @@ int i810_driver_dma_quiescent(struct drm_device * dev) } struct drm_ioctl_desc i810_ioctls[] = { - [DRM_IOCTL_NR(DRM_I810_INIT)] = {i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_I810_VERTEX)] = {i810_dma_vertex, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_I810_CLEAR)] = {i810_clear_bufs, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_I810_FLUSH)] = {i810_flush_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_I810_GETAGE)] = {i810_getage, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_I810_GETBUF)] = {i810_getbuf, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_I810_SWAP)] = {i810_swap_bufs, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_I810_COPY)] = {i810_copybuf, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_I810_DOCOPY)] = {i810_docopy, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_I810_OV0INFO)] = {i810_ov0_info, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_I810_FSTATUS)] = {i810_fstatus, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_I810_OV0FLIP)] = {i810_ov0_flip, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_I810_MC)] = {i810_dma_mc, 
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_I810_RSTATUS)] = {i810_rstatus, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_I810_FLIP)] = {i810_flip_bufs, DRM_AUTH} + DRM_IOCTL_DEF(DRM_I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_I810_VERTEX, i810_dma_vertex, DRM_AUTH), + DRM_IOCTL_DEF(DRM_I810_CLEAR, i810_clear_bufs, DRM_AUTH), + DRM_IOCTL_DEF(DRM_I810_FLUSH, i810_flush_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_I810_GETAGE, i810_getage, DRM_AUTH), + DRM_IOCTL_DEF(DRM_I810_GETBUF, i810_getbuf, DRM_AUTH), + DRM_IOCTL_DEF(DRM_I810_SWAP, i810_swap_bufs, DRM_AUTH), + DRM_IOCTL_DEF(DRM_I810_COPY, i810_copybuf, DRM_AUTH), + DRM_IOCTL_DEF(DRM_I810_DOCOPY, i810_docopy, DRM_AUTH), + DRM_IOCTL_DEF(DRM_I810_OV0INFO, i810_ov0_info, DRM_AUTH), + DRM_IOCTL_DEF(DRM_I810_FSTATUS, i810_fstatus, DRM_AUTH), + DRM_IOCTL_DEF(DRM_I810_OV0FLIP, i810_ov0_flip, DRM_AUTH), + DRM_IOCTL_DEF(DRM_I810_MC, i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_I810_RSTATUS, i810_rstatus, DRM_AUTH), + DRM_IOCTL_DEF(DRM_I810_FLIP, i810_flip_bufs, DRM_AUTH) }; int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls); diff --git a/linux-core/i810_drm.h b/linux-core/i810_drm.h index db59550d..eff61b4d 100644 --- a/linux-core/i810_drm.h +++ b/linux-core/i810_drm.h @@ -124,29 +124,6 @@ typedef struct _drm_i810_init { unsigned int pitch_bits; } drm_i810_init_t; -/* This is the init structure prior to v1.2 */ -typedef struct _drm_i810_pre12_init { - drm_i810_init_func_t func; -#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0) - int ring_map_idx; - int buffer_map_idx; -#else - unsigned int mmio_offset; - unsigned int buffers_offset; -#endif - int sarea_priv_offset; - unsigned int ring_start; - unsigned int ring_end; - unsigned int ring_size; - unsigned int front_offset; - unsigned int back_offset; - unsigned int depth_offset; - unsigned int w; - unsigned int h; - unsigned int pitch; - unsigned int pitch_bits; -} drm_i810_pre12_init_t; - /* Warning: If you change the SAREA structure you must change the Xserver * structure as well */ diff --git a/linux-core/sis_mm.c b/linux-core/sis_mm.c index 0e9ed65d..7e162a8e 100644 --- a/linux-core/sis_mm.c +++ b/linux-core/sis_mm.c @@ -81,15 +81,12 @@ unsigned long sis_sman_mm_offset(void *private, void *ref) #endif -static int sis_fb_init(DRM_IOCTL_ARGS) +static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_sis_private_t *dev_priv = dev->dev_private; - drm_sis_fb_t fb; + drm_sis_fb_t *fb = data; int ret; - DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_fb_t __user *) data, sizeof(fb)); - mutex_lock(&dev->struct_mutex); #if defined(__linux__) && defined(CONFIG_FB_SIS) { @@ -104,7 +101,7 @@ static int sis_fb_init(DRM_IOCTL_ARGS) } #else ret = drm_sman_set_range(&dev_priv->sman, VIDEO_TYPE, 0, - fb.size >> SIS_MM_ALIGN_SHIFT); + fb->size >> SIS_MM_ALIGN_SHIFT); #endif if (ret) { @@ -114,25 +111,22 @@ static int sis_fb_init(DRM_IOCTL_ARGS) } dev_priv->vram_initialized = 1; - dev_priv->vram_offset = fb.offset; + dev_priv->vram_offset = fb->offset; mutex_unlock(&dev->struct_mutex); - DRM_DEBUG("offset = %u, size = %u", fb.offset, fb.size); + DRM_DEBUG("offset = %u, size = %u", fb->offset, fb->size); return 0; } static int sis_drm_alloc(struct drm_device * dev, struct drm_file *file_priv, - unsigned long data, int pool) + void *data, int pool) { drm_sis_private_t *dev_priv = dev->dev_private; - drm_sis_mem_t __user *argp = (drm_sis_mem_t __user *) data; - drm_sis_mem_t mem; + drm_sis_mem_t *mem = data; int retval = 0; 
struct drm_memblock_item *item; - DRM_COPY_FROM_USER_IOCTL(mem, argp, sizeof(mem)); - mutex_lock(&dev->struct_mutex); if (0 == ((pool == 0) ? dev_priv->vram_initialized : @@ -142,70 +136,62 @@ static int sis_drm_alloc(struct drm_device * dev, struct drm_file *file_priv, return -EINVAL; } - mem.size = (mem.size + SIS_MM_ALIGN_MASK) >> SIS_MM_ALIGN_SHIFT; - item = drm_sman_alloc(&dev_priv->sman, pool, mem.size, 0, + mem->size = (mem->size + SIS_MM_ALIGN_MASK) >> SIS_MM_ALIGN_SHIFT; + item = drm_sman_alloc(&dev_priv->sman, pool, mem->size, 0, (unsigned long)file_priv); mutex_unlock(&dev->struct_mutex); if (item) { - mem.offset = ((pool == 0) ? + mem->offset = ((pool == 0) ? dev_priv->vram_offset : dev_priv->agp_offset) + (item->mm-> offset(item->mm, item->mm_info) << SIS_MM_ALIGN_SHIFT); - mem.free = item->user_hash.key; - mem.size = mem.size << SIS_MM_ALIGN_SHIFT; + mem->free = item->user_hash.key; + mem->size = mem->size << SIS_MM_ALIGN_SHIFT; } else { - mem.offset = 0; - mem.size = 0; - mem.free = 0; + mem->offset = 0; + mem->size = 0; + mem->free = 0; retval = -ENOMEM; } - DRM_COPY_TO_USER_IOCTL(argp, mem, sizeof(mem)); - - DRM_DEBUG("alloc %d, size = %d, offset = %d\n", pool, mem.size, - mem.offset); + DRM_DEBUG("alloc %d, size = %d, offset = %d\n", pool, mem->size, + mem->offset); return retval; } -static int sis_drm_free(DRM_IOCTL_ARGS) +static int sis_drm_free(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_sis_private_t *dev_priv = dev->dev_private; - drm_sis_mem_t mem; + drm_sis_mem_t *mem = data; int ret; - DRM_COPY_FROM_USER_IOCTL(mem, (drm_sis_mem_t __user *) data, - sizeof(mem)); - mutex_lock(&dev->struct_mutex); - ret = drm_sman_free_key(&dev_priv->sman, mem.free); + ret = drm_sman_free_key(&dev_priv->sman, mem->free); mutex_unlock(&dev->struct_mutex); - DRM_DEBUG("free = 0x%lx\n", mem.free); + DRM_DEBUG("free = 0x%lx\n", mem->free); return ret; } -static int sis_fb_alloc(DRM_IOCTL_ARGS) +static int sis_fb_alloc(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - DRM_DEVICE; return sis_drm_alloc(dev, file_priv, data, VIDEO_TYPE); } -static int sis_ioctl_agp_init(DRM_IOCTL_ARGS) +static int sis_ioctl_agp_init(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - DRM_DEVICE; drm_sis_private_t *dev_priv = dev->dev_private; - drm_sis_agp_t agp; + drm_sis_agp_t *agp = data; int ret; dev_priv = dev->dev_private; - DRM_COPY_FROM_USER_IOCTL(agp, (drm_sis_agp_t __user *) data, - sizeof(agp)); mutex_lock(&dev->struct_mutex); ret = drm_sman_set_range(&dev_priv->sman, AGP_TYPE, 0, - agp.size >> SIS_MM_ALIGN_SHIFT); + agp->size >> SIS_MM_ALIGN_SHIFT); if (ret) { DRM_ERROR("AGP memory manager initialisation error\n"); @@ -214,16 +200,16 @@ static int sis_ioctl_agp_init(DRM_IOCTL_ARGS) } dev_priv->agp_initialized = 1; - dev_priv->agp_offset = agp.offset; + dev_priv->agp_offset = agp->offset; mutex_unlock(&dev->struct_mutex); - DRM_DEBUG("offset = %u, size = %u", agp.offset, agp.size); + DRM_DEBUG("offset = %u, size = %u", agp->offset, agp->size); return 0; } -static int sis_ioctl_agp_alloc(DRM_IOCTL_ARGS) +static int sis_ioctl_agp_alloc(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - DRM_DEVICE; return sis_drm_alloc(dev, file_priv, data, AGP_TYPE); } @@ -335,14 +321,12 @@ void sis_reclaim_buffers_locked(struct drm_device * dev, } struct drm_ioctl_desc sis_ioctls[] = { - [DRM_IOCTL_NR(DRM_SIS_FB_ALLOC)] = {sis_fb_alloc, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_SIS_FB_FREE)] = {sis_drm_free, DRM_AUTH}, - 
[DRM_IOCTL_NR(DRM_SIS_AGP_INIT)] = - {sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_SIS_AGP_ALLOC)] = {sis_ioctl_agp_alloc, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_SIS_AGP_FREE)] = {sis_drm_free, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_SIS_FB_INIT)] = - {sis_fb_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY} + DRM_IOCTL_DEF(DRM_SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH), + DRM_IOCTL_DEF(DRM_SIS_FB_FREE, sis_drm_free, DRM_AUTH), + DRM_IOCTL_DEF(DRM_SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_SIS_AGP_ALLOC, sis_ioctl_agp_alloc, DRM_AUTH), + DRM_IOCTL_DEF(DRM_SIS_AGP_FREE, sis_drm_free, DRM_AUTH), + DRM_IOCTL_DEF(DRM_SIS_FB_INIT, sis_fb_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY), }; int sis_max_ioctl = DRM_ARRAY_SIZE(sis_ioctls); diff --git a/linux-core/via_dmablit.c b/linux-core/via_dmablit.c index 10289a89..d44c26f4 100644 --- a/linux-core/via_dmablit.c +++ b/linux-core/via_dmablit.c @@ -792,18 +792,15 @@ via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer) */ int -via_dma_blit_sync( DRM_IOCTL_ARGS ) +via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_priv ) { - drm_via_blitsync_t sync; + drm_via_blitsync_t *sync = data; int err; - DRM_DEVICE; - DRM_COPY_FROM_USER_IOCTL(sync, (drm_via_blitsync_t *)data, sizeof(sync)); - - if (sync.engine >= VIA_NUM_BLIT_ENGINES) + if (sync->engine >= VIA_NUM_BLIT_ENGINES) return -EINVAL; - err = via_dmablit_sync(dev, sync.sync_handle, sync.engine); + err = via_dmablit_sync(dev, sync->sync_handle, sync->engine); if (-EINTR == err) err = -EAGAIN; @@ -819,17 +816,12 @@ via_dma_blit_sync( DRM_IOCTL_ARGS ) */ int -via_dma_blit( DRM_IOCTL_ARGS ) +via_dma_blit( struct drm_device *dev, void *data, struct drm_file *file_priv ) { - drm_via_dmablit_t xfer; + drm_via_dmablit_t *xfer = data; int err; - DRM_DEVICE; - DRM_COPY_FROM_USER_IOCTL(xfer, (drm_via_dmablit_t __user *)data, sizeof(xfer)); - - err = via_dmablit(dev, &xfer); - - DRM_COPY_TO_USER_IOCTL((void __user *)data, xfer, sizeof(xfer)); + err = via_dmablit(dev, xfer); return err; } diff --git a/linux-core/via_mm.c b/linux-core/via_mm.c index 411c3d52..35ca6bfc 100644 --- a/linux-core/via_mm.c +++ b/linux-core/via_mm.c @@ -33,18 +33,15 @@ #define VIA_MM_ALIGN_SHIFT 4 #define VIA_MM_ALIGN_MASK ( (1 << VIA_MM_ALIGN_SHIFT) - 1) -int via_agp_init(DRM_IOCTL_ARGS) +int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - drm_via_agp_t agp; + drm_via_agp_t *agp = data; drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; int ret; - DRM_COPY_FROM_USER_IOCTL(agp, (drm_via_agp_t __user *) data, - sizeof(agp)); mutex_lock(&dev->struct_mutex); ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_AGP, 0, - agp.size >> VIA_MM_ALIGN_SHIFT); + agp->size >> VIA_MM_ALIGN_SHIFT); if (ret) { DRM_ERROR("AGP memory manager initialisation error\n"); @@ -53,25 +50,22 @@ int via_agp_init(DRM_IOCTL_ARGS) } dev_priv->agp_initialized = 1; - dev_priv->agp_offset = agp.offset; + dev_priv->agp_offset = agp->offset; mutex_unlock(&dev->struct_mutex); - DRM_DEBUG("offset = %u, size = %u", agp.offset, agp.size); + DRM_DEBUG("offset = %u, size = %u", agp->offset, agp->size); return 0; } -int via_fb_init(DRM_IOCTL_ARGS) +int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - drm_via_fb_t fb; + drm_via_fb_t *fb = data; drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; int ret; - DRM_COPY_FROM_USER_IOCTL(fb, (drm_via_fb_t __user *) data, 
sizeof(fb)); - mutex_lock(&dev->struct_mutex); ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_VIDEO, 0, - fb.size >> VIA_MM_ALIGN_SHIFT); + fb->size >> VIA_MM_ALIGN_SHIFT); if (ret) { DRM_ERROR("VRAM memory manager initialisation error\n"); @@ -80,10 +74,10 @@ int via_fb_init(DRM_IOCTL_ARGS) } dev_priv->vram_initialized = 1; - dev_priv->vram_offset = fb.offset; + dev_priv->vram_offset = fb->offset; mutex_unlock(&dev->struct_mutex); - DRM_DEBUG("offset = %u, size = %u", fb.offset, fb.size); + DRM_DEBUG("offset = %u, size = %u", fb->offset, fb->size); return 0; @@ -123,25 +117,21 @@ void via_lastclose(struct drm_device *dev) mutex_unlock(&dev->struct_mutex); } -int via_mem_alloc(DRM_IOCTL_ARGS) +int via_mem_alloc(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - DRM_DEVICE; - - drm_via_mem_t mem; + drm_via_mem_t *mem = data; int retval = 0; struct drm_memblock_item *item; drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; unsigned long tmpSize; - DRM_COPY_FROM_USER_IOCTL(mem, (drm_via_mem_t __user *) data, - sizeof(mem)); - - if (mem.type > VIA_MEM_AGP) { + if (mem->type > VIA_MEM_AGP) { DRM_ERROR("Unknown memory type allocation\n"); return -EINVAL; } mutex_lock(&dev->struct_mutex); - if (0 == ((mem.type == VIA_MEM_VIDEO) ? dev_priv->vram_initialized : + if (0 == ((mem->type == VIA_MEM_VIDEO) ? dev_priv->vram_initialized : dev_priv->agp_initialized)) { DRM_ERROR ("Attempt to allocate from uninitialized memory manager.\n"); @@ -149,42 +139,37 @@ int via_mem_alloc(DRM_IOCTL_ARGS) return -EINVAL; } - tmpSize = (mem.size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT; - item = drm_sman_alloc(&dev_priv->sman, mem.type, tmpSize, 0, + tmpSize = (mem->size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT; + item = drm_sman_alloc(&dev_priv->sman, mem->type, tmpSize, 0, (unsigned long)file_priv); mutex_unlock(&dev->struct_mutex); if (item) { - mem.offset = ((mem.type == VIA_MEM_VIDEO) ? + mem->offset = ((mem->type == VIA_MEM_VIDEO) ? 
dev_priv->vram_offset : dev_priv->agp_offset) + (item->mm-> offset(item->mm, item->mm_info) << VIA_MM_ALIGN_SHIFT); - mem.index = item->user_hash.key; + mem->index = item->user_hash.key; } else { - mem.offset = 0; - mem.size = 0; - mem.index = 0; + mem->offset = 0; + mem->size = 0; + mem->index = 0; DRM_DEBUG("Video memory allocation failed\n"); retval = -ENOMEM; } - DRM_COPY_TO_USER_IOCTL((drm_via_mem_t __user *) data, mem, sizeof(mem)); return retval; } -int via_mem_free(DRM_IOCTL_ARGS) +int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_via_private_t *dev_priv = dev->dev_private; - drm_via_mem_t mem; + drm_via_mem_t *mem = data; int ret; - DRM_COPY_FROM_USER_IOCTL(mem, (drm_via_mem_t __user *) data, - sizeof(mem)); - mutex_lock(&dev->struct_mutex); - ret = drm_sman_free_key(&dev_priv->sman, mem.index); + ret = drm_sman_free_key(&dev_priv->sman, mem->index); mutex_unlock(&dev->struct_mutex); - DRM_DEBUG("free = 0x%lx\n", mem.index); + DRM_DEBUG("free = 0x%lx\n", mem->index); return ret; } diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c index 05336d35..5d227d8b 100644 --- a/shared-core/i915_dma.c +++ b/shared-core/i915_dma.c @@ -250,23 +250,20 @@ static int i915_dma_resume(struct drm_device * dev) return 0; } -static int i915_dma_init(DRM_IOCTL_ARGS) +static int i915_dma_init(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - DRM_DEVICE; drm_i915_private_t *dev_priv; - drm_i915_init_t init; + drm_i915_init_t *init = data; int retcode = 0; - DRM_COPY_FROM_USER_IOCTL(init, (drm_i915_init_t __user *) data, - sizeof(init)); - - switch (init.func) { + switch (init->func) { case I915_INIT_DMA: dev_priv = drm_alloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER); if (dev_priv == NULL) return -ENOMEM; - retcode = i915_initialize(dev, dev_priv, &init); + retcode = i915_initialize(dev, dev_priv, init); break; case I915_CLEANUP_DMA: retcode = i915_dma_cleanup(dev); @@ -358,7 +355,8 @@ static int validate_cmd(int cmd) return ret; } -static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwords) +static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, + int dwords) { drm_i915_private_t *dev_priv = dev->dev_private; int i; @@ -650,22 +648,22 @@ static int i915_quiescent(struct drm_device * dev) return i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__); } -static int i915_flush_ioctl(DRM_IOCTL_ARGS) +static int i915_flush_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - DRM_DEVICE; LOCK_TEST_WITH_RETURN(dev, file_priv); return i915_quiescent(dev); } -static int i915_batchbuffer(DRM_IOCTL_ARGS) +static int i915_batchbuffer(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - DRM_DEVICE; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) dev_priv->sarea_priv; - drm_i915_batchbuffer_t batch; + drm_i915_batchbuffer_t *batch = data; int ret; if (!dev_priv->allow_batchbuffer) { @@ -673,51 +671,45 @@ static int i915_batchbuffer(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(batch, (drm_i915_batchbuffer_t __user *) data, - sizeof(batch)); - DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n", - batch.start, batch.used, batch.num_cliprects); + batch->start, batch->used, batch->num_cliprects); LOCK_TEST_WITH_RETURN(dev, file_priv); - if (batch.num_cliprects && DRM_VERIFYAREA_READ(batch.cliprects, - batch.num_cliprects * - sizeof(struct drm_clip_rect))) + 
if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects, + batch->num_cliprects * + sizeof(struct drm_clip_rect))) return -EFAULT; - ret = i915_dispatch_batchbuffer(dev, &batch); + ret = i915_dispatch_batchbuffer(dev, batch); sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); return ret; } -static int i915_cmdbuffer(DRM_IOCTL_ARGS) +static int i915_cmdbuffer(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - DRM_DEVICE; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) dev_priv->sarea_priv; - drm_i915_cmdbuffer_t cmdbuf; + drm_i915_cmdbuffer_t *cmdbuf = data; int ret; - DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_i915_cmdbuffer_t __user *) data, - sizeof(cmdbuf)); - DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n", - cmdbuf.buf, cmdbuf.sz, cmdbuf.num_cliprects); + cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects); LOCK_TEST_WITH_RETURN(dev, file_priv); - if (cmdbuf.num_cliprects && - DRM_VERIFYAREA_READ(cmdbuf.cliprects, - cmdbuf.num_cliprects * + if (cmdbuf->num_cliprects && + DRM_VERIFYAREA_READ(cmdbuf->cliprects, + cmdbuf->num_cliprects * sizeof(struct drm_clip_rect))) { DRM_ERROR("Fault accessing cliprects\n"); return -EFAULT; } - ret = i915_dispatch_cmdbuffer(dev, &cmdbuf); + ret = i915_dispatch_cmdbuffer(dev, cmdbuf); if (ret) { DRM_ERROR("i915_dispatch_cmdbuffer failed\n"); return ret; @@ -749,35 +741,31 @@ static int i915_do_cleanup_pageflip(struct drm_device * dev) return 0; } -static int i915_flip_bufs(DRM_IOCTL_ARGS) +static int i915_flip_bufs(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - drm_i915_flip_t param; + drm_i915_flip_t *param = data; DRM_DEBUG("%s\n", __FUNCTION__); LOCK_TEST_WITH_RETURN(dev, file_priv); - DRM_COPY_FROM_USER_IOCTL(param, (drm_i915_flip_t __user *) data, - sizeof(param)); - - if (param.pipes & ~0x3) { + if (param->pipes & ~0x3) { DRM_ERROR("Invalid pipes 0x%x, only <= 0x3 is valid\n", - param.pipes); + param->pipes); return -EINVAL; } - i915_dispatch_flip(dev, param.pipes, 0); + i915_dispatch_flip(dev, param->pipes, 0); return 0; } -static int i915_getparam(DRM_IOCTL_ARGS) +static int i915_getparam(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - DRM_DEVICE; drm_i915_private_t *dev_priv = dev->dev_private; - drm_i915_getparam_t param; + drm_i915_getparam_t *param = data; int value; if (!dev_priv) { @@ -785,10 +773,7 @@ static int i915_getparam(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(param, (drm_i915_getparam_t __user *) data, - sizeof(param)); - - switch (param.param) { + switch (param->param) { case I915_PARAM_IRQ_ACTIVE: value = dev->irq ? 
1 : 0; break; @@ -799,11 +784,11 @@ static int i915_getparam(DRM_IOCTL_ARGS) value = READ_BREADCRUMB(dev_priv); break; default: - DRM_ERROR("Unknown parameter %d\n", param.param); + DRM_ERROR("Unknown parameter %d\n", param->param); return -EINVAL; } - if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) { + if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) { DRM_ERROR("DRM_COPY_TO_USER failed\n"); return -EFAULT; } @@ -811,32 +796,29 @@ static int i915_getparam(DRM_IOCTL_ARGS) return 0; } -static int i915_setparam(DRM_IOCTL_ARGS) +static int i915_setparam(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - DRM_DEVICE; drm_i915_private_t *dev_priv = dev->dev_private; - drm_i915_setparam_t param; + drm_i915_setparam_t *param = data; if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(param, (drm_i915_setparam_t __user *) data, - sizeof(param)); - - switch (param.param) { + switch (param->param) { case I915_SETPARAM_USE_MI_BATCHBUFFER_START: - dev_priv->use_mi_batchbuffer_start = param.value; + dev_priv->use_mi_batchbuffer_start = param->value; break; case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY: - dev_priv->tex_lru_log_granularity = param.value; + dev_priv->tex_lru_log_granularity = param->value; break; case I915_SETPARAM_ALLOW_BATCHBUFFER: - dev_priv->allow_batchbuffer = param.value; + dev_priv->allow_batchbuffer = param->value; break; default: - DRM_ERROR("unknown parameter %d\n", param.param); + DRM_ERROR("unknown parameter %d\n", param->param); return -EINVAL; } @@ -853,13 +835,13 @@ drm_i915_mmio_entry_t mmio_table[] = { static int mmio_table_size = sizeof(mmio_table)/sizeof(drm_i915_mmio_entry_t); -static int i915_mmio(DRM_IOCTL_ARGS) +static int i915_mmio(struct drm_device *dev, void *data, + struct drm_file *file_priv) { uint32_t buf[8]; - DRM_DEVICE; drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_mmio_entry_t *e; - drm_i915_mmio_t mmio; + drm_i915_mmio_t *mmio = data; void __iomem *base; int i; @@ -867,22 +849,20 @@ static int i915_mmio(DRM_IOCTL_ARGS) DRM_ERROR("%s called with no initialization\n", __FUNCTION__); return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(mmio, (drm_i915_mmio_t __user *) data, - sizeof(mmio)); - if (mmio.reg >= mmio_table_size) + if (mmio->reg >= mmio_table_size) return -EINVAL; - e = &mmio_table[mmio.reg]; + e = &mmio_table[mmio->reg]; base = (u8 *) dev_priv->mmio_map->handle + e->offset; - switch (mmio.read_write) { + switch (mmio->read_write) { case I915_MMIO_READ: if (!(e->flag & I915_MMIO_MAY_READ)) return -EINVAL; for (i = 0; i < e->size / 4; i++) buf[i] = I915_READ(e->offset + i * 4); - if (DRM_COPY_TO_USER(mmio.data, buf, e->size)) { + if (DRM_COPY_TO_USER(mmio->data, buf, e->size)) { DRM_ERROR("DRM_COPY_TO_USER failed\n"); return -EFAULT; } @@ -891,7 +871,7 @@ static int i915_mmio(DRM_IOCTL_ARGS) case I915_MMIO_WRITE: if (!(e->flag & I915_MMIO_MAY_WRITE)) return -EINVAL; - if(DRM_COPY_FROM_USER(buf, mmio.data, e->size)) { + if(DRM_COPY_FROM_USER(buf, mmio->data, e->size)) { DRM_ERROR("DRM_COPY_TO_USER failed\n"); return -EFAULT; } @@ -902,23 +882,21 @@ static int i915_mmio(DRM_IOCTL_ARGS) return 0; } -static int i915_set_status_page(DRM_IOCTL_ARGS) +static int i915_set_status_page(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - DRM_DEVICE; drm_i915_private_t *dev_priv = dev->dev_private; - drm_i915_hws_addr_t hws; + drm_i915_hws_addr_t *hws = data; if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", 
__FUNCTION__); return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(hws, (drm_i915_hws_addr_t __user *) data, - sizeof(hws)); - DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws.addr); + DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws->addr); - dev_priv->status_gfx_addr = hws.addr & (0x1ffff<<12); + dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12); - dev_priv->hws_map.offset = dev->agp->base + hws.addr; + dev_priv->hws_map.offset = dev->agp->base + hws->addr; dev_priv->hws_map.size = 4*1024; dev_priv->hws_map.type = 0; dev_priv->hws_map.flags = 0; @@ -974,24 +952,24 @@ void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) } struct drm_ioctl_desc i915_ioctls[] = { - [DRM_IOCTL_NR(DRM_I915_INIT)] = {i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_I915_FLUSH)] = {i915_flush_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_I915_FLIP)] = {i915_flip_bufs, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_I915_BATCHBUFFER)] = {i915_batchbuffer, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_I915_IRQ_EMIT)] = {i915_irq_emit, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_I915_IRQ_WAIT)] = {i915_irq_wait, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_I915_GETPARAM)] = {i915_getparam, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_I915_SETPARAM)] = {i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_I915_ALLOC)] = {i915_mem_alloc, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_I915_FREE)] = {i915_mem_free, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_I915_INIT_HEAP)] = {i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_I915_CMDBUFFER)] = {i915_cmdbuffer, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_I915_DESTROY_HEAP)] = { i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY }, - [DRM_IOCTL_NR(DRM_I915_SET_VBLANK_PIPE)] = { i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY }, - [DRM_IOCTL_NR(DRM_I915_GET_VBLANK_PIPE)] = { i915_vblank_pipe_get, DRM_AUTH }, - [DRM_IOCTL_NR(DRM_I915_VBLANK_SWAP)] = {i915_vblank_swap, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_I915_MMIO)] = {i915_mmio, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_I915_HWS_ADDR)] = {i915_set_status_page, DRM_AUTH}, + DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH), + DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH), + DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH), + DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH), + DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH), + DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_I915_ALLOC, i915_mem_alloc, DRM_AUTH), + DRM_IOCTL_DEF(DRM_I915_FREE, i915_mem_free, DRM_AUTH), + DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH), + DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ), + DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ), + DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH ), + DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH), + DRM_IOCTL_DEF(DRM_I915_MMIO, i915_mmio, DRM_AUTH), + DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH), }; int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h index 73b3d187..e641fdc6 100644 --- a/shared-core/i915_drv.h +++ b/shared-core/i915_drv.h @@ -163,8 +163,10 @@ extern int 
i915_emit_mi_flush(struct drm_device *dev, uint32_t flush); extern int i915_driver_firstopen(struct drm_device *dev); /* i915_irq.c */ -extern int i915_irq_emit(DRM_IOCTL_ARGS); -extern int i915_irq_wait(DRM_IOCTL_ARGS); +extern int i915_irq_emit(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int i915_irq_wait(struct drm_device *dev, void *data, + struct drm_file *file_priv); extern int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence); extern int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence); @@ -172,18 +174,25 @@ extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS); extern void i915_driver_irq_preinstall(struct drm_device * dev); extern void i915_driver_irq_postinstall(struct drm_device * dev); extern void i915_driver_irq_uninstall(struct drm_device * dev); -extern int i915_vblank_pipe_set(DRM_IOCTL_ARGS); -extern int i915_vblank_pipe_get(DRM_IOCTL_ARGS); +extern int i915_vblank_pipe_set(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int i915_vblank_pipe_get(struct drm_device *dev, void *data, + struct drm_file *file_priv); extern int i915_emit_irq(struct drm_device * dev); extern void i915_user_irq_on(drm_i915_private_t *dev_priv); extern void i915_user_irq_off(drm_i915_private_t *dev_priv); -extern int i915_vblank_swap(DRM_IOCTL_ARGS); +extern int i915_vblank_swap(struct drm_device *dev, void *data, + struct drm_file *file_priv); /* i915_mem.c */ -extern int i915_mem_alloc(DRM_IOCTL_ARGS); -extern int i915_mem_free(DRM_IOCTL_ARGS); -extern int i915_mem_init_heap(DRM_IOCTL_ARGS); -extern int i915_mem_destroy_heap(DRM_IOCTL_ARGS); +extern int i915_mem_alloc(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int i915_mem_free(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int i915_mem_init_heap(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int i915_mem_destroy_heap(struct drm_device *dev, void *data, + struct drm_file *file_priv); extern void i915_mem_takedown(struct mem_block **heap); extern void i915_mem_release(struct drm_device * dev, struct drm_file *file_priv, diff --git a/shared-core/i915_irq.c b/shared-core/i915_irq.c index da61997e..1056b3e6 100644 --- a/shared-core/i915_irq.c +++ b/shared-core/i915_irq.c @@ -416,7 +416,8 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr) return ret; } -static int i915_driver_vblank_do_wait(struct drm_device *dev, unsigned int *sequence, +static int i915_driver_vblank_do_wait(struct drm_device *dev, + unsigned int *sequence, atomic_t *counter) { drm_i915_private_t *dev_priv = dev->dev_private; @@ -449,11 +450,10 @@ int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence) /* Needs the lock as it touches the ring. 
*/ -int i915_irq_emit(DRM_IOCTL_ARGS) +int i915_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_i915_private_t *dev_priv = dev->dev_private; - drm_i915_irq_emit_t emit; + drm_i915_irq_emit_t *emit = data; int result; LOCK_TEST_WITH_RETURN(dev, file_priv); @@ -463,12 +463,9 @@ int i915_irq_emit(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(emit, (drm_i915_irq_emit_t __user *) data, - sizeof(emit)); - result = i915_emit_irq(dev); - if (DRM_COPY_TO_USER(emit.irq_seq, &result, sizeof(int))) { + if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) { DRM_ERROR("copy_to_user\n"); return -EFAULT; } @@ -478,21 +475,18 @@ int i915_irq_emit(DRM_IOCTL_ARGS) /* Doesn't need the hardware lock. */ -int i915_irq_wait(DRM_IOCTL_ARGS) +int i915_irq_wait(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - DRM_DEVICE; drm_i915_private_t *dev_priv = dev->dev_private; - drm_i915_irq_wait_t irqwait; + drm_i915_irq_wait_t *irqwait = data; if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(irqwait, (drm_i915_irq_wait_t __user *) data, - sizeof(irqwait)); - - return i915_wait_irq(dev, irqwait.irq_seq); + return i915_wait_irq(dev, irqwait->irq_seq); } static void i915_enable_interrupt (struct drm_device *dev) @@ -511,38 +505,35 @@ static void i915_enable_interrupt (struct drm_device *dev) /* Set the vblank monitor pipe */ -int i915_vblank_pipe_set(DRM_IOCTL_ARGS) +int i915_vblank_pipe_set(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - DRM_DEVICE; drm_i915_private_t *dev_priv = dev->dev_private; - drm_i915_vblank_pipe_t pipe; + drm_i915_vblank_pipe_t *pipe = data; if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(pipe, (drm_i915_vblank_pipe_t __user *) data, - sizeof(pipe)); - - if (pipe.pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) { + if (pipe->pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) { DRM_ERROR("%s called with invalid pipe 0x%x\n", - __FUNCTION__, pipe.pipe); + __FUNCTION__, pipe->pipe); return -EINVAL; } - dev_priv->vblank_pipe = pipe.pipe; + dev_priv->vblank_pipe = pipe->pipe; i915_enable_interrupt (dev); return 0; } -int i915_vblank_pipe_get(DRM_IOCTL_ARGS) +int i915_vblank_pipe_get(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - DRM_DEVICE; drm_i915_private_t *dev_priv = dev->dev_private; - drm_i915_vblank_pipe_t pipe; + drm_i915_vblank_pipe_t *pipe = data; u16 flag; if (!dev_priv) { @@ -551,24 +542,23 @@ int i915_vblank_pipe_get(DRM_IOCTL_ARGS) } flag = I915_READ(I915REG_INT_ENABLE_R); - pipe.pipe = 0; + pipe->pipe = 0; if (flag & VSYNC_PIPEA_FLAG) - pipe.pipe |= DRM_I915_VBLANK_PIPE_A; + pipe->pipe |= DRM_I915_VBLANK_PIPE_A; if (flag & VSYNC_PIPEB_FLAG) - pipe.pipe |= DRM_I915_VBLANK_PIPE_B; - DRM_COPY_TO_USER_IOCTL((drm_i915_vblank_pipe_t __user *) data, pipe, - sizeof(pipe)); + pipe->pipe |= DRM_I915_VBLANK_PIPE_B; + return 0; } /** * Schedule buffer swap at given vertical blank. 
*/ -int i915_vblank_swap(DRM_IOCTL_ARGS) +int i915_vblank_swap(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - DRM_DEVICE; drm_i915_private_t *dev_priv = dev->dev_private; - drm_i915_vblank_swap_t swap; + drm_i915_vblank_swap_t *swap = data; drm_i915_vbl_swap_t *vbl_swap; unsigned int pipe, seqtype, curseq; unsigned long irqflags; @@ -584,19 +574,16 @@ int i915_vblank_swap(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(swap, (drm_i915_vblank_swap_t __user *) data, - sizeof(swap)); - - if (swap.seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE | + if (swap->seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS | _DRM_VBLANK_FLIP)) { - DRM_ERROR("Invalid sequence type 0x%x\n", swap.seqtype); + DRM_ERROR("Invalid sequence type 0x%x\n", swap->seqtype); return -EINVAL; } - pipe = (swap.seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0; + pipe = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0; - seqtype = swap.seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE); + seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE); if (!(dev_priv->vblank_pipe & (1 << pipe))) { DRM_ERROR("Invalid pipe %d\n", pipe); @@ -606,34 +593,34 @@ int i915_vblank_swap(DRM_IOCTL_ARGS) curseq = atomic_read(pipe ? &dev->vbl_received2 : &dev->vbl_received); if (seqtype == _DRM_VBLANK_RELATIVE) - swap.sequence += curseq; + swap->sequence += curseq; - if ((curseq - swap.sequence) <= (1<<23)) { - if (swap.seqtype & _DRM_VBLANK_NEXTONMISS) { - swap.sequence = curseq + 1; + if ((curseq - swap->sequence) <= (1<<23)) { + if (swap->seqtype & _DRM_VBLANK_NEXTONMISS) { + swap->sequence = curseq + 1; } else { DRM_DEBUG("Missed target sequence\n"); return -EINVAL; } } - if (swap.seqtype & _DRM_VBLANK_FLIP) { - swap.sequence--; + if (swap->seqtype & _DRM_VBLANK_FLIP) { + swap->sequence--; - if ((curseq - swap.sequence) <= (1<<23)) { + if ((curseq - swap->sequence) <= (1<<23)) { struct drm_drawable_info *drw; LOCK_TEST_WITH_RETURN(dev, file_priv); DRM_SPINLOCK_IRQSAVE(&dev->drw_lock, irqflags); - drw = drm_get_drawable_info(dev, swap.drawable); + drw = drm_get_drawable_info(dev, swap->drawable); if (!drw) { DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock, irqflags); DRM_DEBUG("Invalid drawable ID %d\n", - swap.drawable); + swap->drawable); return -EINVAL; } @@ -650,10 +637,10 @@ int i915_vblank_swap(DRM_IOCTL_ARGS) list_for_each(list, &dev_priv->vbl_swaps.head) { vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head); - if (vbl_swap->drw_id == swap.drawable && + if (vbl_swap->drw_id == swap->drawable && vbl_swap->pipe == pipe && - vbl_swap->sequence == swap.sequence) { - vbl_swap->flip = (swap.seqtype & _DRM_VBLANK_FLIP); + vbl_swap->sequence == swap->sequence) { + vbl_swap->flip = (swap->seqtype & _DRM_VBLANK_FLIP); DRM_SPINUNLOCK_IRQRESTORE(&dev_priv->swaps_lock, irqflags); DRM_DEBUG("Already scheduled\n"); return 0; @@ -676,13 +663,13 @@ int i915_vblank_swap(DRM_IOCTL_ARGS) DRM_DEBUG("\n"); - vbl_swap->drw_id = swap.drawable; + vbl_swap->drw_id = swap->drawable; vbl_swap->pipe = pipe; - vbl_swap->sequence = swap.sequence; - vbl_swap->flip = (swap.seqtype & _DRM_VBLANK_FLIP); + vbl_swap->sequence = swap->sequence; + vbl_swap->flip = (swap->seqtype & _DRM_VBLANK_FLIP); if (vbl_swap->flip) - swap.sequence++; + swap->sequence++; DRM_SPINLOCK_IRQSAVE(&dev_priv->swaps_lock, irqflags); @@ -691,9 +678,6 @@ int i915_vblank_swap(DRM_IOCTL_ARGS) DRM_SPINUNLOCK_IRQRESTORE(&dev_priv->swaps_lock, irqflags); - 
DRM_COPY_TO_USER_IOCTL((drm_i915_vblank_swap_t __user *) data, swap, - sizeof(swap)); - return 0; } diff --git a/shared-core/i915_mem.c b/shared-core/i915_mem.c index e2e7018d..5bf29a1e 100644 --- a/shared-core/i915_mem.c +++ b/shared-core/i915_mem.c @@ -268,11 +268,11 @@ static struct mem_block **get_heap(drm_i915_private_t * dev_priv, int region) /* IOCTL HANDLERS */ -int i915_mem_alloc(DRM_IOCTL_ARGS) +int i915_mem_alloc(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - DRM_DEVICE; drm_i915_private_t *dev_priv = dev->dev_private; - drm_i915_mem_alloc_t alloc; + drm_i915_mem_alloc_t *alloc = data; struct mem_block *block, **heap; if (!dev_priv) { @@ -280,27 +280,25 @@ int i915_mem_alloc(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(alloc, (drm_i915_mem_alloc_t __user *) data, - sizeof(alloc)); - - heap = get_heap(dev_priv, alloc.region); + heap = get_heap(dev_priv, alloc->region); if (!heap || !*heap) return -EFAULT; /* Make things easier on ourselves: all allocations at least * 4k aligned. */ - if (alloc.alignment < 12) - alloc.alignment = 12; + if (alloc->alignment < 12) + alloc->alignment = 12; - block = alloc_block(*heap, alloc.size, alloc.alignment, file_priv); + block = alloc_block(*heap, alloc->size, alloc->alignment, file_priv); if (!block) return -ENOMEM; mark_block(dev, block, 1); - if (DRM_COPY_TO_USER(alloc.region_offset, &block->start, sizeof(int))) { + if (DRM_COPY_TO_USER(alloc->region_offset, &block->start, + sizeof(int))) { DRM_ERROR("copy_to_user\n"); return -EFAULT; } @@ -308,11 +306,11 @@ int i915_mem_alloc(DRM_IOCTL_ARGS) return 0; } -int i915_mem_free(DRM_IOCTL_ARGS) +int i915_mem_free(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - DRM_DEVICE; drm_i915_private_t *dev_priv = dev->dev_private; - drm_i915_mem_free_t memfree; + drm_i915_mem_free_t *memfree = data; struct mem_block *block, **heap; if (!dev_priv) { @@ -320,14 +318,11 @@ int i915_mem_free(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(memfree, (drm_i915_mem_free_t __user *) data, - sizeof(memfree)); - - heap = get_heap(dev_priv, memfree.region); + heap = get_heap(dev_priv, memfree->region); if (!heap || !*heap) return -EFAULT; - block = find_block(*heap, memfree.region_offset); + block = find_block(*heap, memfree->region_offset); if (!block) return -EFAULT; @@ -339,11 +334,11 @@ int i915_mem_free(DRM_IOCTL_ARGS) return 0; } -int i915_mem_init_heap(DRM_IOCTL_ARGS) +int i915_mem_init_heap(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - DRM_DEVICE; drm_i915_private_t *dev_priv = dev->dev_private; - drm_i915_mem_init_heap_t initheap; + drm_i915_mem_init_heap_t *initheap = data; struct mem_block **heap; if (!dev_priv) { @@ -351,11 +346,7 @@ int i915_mem_init_heap(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(initheap, - (drm_i915_mem_init_heap_t __user *) data, - sizeof(initheap)); - - heap = get_heap(dev_priv, initheap.region); + heap = get_heap(dev_priv, initheap->region); if (!heap) return -EFAULT; @@ -364,14 +355,14 @@ int i915_mem_init_heap(DRM_IOCTL_ARGS) return -EFAULT; } - return init_heap(heap, initheap.start, initheap.size); + return init_heap(heap, initheap->start, initheap->size); } -int i915_mem_destroy_heap( DRM_IOCTL_ARGS ) +int i915_mem_destroy_heap( struct drm_device *dev, void *data, + struct drm_file *file_priv ) { - DRM_DEVICE; drm_i915_private_t *dev_priv = dev->dev_private; - drm_i915_mem_destroy_heap_t destroyheap; + drm_i915_mem_destroy_heap_t *destroyheap = data; struct mem_block 
**heap; if ( !dev_priv ) { @@ -379,10 +370,7 @@ int i915_mem_destroy_heap( DRM_IOCTL_ARGS ) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL( destroyheap, (drm_i915_mem_destroy_heap_t *)data, - sizeof(destroyheap) ); - - heap = get_heap( dev_priv, destroyheap.region ); + heap = get_heap( dev_priv, destroyheap->region ); if (!heap) { DRM_ERROR("get_heap failed"); return -EFAULT; diff --git a/shared-core/mach64_dma.c b/shared-core/mach64_dma.c index 25877824..e0a67458 100644 --- a/shared-core/mach64_dma.c +++ b/shared-core/mach64_dma.c @@ -1158,21 +1158,18 @@ int mach64_do_cleanup_dma(struct drm_device * dev) /** \name IOCTL handlers */ /*@{*/ -int mach64_dma_init(DRM_IOCTL_ARGS) +int mach64_dma_init(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - DRM_DEVICE; - drm_mach64_init_t init; + drm_mach64_init_t *init = data; DRM_DEBUG("%s\n", __FUNCTION__); LOCK_TEST_WITH_RETURN(dev, file_priv); - DRM_COPY_FROM_USER_IOCTL(init, (drm_mach64_init_t *) data, - sizeof(init)); - - switch (init.func) { + switch (init->func) { case DRM_MACH64_INIT_DMA: - return mach64_do_dma_init(dev, &init); + return mach64_do_dma_init(dev, init); case DRM_MACH64_CLEANUP_DMA: return mach64_do_cleanup_dma(dev); } @@ -1180,9 +1177,9 @@ int mach64_dma_init(DRM_IOCTL_ARGS) return -EINVAL; } -int mach64_dma_idle(DRM_IOCTL_ARGS) +int mach64_dma_idle(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - DRM_DEVICE; drm_mach64_private_t *dev_priv = dev->dev_private; DRM_DEBUG("%s\n", __FUNCTION__); @@ -1192,9 +1189,9 @@ int mach64_dma_idle(DRM_IOCTL_ARGS) return mach64_do_dma_idle(dev_priv); } -int mach64_dma_flush(DRM_IOCTL_ARGS) +int mach64_dma_flush(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - DRM_DEVICE; drm_mach64_private_t *dev_priv = dev->dev_private; DRM_DEBUG("%s\n", __FUNCTION__); @@ -1204,9 +1201,9 @@ int mach64_dma_flush(DRM_IOCTL_ARGS) return mach64_do_dma_flush(dev_priv); } -int mach64_engine_reset(DRM_IOCTL_ARGS) +int mach64_engine_reset(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - DRM_DEVICE; drm_mach64_private_t *dev_priv = dev->dev_private; DRM_DEBUG("%s\n", __FUNCTION__); @@ -1461,7 +1458,7 @@ int mach64_freelist_put(drm_mach64_private_t * dev_priv, struct drm_buf * copy_b /** \name DMA buffer request and submission IOCTL handler */ /*@{*/ -static int mach64_dma_get_buffers(struct drm_device * dev, +static int mach64_dma_get_buffers(struct drm_device *dev, struct drm_file *file_priv, struct drm_dma * d) { @@ -1493,41 +1490,37 @@ static int mach64_dma_get_buffers(struct drm_device * dev, return 0; } -int mach64_dma_buffers(DRM_IOCTL_ARGS) +int mach64_dma_buffers(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - DRM_DEVICE; struct drm_device_dma *dma = dev->dma; - struct drm_dma d; + struct drm_dma *d = data; int ret = 0; LOCK_TEST_WITH_RETURN(dev, file_priv); - DRM_COPY_FROM_USER_IOCTL(d, (struct drm_dma *) data, sizeof(d)); - /* Please don't send us buffers. */ - if (d.send_count != 0) { + if (d->send_count != 0) { DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", - DRM_CURRENTPID, d.send_count); + DRM_CURRENTPID, d->send_count); return -EINVAL; } /* We'll send you buffers. 
*/ - if (d.request_count < 0 || d.request_count > dma->buf_count) { + if (d->request_count < 0 || d->request_count > dma->buf_count) { DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", - DRM_CURRENTPID, d.request_count, dma->buf_count); + DRM_CURRENTPID, d->request_count, dma->buf_count); ret = -EINVAL; } - d.granted_count = 0; + d->granted_count = 0; - if (d.request_count) { - ret = mach64_dma_get_buffers(dev, file_priv, &d); + if (d->request_count) { + ret = mach64_dma_get_buffers(dev, file_priv, d); } - DRM_COPY_TO_USER_IOCTL((struct drm_dma *) data, d, sizeof(d)); - return ret; } diff --git a/shared-core/mach64_drv.h b/shared-core/mach64_drv.h index aa9afcab..cebd4c6e 100644 --- a/shared-core/mach64_drv.h +++ b/shared-core/mach64_drv.h @@ -112,11 +112,16 @@ extern struct drm_ioctl_desc mach64_ioctls[]; extern int mach64_max_ioctl; /* mach64_dma.c */ -extern int mach64_dma_init(DRM_IOCTL_ARGS); -extern int mach64_dma_idle(DRM_IOCTL_ARGS); -extern int mach64_dma_flush(DRM_IOCTL_ARGS); -extern int mach64_engine_reset(DRM_IOCTL_ARGS); -extern int mach64_dma_buffers(DRM_IOCTL_ARGS); +extern int mach64_dma_init(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int mach64_dma_idle(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int mach64_dma_flush(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int mach64_engine_reset(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int mach64_dma_buffers(struct drm_device *dev, void *data, + struct drm_file *file_priv); extern void mach64_driver_lastclose(struct drm_device * dev); extern int mach64_init_freelist(struct drm_device * dev); @@ -140,11 +145,16 @@ extern int mach64_do_dma_flush(drm_mach64_private_t * dev_priv); extern int mach64_do_cleanup_dma(struct drm_device * dev); /* mach64_state.c */ -extern int mach64_dma_clear(DRM_IOCTL_ARGS); -extern int mach64_dma_swap(DRM_IOCTL_ARGS); -extern int mach64_dma_vertex(DRM_IOCTL_ARGS); -extern int mach64_dma_blit(DRM_IOCTL_ARGS); -extern int mach64_get_param(DRM_IOCTL_ARGS); +extern int mach64_dma_clear(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int mach64_dma_swap(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int mach64_dma_vertex(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int mach64_dma_blit(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int mach64_get_param(struct drm_device *dev, void *data, + struct drm_file *file_priv); extern int mach64_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence); diff --git a/shared-core/mach64_state.c b/shared-core/mach64_state.c index c89573e7..89b6c6ce 100644 --- a/shared-core/mach64_state.c +++ b/shared-core/mach64_state.c @@ -41,15 +41,15 @@ * */ struct drm_ioctl_desc mach64_ioctls[] = { - [DRM_IOCTL_NR(DRM_MACH64_INIT)] = {mach64_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_MACH64_CLEAR)] = {mach64_dma_clear, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_MACH64_SWAP)] = {mach64_dma_swap, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_MACH64_IDLE)] = {mach64_dma_idle, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_MACH64_RESET)] = {mach64_engine_reset, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_MACH64_VERTEX)] = {mach64_dma_vertex, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_MACH64_BLIT)] = {mach64_dma_blit, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_MACH64_FLUSH)] = {mach64_dma_flush, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_MACH64_GETPARAM)] = {mach64_get_param, DRM_AUTH}, + 
DRM_IOCTL_DEF(DRM_MACH64_INIT, mach64_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_MACH64_CLEAR, mach64_dma_clear, DRM_AUTH), + DRM_IOCTL_DEF(DRM_MACH64_SWAP, mach64_dma_swap, DRM_AUTH), + DRM_IOCTL_DEF(DRM_MACH64_IDLE, mach64_dma_idle, DRM_AUTH), + DRM_IOCTL_DEF(DRM_MACH64_RESET, mach64_engine_reset, DRM_AUTH), + DRM_IOCTL_DEF(DRM_MACH64_VERTEX, mach64_dma_vertex, DRM_AUTH), + DRM_IOCTL_DEF(DRM_MACH64_BLIT, mach64_dma_blit, DRM_AUTH), + DRM_IOCTL_DEF(DRM_MACH64_FLUSH, mach64_dma_flush, DRM_AUTH), + DRM_IOCTL_DEF(DRM_MACH64_GETPARAM, mach64_get_param, DRM_AUTH), }; int mach64_max_ioctl = DRM_ARRAY_SIZE(mach64_ioctls); @@ -758,27 +758,25 @@ _blit_done: * IOCTL functions */ -int mach64_dma_clear(DRM_IOCTL_ARGS) +int mach64_dma_clear(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - DRM_DEVICE; drm_mach64_private_t *dev_priv = dev->dev_private; drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_mach64_clear_t clear; + drm_mach64_clear_t *clear = data; int ret; DRM_DEBUG("%s: pid=%d\n", __FUNCTION__, DRM_CURRENTPID); LOCK_TEST_WITH_RETURN(dev, file_priv); - DRM_COPY_FROM_USER_IOCTL(clear, (drm_mach64_clear_t *) data, - sizeof(clear)); - if (sarea_priv->nbox > MACH64_NR_SAREA_CLIPRECTS) sarea_priv->nbox = MACH64_NR_SAREA_CLIPRECTS; - ret = mach64_dma_dispatch_clear(dev, file_priv, clear.flags, - clear.x, clear.y, clear.w, clear.h, - clear.clear_color, clear.clear_depth); + ret = mach64_dma_dispatch_clear(dev, file_priv, clear->flags, + clear->x, clear->y, clear->w, clear->h, + clear->clear_color, + clear->clear_depth); /* Make sure we restore the 3D state next time. */ @@ -786,9 +784,9 @@ int mach64_dma_clear(DRM_IOCTL_ARGS) return ret; } -int mach64_dma_swap(DRM_IOCTL_ARGS) +int mach64_dma_swap(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - DRM_DEVICE; drm_mach64_private_t *dev_priv = dev->dev_private; drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv; int ret; @@ -808,12 +806,12 @@ int mach64_dma_swap(DRM_IOCTL_ARGS) return ret; } -int mach64_dma_vertex(DRM_IOCTL_ARGS) +int mach64_dma_vertex(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - DRM_DEVICE; drm_mach64_private_t *dev_priv = dev->dev_private; drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_mach64_vertex_t vertex; + drm_mach64_vertex_t *vertex = data; LOCK_TEST_WITH_RETURN(dev, file_priv); @@ -822,44 +820,38 @@ int mach64_dma_vertex(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(vertex, (drm_mach64_vertex_t *) data, - sizeof(vertex)); - DRM_DEBUG("%s: pid=%d buf=%p used=%lu discard=%d\n", __FUNCTION__, DRM_CURRENTPID, - vertex.buf, vertex.used, vertex.discard); + vertex->buf, vertex->used, vertex->discard); - if (vertex.prim < 0 || vertex.prim > MACH64_PRIM_POLYGON) { - DRM_ERROR("buffer prim %d\n", vertex.prim); + if (vertex->prim < 0 || vertex->prim > MACH64_PRIM_POLYGON) { + DRM_ERROR("buffer prim %d\n", vertex->prim); return -EINVAL; } - if (vertex.used > MACH64_BUFFER_SIZE || (vertex.used & 3) != 0) { + if (vertex->used > MACH64_BUFFER_SIZE || (vertex->used & 3) != 0) { DRM_ERROR("Invalid vertex buffer size: %lu bytes\n", - vertex.used); + vertex->used); return -EINVAL; } if (sarea_priv->nbox > MACH64_NR_SAREA_CLIPRECTS) sarea_priv->nbox = MACH64_NR_SAREA_CLIPRECTS; - return mach64_dma_dispatch_vertex(dev, file_priv, &vertex); + return mach64_dma_dispatch_vertex(dev, file_priv, vertex); } -int mach64_dma_blit(DRM_IOCTL_ARGS) +int mach64_dma_blit(struct drm_device *dev, void *data, + struct drm_file 
*file_priv) { - DRM_DEVICE; drm_mach64_private_t *dev_priv = dev->dev_private; drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_mach64_blit_t blit; + drm_mach64_blit_t *blit = data; int ret; LOCK_TEST_WITH_RETURN(dev, file_priv); - DRM_COPY_FROM_USER_IOCTL(blit, (drm_mach64_blit_t *) data, - sizeof(blit)); - - ret = mach64_dma_dispatch_blit(dev, file_priv, &blit); + ret = mach64_dma_dispatch_blit(dev, file_priv, blit); /* Make sure we restore the 3D state next time. */ @@ -869,11 +861,11 @@ int mach64_dma_blit(DRM_IOCTL_ARGS) return ret; } -int mach64_get_param(DRM_IOCTL_ARGS) +int mach64_get_param(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - DRM_DEVICE; drm_mach64_private_t *dev_priv = dev->dev_private; - drm_mach64_getparam_t param; + drm_mach64_getparam_t *param = data; int value; DRM_DEBUG("%s\n", __FUNCTION__); @@ -883,10 +875,7 @@ int mach64_get_param(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(param, (drm_mach64_getparam_t *) data, - sizeof(param)); - - switch (param.param) { + switch (param->param) { case MACH64_PARAM_FRAMES_QUEUED: /* Needs lock since it calls mach64_ring_tick() */ LOCK_TEST_WITH_RETURN(dev, file_priv); @@ -899,7 +888,7 @@ int mach64_get_param(DRM_IOCTL_ARGS) return -EINVAL; } - if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) { + if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) { DRM_ERROR("copy_to_user\n"); return -EFAULT; } diff --git a/shared-core/mga_dma.c b/shared-core/mga_dma.c index 429ffa54..a86dd31c 100644 --- a/shared-core/mga_dma.c +++ b/shared-core/mga_dma.c @@ -762,38 +762,31 @@ static int mga_do_dma_bootstrap(struct drm_device * dev, return err; } -int mga_dma_bootstrap(DRM_IOCTL_ARGS) +int mga_dma_bootstrap(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - DRM_DEVICE; - drm_mga_dma_bootstrap_t bootstrap; + drm_mga_dma_bootstrap_t *bootstrap = data; int err; static const int modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 }; const drm_mga_private_t * const dev_priv = (drm_mga_private_t *) dev->dev_private; - DRM_COPY_FROM_USER_IOCTL(bootstrap, - (drm_mga_dma_bootstrap_t __user *) data, - sizeof(bootstrap)); - - err = mga_do_dma_bootstrap(dev, & bootstrap); + err = mga_do_dma_bootstrap(dev, bootstrap); if (err) { mga_do_cleanup_dma(dev, FULL_CLEANUP); return err; } if (dev_priv->agp_textures != NULL) { - bootstrap.texture_handle = dev_priv->agp_textures->offset; - bootstrap.texture_size = dev_priv->agp_textures->size; + bootstrap->texture_handle = dev_priv->agp_textures->offset; + bootstrap->texture_size = dev_priv->agp_textures->size; } else { - bootstrap.texture_handle = 0; - bootstrap.texture_size = 0; + bootstrap->texture_handle = 0; + bootstrap->texture_size = 0; } - bootstrap.agp_mode = modes[bootstrap.agp_mode & 0x07]; - - DRM_COPY_TO_USER_IOCTL((drm_mga_dma_bootstrap_t __user *)data, - bootstrap, sizeof(bootstrap)); + bootstrap->agp_mode = modes[bootstrap->agp_mode & 0x07]; return 0; } @@ -1010,20 +1003,17 @@ static int mga_do_cleanup_dma(struct drm_device * dev, int full_cleanup) return 0; } -int mga_dma_init(DRM_IOCTL_ARGS) +int mga_dma_init(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - DRM_DEVICE; - drm_mga_init_t init; + drm_mga_init_t *init = data; int err; LOCK_TEST_WITH_RETURN(dev, file_priv); - DRM_COPY_FROM_USER_IOCTL(init, (drm_mga_init_t __user *) data, - sizeof(init)); - - switch (init.func) { + switch (init->func) { case MGA_INIT_DMA: - err = mga_do_init_dma(dev, &init); + err = mga_do_init_dma(dev, init); if (err) { (void) 
mga_do_cleanup_dma(dev, FULL_CLEANUP); } @@ -1039,29 +1029,26 @@ int mga_dma_init(DRM_IOCTL_ARGS) * Primary DMA stream management */ -int mga_dma_flush(DRM_IOCTL_ARGS) +int mga_dma_flush(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - DRM_DEVICE; drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; - struct drm_lock lock; + struct drm_lock *lock = data; LOCK_TEST_WITH_RETURN(dev, file_priv); - DRM_COPY_FROM_USER_IOCTL(lock, (struct drm_lock __user *) data, - sizeof(lock)); - DRM_DEBUG("%s%s%s\n", - (lock.flags & _DRM_LOCK_FLUSH) ? "flush, " : "", - (lock.flags & _DRM_LOCK_FLUSH_ALL) ? "flush all, " : "", - (lock.flags & _DRM_LOCK_QUIESCENT) ? "idle, " : ""); + (lock->flags & _DRM_LOCK_FLUSH) ? "flush, " : "", + (lock->flags & _DRM_LOCK_FLUSH_ALL) ? "flush all, " : "", + (lock->flags & _DRM_LOCK_QUIESCENT) ? "idle, " : ""); WRAP_WAIT_WITH_RETURN(dev_priv); - if (lock.flags & (_DRM_LOCK_FLUSH | _DRM_LOCK_FLUSH_ALL)) { + if (lock->flags & (_DRM_LOCK_FLUSH | _DRM_LOCK_FLUSH_ALL)) { mga_do_dma_flush(dev_priv); } - if (lock.flags & _DRM_LOCK_QUIESCENT) { + if (lock->flags & _DRM_LOCK_QUIESCENT) { #if MGA_DMA_DEBUG int ret = mga_do_wait_for_idle(dev_priv); if (ret < 0) @@ -1075,9 +1062,9 @@ int mga_dma_flush(DRM_IOCTL_ARGS) } } -int mga_dma_reset(DRM_IOCTL_ARGS) +int mga_dma_reset(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - DRM_DEVICE; drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; LOCK_TEST_WITH_RETURN(dev, file_priv); @@ -1114,45 +1101,40 @@ static int mga_dma_get_buffers(struct drm_device * dev, return 0; } -int mga_dma_buffers(DRM_IOCTL_ARGS) +int mga_dma_buffers(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - DRM_DEVICE; struct drm_device_dma *dma = dev->dma; drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; - struct drm_dma __user *argp = (void __user *)data; - struct drm_dma d; + struct drm_dma *d = data; int ret = 0; LOCK_TEST_WITH_RETURN(dev, file_priv); - DRM_COPY_FROM_USER_IOCTL(d, argp, sizeof(d)); - /* Please don't send us buffers. */ - if (d.send_count != 0) { + if (d->send_count != 0) { DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", - DRM_CURRENTPID, d.send_count); + DRM_CURRENTPID, d->send_count); return -EINVAL; } /* We'll send you buffers. 
*/ - if (d.request_count < 0 || d.request_count > dma->buf_count) { + if (d->request_count < 0 || d->request_count > dma->buf_count) { DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", - DRM_CURRENTPID, d.request_count, dma->buf_count); + DRM_CURRENTPID, d->request_count, dma->buf_count); return -EINVAL; } WRAP_TEST_WITH_RETURN(dev_priv); - d.granted_count = 0; + d->granted_count = 0; - if (d.request_count) { - ret = mga_dma_get_buffers(dev, file_priv, &d); + if (d->request_count) { + ret = mga_dma_get_buffers(dev, file_priv, d); } - DRM_COPY_TO_USER_IOCTL(argp, d, sizeof(d)); - return ret; } diff --git a/shared-core/mga_drv.h b/shared-core/mga_drv.h index 2da31194..8254c3f1 100644 --- a/shared-core/mga_drv.h +++ b/shared-core/mga_drv.h @@ -152,11 +152,16 @@ extern struct drm_ioctl_desc mga_ioctls[]; extern int mga_max_ioctl; /* mga_dma.c */ -extern int mga_dma_bootstrap(DRM_IOCTL_ARGS); -extern int mga_dma_init(DRM_IOCTL_ARGS); -extern int mga_dma_flush(DRM_IOCTL_ARGS); -extern int mga_dma_reset(DRM_IOCTL_ARGS); -extern int mga_dma_buffers(DRM_IOCTL_ARGS); +extern int mga_dma_bootstrap(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int mga_dma_init(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int mga_dma_flush(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int mga_dma_reset(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int mga_dma_buffers(struct drm_device *dev, void *data, + struct drm_file *file_priv); extern int mga_driver_load(struct drm_device *dev, unsigned long flags); extern int mga_driver_unload(struct drm_device * dev); extern void mga_driver_lastclose(struct drm_device * dev); diff --git a/shared-core/mga_state.c b/shared-core/mga_state.c index 196d7d16..70b7caa0 100644 --- a/shared-core/mga_state.c +++ b/shared-core/mga_state.c @@ -865,24 +865,20 @@ static void mga_dma_dispatch_blit(struct drm_device * dev, drm_mga_blit_t * blit * */ -static int mga_dma_clear(DRM_IOCTL_ARGS) +static int mga_dma_clear(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_mga_clear_t clear; + drm_mga_clear_t *clear = data; LOCK_TEST_WITH_RETURN(dev, file_priv); - DRM_COPY_FROM_USER_IOCTL(clear, (drm_mga_clear_t __user *) data, - sizeof(clear)); - if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS) sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS; WRAP_TEST_WITH_RETURN(dev_priv); - mga_dma_dispatch_clear(dev, &clear); + mga_dma_dispatch_clear(dev, clear); /* Make sure we restore the 3D state next time. 
*/ @@ -891,9 +887,8 @@ static int mga_dma_clear(DRM_IOCTL_ARGS) return 0; } -static int mga_dma_swap(DRM_IOCTL_ARGS) +static int mga_dma_swap(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; @@ -913,31 +908,26 @@ static int mga_dma_swap(DRM_IOCTL_ARGS) return 0; } -static int mga_dma_vertex(DRM_IOCTL_ARGS) +static int mga_dma_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_mga_private_t *dev_priv = dev->dev_private; struct drm_device_dma *dma = dev->dma; struct drm_buf *buf; drm_mga_buf_priv_t *buf_priv; - drm_mga_vertex_t vertex; + drm_mga_vertex_t *vertex = data; LOCK_TEST_WITH_RETURN(dev, file_priv); - DRM_COPY_FROM_USER_IOCTL(vertex, - (drm_mga_vertex_t __user *) data, - sizeof(vertex)); - - if (vertex.idx < 0 || vertex.idx > dma->buf_count) + if (vertex->idx < 0 || vertex->idx > dma->buf_count) return -EINVAL; - buf = dma->buflist[vertex.idx]; + buf = dma->buflist[vertex->idx]; buf_priv = buf->dev_private; - buf->used = vertex.used; - buf_priv->discard = vertex.discard; + buf->used = vertex->used; + buf_priv->discard = vertex->discard; if (!mga_verify_state(dev_priv)) { - if (vertex.discard) { + if (vertex->discard) { if (buf_priv->dispatched == 1) AGE_BUFFER(buf_priv); buf_priv->dispatched = 0; @@ -953,31 +943,26 @@ static int mga_dma_vertex(DRM_IOCTL_ARGS) return 0; } -static int mga_dma_indices(DRM_IOCTL_ARGS) +static int mga_dma_indices(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_mga_private_t *dev_priv = dev->dev_private; struct drm_device_dma *dma = dev->dma; struct drm_buf *buf; drm_mga_buf_priv_t *buf_priv; - drm_mga_indices_t indices; + drm_mga_indices_t *indices = data; LOCK_TEST_WITH_RETURN(dev, file_priv); - DRM_COPY_FROM_USER_IOCTL(indices, - (drm_mga_indices_t __user *) data, - sizeof(indices)); - - if (indices.idx < 0 || indices.idx > dma->buf_count) + if (indices->idx < 0 || indices->idx > dma->buf_count) return -EINVAL; - buf = dma->buflist[indices.idx]; + buf = dma->buflist[indices->idx]; buf_priv = buf->dev_private; - buf_priv->discard = indices.discard; + buf_priv->discard = indices->discard; if (!mga_verify_state(dev_priv)) { - if (indices.discard) { + if (indices->discard) { if (buf_priv->dispatched == 1) AGE_BUFFER(buf_priv); buf_priv->dispatched = 0; @@ -988,26 +973,22 @@ static int mga_dma_indices(DRM_IOCTL_ARGS) WRAP_TEST_WITH_RETURN(dev_priv); - mga_dma_dispatch_indices(dev, buf, indices.start, indices.end); + mga_dma_dispatch_indices(dev, buf, indices->start, indices->end); return 0; } -static int mga_dma_iload(DRM_IOCTL_ARGS) +static int mga_dma_iload(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; struct drm_device_dma *dma = dev->dma; drm_mga_private_t *dev_priv = dev->dev_private; struct drm_buf *buf; drm_mga_buf_priv_t *buf_priv; - drm_mga_iload_t iload; + drm_mga_iload_t *iload = data; DRM_DEBUG("\n"); LOCK_TEST_WITH_RETURN(dev, file_priv); - DRM_COPY_FROM_USER_IOCTL(iload, (drm_mga_iload_t __user *) data, - sizeof(iload)); - #if 0 if (mga_do_wait_for_idle(dev_priv) < 0) { if (MGA_DMA_DEBUG) @@ -1015,20 +996,20 @@ static int mga_dma_iload(DRM_IOCTL_ARGS) return -EBUSY; } #endif - if (iload.idx < 0 || iload.idx > dma->buf_count) + if (iload->idx < 0 || iload->idx > dma->buf_count) return -EINVAL; - buf = dma->buflist[iload.idx]; + buf = dma->buflist[iload->idx]; buf_priv = buf->dev_private; - if 
(mga_verify_iload(dev_priv, iload.dstorg, iload.length)) { + if (mga_verify_iload(dev_priv, iload->dstorg, iload->length)) { mga_freelist_put(dev, buf); return -EINVAL; } WRAP_TEST_WITH_RETURN(dev_priv); - mga_dma_dispatch_iload(dev, buf, iload.dstorg, iload.length); + mga_dma_dispatch_iload(dev, buf, iload->dstorg, iload->length); /* Make sure we restore the 3D state next time. */ @@ -1037,28 +1018,24 @@ static int mga_dma_iload(DRM_IOCTL_ARGS) return 0; } -static int mga_dma_blit(DRM_IOCTL_ARGS) +static int mga_dma_blit(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_mga_blit_t blit; + drm_mga_blit_t *blit = data; DRM_DEBUG("\n"); LOCK_TEST_WITH_RETURN(dev, file_priv); - DRM_COPY_FROM_USER_IOCTL(blit, (drm_mga_blit_t __user *) data, - sizeof(blit)); - if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS) sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS; - if (mga_verify_blit(dev_priv, blit.srcorg, blit.dstorg)) + if (mga_verify_blit(dev_priv, blit->srcorg, blit->dstorg)) return -EINVAL; WRAP_TEST_WITH_RETURN(dev_priv); - mga_dma_dispatch_blit(dev, &blit); + mga_dma_dispatch_blit(dev, blit); /* Make sure we restore the 3D state next time. */ @@ -1067,11 +1044,10 @@ static int mga_dma_blit(DRM_IOCTL_ARGS) return 0; } -static int mga_getparam(DRM_IOCTL_ARGS) +static int mga_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_mga_private_t *dev_priv = dev->dev_private; - drm_mga_getparam_t param; + drm_mga_getparam_t *param = data; int value; if (!dev_priv) { @@ -1079,12 +1055,9 @@ static int mga_getparam(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(param, (drm_mga_getparam_t __user *) data, - sizeof(param)); - DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); - switch (param.param) { + switch (param->param) { case MGA_PARAM_IRQ_NR: value = dev->irq; break; @@ -1095,7 +1068,7 @@ static int mga_getparam(DRM_IOCTL_ARGS) return -EINVAL; } - if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) { + if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) { DRM_ERROR("copy_to_user\n"); return -EFAULT; } @@ -1103,11 +1076,10 @@ static int mga_getparam(DRM_IOCTL_ARGS) return 0; } -static int mga_set_fence(DRM_IOCTL_ARGS) +static int mga_set_fence(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_mga_private_t *dev_priv = dev->dev_private; - u32 temp; + u32 *fence = data; DMA_LOCALS; if (!dev_priv) { @@ -1117,11 +1089,11 @@ static int mga_set_fence(DRM_IOCTL_ARGS) DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); - /* I would normal do this assignment in the declaration of temp, + /* I would normal do this assignment in the declaration of fence, * but dev_priv may be NULL. 
*/ - temp = dev_priv->next_fence_to_post; + *fence = dev_priv->next_fence_to_post; dev_priv->next_fence_to_post++; BEGIN_DMA(1); @@ -1131,47 +1103,40 @@ static int mga_set_fence(DRM_IOCTL_ARGS) MGA_SOFTRAP, 0x00000000); ADVANCE_DMA(); - DRM_COPY_TO_USER_IOCTL((u32 __user *)data, temp, sizeof(u32)); - return 0; } -static int mga_wait_fence(DRM_IOCTL_ARGS) +static int mga_wait_fence(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_mga_private_t *dev_priv = dev->dev_private; - u32 fence; + u32 *fence = data; if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(fence, (u32 __user *) data, sizeof(u32)); - DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); - mga_driver_fence_wait(dev, & fence); - - DRM_COPY_TO_USER_IOCTL((u32 __user *)data, fence, sizeof(u32)); + mga_driver_fence_wait(dev, fence); return 0; } struct drm_ioctl_desc mga_ioctls[] = { - [DRM_IOCTL_NR(DRM_MGA_INIT)] = {mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_MGA_FLUSH)] = {mga_dma_flush, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_MGA_RESET)] = {mga_dma_reset, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_MGA_SWAP)] = {mga_dma_swap, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_MGA_CLEAR)] = {mga_dma_clear, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_MGA_VERTEX)] = {mga_dma_vertex, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_MGA_INDICES)] = {mga_dma_indices, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_MGA_ILOAD)] = {mga_dma_iload, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_MGA_BLIT)] = {mga_dma_blit, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_MGA_GETPARAM)] = {mga_getparam, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_MGA_SET_FENCE)] = {mga_set_fence, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_MGA_WAIT_FENCE)] = {mga_wait_fence, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_MGA_DMA_BOOTSTRAP)] = {mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, + DRM_IOCTL_DEF(DRM_MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_MGA_FLUSH, mga_dma_flush, DRM_AUTH), + DRM_IOCTL_DEF(DRM_MGA_RESET, mga_dma_reset, DRM_AUTH), + DRM_IOCTL_DEF(DRM_MGA_SWAP, mga_dma_swap, DRM_AUTH), + DRM_IOCTL_DEF(DRM_MGA_CLEAR, mga_dma_clear, DRM_AUTH), + DRM_IOCTL_DEF(DRM_MGA_VERTEX, mga_dma_vertex, DRM_AUTH), + DRM_IOCTL_DEF(DRM_MGA_INDICES, mga_dma_indices, DRM_AUTH), + DRM_IOCTL_DEF(DRM_MGA_ILOAD, mga_dma_iload, DRM_AUTH), + DRM_IOCTL_DEF(DRM_MGA_BLIT, mga_dma_blit, DRM_AUTH), + DRM_IOCTL_DEF(DRM_MGA_GETPARAM, mga_getparam, DRM_AUTH), + DRM_IOCTL_DEF(DRM_MGA_SET_FENCE, mga_set_fence, DRM_AUTH), + DRM_IOCTL_DEF(DRM_MGA_WAIT_FENCE, mga_wait_fence, DRM_AUTH), + DRM_IOCTL_DEF(DRM_MGA_DMA_BOOTSTRAP, mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), }; diff --git a/shared-core/nouveau_drv.h b/shared-core/nouveau_drv.h index 7ecfadd2..dd323a0b 100644 --- a/shared-core/nouveau_drv.h +++ b/shared-core/nouveau_drv.h @@ -269,10 +269,10 @@ extern int nouveau_load(struct drm_device *dev, unsigned long flags); extern int nouveau_firstopen(struct drm_device *dev); extern void nouveau_lastclose(struct drm_device *dev); extern int nouveau_unload(struct drm_device *dev); -extern int nouveau_ioctl_getparam(DRM_IOCTL_ARGS); -extern int nouveau_ioctl_setparam(DRM_IOCTL_ARGS); +extern int nouveau_ioctl_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int nouveau_ioctl_setparam(struct drm_device *dev, void *data, struct drm_file *file_priv); extern void nouveau_wait_for_idle(struct drm_device *dev); -extern int nouveau_ioctl_card_init(DRM_IOCTL_ARGS); +extern int nouveau_ioctl_card_init(struct drm_device *dev, void *data, struct drm_file 
*file_priv); /* nouveau_mem.c */ extern int nouveau_mem_init_heap(struct mem_block **, @@ -285,8 +285,8 @@ extern void nouveau_mem_free_block(struct mem_block *); extern uint64_t nouveau_mem_fb_amount(struct drm_device *dev); extern void nouveau_mem_release(struct drm_file *file_priv, struct mem_block *heap); -extern int nouveau_ioctl_mem_alloc(DRM_IOCTL_ARGS); -extern int nouveau_ioctl_mem_free(DRM_IOCTL_ARGS); +extern int nouveau_ioctl_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int nouveau_ioctl_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv); extern struct mem_block* nouveau_mem_alloc(struct drm_device *dev, int alignment, uint64_t size, int flags, @@ -301,7 +301,7 @@ extern int nouveau_notifier_init_channel(struct drm_device *, int channel, extern void nouveau_notifier_takedown_channel(struct drm_device *, int channel); extern int nouveau_notifier_alloc(struct drm_device *, int channel, uint32_t handle, int cout, uint32_t *offset); -extern int nouveau_ioctl_notifier_alloc(DRM_IOCTL_ARGS); +extern int nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv); /* nouveau_fifo.c */ extern int nouveau_fifo_init(struct drm_device *dev); @@ -342,7 +342,7 @@ extern int nouveau_gpuobj_gart_dma_new(struct drm_device *, int channel, uint32_t *o_ret); extern int nouveau_gpuobj_gr_new(struct drm_device *, int channel, int class, struct nouveau_gpuobj **); -extern int nouveau_ioctl_grobj_alloc(DRM_IOCTL_ARGS); +extern int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv); /* nouveau_irq.c */ extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS); diff --git a/shared-core/nouveau_fifo.c b/shared-core/nouveau_fifo.c index 8731c6a1..e5d3ab3c 100644 --- a/shared-core/nouveau_fifo.c +++ b/shared-core/nouveau_fifo.c @@ -472,42 +472,37 @@ nouveau_fifo_owner(struct drm_device *dev, struct drm_file *file_priv, * ioctls wrapping the functions ***********************************/ -static int nouveau_ioctl_fifo_alloc(DRM_IOCTL_ARGS) +static int nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct drm_nouveau_fifo_alloc init; + struct drm_nouveau_fifo_alloc *init = data; struct drm_map_list *entry; struct nouveau_fifo *chan; int res; - DRM_COPY_FROM_USER_IOCTL(init, - (struct drm_nouveau_fifo_alloc __user *) data, - sizeof(init)); - - if (init.fb_ctxdma_handle == ~0 || init.tt_ctxdma_handle == ~0) + if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0) return -EINVAL; - res = nouveau_fifo_alloc(dev, &init.channel, file_priv, - init.fb_ctxdma_handle, - init.tt_ctxdma_handle); + res = nouveau_fifo_alloc(dev, &init->channel, file_priv, + init->fb_ctxdma_handle, + init->tt_ctxdma_handle); if (res) return res; - chan = dev_priv->fifos[init.channel]; + chan = dev_priv->fifos[init->channel]; - init.put_base = chan->pushbuf_base; + init->put_base = chan->pushbuf_base; /* make the fifo available to user space */ /* first, the fifo control regs */ - init.ctrl = dev_priv->mmio->offset; + init->ctrl = dev_priv->mmio->offset; if (dev_priv->card_type < NV_50) { - init.ctrl += NV03_FIFO_REGS(init.channel); - init.ctrl_size = NV03_FIFO_REGS_SIZE; + init->ctrl += NV03_FIFO_REGS(init->channel); + init->ctrl_size = NV03_FIFO_REGS_SIZE; } else { - init.ctrl += NV50_FIFO_REGS(init.channel); - init.ctrl_size = NV50_FIFO_REGS_SIZE; + init->ctrl += 
NV50_FIFO_REGS(init->channel); + init->ctrl_size = NV50_FIFO_REGS_SIZE; } - res = drm_addmap(dev, init.ctrl, init.ctrl_size, _DRM_REGISTERS, + res = drm_addmap(dev, init->ctrl, init->ctrl_size, _DRM_REGISTERS, 0, &chan->regs); if (res != 0) return res; @@ -515,18 +510,16 @@ static int nouveau_ioctl_fifo_alloc(DRM_IOCTL_ARGS) entry = drm_find_matching_map(dev, chan->regs); if (!entry) return -EINVAL; - init.ctrl = entry->user_token; + init->ctrl = entry->user_token; /* pass back FIFO map info to the caller */ - init.cmdbuf = chan->pushbuf_mem->map_handle; - init.cmdbuf_size = chan->pushbuf_mem->size; + init->cmdbuf = chan->pushbuf_mem->map_handle; + init->cmdbuf_size = chan->pushbuf_mem->size; /* and the notifier block */ - init.notifier = chan->notifier_block->map_handle; - init.notifier_size = chan->notifier_block->size; + init->notifier = chan->notifier_block->map_handle; + init->notifier_size = chan->notifier_block->size; - DRM_COPY_TO_USER_IOCTL((struct drm_nouveau_fifo_alloc __user *)data, - init, sizeof(init)); return 0; } @@ -535,13 +528,13 @@ static int nouveau_ioctl_fifo_alloc(DRM_IOCTL_ARGS) ***********************************/ struct drm_ioctl_desc nouveau_ioctls[] = { - [DRM_IOCTL_NR(DRM_NOUVEAU_FIFO_ALLOC)] = {nouveau_ioctl_fifo_alloc, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_NOUVEAU_GROBJ_ALLOC)] = {nouveau_ioctl_grobj_alloc, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_NOUVEAU_NOTIFIER_ALLOC)] = {nouveau_ioctl_notifier_alloc, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_NOUVEAU_MEM_ALLOC)] = {nouveau_ioctl_mem_alloc, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_NOUVEAU_MEM_FREE)] = {nouveau_ioctl_mem_free, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_NOUVEAU_GETPARAM)] = {nouveau_ioctl_getparam, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_NOUVEAU_SETPARAM)] = {nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, + DRM_IOCTL_DEF(DRM_NOUVEAU_FIFO_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH), + DRM_IOCTL_DEF(DRM_NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH), + DRM_IOCTL_DEF(DRM_NOUVEAU_NOTIFIER_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH), + DRM_IOCTL_DEF(DRM_NOUVEAU_MEM_ALLOC, nouveau_ioctl_mem_alloc, DRM_AUTH), + DRM_IOCTL_DEF(DRM_NOUVEAU_MEM_FREE, nouveau_ioctl_mem_free, DRM_AUTH), + DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH), + DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), }; int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls); diff --git a/shared-core/nouveau_mem.c b/shared-core/nouveau_mem.c index 6a4818c5..a7044c94 100644 --- a/shared-core/nouveau_mem.c +++ b/shared-core/nouveau_mem.c @@ -547,11 +547,10 @@ void nouveau_mem_free(struct drm_device* dev, struct mem_block* block) * Ioctls */ -int nouveau_ioctl_mem_alloc(DRM_IOCTL_ARGS) +int nouveau_ioctl_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct drm_nouveau_mem_alloc alloc; + struct drm_nouveau_mem_alloc *alloc = data; struct mem_block *block; if (!dev_priv) { @@ -559,42 +558,30 @@ int nouveau_ioctl_mem_alloc(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(alloc, - (struct drm_nouveau_mem_alloc_t __user *) data, - sizeof(alloc)); - - block=nouveau_mem_alloc(dev, alloc.alignment, alloc.size, alloc.flags, - file_priv); + block=nouveau_mem_alloc(dev, alloc->alignment, alloc->size, + alloc->flags, file_priv); if (!block) return -ENOMEM; - alloc.map_handle=block->map_handle; - alloc.offset=block->start; - alloc.flags=block->flags; - - DRM_COPY_TO_USER_IOCTL((struct 
drm_nouveau_mem_alloc __user *)data, - alloc, sizeof(alloc)); + alloc->map_handle=block->map_handle; + alloc->offset=block->start; + alloc->flags=block->flags; return 0; } -int nouveau_ioctl_mem_free(DRM_IOCTL_ARGS) +int nouveau_ioctl_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct drm_nouveau_mem_free memfree; + struct drm_nouveau_mem_free *memfree = data; struct mem_block *block; - DRM_COPY_FROM_USER_IOCTL(memfree, - (struct drm_nouveau_mem_free_t __user *)data, - sizeof(memfree)); - block=NULL; - if (memfree.flags&NOUVEAU_MEM_FB) - block = find_block(dev_priv->fb_heap, memfree.offset); - else if (memfree.flags&NOUVEAU_MEM_AGP) - block = find_block(dev_priv->agp_heap, memfree.offset); - else if (memfree.flags&NOUVEAU_MEM_PCI) - block = find_block(dev_priv->pci_heap, memfree.offset); + if (memfree->flags & NOUVEAU_MEM_FB) + block = find_block(dev_priv->fb_heap, memfree->offset); + else if (memfree->flags & NOUVEAU_MEM_AGP) + block = find_block(dev_priv->agp_heap, memfree->offset); + else if (memfree->flags & NOUVEAU_MEM_PCI) + block = find_block(dev_priv->pci_heap, memfree->offset); if (!block) return -EFAULT; if (block->file_priv != file_priv) diff --git a/shared-core/nouveau_notifier.c b/shared-core/nouveau_notifier.c index 6a78bb23..24a306e8 100644 --- a/shared-core/nouveau_notifier.c +++ b/shared-core/nouveau_notifier.c @@ -128,29 +128,22 @@ nouveau_notifier_alloc(struct drm_device *dev, int channel, uint32_t handle, } int -nouveau_ioctl_notifier_alloc(DRM_IOCTL_ARGS) +nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - struct drm_nouveau_notifier_alloc na; + struct drm_nouveau_notifier_alloc *na = data; int ret; - DRM_COPY_FROM_USER_IOCTL(na, - (struct drm_nouveau_notifier_alloc __user*)data, - sizeof(na)); - - if (!nouveau_fifo_owner(dev, file_priv, na.channel)) { + if (!nouveau_fifo_owner(dev, file_priv, na->channel)) { DRM_ERROR("pid %d doesn't own channel %d\n", - DRM_CURRENTPID, na.channel); + DRM_CURRENTPID, na->channel); return -EPERM; } - ret = nouveau_notifier_alloc(dev, na.channel, na.handle, - na.count, &na.offset); + ret = nouveau_notifier_alloc(dev, na->channel, na->handle, + na->count, &na->offset); if (ret) return ret; - DRM_COPY_TO_USER_IOCTL((struct drm_nouveau_notifier_alloc __user*)data, - na, sizeof(na)); return 0; } diff --git a/shared-core/nouveau_object.c b/shared-core/nouveau_object.c index f11cc115..e8b12bb7 100644 --- a/shared-core/nouveau_object.c +++ b/shared-core/nouveau_object.c @@ -1024,40 +1024,37 @@ nouveau_gpuobj_channel_takedown(struct drm_device *dev, int channel) } -int nouveau_ioctl_grobj_alloc(DRM_IOCTL_ARGS) +int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - struct drm_nouveau_grobj_alloc init; + struct drm_nouveau_grobj_alloc *init = data; struct nouveau_gpuobj *gr = NULL; int ret; - DRM_COPY_FROM_USER_IOCTL(init, - (struct drm_nouveau_grobj_alloc_t __user*)data, - sizeof(init)); - - if (!nouveau_fifo_owner(dev, file_priv, init.channel)) { + if (!nouveau_fifo_owner(dev, file_priv, init->channel)) { DRM_ERROR("pid %d doesn't own channel %d\n", - DRM_CURRENTPID, init.channel); + DRM_CURRENTPID, init->channel); return -EINVAL; } //FIXME: check args, only allow trusted objects to be created - if (init.handle == ~0) + if (init->handle == ~0) return -EINVAL; - if (nouveau_gpuobj_ref_find(dev, init.channel, init.handle, NULL) 
== 0) + if (nouveau_gpuobj_ref_find(dev, init->channel, init->handle, NULL) == + 0) return -EEXIST; - if ((ret = nouveau_gpuobj_gr_new(dev, init.channel, init.class, &gr))) { + ret = nouveau_gpuobj_gr_new(dev, init->channel, init->class, &gr); + if (ret) { DRM_ERROR("Error creating gr object: %d (%d/0x%08x)\n", - ret, init.channel, init.handle); + ret, init->channel, init->handle); return ret; } - if ((ret = nouveau_gpuobj_ref_add(dev, init.channel, init.handle, + if ((ret = nouveau_gpuobj_ref_add(dev, init->channel, init->handle, gr, NULL))) { DRM_ERROR("Error referencing gr object: %d (%d/0x%08x\n)", - ret, init.channel, init.handle); + ret, init->channel, init->handle); nouveau_gpuobj_del(dev, &gr); return ret; } diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c index aea6bcf5..f45f2783 100644 --- a/shared-core/nouveau_state.c +++ b/shared-core/nouveau_state.c @@ -427,40 +427,35 @@ int nouveau_unload(struct drm_device *dev) return 0; } -int nouveau_ioctl_getparam(DRM_IOCTL_ARGS) +int nouveau_ioctl_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct drm_nouveau_getparam getparam; + struct drm_nouveau_getparam *getparam = data; - DRM_COPY_FROM_USER_IOCTL(getparam, - (struct drm_nouveau_getparam __user *)data, - sizeof(getparam)); - - switch (getparam.param) { + switch (getparam->param) { case NOUVEAU_GETPARAM_PCI_VENDOR: - getparam.value=dev->pci_vendor; + getparam->value=dev->pci_vendor; break; case NOUVEAU_GETPARAM_PCI_DEVICE: - getparam.value=dev->pci_device; + getparam->value=dev->pci_device; break; case NOUVEAU_GETPARAM_BUS_TYPE: if (drm_device_is_agp(dev)) - getparam.value=NV_AGP; + getparam->value=NV_AGP; else if (drm_device_is_pcie(dev)) - getparam.value=NV_PCIE; + getparam->value=NV_PCIE; else - getparam.value=NV_PCI; + getparam->value=NV_PCI; break; case NOUVEAU_GETPARAM_FB_PHYSICAL: - getparam.value=dev_priv->fb_phys; + getparam->value=dev_priv->fb_phys; break; case NOUVEAU_GETPARAM_AGP_PHYSICAL: - getparam.value=dev_priv->gart_info.aper_base; + getparam->value=dev_priv->gart_info.aper_base; break; case NOUVEAU_GETPARAM_PCI_PHYSICAL: if ( dev -> sg ) - getparam.value=(uint64_t) dev->sg->virtual; + getparam->value=(uint64_t) dev->sg->virtual; else { DRM_ERROR("Requested PCIGART address, while no PCIGART was created\n"); @@ -468,34 +463,27 @@ int nouveau_ioctl_getparam(DRM_IOCTL_ARGS) } break; case NOUVEAU_GETPARAM_FB_SIZE: - getparam.value=dev_priv->fb_available_size; + getparam->value=dev_priv->fb_available_size; break; case NOUVEAU_GETPARAM_AGP_SIZE: - getparam.value=dev_priv->gart_info.aper_size; + getparam->value=dev_priv->gart_info.aper_size; break; default: - DRM_ERROR("unknown parameter %lld\n", getparam.param); + DRM_ERROR("unknown parameter %lld\n", getparam->param); return -EINVAL; } - DRM_COPY_TO_USER_IOCTL((struct drm_nouveau_getparam __user *)data, - getparam, sizeof(getparam)); return 0; } -int nouveau_ioctl_setparam(DRM_IOCTL_ARGS) +int nouveau_ioctl_setparam(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct drm_nouveau_setparam setparam; + struct drm_nouveau_setparam *setparam = data; - DRM_COPY_FROM_USER_IOCTL(setparam, - (struct drm_nouveau_setparam __user *)data, - sizeof(setparam)); - - switch (setparam.param) { + switch (setparam->param) { case NOUVEAU_SETPARAM_CMDBUF_LOCATION: - switch (setparam.value) { + switch (setparam->value) { case 
NOUVEAU_MEM_AGP: case NOUVEAU_MEM_FB: case NOUVEAU_MEM_PCI: @@ -503,16 +491,16 @@ int nouveau_ioctl_setparam(DRM_IOCTL_ARGS) break; default: DRM_ERROR("invalid CMDBUF_LOCATION value=%lld\n", - setparam.value); + setparam->value); return -EINVAL; } - dev_priv->config.cmdbuf.location = setparam.value; + dev_priv->config.cmdbuf.location = setparam->value; break; case NOUVEAU_SETPARAM_CMDBUF_SIZE: - dev_priv->config.cmdbuf.size = setparam.value; + dev_priv->config.cmdbuf.size = setparam->value; break; default: - DRM_ERROR("unknown parameter %lld\n", setparam.param); + DRM_ERROR("unknown parameter %lld\n", setparam->param); return -EINVAL; } diff --git a/shared-core/r128_cce.c b/shared-core/r128_cce.c index f91e9031..5bed45bc 100644 --- a/shared-core/r128_cce.c +++ b/shared-core/r128_cce.c @@ -623,21 +623,17 @@ int r128_do_cleanup_cce(struct drm_device * dev) return 0; } -int r128_cce_init(DRM_IOCTL_ARGS) +int r128_cce_init(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - drm_r128_init_t init; + drm_r128_init_t *init = data; DRM_DEBUG("\n"); LOCK_TEST_WITH_RETURN(dev, file_priv); - DRM_COPY_FROM_USER_IOCTL(init, (drm_r128_init_t __user *) data, - sizeof(init)); - - switch (init.func) { + switch (init->func) { case R128_INIT_CCE: - return r128_do_init_cce(dev, &init); + return r128_do_init_cce(dev, init); case R128_CLEANUP_CCE: return r128_do_cleanup_cce(dev); } @@ -645,9 +641,8 @@ int r128_cce_init(DRM_IOCTL_ARGS) return -EINVAL; } -int r128_cce_start(DRM_IOCTL_ARGS) +int r128_cce_start(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_r128_private_t *dev_priv = dev->dev_private; DRM_DEBUG("\n"); @@ -666,30 +661,26 @@ int r128_cce_start(DRM_IOCTL_ARGS) /* Stop the CCE. The engine must have been idled before calling this * routine. */ -int r128_cce_stop(DRM_IOCTL_ARGS) +int r128_cce_stop(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_r128_private_t *dev_priv = dev->dev_private; - drm_r128_cce_stop_t stop; + drm_r128_cce_stop_t *stop = data; int ret; DRM_DEBUG("\n"); LOCK_TEST_WITH_RETURN(dev, file_priv); - DRM_COPY_FROM_USER_IOCTL(stop, (drm_r128_cce_stop_t __user *) data, - sizeof(stop)); - /* Flush any pending CCE commands. This ensures any outstanding * commands are exectuted by the engine before we turn it off. */ - if (stop.flush) { + if (stop->flush) { r128_do_cce_flush(dev_priv); } /* If we fail to make the engine go idle, we return an error * code so that the DRM ioctl wrapper can try again. */ - if (stop.idle) { + if (stop->idle) { ret = r128_do_cce_idle(dev_priv); if (ret) return ret; @@ -709,9 +700,8 @@ int r128_cce_stop(DRM_IOCTL_ARGS) /* Just reset the CCE ring. Called as part of an X Server engine reset. 
*/ -int r128_cce_reset(DRM_IOCTL_ARGS) +int r128_cce_reset(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_r128_private_t *dev_priv = dev->dev_private; DRM_DEBUG("\n"); @@ -730,9 +720,8 @@ int r128_cce_reset(DRM_IOCTL_ARGS) return 0; } -int r128_cce_idle(DRM_IOCTL_ARGS) +int r128_cce_idle(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_r128_private_t *dev_priv = dev->dev_private; DRM_DEBUG("\n"); @@ -745,9 +734,8 @@ int r128_cce_idle(DRM_IOCTL_ARGS) return r128_do_cce_idle(dev_priv); } -int r128_engine_reset(DRM_IOCTL_ARGS) +int r128_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; DRM_DEBUG("\n"); LOCK_TEST_WITH_RETURN(dev, file_priv); @@ -755,7 +743,7 @@ int r128_engine_reset(DRM_IOCTL_ARGS) return r128_do_engine_reset(dev); } -int r128_fullscreen(DRM_IOCTL_ARGS) +int r128_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv) { return -EINVAL; } @@ -910,41 +898,35 @@ static int r128_cce_get_buffers(struct drm_device * dev, return 0; } -int r128_cce_buffers(DRM_IOCTL_ARGS) +int r128_cce_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; struct drm_device_dma *dma = dev->dma; int ret = 0; - struct drm_dma __user *argp = (void __user *)data; - struct drm_dma d; + struct drm_dma *d = data; LOCK_TEST_WITH_RETURN(dev, file_priv); - DRM_COPY_FROM_USER_IOCTL(d, argp, sizeof(d)); - /* Please don't send us buffers. */ - if (d.send_count != 0) { + if (d->send_count != 0) { DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", - DRM_CURRENTPID, d.send_count); + DRM_CURRENTPID, d->send_count); return -EINVAL; } /* We'll send you buffers. */ - if (d.request_count < 0 || d.request_count > dma->buf_count) { + if (d->request_count < 0 || d->request_count > dma->buf_count) { DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", - DRM_CURRENTPID, d.request_count, dma->buf_count); + DRM_CURRENTPID, d->request_count, dma->buf_count); return -EINVAL; } - d.granted_count = 0; + d->granted_count = 0; - if (d.request_count) { - ret = r128_cce_get_buffers(dev, file_priv, &d); + if (d->request_count) { + ret = r128_cce_get_buffers(dev, file_priv, d); } - DRM_COPY_TO_USER_IOCTL(argp, d, sizeof(d)); - return ret; } diff --git a/shared-core/r128_drv.h b/shared-core/r128_drv.h index 6eb59e33..abb99898 100644 --- a/shared-core/r128_drv.h +++ b/shared-core/r128_drv.h @@ -133,14 +133,14 @@ extern struct drm_ioctl_desc r128_ioctls[]; extern int r128_max_ioctl; /* r128_cce.c */ -extern int r128_cce_init(DRM_IOCTL_ARGS); -extern int r128_cce_start(DRM_IOCTL_ARGS); -extern int r128_cce_stop(DRM_IOCTL_ARGS); -extern int r128_cce_reset(DRM_IOCTL_ARGS); -extern int r128_cce_idle(DRM_IOCTL_ARGS); -extern int r128_engine_reset(DRM_IOCTL_ARGS); -extern int r128_fullscreen(DRM_IOCTL_ARGS); -extern int r128_cce_buffers(DRM_IOCTL_ARGS); +extern int r128_cce_init(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int r128_cce_start(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int r128_cce_stop(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int r128_cce_reset(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int r128_cce_idle(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int r128_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int r128_fullscreen(struct drm_device *dev, void *data, 
struct drm_file *file_priv); +extern int r128_cce_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv); extern void r128_freelist_reset(struct drm_device * dev); diff --git a/shared-core/r128_state.c b/shared-core/r128_state.c index 4c244377..b7f483ca 100644 --- a/shared-core/r128_state.c +++ b/shared-core/r128_state.c @@ -1242,25 +1242,21 @@ static void r128_cce_dispatch_stipple(struct drm_device * dev, u32 * stipple) * IOCTL functions */ -static int r128_cce_clear(DRM_IOCTL_ARGS) +static int r128_cce_clear(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_r128_private_t *dev_priv = dev->dev_private; drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_r128_clear_t clear; + drm_r128_clear_t *clear = data; DRM_DEBUG("\n"); LOCK_TEST_WITH_RETURN(dev, file_priv); - DRM_COPY_FROM_USER_IOCTL(clear, (drm_r128_clear_t __user *) data, - sizeof(clear)); - RING_SPACE_TEST_WITH_RETURN(dev_priv); if (sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS) sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS; - r128_cce_dispatch_clear(dev, &clear); + r128_cce_dispatch_clear(dev, clear); COMMIT_RING(); /* Make sure we restore the 3D state next time. @@ -1310,9 +1306,8 @@ static int r128_do_cleanup_pageflip(struct drm_device * dev) * They can & should be intermixed to support multiple 3d windows. */ -static int r128_cce_flip(DRM_IOCTL_ARGS) +static int r128_cce_flip(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_r128_private_t *dev_priv = dev->dev_private; DRM_DEBUG("%s\n", __FUNCTION__); @@ -1329,9 +1324,8 @@ static int r128_cce_flip(DRM_IOCTL_ARGS) return 0; } -static int r128_cce_swap(DRM_IOCTL_ARGS) +static int r128_cce_swap(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_r128_private_t *dev_priv = dev->dev_private; drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; DRM_DEBUG("%s\n", __FUNCTION__); @@ -1351,14 +1345,13 @@ static int r128_cce_swap(DRM_IOCTL_ARGS) return 0; } -static int r128_cce_vertex(DRM_IOCTL_ARGS) +static int r128_cce_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_r128_private_t *dev_priv = dev->dev_private; struct drm_device_dma *dma = dev->dma; struct drm_buf *buf; drm_r128_buf_priv_t *buf_priv; - drm_r128_vertex_t vertex; + drm_r128_vertex_t *vertex = data; LOCK_TEST_WITH_RETURN(dev, file_priv); @@ -1367,27 +1360,24 @@ static int r128_cce_vertex(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(vertex, (drm_r128_vertex_t __user *) data, - sizeof(vertex)); - DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n", - DRM_CURRENTPID, vertex.idx, vertex.count, vertex.discard); + DRM_CURRENTPID, vertex->idx, vertex->count, vertex->discard); - if (vertex.idx < 0 || vertex.idx >= dma->buf_count) { + if (vertex->idx < 0 || vertex->idx >= dma->buf_count) { DRM_ERROR("buffer index %d (of %d max)\n", - vertex.idx, dma->buf_count - 1); + vertex->idx, dma->buf_count - 1); return -EINVAL; } - if (vertex.prim < 0 || - vertex.prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) { - DRM_ERROR("buffer prim %d\n", vertex.prim); + if (vertex->prim < 0 || + vertex->prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) { + DRM_ERROR("buffer prim %d\n", vertex->prim); return -EINVAL; } RING_SPACE_TEST_WITH_RETURN(dev_priv); VB_AGE_TEST_WITH_RETURN(dev_priv); - buf = dma->buflist[vertex.idx]; + buf = dma->buflist[vertex->idx]; buf_priv = buf->dev_private; if (buf->file_priv != file_priv) { @@ -1396,13 +1386,13 @@ static int r128_cce_vertex(DRM_IOCTL_ARGS) 
return -EINVAL; } if (buf->pending) { - DRM_ERROR("sending pending buffer %d\n", vertex.idx); + DRM_ERROR("sending pending buffer %d\n", vertex->idx); return -EINVAL; } - buf->used = vertex.count; - buf_priv->prim = vertex.prim; - buf_priv->discard = vertex.discard; + buf->used = vertex->count; + buf_priv->prim = vertex->prim; + buf_priv->discard = vertex->discard; r128_cce_dispatch_vertex(dev, buf); @@ -1410,14 +1400,13 @@ static int r128_cce_vertex(DRM_IOCTL_ARGS) return 0; } -static int r128_cce_indices(DRM_IOCTL_ARGS) +static int r128_cce_indices(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_r128_private_t *dev_priv = dev->dev_private; struct drm_device_dma *dma = dev->dma; struct drm_buf *buf; drm_r128_buf_priv_t *buf_priv; - drm_r128_indices_t elts; + drm_r128_indices_t *elts = data; int count; LOCK_TEST_WITH_RETURN(dev, file_priv); @@ -1427,26 +1416,24 @@ static int r128_cce_indices(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(elts, (drm_r128_indices_t __user *) data, - sizeof(elts)); - DRM_DEBUG("pid=%d buf=%d s=%d e=%d d=%d\n", DRM_CURRENTPID, - elts.idx, elts.start, elts.end, elts.discard); + elts->idx, elts->start, elts->end, elts->discard); - if (elts.idx < 0 || elts.idx >= dma->buf_count) { + if (elts->idx < 0 || elts->idx >= dma->buf_count) { DRM_ERROR("buffer index %d (of %d max)\n", - elts.idx, dma->buf_count - 1); + elts->idx, dma->buf_count - 1); return -EINVAL; } - if (elts.prim < 0 || elts.prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) { - DRM_ERROR("buffer prim %d\n", elts.prim); + if (elts->prim < 0 || + elts->prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) { + DRM_ERROR("buffer prim %d\n", elts->prim); return -EINVAL; } RING_SPACE_TEST_WITH_RETURN(dev_priv); VB_AGE_TEST_WITH_RETURN(dev_priv); - buf = dma->buflist[elts.idx]; + buf = dma->buflist[elts->idx]; buf_priv = buf->dev_private; if (buf->file_priv != file_priv) { @@ -1455,89 +1442,81 @@ static int r128_cce_indices(DRM_IOCTL_ARGS) return -EINVAL; } if (buf->pending) { - DRM_ERROR("sending pending buffer %d\n", elts.idx); + DRM_ERROR("sending pending buffer %d\n", elts->idx); return -EINVAL; } - count = (elts.end - elts.start) / sizeof(u16); - elts.start -= R128_INDEX_PRIM_OFFSET; + count = (elts->end - elts->start) / sizeof(u16); + elts->start -= R128_INDEX_PRIM_OFFSET; - if (elts.start & 0x7) { - DRM_ERROR("misaligned buffer 0x%x\n", elts.start); + if (elts->start & 0x7) { + DRM_ERROR("misaligned buffer 0x%x\n", elts->start); return -EINVAL; } - if (elts.start < buf->used) { - DRM_ERROR("no header 0x%x - 0x%x\n", elts.start, buf->used); + if (elts->start < buf->used) { + DRM_ERROR("no header 0x%x - 0x%x\n", elts->start, buf->used); return -EINVAL; } - buf->used = elts.end; - buf_priv->prim = elts.prim; - buf_priv->discard = elts.discard; + buf->used = elts->end; + buf_priv->prim = elts->prim; + buf_priv->discard = elts->discard; - r128_cce_dispatch_indices(dev, buf, elts.start, elts.end, count); + r128_cce_dispatch_indices(dev, buf, elts->start, elts->end, count); COMMIT_RING(); return 0; } -static int r128_cce_blit(DRM_IOCTL_ARGS) +static int r128_cce_blit(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; struct drm_device_dma *dma = dev->dma; drm_r128_private_t *dev_priv = dev->dev_private; - drm_r128_blit_t blit; + drm_r128_blit_t *blit = data; int ret; LOCK_TEST_WITH_RETURN(dev, file_priv); - DRM_COPY_FROM_USER_IOCTL(blit, (drm_r128_blit_t __user *) data, - sizeof(blit)); + DRM_DEBUG("pid=%d index=%d\n", DRM_CURRENTPID, 
blit->idx); - DRM_DEBUG("pid=%d index=%d\n", DRM_CURRENTPID, blit.idx); - - if (blit.idx < 0 || blit.idx >= dma->buf_count) { + if (blit->idx < 0 || blit->idx >= dma->buf_count) { DRM_ERROR("buffer index %d (of %d max)\n", - blit.idx, dma->buf_count - 1); + blit->idx, dma->buf_count - 1); return -EINVAL; } RING_SPACE_TEST_WITH_RETURN(dev_priv); VB_AGE_TEST_WITH_RETURN(dev_priv); - ret = r128_cce_dispatch_blit(dev, file_priv, &blit); + ret = r128_cce_dispatch_blit(dev, file_priv, blit); COMMIT_RING(); return ret; } -static int r128_cce_depth(DRM_IOCTL_ARGS) +static int r128_cce_depth(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_r128_private_t *dev_priv = dev->dev_private; - drm_r128_depth_t depth; + drm_r128_depth_t *depth = data; int ret; LOCK_TEST_WITH_RETURN(dev, file_priv); - DRM_COPY_FROM_USER_IOCTL(depth, (drm_r128_depth_t __user *) data, - sizeof(depth)); - RING_SPACE_TEST_WITH_RETURN(dev_priv); ret = -EINVAL; - switch (depth.func) { + switch (depth->func) { case R128_WRITE_SPAN: - ret = r128_cce_dispatch_write_span(dev, &depth); + ret = r128_cce_dispatch_write_span(dev, depth); break; case R128_WRITE_PIXELS: - ret = r128_cce_dispatch_write_pixels(dev, &depth); + ret = r128_cce_dispatch_write_pixels(dev, depth); break; case R128_READ_SPAN: - ret = r128_cce_dispatch_read_span(dev, &depth); + ret = r128_cce_dispatch_read_span(dev, depth); break; case R128_READ_PIXELS: - ret = r128_cce_dispatch_read_pixels(dev, &depth); + ret = r128_cce_dispatch_read_pixels(dev, depth); break; } @@ -1545,19 +1524,15 @@ static int r128_cce_depth(DRM_IOCTL_ARGS) return ret; } -static int r128_cce_stipple(DRM_IOCTL_ARGS) +static int r128_cce_stipple(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_r128_private_t *dev_priv = dev->dev_private; - drm_r128_stipple_t stipple; + drm_r128_stipple_t *stipple = data; u32 mask[32]; LOCK_TEST_WITH_RETURN(dev, file_priv); - DRM_COPY_FROM_USER_IOCTL(stipple, (drm_r128_stipple_t __user *) data, - sizeof(stipple)); - - if (DRM_COPY_FROM_USER(&mask, stipple.mask, 32 * sizeof(u32))) + if (DRM_COPY_FROM_USER(&mask, stipple->mask, 32 * sizeof(u32))) return -EFAULT; RING_SPACE_TEST_WITH_RETURN(dev_priv); @@ -1568,14 +1543,13 @@ static int r128_cce_stipple(DRM_IOCTL_ARGS) return 0; } -static int r128_cce_indirect(DRM_IOCTL_ARGS) +static int r128_cce_indirect(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_r128_private_t *dev_priv = dev->dev_private; struct drm_device_dma *dma = dev->dma; struct drm_buf *buf; drm_r128_buf_priv_t *buf_priv; - drm_r128_indirect_t indirect; + drm_r128_indirect_t *indirect = data; #if 0 RING_LOCALS; #endif @@ -1587,19 +1561,17 @@ static int r128_cce_indirect(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(indirect, (drm_r128_indirect_t __user *) data, - sizeof(indirect)); - DRM_DEBUG("indirect: idx=%d s=%d e=%d d=%d\n", - indirect.idx, indirect.start, indirect.end, indirect.discard); + indirect->idx, indirect->start, indirect->end, + indirect->discard); - if (indirect.idx < 0 || indirect.idx >= dma->buf_count) { + if (indirect->idx < 0 || indirect->idx >= dma->buf_count) { DRM_ERROR("buffer index %d (of %d max)\n", - indirect.idx, dma->buf_count - 1); + indirect->idx, dma->buf_count - 1); return -EINVAL; } - buf = dma->buflist[indirect.idx]; + buf = dma->buflist[indirect->idx]; buf_priv = buf->dev_private; if (buf->file_priv != file_priv) { @@ -1608,21 +1580,21 @@ static int r128_cce_indirect(DRM_IOCTL_ARGS) return -EINVAL; } 
if (buf->pending) { - DRM_ERROR("sending pending buffer %d\n", indirect.idx); + DRM_ERROR("sending pending buffer %d\n", indirect->idx); return -EINVAL; } - if (indirect.start < buf->used) { + if (indirect->start < buf->used) { DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n", - indirect.start, buf->used); + indirect->start, buf->used); return -EINVAL; } RING_SPACE_TEST_WITH_RETURN(dev_priv); VB_AGE_TEST_WITH_RETURN(dev_priv); - buf->used = indirect.end; - buf_priv->discard = indirect.discard; + buf->used = indirect->end; + buf_priv->discard = indirect->discard; #if 0 /* Wait for the 3D stream to idle before the indirect buffer @@ -1637,17 +1609,16 @@ static int r128_cce_indirect(DRM_IOCTL_ARGS) * X server. This is insecure and is thus only available to * privileged clients. */ - r128_cce_dispatch_indirect(dev, buf, indirect.start, indirect.end); + r128_cce_dispatch_indirect(dev, buf, indirect->start, indirect->end); COMMIT_RING(); return 0; } -static int r128_getparam(DRM_IOCTL_ARGS) +static int r128_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_r128_private_t *dev_priv = dev->dev_private; - drm_r128_getparam_t param; + drm_r128_getparam_t *param = data; int value; if (!dev_priv) { @@ -1655,12 +1626,9 @@ static int r128_getparam(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(param, (drm_r128_getparam_t __user *) data, - sizeof(param)); - DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); - switch (param.param) { + switch (param->param) { case R128_PARAM_IRQ_NR: value = dev->irq; break; @@ -1668,7 +1636,7 @@ static int r128_getparam(DRM_IOCTL_ARGS) return -EINVAL; } - if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) { + if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) { DRM_ERROR("copy_to_user\n"); return -EFAULT; } @@ -1692,23 +1660,23 @@ void r128_driver_lastclose(struct drm_device * dev) } struct drm_ioctl_desc r128_ioctls[] = { - [DRM_IOCTL_NR(DRM_R128_INIT)] = {r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_R128_CCE_START)] = {r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_R128_CCE_STOP)] = {r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_R128_CCE_RESET)] = {r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_R128_CCE_IDLE)] = {r128_cce_idle, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_R128_RESET)] = {r128_engine_reset, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_R128_FULLSCREEN)] = {r128_fullscreen, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_R128_SWAP)] = {r128_cce_swap, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_R128_FLIP)] = {r128_cce_flip, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_R128_CLEAR)] = {r128_cce_clear, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_R128_VERTEX)] = {r128_cce_vertex, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_R128_INDICES)] = {r128_cce_indices, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_R128_BLIT)] = {r128_cce_blit, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_R128_DEPTH)] = {r128_cce_depth, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_R128_STIPPLE)] = {r128_cce_stipple, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_R128_INDIRECT)] = {r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_R128_GETPARAM)] = {r128_getparam, DRM_AUTH}, + DRM_IOCTL_DEF(DRM_R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_R128_CCE_RESET, r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_R128_CCE_IDLE, r128_cce_idle, 
DRM_AUTH), + DRM_IOCTL_DEF(DRM_R128_RESET, r128_engine_reset, DRM_AUTH), + DRM_IOCTL_DEF(DRM_R128_FULLSCREEN, r128_fullscreen, DRM_AUTH), + DRM_IOCTL_DEF(DRM_R128_SWAP, r128_cce_swap, DRM_AUTH), + DRM_IOCTL_DEF(DRM_R128_FLIP, r128_cce_flip, DRM_AUTH), + DRM_IOCTL_DEF(DRM_R128_CLEAR, r128_cce_clear, DRM_AUTH), + DRM_IOCTL_DEF(DRM_R128_VERTEX, r128_cce_vertex, DRM_AUTH), + DRM_IOCTL_DEF(DRM_R128_INDICES, r128_cce_indices, DRM_AUTH), + DRM_IOCTL_DEF(DRM_R128_BLIT, r128_cce_blit, DRM_AUTH), + DRM_IOCTL_DEF(DRM_R128_DEPTH, r128_cce_depth, DRM_AUTH), + DRM_IOCTL_DEF(DRM_R128_STIPPLE, r128_cce_stipple, DRM_AUTH), + DRM_IOCTL_DEF(DRM_R128_INDIRECT, r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_R128_GETPARAM, r128_getparam, DRM_AUTH), }; int r128_max_ioctl = DRM_ARRAY_SIZE(r128_ioctls); diff --git a/shared-core/radeon_cp.c b/shared-core/radeon_cp.c index dd87f009..06861381 100644 --- a/shared-core/radeon_cp.c +++ b/shared-core/radeon_cp.c @@ -1838,24 +1838,20 @@ static int radeon_do_resume_cp(struct drm_device * dev) return 0; } -int radeon_cp_init(DRM_IOCTL_ARGS) +int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - drm_radeon_init_t init; + drm_radeon_init_t *init = data; LOCK_TEST_WITH_RETURN(dev, file_priv); - DRM_COPY_FROM_USER_IOCTL(init, (drm_radeon_init_t __user *) data, - sizeof(init)); - - if (init.func == RADEON_INIT_R300_CP) + if (init->func == RADEON_INIT_R300_CP) r300_init_reg_flags(); - switch (init.func) { + switch (init->func) { case RADEON_INIT_CP: case RADEON_INIT_R200_CP: case RADEON_INIT_R300_CP: - return radeon_do_init_cp(dev, &init); + return radeon_do_init_cp(dev, init); case RADEON_CLEANUP_CP: return radeon_do_cleanup_cp(dev); } @@ -1863,9 +1859,8 @@ int radeon_cp_init(DRM_IOCTL_ARGS) return -EINVAL; } -int radeon_cp_start(DRM_IOCTL_ARGS) +int radeon_cp_start(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_radeon_private_t *dev_priv = dev->dev_private; DRM_DEBUG("\n"); @@ -1889,33 +1884,29 @@ int radeon_cp_start(DRM_IOCTL_ARGS) /* Stop the CP. The engine must have been idled before calling this * routine. */ -int radeon_cp_stop(DRM_IOCTL_ARGS) +int radeon_cp_stop(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_radeon_private_t *dev_priv = dev->dev_private; - drm_radeon_cp_stop_t stop; + drm_radeon_cp_stop_t *stop = data; int ret; DRM_DEBUG("\n"); LOCK_TEST_WITH_RETURN(dev, file_priv); - DRM_COPY_FROM_USER_IOCTL(stop, (drm_radeon_cp_stop_t __user *) data, - sizeof(stop)); - if (!dev_priv->cp_running) return 0; /* Flush any pending CP commands. This ensures any outstanding * commands are exectuted by the engine before we turn it off. */ - if (stop.flush) { + if (stop->flush) { radeon_do_cp_flush(dev_priv); } /* If we fail to make the engine go idle, we return an error * code so that the DRM ioctl wrapper can try again. */ - if (stop.idle) { + if (stop->idle) { ret = radeon_do_cp_idle(dev_priv); if (ret) return ret; @@ -1983,9 +1974,8 @@ void radeon_do_release(struct drm_device * dev) /* Just reset the CP ring. Called as part of an X Server engine reset. 
*/ -int radeon_cp_reset(DRM_IOCTL_ARGS) +int radeon_cp_reset(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_radeon_private_t *dev_priv = dev->dev_private; DRM_DEBUG("\n"); @@ -2004,9 +1994,8 @@ int radeon_cp_reset(DRM_IOCTL_ARGS) return 0; } -int radeon_cp_idle(DRM_IOCTL_ARGS) +int radeon_cp_idle(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_radeon_private_t *dev_priv = dev->dev_private; DRM_DEBUG("\n"); @@ -2017,16 +2006,14 @@ int radeon_cp_idle(DRM_IOCTL_ARGS) /* Added by Charl P. Botha to call radeon_do_resume_cp(). */ -int radeon_cp_resume(DRM_IOCTL_ARGS) +int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; return radeon_do_resume_cp(dev); } -int radeon_engine_reset(DRM_IOCTL_ARGS) +int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; DRM_DEBUG("\n"); LOCK_TEST_WITH_RETURN(dev, file_priv); @@ -2040,7 +2027,7 @@ int radeon_engine_reset(DRM_IOCTL_ARGS) /* KW: Deprecated to say the least: */ -int radeon_fullscreen(DRM_IOCTL_ARGS) +int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv) { return 0; } @@ -2218,42 +2205,36 @@ static int radeon_cp_get_buffers(struct drm_device *dev, return 0; } -int radeon_cp_buffers(DRM_IOCTL_ARGS) +int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; struct drm_device_dma *dma = dev->dma; int ret = 0; - struct drm_dma __user *argp = (void __user *)data; - struct drm_dma d; + struct drm_dma *d = data; LOCK_TEST_WITH_RETURN(dev, file_priv); - DRM_COPY_FROM_USER_IOCTL(d, argp, sizeof(d)); - /* Please don't send us buffers. */ - if (d.send_count != 0) { + if (d->send_count != 0) { DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", - DRM_CURRENTPID, d.send_count); + DRM_CURRENTPID, d->send_count); return -EINVAL; } /* We'll send you buffers. 
*/ - if (d.request_count < 0 || d.request_count > dma->buf_count) { + if (d->request_count < 0 || d->request_count > dma->buf_count) { DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", - DRM_CURRENTPID, d.request_count, dma->buf_count); + DRM_CURRENTPID, d->request_count, dma->buf_count); return -EINVAL; } - d.granted_count = 0; + d->granted_count = 0; - if (d.request_count) { - ret = radeon_cp_get_buffers(dev, file_priv, &d); + if (d->request_count) { + ret = radeon_cp_get_buffers(dev, file_priv, d); } - DRM_COPY_TO_USER_IOCTL(argp, d, sizeof(d)); - return ret; } diff --git a/shared-core/radeon_drv.h b/shared-core/radeon_drv.h index 631fe007..006559df 100644 --- a/shared-core/radeon_drv.h +++ b/shared-core/radeon_drv.h @@ -335,15 +335,15 @@ static __inline__ int radeon_check_offset(drm_radeon_private_t *dev_priv, } /* radeon_cp.c */ -extern int radeon_cp_init(DRM_IOCTL_ARGS); -extern int radeon_cp_start(DRM_IOCTL_ARGS); -extern int radeon_cp_stop(DRM_IOCTL_ARGS); -extern int radeon_cp_reset(DRM_IOCTL_ARGS); -extern int radeon_cp_idle(DRM_IOCTL_ARGS); -extern int radeon_cp_resume(DRM_IOCTL_ARGS); -extern int radeon_engine_reset(DRM_IOCTL_ARGS); -extern int radeon_fullscreen(DRM_IOCTL_ARGS); -extern int radeon_cp_buffers(DRM_IOCTL_ARGS); +extern int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int radeon_cp_start(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int radeon_cp_stop(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int radeon_cp_reset(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int radeon_cp_idle(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv); extern void radeon_freelist_reset(struct drm_device * dev); extern struct drm_buf *radeon_freelist_get(struct drm_device * dev); @@ -352,16 +352,16 @@ extern int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n); extern int radeon_do_cp_idle(drm_radeon_private_t * dev_priv); -extern int radeon_mem_alloc(DRM_IOCTL_ARGS); -extern int radeon_mem_free(DRM_IOCTL_ARGS); -extern int radeon_mem_init_heap(DRM_IOCTL_ARGS); +extern int radeon_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int radeon_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int radeon_mem_init_heap(struct drm_device *dev, void *data, struct drm_file *file_priv); extern void radeon_mem_takedown(struct mem_block **heap); extern void radeon_mem_release(struct drm_file *file_priv, struct mem_block *heap); /* radeon_irq.c */ -extern int radeon_irq_emit(DRM_IOCTL_ARGS); -extern int radeon_irq_wait(DRM_IOCTL_ARGS); +extern int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv); extern void radeon_do_release(struct drm_device * dev); extern int radeon_driver_vblank_wait(struct drm_device * dev, diff --git a/shared-core/radeon_irq.c b/shared-core/radeon_irq.c index 140f9668..1ece6399 100644 --- a/shared-core/radeon_irq.c +++ b/shared-core/radeon_irq.c @@ -197,11 +197,10 
@@ int radeon_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence) /* Needs the lock as it touches the ring. */ -int radeon_irq_emit(DRM_IOCTL_ARGS) +int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_radeon_private_t *dev_priv = dev->dev_private; - drm_radeon_irq_emit_t emit; + drm_radeon_irq_emit_t *emit = data; int result; LOCK_TEST_WITH_RETURN(dev, file_priv); @@ -211,12 +210,9 @@ int radeon_irq_emit(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(emit, (drm_radeon_irq_emit_t __user *) data, - sizeof(emit)); - result = radeon_emit_irq(dev); - if (DRM_COPY_TO_USER(emit.irq_seq, &result, sizeof(int))) { + if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) { DRM_ERROR("copy_to_user\n"); return -EFAULT; } @@ -226,21 +222,17 @@ int radeon_irq_emit(DRM_IOCTL_ARGS) /* Doesn't need the hardware lock. */ -int radeon_irq_wait(DRM_IOCTL_ARGS) +int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_radeon_private_t *dev_priv = dev->dev_private; - drm_radeon_irq_wait_t irqwait; + drm_radeon_irq_wait_t *irqwait = data; if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(irqwait, (drm_radeon_irq_wait_t __user *) data, - sizeof(irqwait)); - - return radeon_wait_irq(dev, irqwait.irq_seq); + return radeon_wait_irq(dev, irqwait->irq_seq); } static void radeon_enable_interrupt(struct drm_device *dev) diff --git a/shared-core/radeon_mem.c b/shared-core/radeon_mem.c index 82d454ff..9947e940 100644 --- a/shared-core/radeon_mem.c +++ b/shared-core/radeon_mem.c @@ -217,11 +217,10 @@ static struct mem_block **get_heap(drm_radeon_private_t * dev_priv, int region) } } -int radeon_mem_alloc(DRM_IOCTL_ARGS) +int radeon_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_radeon_private_t *dev_priv = dev->dev_private; - drm_radeon_mem_alloc_t alloc; + drm_radeon_mem_alloc_t *alloc = data; struct mem_block *block, **heap; if (!dev_priv) { @@ -229,25 +228,23 @@ int radeon_mem_alloc(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(alloc, (drm_radeon_mem_alloc_t __user *) data, - sizeof(alloc)); - - heap = get_heap(dev_priv, alloc.region); + heap = get_heap(dev_priv, alloc->region); if (!heap || !*heap) return -EFAULT; /* Make things easier on ourselves: all allocations at least * 4k aligned. 
*/ - if (alloc.alignment < 12) - alloc.alignment = 12; + if (alloc->alignment < 12) + alloc->alignment = 12; - block = alloc_block(*heap, alloc.size, alloc.alignment, file_priv); + block = alloc_block(*heap, alloc->size, alloc->alignment, file_priv); if (!block) return -ENOMEM; - if (DRM_COPY_TO_USER(alloc.region_offset, &block->start, sizeof(int))) { + if (DRM_COPY_TO_USER(alloc->region_offset, &block->start, + sizeof(int))) { DRM_ERROR("copy_to_user\n"); return -EFAULT; } @@ -255,11 +252,10 @@ int radeon_mem_alloc(DRM_IOCTL_ARGS) return 0; } -int radeon_mem_free(DRM_IOCTL_ARGS) +int radeon_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_radeon_private_t *dev_priv = dev->dev_private; - drm_radeon_mem_free_t memfree; + drm_radeon_mem_free_t *memfree = data; struct mem_block *block, **heap; if (!dev_priv) { @@ -267,14 +263,11 @@ int radeon_mem_free(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(memfree, (drm_radeon_mem_free_t __user *) data, - sizeof(memfree)); - - heap = get_heap(dev_priv, memfree.region); + heap = get_heap(dev_priv, memfree->region); if (!heap || !*heap) return -EFAULT; - block = find_block(*heap, memfree.region_offset); + block = find_block(*heap, memfree->region_offset); if (!block) return -EFAULT; @@ -285,11 +278,10 @@ int radeon_mem_free(DRM_IOCTL_ARGS) return 0; } -int radeon_mem_init_heap(DRM_IOCTL_ARGS) +int radeon_mem_init_heap(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_radeon_private_t *dev_priv = dev->dev_private; - drm_radeon_mem_init_heap_t initheap; + drm_radeon_mem_init_heap_t *initheap = data; struct mem_block **heap; if (!dev_priv) { @@ -297,11 +289,7 @@ int radeon_mem_init_heap(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(initheap, - (drm_radeon_mem_init_heap_t __user *) data, - sizeof(initheap)); - - heap = get_heap(dev_priv, initheap.region); + heap = get_heap(dev_priv, initheap->region); if (!heap) return -EFAULT; @@ -310,5 +298,5 @@ int radeon_mem_init_heap(DRM_IOCTL_ARGS) return -EFAULT; } - return init_heap(heap, initheap.start, initheap.size); + return init_heap(heap, initheap->start, initheap->size); } diff --git a/shared-core/radeon_state.c b/shared-core/radeon_state.c index 3ca49d6a..ac7f6011 100644 --- a/shared-core/radeon_state.c +++ b/shared-core/radeon_state.c @@ -2075,71 +2075,58 @@ static void radeon_surfaces_release(struct drm_file *file_priv, /* ================================================================ * IOCTL functions */ -static int radeon_surface_alloc(DRM_IOCTL_ARGS) +static int radeon_surface_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_radeon_private_t *dev_priv = dev->dev_private; - drm_radeon_surface_alloc_t alloc; + drm_radeon_surface_alloc_t *alloc = data; if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(alloc, - (drm_radeon_surface_alloc_t __user *) data, - sizeof(alloc)); - - if (alloc_surface(&alloc, dev_priv, file_priv) == -1) + if (alloc_surface(alloc, dev_priv, file_priv) == -1) return -EINVAL; else return 0; } -static int radeon_surface_free(DRM_IOCTL_ARGS) +static int radeon_surface_free(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_radeon_private_t *dev_priv = dev->dev_private; - drm_radeon_surface_free_t memfree; + drm_radeon_surface_free_t *memfree = data; if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 
return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(memfree, (drm_radeon_surface_free_t __user *) data, - sizeof(memfree)); - - if (free_surface(file_priv, dev_priv, memfree.address)) + if (free_surface(file_priv, dev_priv, memfree->address)) return -EINVAL; else return 0; } -static int radeon_cp_clear(DRM_IOCTL_ARGS) +static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_radeon_clear_t clear; + drm_radeon_clear_t *clear = data; drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS]; DRM_DEBUG("\n"); LOCK_TEST_WITH_RETURN(dev, file_priv); - DRM_COPY_FROM_USER_IOCTL(clear, (drm_radeon_clear_t __user *) data, - sizeof(clear)); - RING_SPACE_TEST_WITH_RETURN(dev_priv); if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS) sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS; - if (DRM_COPY_FROM_USER(&depth_boxes, clear.depth_boxes, + if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes, sarea_priv->nbox * sizeof(depth_boxes[0]))) return -EFAULT; - radeon_cp_dispatch_clear(dev, &clear, depth_boxes); + radeon_cp_dispatch_clear(dev, clear, depth_boxes); COMMIT_RING(); return 0; @@ -2175,9 +2162,8 @@ static int radeon_do_init_pageflip(struct drm_device * dev) /* Swapping and flipping are different operations, need different ioctls. * They can & should be intermixed to support multiple 3d windows. */ -static int radeon_cp_flip(DRM_IOCTL_ARGS) +static int radeon_cp_flip(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_radeon_private_t *dev_priv = dev->dev_private; DRM_DEBUG("\n"); @@ -2194,9 +2180,8 @@ static int radeon_cp_flip(DRM_IOCTL_ARGS) return 0; } -static int radeon_cp_swap(DRM_IOCTL_ARGS) +static int radeon_cp_swap(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; DRM_DEBUG("\n"); @@ -2215,14 +2200,13 @@ static int radeon_cp_swap(DRM_IOCTL_ARGS) return 0; } -static int radeon_cp_vertex(DRM_IOCTL_ARGS) +static int radeon_cp_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_sarea_t *sarea_priv; struct drm_device_dma *dma = dev->dma; struct drm_buf *buf; - drm_radeon_vertex_t vertex; + drm_radeon_vertex_t *vertex = data; drm_radeon_tcl_prim_t prim; LOCK_TEST_WITH_RETURN(dev, file_priv); @@ -2234,26 +2218,23 @@ static int radeon_cp_vertex(DRM_IOCTL_ARGS) sarea_priv = dev_priv->sarea_priv; - DRM_COPY_FROM_USER_IOCTL(vertex, (drm_radeon_vertex_t __user *) data, - sizeof(vertex)); - DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n", - DRM_CURRENTPID, vertex.idx, vertex.count, vertex.discard); + DRM_CURRENTPID, vertex->idx, vertex->count, vertex->discard); - if (vertex.idx < 0 || vertex.idx >= dma->buf_count) { + if (vertex->idx < 0 || vertex->idx >= dma->buf_count) { DRM_ERROR("buffer index %d (of %d max)\n", - vertex.idx, dma->buf_count - 1); + vertex->idx, dma->buf_count - 1); return -EINVAL; } - if (vertex.prim < 0 || vertex.prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) { - DRM_ERROR("buffer prim %d\n", vertex.prim); + if (vertex->prim < 0 || vertex->prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) { + DRM_ERROR("buffer prim %d\n", vertex->prim); return -EINVAL; } RING_SPACE_TEST_WITH_RETURN(dev_priv); VB_AGE_TEST_WITH_RETURN(dev_priv); - buf = dma->buflist[vertex.idx]; + buf = 
dma->buflist[vertex->idx]; if (buf->file_priv != file_priv) { DRM_ERROR("process %d using buffer owned by %p\n", @@ -2261,14 +2242,14 @@ static int radeon_cp_vertex(DRM_IOCTL_ARGS) return -EINVAL; } if (buf->pending) { - DRM_ERROR("sending pending buffer %d\n", vertex.idx); + DRM_ERROR("sending pending buffer %d\n", vertex->idx); return -EINVAL; } /* Build up a prim_t record: */ - if (vertex.count) { - buf->used = vertex.count; /* not used? */ + if (vertex->count) { + buf->used = vertex->count; /* not used? */ if (sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS) { if (radeon_emit_state(dev_priv, file_priv, @@ -2286,15 +2267,15 @@ static int radeon_cp_vertex(DRM_IOCTL_ARGS) } prim.start = 0; - prim.finish = vertex.count; /* unused */ - prim.prim = vertex.prim; - prim.numverts = vertex.count; + prim.finish = vertex->count; /* unused */ + prim.prim = vertex->prim; + prim.numverts = vertex->count; prim.vc_format = dev_priv->sarea_priv->vc_format; radeon_cp_dispatch_vertex(dev, buf, &prim); } - if (vertex.discard) { + if (vertex->discard) { radeon_cp_discard_buffer(dev, buf); } @@ -2302,14 +2283,13 @@ static int radeon_cp_vertex(DRM_IOCTL_ARGS) return 0; } -static int radeon_cp_indices(DRM_IOCTL_ARGS) +static int radeon_cp_indices(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_sarea_t *sarea_priv; struct drm_device_dma *dma = dev->dma; struct drm_buf *buf; - drm_radeon_indices_t elts; + drm_radeon_indices_t *elts = data; drm_radeon_tcl_prim_t prim; int count; @@ -2321,26 +2301,24 @@ static int radeon_cp_indices(DRM_IOCTL_ARGS) } sarea_priv = dev_priv->sarea_priv; - DRM_COPY_FROM_USER_IOCTL(elts, (drm_radeon_indices_t __user *) data, - sizeof(elts)); - DRM_DEBUG("pid=%d index=%d start=%d end=%d discard=%d\n", - DRM_CURRENTPID, elts.idx, elts.start, elts.end, elts.discard); + DRM_CURRENTPID, elts->idx, elts->start, elts->end, + elts->discard); - if (elts.idx < 0 || elts.idx >= dma->buf_count) { + if (elts->idx < 0 || elts->idx >= dma->buf_count) { DRM_ERROR("buffer index %d (of %d max)\n", - elts.idx, dma->buf_count - 1); + elts->idx, dma->buf_count - 1); return -EINVAL; } - if (elts.prim < 0 || elts.prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) { - DRM_ERROR("buffer prim %d\n", elts.prim); + if (elts->prim < 0 || elts->prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) { + DRM_ERROR("buffer prim %d\n", elts->prim); return -EINVAL; } RING_SPACE_TEST_WITH_RETURN(dev_priv); VB_AGE_TEST_WITH_RETURN(dev_priv); - buf = dma->buflist[elts.idx]; + buf = dma->buflist[elts->idx]; if (buf->file_priv != file_priv) { DRM_ERROR("process %d using buffer owned by %p\n", @@ -2348,23 +2326,23 @@ static int radeon_cp_indices(DRM_IOCTL_ARGS) return -EINVAL; } if (buf->pending) { - DRM_ERROR("sending pending buffer %d\n", elts.idx); + DRM_ERROR("sending pending buffer %d\n", elts->idx); return -EINVAL; } - count = (elts.end - elts.start) / sizeof(u16); - elts.start -= RADEON_INDEX_PRIM_OFFSET; + count = (elts->end - elts->start) / sizeof(u16); + elts->start -= RADEON_INDEX_PRIM_OFFSET; - if (elts.start & 0x7) { - DRM_ERROR("misaligned buffer 0x%x\n", elts.start); + if (elts->start & 0x7) { + DRM_ERROR("misaligned buffer 0x%x\n", elts->start); return -EINVAL; } - if (elts.start < buf->used) { - DRM_ERROR("no header 0x%x - 0x%x\n", elts.start, buf->used); + if (elts->start < buf->used) { + DRM_ERROR("no header 0x%x - 0x%x\n", elts->start, buf->used); return -EINVAL; } - buf->used = elts.end; + buf->used = elts->end; if (sarea_priv->dirty & 
~RADEON_UPLOAD_CLIPRECTS) { if (radeon_emit_state(dev_priv, file_priv, @@ -2383,15 +2361,15 @@ static int radeon_cp_indices(DRM_IOCTL_ARGS) /* Build up a prim_t record: */ - prim.start = elts.start; - prim.finish = elts.end; - prim.prim = elts.prim; + prim.start = elts->start; + prim.finish = elts->end; + prim.prim = elts->prim; prim.offset = 0; /* offset from start of dma buffers */ prim.numverts = RADEON_MAX_VB_VERTS; /* duh */ prim.vc_format = dev_priv->sarea_priv->vc_format; radeon_cp_dispatch_indices(dev, buf, &prim); - if (elts.discard) { + if (elts->discard) { radeon_cp_discard_buffer(dev, buf); } @@ -2399,51 +2377,43 @@ static int radeon_cp_indices(DRM_IOCTL_ARGS) return 0; } -static int radeon_cp_texture(DRM_IOCTL_ARGS) +static int radeon_cp_texture(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_radeon_private_t *dev_priv = dev->dev_private; - drm_radeon_texture_t tex; + drm_radeon_texture_t *tex = data; drm_radeon_tex_image_t image; int ret; LOCK_TEST_WITH_RETURN(dev, file_priv); - DRM_COPY_FROM_USER_IOCTL(tex, (drm_radeon_texture_t __user *) data, - sizeof(tex)); - - if (tex.image == NULL) { + if (tex->image == NULL) { DRM_ERROR("null texture image!\n"); return -EINVAL; } if (DRM_COPY_FROM_USER(&image, - (drm_radeon_tex_image_t __user *) tex.image, + (drm_radeon_tex_image_t __user *) tex->image, sizeof(image))) return -EFAULT; RING_SPACE_TEST_WITH_RETURN(dev_priv); VB_AGE_TEST_WITH_RETURN(dev_priv); - ret = radeon_cp_dispatch_texture(dev, file_priv, &tex, &image); + ret = radeon_cp_dispatch_texture(dev, file_priv, tex, &image); COMMIT_RING(); return ret; } -static int radeon_cp_stipple(DRM_IOCTL_ARGS) +static int radeon_cp_stipple(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_radeon_private_t *dev_priv = dev->dev_private; - drm_radeon_stipple_t stipple; + drm_radeon_stipple_t *stipple = data; u32 mask[32]; LOCK_TEST_WITH_RETURN(dev, file_priv); - DRM_COPY_FROM_USER_IOCTL(stipple, (drm_radeon_stipple_t __user *) data, - sizeof(stipple)); - - if (DRM_COPY_FROM_USER(&mask, stipple.mask, 32 * sizeof(u32))) + if (DRM_COPY_FROM_USER(&mask, stipple->mask, 32 * sizeof(u32))) return -EFAULT; RING_SPACE_TEST_WITH_RETURN(dev_priv); @@ -2454,13 +2424,12 @@ static int radeon_cp_stipple(DRM_IOCTL_ARGS) return 0; } -static int radeon_cp_indirect(DRM_IOCTL_ARGS) +static int radeon_cp_indirect(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_radeon_private_t *dev_priv = dev->dev_private; struct drm_device_dma *dma = dev->dma; struct drm_buf *buf; - drm_radeon_indirect_t indirect; + drm_radeon_indirect_t *indirect = data; RING_LOCALS; LOCK_TEST_WITH_RETURN(dev, file_priv); @@ -2470,20 +2439,17 @@ static int radeon_cp_indirect(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(indirect, - (drm_radeon_indirect_t __user *) data, - sizeof(indirect)); - DRM_DEBUG("indirect: idx=%d s=%d e=%d d=%d\n", - indirect.idx, indirect.start, indirect.end, indirect.discard); + indirect->idx, indirect->start, indirect->end, + indirect->discard); - if (indirect.idx < 0 || indirect.idx >= dma->buf_count) { + if (indirect->idx < 0 || indirect->idx >= dma->buf_count) { DRM_ERROR("buffer index %d (of %d max)\n", - indirect.idx, dma->buf_count - 1); + indirect->idx, dma->buf_count - 1); return -EINVAL; } - buf = dma->buflist[indirect.idx]; + buf = dma->buflist[indirect->idx]; if (buf->file_priv != file_priv) { DRM_ERROR("process %d using buffer owned by %p\n", @@ -2491,20 +2457,20 @@ static int 
radeon_cp_indirect(DRM_IOCTL_ARGS) return -EINVAL; } if (buf->pending) { - DRM_ERROR("sending pending buffer %d\n", indirect.idx); + DRM_ERROR("sending pending buffer %d\n", indirect->idx); return -EINVAL; } - if (indirect.start < buf->used) { + if (indirect->start < buf->used) { DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n", - indirect.start, buf->used); + indirect->start, buf->used); return -EINVAL; } RING_SPACE_TEST_WITH_RETURN(dev_priv); VB_AGE_TEST_WITH_RETURN(dev_priv); - buf->used = indirect.end; + buf->used = indirect->end; /* Wait for the 3D stream to idle before the indirect buffer * containing 2D acceleration commands is processed. @@ -2519,8 +2485,8 @@ static int radeon_cp_indirect(DRM_IOCTL_ARGS) * X server. This is insecure and is thus only available to * privileged clients. */ - radeon_cp_dispatch_indirect(dev, buf, indirect.start, indirect.end); - if (indirect.discard) { + radeon_cp_dispatch_indirect(dev, buf, indirect->start, indirect->end); + if (indirect->discard) { radeon_cp_discard_buffer(dev, buf); } @@ -2528,14 +2494,13 @@ static int radeon_cp_indirect(DRM_IOCTL_ARGS) return 0; } -static int radeon_cp_vertex2(DRM_IOCTL_ARGS) +static int radeon_cp_vertex2(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_sarea_t *sarea_priv; struct drm_device_dma *dma = dev->dma; struct drm_buf *buf; - drm_radeon_vertex2_t vertex; + drm_radeon_vertex2_t *vertex = data; int i; unsigned char laststate; @@ -2548,22 +2513,19 @@ static int radeon_cp_vertex2(DRM_IOCTL_ARGS) sarea_priv = dev_priv->sarea_priv; - DRM_COPY_FROM_USER_IOCTL(vertex, (drm_radeon_vertex2_t __user *) data, - sizeof(vertex)); - DRM_DEBUG("pid=%d index=%d discard=%d\n", - DRM_CURRENTPID, vertex.idx, vertex.discard); + DRM_CURRENTPID, vertex->idx, vertex->discard); - if (vertex.idx < 0 || vertex.idx >= dma->buf_count) { + if (vertex->idx < 0 || vertex->idx >= dma->buf_count) { DRM_ERROR("buffer index %d (of %d max)\n", - vertex.idx, dma->buf_count - 1); + vertex->idx, dma->buf_count - 1); return -EINVAL; } RING_SPACE_TEST_WITH_RETURN(dev_priv); VB_AGE_TEST_WITH_RETURN(dev_priv); - buf = dma->buflist[vertex.idx]; + buf = dma->buflist[vertex->idx]; if (buf->file_priv != file_priv) { DRM_ERROR("process %d using buffer owned by %p\n", @@ -2572,25 +2534,25 @@ static int radeon_cp_vertex2(DRM_IOCTL_ARGS) } if (buf->pending) { - DRM_ERROR("sending pending buffer %d\n", vertex.idx); + DRM_ERROR("sending pending buffer %d\n", vertex->idx); return -EINVAL; } if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS) return -EINVAL; - for (laststate = 0xff, i = 0; i < vertex.nr_prims; i++) { + for (laststate = 0xff, i = 0; i < vertex->nr_prims; i++) { drm_radeon_prim_t prim; drm_radeon_tcl_prim_t tclprim; - if (DRM_COPY_FROM_USER(&prim, &vertex.prim[i], sizeof(prim))) + if (DRM_COPY_FROM_USER(&prim, &vertex->prim[i], sizeof(prim))) return -EFAULT; if (prim.stateidx != laststate) { drm_radeon_state_t state; if (DRM_COPY_FROM_USER(&state, - &vertex.state[prim.stateidx], + &vertex->state[prim.stateidx], sizeof(state))) return -EFAULT; @@ -2623,7 +2585,7 @@ static int radeon_cp_vertex2(DRM_IOCTL_ARGS) sarea_priv->nbox = 0; } - if (vertex.discard) { + if (vertex->discard) { radeon_cp_discard_buffer(dev, buf); } @@ -2874,14 +2836,13 @@ static int radeon_emit_wait(struct drm_device * dev, int flags) return 0; } -static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS) +static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv) 
{ - DRM_DEVICE; drm_radeon_private_t *dev_priv = dev->dev_private; struct drm_device_dma *dma = dev->dma; struct drm_buf *buf = NULL; int idx; - drm_radeon_kcmd_buffer_t cmdbuf; + drm_radeon_kcmd_buffer_t *cmdbuf = data; drm_radeon_cmd_header_t header; int orig_nbox, orig_bufsz; char *kbuf = NULL; @@ -2893,14 +2854,10 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(cmdbuf, - (drm_radeon_kcmd_buffer_t __user *) data, - sizeof(cmdbuf)); - RING_SPACE_TEST_WITH_RETURN(dev_priv); VB_AGE_TEST_WITH_RETURN(dev_priv); - if (cmdbuf.bufsz > 64 * 1024 || cmdbuf.bufsz < 0) { + if (cmdbuf->bufsz > 64 * 1024 || cmdbuf->bufsz < 0) { return -EINVAL; } @@ -2908,24 +2865,24 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS) * races between checking values and using those values in other code, * and simply to avoid a lot of function calls to copy in data. */ - orig_bufsz = cmdbuf.bufsz; + orig_bufsz = cmdbuf->bufsz; if (orig_bufsz != 0) { - kbuf = drm_alloc(cmdbuf.bufsz, DRM_MEM_DRIVER); + kbuf = drm_alloc(cmdbuf->bufsz, DRM_MEM_DRIVER); if (kbuf == NULL) return -ENOMEM; - if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf.buf, - cmdbuf.bufsz)) { + if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf->buf, + cmdbuf->bufsz)) { drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER); return -EFAULT; } - cmdbuf.buf = kbuf; + cmdbuf->buf = kbuf; } - orig_nbox = cmdbuf.nbox; + orig_nbox = cmdbuf->nbox; if (dev_priv->microcode_version == UCODE_R300) { int temp; - temp = r300_do_cp_cmdbuf(dev, file_priv, &cmdbuf); + temp = r300_do_cp_cmdbuf(dev, file_priv, cmdbuf); if (orig_bufsz != 0) drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER); @@ -2934,17 +2891,17 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS) } /* microcode_version != r300 */ - while (cmdbuf.bufsz >= sizeof(header)) { + while (cmdbuf->bufsz >= sizeof(header)) { - header.i = *(int *)cmdbuf.buf; - cmdbuf.buf += sizeof(header); - cmdbuf.bufsz -= sizeof(header); + header.i = *(int *)cmdbuf->buf; + cmdbuf->buf += sizeof(header); + cmdbuf->bufsz -= sizeof(header); switch (header.header.cmd_type) { case RADEON_CMD_PACKET: DRM_DEBUG("RADEON_CMD_PACKET\n"); if (radeon_emit_packets - (dev_priv, file_priv, header, &cmdbuf)) { + (dev_priv, file_priv, header, cmdbuf)) { DRM_ERROR("radeon_emit_packets failed\n"); goto err; } @@ -2952,7 +2909,7 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS) case RADEON_CMD_SCALARS: DRM_DEBUG("RADEON_CMD_SCALARS\n"); - if (radeon_emit_scalars(dev_priv, header, &cmdbuf)) { + if (radeon_emit_scalars(dev_priv, header, cmdbuf)) { DRM_ERROR("radeon_emit_scalars failed\n"); goto err; } @@ -2960,7 +2917,7 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS) case RADEON_CMD_VECTORS: DRM_DEBUG("RADEON_CMD_VECTORS\n"); - if (radeon_emit_vectors(dev_priv, header, &cmdbuf)) { + if (radeon_emit_vectors(dev_priv, header, cmdbuf)) { DRM_ERROR("radeon_emit_vectors failed\n"); goto err; } @@ -2988,7 +2945,7 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS) case RADEON_CMD_PACKET3: DRM_DEBUG("RADEON_CMD_PACKET3\n"); - if (radeon_emit_packet3(dev, file_priv, &cmdbuf)) { + if (radeon_emit_packet3(dev, file_priv, cmdbuf)) { DRM_ERROR("radeon_emit_packet3 failed\n"); goto err; } @@ -2997,7 +2954,7 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS) case RADEON_CMD_PACKET3_CLIP: DRM_DEBUG("RADEON_CMD_PACKET3_CLIP\n"); if (radeon_emit_packet3_cliprect - (dev, file_priv, &cmdbuf, orig_nbox)) { + (dev, file_priv, cmdbuf, orig_nbox)) { DRM_ERROR("radeon_emit_packet3_clip failed\n"); goto err; } @@ -3005,7 +2962,7 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS) 
case RADEON_CMD_SCALARS2: DRM_DEBUG("RADEON_CMD_SCALARS2\n"); - if (radeon_emit_scalars2(dev_priv, header, &cmdbuf)) { + if (radeon_emit_scalars2(dev_priv, header, cmdbuf)) { DRM_ERROR("radeon_emit_scalars2 failed\n"); goto err; } @@ -3020,7 +2977,7 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS) break; case RADEON_CMD_VECLINEAR: DRM_DEBUG("RADEON_CMD_VECLINEAR\n"); - if (radeon_emit_veclinear(dev_priv, header, &cmdbuf)) { + if (radeon_emit_veclinear(dev_priv, header, cmdbuf)) { DRM_ERROR("radeon_emit_veclinear failed\n"); goto err; } @@ -3029,7 +2986,7 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS) default: DRM_ERROR("bad cmd_type %d at %p\n", header.header.cmd_type, - cmdbuf.buf - sizeof(header)); + cmdbuf->buf - sizeof(header)); goto err; } } @@ -3047,11 +3004,10 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS) return -EINVAL; } -static int radeon_cp_getparam(DRM_IOCTL_ARGS) +static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_radeon_private_t *dev_priv = dev->dev_private; - drm_radeon_getparam_t param; + drm_radeon_getparam_t *param = data; int value; if (!dev_priv) { @@ -3059,12 +3015,9 @@ static int radeon_cp_getparam(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(param, (drm_radeon_getparam_t __user *) data, - sizeof(param)); - DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); - switch (param.param) { + switch (param->param) { case RADEON_PARAM_GART_BUFFER_OFFSET: value = dev_priv->gart_buffers_offset; break; @@ -3127,11 +3080,11 @@ static int radeon_cp_getparam(DRM_IOCTL_ARGS) value = radeon_vblank_crtc_get(dev); break; default: - DRM_DEBUG( "Invalid parameter %d\n", param.param ); + DRM_DEBUG( "Invalid parameter %d\n", param->param ); return -EINVAL; } - if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) { + if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) { DRM_ERROR("copy_to_user\n"); return -EFAULT; } @@ -3139,11 +3092,10 @@ static int radeon_cp_getparam(DRM_IOCTL_ARGS) return 0; } -static int radeon_cp_setparam(DRM_IOCTL_ARGS) +static int radeon_cp_setparam(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_radeon_private_t *dev_priv = dev->dev_private; - drm_radeon_setparam_t sp; + drm_radeon_setparam_t *sp = data; struct drm_radeon_driver_file_fields *radeon_priv; if (!dev_priv) { @@ -3151,21 +3103,19 @@ static int radeon_cp_setparam(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(sp, (drm_radeon_setparam_t __user *) data, - sizeof(sp)); - - switch (sp.param) { + switch (sp->param) { case RADEON_SETPARAM_FB_LOCATION: radeon_priv = file_priv->driver_priv; - radeon_priv->radeon_fb_delta = dev_priv->fb_location - sp.value; + radeon_priv->radeon_fb_delta = dev_priv->fb_location - + sp->value; break; case RADEON_SETPARAM_SWITCH_TILING: - if (sp.value == 0) { + if (sp->value == 0) { DRM_DEBUG("color tiling disabled\n"); dev_priv->front_pitch_offset &= ~RADEON_DST_TILE_MACRO; dev_priv->back_pitch_offset &= ~RADEON_DST_TILE_MACRO; dev_priv->sarea_priv->tiling_enabled = 0; - } else if (sp.value == 1) { + } else if (sp->value == 1) { DRM_DEBUG("color tiling enabled\n"); dev_priv->front_pitch_offset |= RADEON_DST_TILE_MACRO; dev_priv->back_pitch_offset |= RADEON_DST_TILE_MACRO; @@ -3173,22 +3123,22 @@ static int radeon_cp_setparam(DRM_IOCTL_ARGS) } break; case RADEON_SETPARAM_PCIGART_LOCATION: - dev_priv->pcigart_offset = sp.value; + dev_priv->pcigart_offset = sp->value; dev_priv->pcigart_offset_set = 1; break; case RADEON_SETPARAM_NEW_MEMMAP: - dev_priv->new_memmap 
= sp.value; + dev_priv->new_memmap = sp->value; break; case RADEON_SETPARAM_PCIGART_TABLE_SIZE: - dev_priv->gart_info.table_size = sp.value; + dev_priv->gart_info.table_size = sp->value; if (dev_priv->gart_info.table_size < RADEON_PCIGART_TABLE_SIZE) dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE; break; case RADEON_SETPARAM_VBLANK_CRTC: - return radeon_vblank_crtc_set(dev, sp.value); + return radeon_vblank_crtc_set(dev, sp->value); break; default: - DRM_DEBUG("Invalid parameter %d\n", sp.param); + DRM_DEBUG("Invalid parameter %d\n", sp->param); return -EINVAL; } @@ -3258,33 +3208,33 @@ void radeon_driver_postclose(struct drm_device * dev, struct drm_file *file_priv } struct drm_ioctl_desc radeon_ioctls[] = { - [DRM_IOCTL_NR(DRM_RADEON_CP_INIT)] = {radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_RADEON_CP_START)] = {radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_RADEON_CP_STOP)] = {radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_RADEON_CP_RESET)] = {radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_RADEON_CP_IDLE)] = {radeon_cp_idle, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_RADEON_CP_RESUME)] = {radeon_cp_resume, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_RADEON_RESET)] = {radeon_engine_reset, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_RADEON_FULLSCREEN)] = {radeon_fullscreen, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_RADEON_SWAP)] = {radeon_cp_swap, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_RADEON_CLEAR)] = {radeon_cp_clear, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_RADEON_VERTEX)] = {radeon_cp_vertex, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_RADEON_INDICES)] = {radeon_cp_indices, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_RADEON_TEXTURE)] = {radeon_cp_texture, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_RADEON_STIPPLE)] = {radeon_cp_stipple, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_RADEON_INDIRECT)] = {radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_RADEON_VERTEX2)] = {radeon_cp_vertex2, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_RADEON_CMDBUF)] = {radeon_cp_cmdbuf, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_RADEON_GETPARAM)] = {radeon_cp_getparam, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_RADEON_FLIP)] = {radeon_cp_flip, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_RADEON_ALLOC)] = {radeon_mem_alloc, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_RADEON_FREE)] = {radeon_mem_free, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_RADEON_INIT_HEAP)] = {radeon_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_RADEON_IRQ_EMIT)] = {radeon_irq_emit, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_RADEON_IRQ_WAIT)] = {radeon_irq_wait, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_RADEON_SETPARAM)] = {radeon_cp_setparam, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_RADEON_SURF_ALLOC)] = {radeon_surface_alloc, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_RADEON_SURF_FREE)] = {radeon_surface_free, DRM_AUTH} + DRM_IOCTL_DEF(DRM_RADEON_CP_INIT, radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_RADEON_CP_START, radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_RADEON_CP_STOP, radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_RADEON_CP_RESET, radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_RADEON_CP_IDLE, radeon_cp_idle, DRM_AUTH), + DRM_IOCTL_DEF(DRM_RADEON_CP_RESUME, radeon_cp_resume, DRM_AUTH), + DRM_IOCTL_DEF(DRM_RADEON_RESET, radeon_engine_reset, DRM_AUTH), + DRM_IOCTL_DEF(DRM_RADEON_FULLSCREEN, radeon_fullscreen, DRM_AUTH), + DRM_IOCTL_DEF(DRM_RADEON_SWAP, radeon_cp_swap, DRM_AUTH), + DRM_IOCTL_DEF(DRM_RADEON_CLEAR, radeon_cp_clear, DRM_AUTH), + DRM_IOCTL_DEF(DRM_RADEON_VERTEX, radeon_cp_vertex, 
DRM_AUTH), + DRM_IOCTL_DEF(DRM_RADEON_INDICES, radeon_cp_indices, DRM_AUTH), + DRM_IOCTL_DEF(DRM_RADEON_TEXTURE, radeon_cp_texture, DRM_AUTH), + DRM_IOCTL_DEF(DRM_RADEON_STIPPLE, radeon_cp_stipple, DRM_AUTH), + DRM_IOCTL_DEF(DRM_RADEON_INDIRECT, radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_RADEON_VERTEX2, radeon_cp_vertex2, DRM_AUTH), + DRM_IOCTL_DEF(DRM_RADEON_CMDBUF, radeon_cp_cmdbuf, DRM_AUTH), + DRM_IOCTL_DEF(DRM_RADEON_GETPARAM, radeon_cp_getparam, DRM_AUTH), + DRM_IOCTL_DEF(DRM_RADEON_FLIP, radeon_cp_flip, DRM_AUTH), + DRM_IOCTL_DEF(DRM_RADEON_ALLOC, radeon_mem_alloc, DRM_AUTH), + DRM_IOCTL_DEF(DRM_RADEON_FREE, radeon_mem_free, DRM_AUTH), + DRM_IOCTL_DEF(DRM_RADEON_INIT_HEAP, radeon_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_RADEON_IRQ_EMIT, radeon_irq_emit, DRM_AUTH), + DRM_IOCTL_DEF(DRM_RADEON_IRQ_WAIT, radeon_irq_wait, DRM_AUTH), + DRM_IOCTL_DEF(DRM_RADEON_SETPARAM, radeon_cp_setparam, DRM_AUTH), + DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc, DRM_AUTH), + DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free, DRM_AUTH) }; int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls); diff --git a/shared-core/savage_bci.c b/shared-core/savage_bci.c index c2dee6f9..32ac5ac2 100644 --- a/shared-core/savage_bci.c +++ b/shared-core/savage_bci.c @@ -927,19 +927,15 @@ static int savage_do_cleanup_bci(struct drm_device *dev) return 0; } -static int savage_bci_init(DRM_IOCTL_ARGS) +static int savage_bci_init(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - drm_savage_init_t init; + drm_savage_init_t *init = data; LOCK_TEST_WITH_RETURN(dev, file_priv); - DRM_COPY_FROM_USER_IOCTL(init, (drm_savage_init_t __user *)data, - sizeof(init)); - - switch (init.func) { + switch (init->func) { case SAVAGE_INIT_BCI: - return savage_do_init_bci(dev, &init); + return savage_do_init_bci(dev, init); case SAVAGE_CLEANUP_BCI: return savage_do_cleanup_bci(dev); } @@ -947,39 +943,30 @@ static int savage_bci_init(DRM_IOCTL_ARGS) return -EINVAL; } -static int savage_bci_event_emit(DRM_IOCTL_ARGS) +static int savage_bci_event_emit(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_savage_private_t *dev_priv = dev->dev_private; - drm_savage_event_emit_t event; + drm_savage_event_emit_t *event = data; DRM_DEBUG("\n"); LOCK_TEST_WITH_RETURN(dev, file_priv); - DRM_COPY_FROM_USER_IOCTL(event, (drm_savage_event_emit_t __user *)data, - sizeof(event)); + event->count = savage_bci_emit_event(dev_priv, event->flags); + event->count |= dev_priv->event_wrap << 16; - event.count = savage_bci_emit_event(dev_priv, event.flags); - event.count |= dev_priv->event_wrap << 16; - DRM_COPY_TO_USER_IOCTL((drm_savage_event_emit_t __user *)data, - event, sizeof(event)); return 0; } -static int savage_bci_event_wait(DRM_IOCTL_ARGS) +static int savage_bci_event_wait(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_savage_private_t *dev_priv = dev->dev_private; - drm_savage_event_wait_t event; + drm_savage_event_wait_t *event = data; unsigned int event_e, hw_e; unsigned int event_w, hw_w; DRM_DEBUG("\n"); - DRM_COPY_FROM_USER_IOCTL(event, (drm_savage_event_wait_t __user *)data, - sizeof(event)); - UPDATE_EVENT_COUNTER(); if (dev_priv->status_ptr) hw_e = dev_priv->status_ptr[1] & 0xffff; @@ -989,8 +976,8 @@ static int savage_bci_event_wait(DRM_IOCTL_ARGS) if (hw_e > dev_priv->event_counter) hw_w--; /* hardware hasn't passed the last wrap yet */ - event_e = event.count & 0xffff; 
- event_w = event.count >> 16; + event_e = event->count & 0xffff; + event_w = event->count >> 16; /* Don't need to wait if * - event counter wrapped since the event was emitted or @@ -1032,41 +1019,36 @@ static int savage_bci_get_buffers(struct drm_device *dev, return 0; } -int savage_bci_buffers(DRM_IOCTL_ARGS) +int savage_bci_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; struct drm_device_dma *dma = dev->dma; - struct drm_dma d; + struct drm_dma *d = data; int ret = 0; LOCK_TEST_WITH_RETURN(dev, file_priv); - DRM_COPY_FROM_USER_IOCTL(d, (struct drm_dma __user *)data, sizeof(d)); - /* Please don't send us buffers. */ - if (d.send_count != 0) { + if (d->send_count != 0) { DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", - DRM_CURRENTPID, d.send_count); + DRM_CURRENTPID, d->send_count); return -EINVAL; } /* We'll send you buffers. */ - if (d.request_count < 0 || d.request_count > dma->buf_count) { + if (d->request_count < 0 || d->request_count > dma->buf_count) { DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", - DRM_CURRENTPID, d.request_count, dma->buf_count); + DRM_CURRENTPID, d->request_count, dma->buf_count); return -EINVAL; } - d.granted_count = 0; + d->granted_count = 0; - if (d.request_count) { - ret = savage_bci_get_buffers(dev, file_priv, &d); + if (d->request_count) { + ret = savage_bci_get_buffers(dev, file_priv, d); } - DRM_COPY_TO_USER_IOCTL((struct drm_dma __user *)data, d, sizeof(d)); - return ret; } @@ -1103,10 +1085,10 @@ void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv) } struct drm_ioctl_desc savage_ioctls[] = { - [DRM_IOCTL_NR(DRM_SAVAGE_BCI_INIT)] = {savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_SAVAGE_BCI_CMDBUF)] = {savage_bci_cmdbuf, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_EMIT)] = {savage_bci_event_emit, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_WAIT)] = {savage_bci_event_wait, DRM_AUTH}, + DRM_IOCTL_DEF(DRM_SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH), + DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH), + DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH), }; int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls); diff --git a/shared-core/savage_drv.h b/shared-core/savage_drv.h index 3208cfcc..d86bac04 100644 --- a/shared-core/savage_drv.h +++ b/shared-core/savage_drv.h @@ -197,8 +197,8 @@ typedef struct drm_savage_private { } drm_savage_private_t; /* ioctls */ -extern int savage_bci_cmdbuf(DRM_IOCTL_ARGS); -extern int savage_bci_buffers(DRM_IOCTL_ARGS); +extern int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int savage_bci_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv); /* BCI functions */ extern uint16_t savage_bci_emit_event(drm_savage_private_t *dev_priv, diff --git a/shared-core/savage_state.c b/shared-core/savage_state.c index f5b9888c..dd593340 100644 --- a/shared-core/savage_state.c +++ b/shared-core/savage_state.c @@ -952,13 +952,12 @@ static int savage_dispatch_draw(drm_savage_private_t *dev_priv, return 0; } -int savage_bci_cmdbuf(DRM_IOCTL_ARGS) +int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_savage_private_t *dev_priv = dev->dev_private; struct drm_device_dma *dma = dev->dma; struct drm_buf *dmabuf; - drm_savage_cmdbuf_t cmdbuf; + 
drm_savage_cmdbuf_t *cmdbuf = data; drm_savage_cmd_header_t *kcmd_addr = NULL; drm_savage_cmd_header_t *first_draw_cmd; unsigned int *kvb_addr = NULL; @@ -970,17 +969,14 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS) LOCK_TEST_WITH_RETURN(dev, file_priv); - DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_savage_cmdbuf_t __user *)data, - sizeof(cmdbuf)); - if (dma && dma->buflist) { - if (cmdbuf.dma_idx > dma->buf_count) { + if (cmdbuf->dma_idx > dma->buf_count) { DRM_ERROR ("vertex buffer index %u out of range (0-%u)\n", - cmdbuf.dma_idx, dma->buf_count-1); + cmdbuf->dma_idx, dma->buf_count-1); return -EINVAL; } - dmabuf = dma->buflist[cmdbuf.dma_idx]; + dmabuf = dma->buflist[cmdbuf->dma_idx]; } else { dmabuf = NULL; } @@ -990,47 +986,49 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS) * COPY_FROM_USER_UNCHECKED when done in other drivers, and is correct * for locking on FreeBSD. */ - if (cmdbuf.size) { - kcmd_addr = drm_alloc(cmdbuf.size * 8, DRM_MEM_DRIVER); + if (cmdbuf->size) { + kcmd_addr = drm_alloc(cmdbuf->size * 8, DRM_MEM_DRIVER); if (kcmd_addr == NULL) return -ENOMEM; - if (DRM_COPY_FROM_USER(kcmd_addr, cmdbuf.cmd_addr, - cmdbuf.size * 8)) + if (DRM_COPY_FROM_USER(kcmd_addr, cmdbuf->cmd_addr, + cmdbuf->size * 8)) { - drm_free(kcmd_addr, cmdbuf.size * 8, DRM_MEM_DRIVER); + drm_free(kcmd_addr, cmdbuf->size * 8, DRM_MEM_DRIVER); return -EFAULT; } - cmdbuf.cmd_addr = kcmd_addr; + cmdbuf->cmd_addr = kcmd_addr; } - if (cmdbuf.vb_size) { - kvb_addr = drm_alloc(cmdbuf.vb_size, DRM_MEM_DRIVER); + if (cmdbuf->vb_size) { + kvb_addr = drm_alloc(cmdbuf->vb_size, DRM_MEM_DRIVER); if (kvb_addr == NULL) { ret = -ENOMEM; goto done; } - if (DRM_COPY_FROM_USER(kvb_addr, cmdbuf.vb_addr, - cmdbuf.vb_size)) { + if (DRM_COPY_FROM_USER(kvb_addr, cmdbuf->vb_addr, + cmdbuf->vb_size)) { ret = -EFAULT; goto done; } - cmdbuf.vb_addr = kvb_addr; + cmdbuf->vb_addr = kvb_addr; } - if (cmdbuf.nbox) { - kbox_addr = drm_alloc(cmdbuf.nbox * sizeof(struct drm_clip_rect), - DRM_MEM_DRIVER); + if (cmdbuf->nbox) { + kbox_addr = drm_alloc(cmdbuf->nbox * + sizeof(struct drm_clip_rect), + DRM_MEM_DRIVER); if (kbox_addr == NULL) { ret = -ENOMEM; goto done; } - if (DRM_COPY_FROM_USER(kbox_addr, cmdbuf.box_addr, - cmdbuf.nbox * sizeof(struct drm_clip_rect))) { + if (DRM_COPY_FROM_USER(kbox_addr, cmdbuf->box_addr, + cmdbuf->nbox * + sizeof(struct drm_clip_rect))) { ret = -EFAULT; goto done; } - cmdbuf.box_addr = kbox_addr; + cmdbuf->box_addr = kbox_addr; } /* Make sure writes to DMA buffers are finished before sending @@ -1043,10 +1041,10 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS) i = 0; first_draw_cmd = NULL; - while (i < cmdbuf.size) { + while (i < cmdbuf->size) { drm_savage_cmd_header_t cmd_header; - cmd_header = *(drm_savage_cmd_header_t *)cmdbuf.cmd_addr; - cmdbuf.cmd_addr++; + cmd_header = *(drm_savage_cmd_header_t *)cmdbuf->cmd_addr; + cmdbuf->cmd_addr++; i++; /* Group drawing commands with same state to minimize @@ -1056,7 +1054,7 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS) case SAVAGE_CMD_DMA_IDX: case SAVAGE_CMD_VB_IDX: j = (cmd_header.idx.count + 3) / 4; - if (i + j > cmdbuf.size) { + if (i + j > cmdbuf->size) { DRM_ERROR("indexed drawing command extends " "beyond end of command buffer\n"); DMA_FLUSH(); @@ -1066,18 +1064,19 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS) case SAVAGE_CMD_DMA_PRIM: case SAVAGE_CMD_VB_PRIM: if (!first_draw_cmd) - first_draw_cmd = cmdbuf.cmd_addr-1; - cmdbuf.cmd_addr += j; + first_draw_cmd = cmdbuf->cmd_addr-1; + cmdbuf->cmd_addr += j; i += j; break; default: if (first_draw_cmd) { ret = savage_dispatch_draw ( dev_priv, 
first_draw_cmd, - cmdbuf.cmd_addr-1, - dmabuf, cmdbuf.vb_addr, cmdbuf.vb_size, - cmdbuf.vb_stride, - cmdbuf.nbox, cmdbuf.box_addr); + cmdbuf->cmd_addr-1, + dmabuf, cmdbuf->vb_addr, + cmdbuf->vb_size, + cmdbuf->vb_stride, + cmdbuf->nbox, cmdbuf->box_addr); if (ret != 0) return ret; first_draw_cmd = NULL; @@ -1089,7 +1088,7 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS) switch (cmd_header.cmd.cmd) { case SAVAGE_CMD_STATE: j = (cmd_header.state.count + 1) / 2; - if (i + j > cmdbuf.size) { + if (i + j > cmdbuf->size) { DRM_ERROR("command SAVAGE_CMD_STATE extends " "beyond end of command buffer\n"); DMA_FLUSH(); @@ -1097,12 +1096,12 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS) goto done; } ret = savage_dispatch_state(dev_priv, &cmd_header, - (const uint32_t *)cmdbuf.cmd_addr); - cmdbuf.cmd_addr += j; + (const uint32_t *)cmdbuf->cmd_addr); + cmdbuf->cmd_addr += j; i += j; break; case SAVAGE_CMD_CLEAR: - if (i + 1 > cmdbuf.size) { + if (i + 1 > cmdbuf->size) { DRM_ERROR("command SAVAGE_CMD_CLEAR extends " "beyond end of command buffer\n"); DMA_FLUSH(); @@ -1110,17 +1109,19 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS) goto done; } ret = savage_dispatch_clear(dev_priv, &cmd_header, - cmdbuf.cmd_addr, - cmdbuf.nbox, cmdbuf.box_addr); - cmdbuf.cmd_addr++; + cmdbuf->cmd_addr, + cmdbuf->nbox, + cmdbuf->box_addr); + cmdbuf->cmd_addr++; i++; break; case SAVAGE_CMD_SWAP: - ret = savage_dispatch_swap(dev_priv, cmdbuf.nbox, - cmdbuf.box_addr); + ret = savage_dispatch_swap(dev_priv, cmdbuf->nbox, + cmdbuf->box_addr); break; default: - DRM_ERROR("invalid command 0x%x\n", cmd_header.cmd.cmd); + DRM_ERROR("invalid command 0x%x\n", + cmd_header.cmd.cmd); DMA_FLUSH(); ret = -EINVAL; goto done; @@ -1134,9 +1135,9 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS) if (first_draw_cmd) { ret = savage_dispatch_draw ( - dev_priv, first_draw_cmd, cmdbuf.cmd_addr, dmabuf, - cmdbuf.vb_addr, cmdbuf.vb_size, cmdbuf.vb_stride, - cmdbuf.nbox, cmdbuf.box_addr); + dev_priv, first_draw_cmd, cmdbuf->cmd_addr, dmabuf, + cmdbuf->vb_addr, cmdbuf->vb_size, cmdbuf->vb_stride, + cmdbuf->nbox, cmdbuf->box_addr); if (ret != 0) { DMA_FLUSH(); goto done; @@ -1145,7 +1146,7 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS) DMA_FLUSH(); - if (dmabuf && cmdbuf.discard) { + if (dmabuf && cmdbuf->discard) { drm_savage_buf_priv_t *buf_priv = dmabuf->dev_private; uint16_t event; event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D); @@ -1155,9 +1156,9 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS) done: /* If we didn't need to allocate them, these'll be NULL */ - drm_free(kcmd_addr, cmdbuf.size * 8, DRM_MEM_DRIVER); - drm_free(kvb_addr, cmdbuf.vb_size, DRM_MEM_DRIVER); - drm_free(kbox_addr, cmdbuf.nbox * sizeof(struct drm_clip_rect), + drm_free(kcmd_addr, cmdbuf->size * 8, DRM_MEM_DRIVER); + drm_free(kvb_addr, cmdbuf->vb_size, DRM_MEM_DRIVER); + drm_free(kbox_addr, cmdbuf->nbox * sizeof(struct drm_clip_rect), DRM_MEM_DRIVER); return ret; diff --git a/shared-core/sis_mm.c b/shared-core/sis_mm.c index b62e3e27..e11939fa 100644 --- a/shared-core/sis_mm.c +++ b/shared-core/sis_mm.c @@ -81,59 +81,52 @@ static int del_alloc_set(int context, int type, unsigned int val) /* fb management via fb device */ #if defined(__linux__) && defined(CONFIG_FB_SIS) -static int sis_fb_init(DRM_IOCTL_ARGS) +static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv) { return 0; } -static int sis_fb_alloc(DRM_IOCTL_ARGS) +static int sis_fb_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv) { - drm_sis_mem_t fb; + drm_sis_mem_t *fb = data; struct sis_memreq req; 
- drm_sis_mem_t __user *argp = (drm_sis_mem_t __user *)data; int retval = 0; - DRM_COPY_FROM_USER_IOCTL(fb, argp, sizeof(fb)); - - req.size = fb.size; + req.size = fb->size; sis_malloc(&req); if (req.offset) { /* TODO */ - fb.offset = req.offset; - fb.free = req.offset; - if (!add_alloc_set(fb.context, VIDEO_TYPE, fb.free)) { + fb->offset = req.offset; + fb->free = req.offset; + if (!add_alloc_set(fb->context, VIDEO_TYPE, fb->free)) { DRM_DEBUG("adding to allocation set fails\n"); sis_free(req.offset); retval = -EINVAL; } } else { - fb.offset = 0; - fb.size = 0; - fb.free = 0; + fb->offset = 0; + fb->size = 0; + fb->free = 0; } - DRM_COPY_TO_USER_IOCTL(argp, fb, sizeof(fb)); - - DRM_DEBUG("alloc fb, size = %d, offset = %ld\n", fb.size, req.offset); + DRM_DEBUG("alloc fb, size = %d, offset = %ld\n", fb->size, req.offset); return retval; } -static int sis_fb_free(DRM_IOCTL_ARGS) +static int sis_fb_free(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_sis_mem_t fb; int retval = 0; - DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_mem_t __user *) data, sizeof(fb)); - - if (!fb.free) + if (!fb->free) return -EINVAL; - if (!del_alloc_set(fb.context, VIDEO_TYPE, fb.free)) + if (!del_alloc_set(fb->context, VIDEO_TYPE, fb->free)) retval = -EINVAL; - sis_free(fb.free); + sis_free(fb->free); - DRM_DEBUG("free fb, offset = 0x%lx\n", fb.free); + DRM_DEBUG("free fb, offset = 0x%lx\n", fb->free); return retval; } @@ -150,13 +143,10 @@ static int sis_fb_free(DRM_IOCTL_ARGS) * X driver/sisfb HW- Command- * framebuffer memory DRI heap Cursor queue */ -static int sis_fb_init(DRM_IOCTL_ARGS) +static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_sis_private_t *dev_priv = dev->dev_private; - drm_sis_fb_t fb; - - DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_fb_t __user *) data, sizeof(fb)); + drm_sis_fb_t *fb = data; if (dev_priv == NULL) { dev->dev_private = drm_calloc(1, sizeof(drm_sis_private_t), @@ -169,69 +159,60 @@ static int sis_fb_init(DRM_IOCTL_ARGS) if (dev_priv->FBHeap != NULL) return -EINVAL; - dev_priv->FBHeap = mmInit(fb.offset, fb.size); + dev_priv->FBHeap = mmInit(fb->offset, fb->size); - DRM_DEBUG("offset = %u, size = %u", fb.offset, fb.size); + DRM_DEBUG("offset = %u, size = %u", fb->offset, fb->size); return 0; } -static int sis_fb_alloc(DRM_IOCTL_ARGS) +static int sis_fb_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_sis_private_t *dev_priv = dev->dev_private; - drm_sis_mem_t __user *argp = (drm_sis_mem_t __user *)data; - drm_sis_mem_t fb; + drm_sis_mem_t *fb = data; PMemBlock block; int retval = 0; if (dev_priv == NULL || dev_priv->FBHeap == NULL) return -EINVAL; - DRM_COPY_FROM_USER_IOCTL(fb, argp, sizeof(fb)); - - block = mmAllocMem(dev_priv->FBHeap, fb.size, 0, 0); + block = mmAllocMem(dev_priv->FBHeap, fb->size, 0, 0); if (block) { /* TODO */ - fb.offset = block->ofs; - fb.free = (unsigned long)block; - if (!add_alloc_set(fb.context, VIDEO_TYPE, fb.free)) { + fb->offset = block->ofs; + fb->free = (unsigned long)block; + if (!add_alloc_set(fb->context, VIDEO_TYPE, fb->free)) { DRM_DEBUG("adding to allocation set fails\n"); - mmFreeMem((PMemBlock) fb.free); + mmFreeMem((PMemBlock) fb->free); retval = -EINVAL; } } else { - fb.offset = 0; - fb.size = 0; - fb.free = 0; + fb->offset = 0; + fb->size = 0; + fb->free = 0; } - DRM_COPY_TO_USER_IOCTL(argp, fb, sizeof(fb)); - - DRM_DEBUG("alloc fb, size = %d, offset = %d\n", fb.size, fb.offset); + DRM_DEBUG("alloc fb, size = %d, offset = %d\n", 
fb->size, fb->offset); return retval; } -static int sis_fb_free(DRM_IOCTL_ARGS) +static int sis_fb_free(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_sis_private_t *dev_priv = dev->dev_private; - drm_sis_mem_t fb; + drm_sis_mem_t *fb = data; if (dev_priv == NULL || dev_priv->FBHeap == NULL) return -EINVAL; - DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_mem_t __user *) data, sizeof(fb)); - - if (!mmBlockInHeap(dev_priv->FBHeap, (PMemBlock) fb.free)) + if (!mmBlockInHeap(dev_priv->FBHeap, (PMemBlock) fb->free)) return -EINVAL; - if (!del_alloc_set(fb.context, VIDEO_TYPE, fb.free)) + if (!del_alloc_set(fb->context, VIDEO_TYPE, fb->free)) return -EINVAL; - mmFreeMem((PMemBlock) fb.free); + mmFreeMem((PMemBlock) fb->free); - DRM_DEBUG("free fb, free = 0x%lx\n", fb.free); + DRM_DEBUG("free fb, free = 0x%lx\n", fb->free); return 0; } @@ -240,11 +221,10 @@ static int sis_fb_free(DRM_IOCTL_ARGS) /* agp memory management */ -static int sis_ioctl_agp_init(DRM_IOCTL_ARGS) +static int sis_ioctl_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_sis_private_t *dev_priv = dev->dev_private; - drm_sis_agp_t agp; + drm_sis_agp_t *agp = data; if (dev_priv == NULL) { dev->dev_private = drm_calloc(1, sizeof(drm_sis_private_t), @@ -257,73 +237,61 @@ static int sis_ioctl_agp_init(DRM_IOCTL_ARGS) if (dev_priv->AGPHeap != NULL) return -EINVAL; - DRM_COPY_FROM_USER_IOCTL(agp, (drm_sis_agp_t __user *) data, - sizeof(agp)); + dev_priv->AGPHeap = mmInit(agp->offset, agp->size); - dev_priv->AGPHeap = mmInit(agp.offset, agp.size); - - DRM_DEBUG("offset = %u, size = %u", agp.offset, agp.size); + DRM_DEBUG("offset = %u, size = %u", agp->offset, agp->size); return 0; } -static int sis_ioctl_agp_alloc(DRM_IOCTL_ARGS) +static int sis_ioctl_agp_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_sis_private_t *dev_priv = dev->dev_private; - drm_sis_mem_t __user *argp = (drm_sis_mem_t __user *)data; - drm_sis_mem_t agp; + drm_sis_mem_t *agp = data; PMemBlock block; int retval = 0; if (dev_priv == NULL || dev_priv->AGPHeap == NULL) return -EINVAL; - DRM_COPY_FROM_USER_IOCTL(agp, argp, sizeof(agp)); - - block = mmAllocMem(dev_priv->AGPHeap, agp.size, 0, 0); + block = mmAllocMem(dev_priv->AGPHeap, agp->size, 0, 0); if (block) { /* TODO */ - agp.offset = block->ofs; - agp.free = (unsigned long)block; - if (!add_alloc_set(agp.context, AGP_TYPE, agp.free)) { + agp->offset = block->ofs; + agp->free = (unsigned long)block; + if (!add_alloc_set(agp->context, AGP_TYPE, agp->free)) { DRM_DEBUG("adding to allocation set fails\n"); - mmFreeMem((PMemBlock) agp.free); + mmFreeMem((PMemBlock) agp->free); retval = -1; } } else { - agp.offset = 0; - agp.size = 0; - agp.free = 0; + agp->offset = 0; + agp->size = 0; + agp->free = 0; } - DRM_COPY_TO_USER_IOCTL(argp, agp, sizeof(agp)); - - DRM_DEBUG("alloc agp, size = %d, offset = %d\n", agp.size, agp.offset); + DRM_DEBUG("alloc agp, size = %d, offset = %d\n", agp->size, + agp->offset); return retval; } -static int sis_ioctl_agp_free(DRM_IOCTL_ARGS) +static int sis_ioctl_agp_free(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_sis_private_t *dev_priv = dev->dev_private; - drm_sis_mem_t agp; + drm_sis_mem_t *agp = data; if (dev_priv == NULL || dev_priv->AGPHeap == NULL) return -EINVAL; - DRM_COPY_FROM_USER_IOCTL(agp, (drm_sis_mem_t __user *) data, - sizeof(agp)); - - if (!mmBlockInHeap(dev_priv->AGPHeap, (PMemBlock) agp.free)) + if 
(!mmBlockInHeap(dev_priv->AGPHeap, (PMemBlock) agp->free)) return -EINVAL; - mmFreeMem((PMemBlock) agp.free); - if (!del_alloc_set(agp.context, AGP_TYPE, agp.free)) + mmFreeMem((PMemBlock) agp->free); + if (!del_alloc_set(agp->context, AGP_TYPE, agp->free)) return -EINVAL; - DRM_DEBUG("free agp, free = 0x%lx\n", agp.free); + DRM_DEBUG("free agp, free = 0x%lx\n", agp->free); return 0; } @@ -407,12 +375,12 @@ int sis_final_context(struct drm_device *dev, int context) } drm_ioctl_desc_t sis_ioctls[] = { - [DRM_IOCTL_NR(DRM_SIS_FB_ALLOC)] = {sis_fb_alloc, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_SIS_FB_FREE)] = {sis_fb_free, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_SIS_AGP_INIT)] = {sis_ioctl_agp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_SIS_AGP_ALLOC)] = {sis_ioctl_agp_alloc, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_SIS_AGP_FREE)] = {sis_ioctl_agp_free, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_SIS_FB_INIT)] = {sis_fb_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY} + DRM_IOCTL_DEF(DRM_SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH), + DRM_IOCTL_DEF(DRM_SIS_FB_FREE, sis_fb_free, DRM_AUTH), + DRM_IOCTL_DEF(DRM_SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_SIS_AGP_ALLOC, sis_ioctl_agp_alloc, DRM_AUTH), + DRM_IOCTL_DEF(DRM_SIS_AGP_FREE, sis_ioctl_agp_free, DRM_AUTH), + DRM_IOCTL_DEF(DRM_SIS_FB_INIT, sis_fb_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY) }; int sis_max_ioctl = DRM_ARRAY_SIZE(sis_ioctls); diff --git a/shared-core/via_dma.c b/shared-core/via_dma.c index 7fe6d019..bd737a7e 100644 --- a/shared-core/via_dma.c +++ b/shared-core/via_dma.c @@ -215,22 +215,18 @@ static int via_initialize(struct drm_device * dev, return 0; } -static int via_dma_init(DRM_IOCTL_ARGS) +static int via_dma_init(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; - drm_via_dma_init_t init; + drm_via_dma_init_t *init = data; int retcode = 0; - DRM_COPY_FROM_USER_IOCTL(init, (drm_via_dma_init_t __user *) data, - sizeof(init)); - - switch (init.func) { + switch (init->func) { case VIA_INIT_DMA: if (!DRM_SUSER(DRM_CURPROC)) retcode = -EPERM; else - retcode = via_initialize(dev, dev_priv, &init); + retcode = via_initialize(dev, dev_priv, init); break; case VIA_CLEANUP_DMA: if (!DRM_SUSER(DRM_CURPROC)) @@ -316,29 +312,25 @@ int via_driver_dma_quiescent(struct drm_device * dev) return 0; } -static int via_flush_ioctl(DRM_IOCTL_ARGS) +static int via_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; LOCK_TEST_WITH_RETURN(dev, file_priv); return via_driver_dma_quiescent(dev); } -static int via_cmdbuffer(DRM_IOCTL_ARGS) +static int via_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - drm_via_cmdbuffer_t cmdbuf; + drm_via_cmdbuffer_t *cmdbuf = data; int ret; LOCK_TEST_WITH_RETURN(dev, file_priv); - DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_via_cmdbuffer_t __user *) data, - sizeof(cmdbuf)); + DRM_DEBUG("via cmdbuffer, buf %p size %lu\n", cmdbuf->buf, + cmdbuf->size); - DRM_DEBUG("via cmdbuffer, buf %p size %lu\n", cmdbuf.buf, cmdbuf.size); - - ret = via_dispatch_cmdbuffer(dev, &cmdbuf); + ret = via_dispatch_cmdbuffer(dev, cmdbuf); if (ret) { return ret; } @@ -370,21 +362,17 @@ static int via_dispatch_pci_cmdbuffer(struct drm_device * dev, return ret; } -static int via_pci_cmdbuffer(DRM_IOCTL_ARGS) +static int via_pci_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - drm_via_cmdbuffer_t cmdbuf; + 
drm_via_cmdbuffer_t *cmdbuf = data; int ret; LOCK_TEST_WITH_RETURN(dev, file_priv); - DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_via_cmdbuffer_t __user *) data, - sizeof(cmdbuf)); + DRM_DEBUG("via_pci_cmdbuffer, buf %p size %lu\n", cmdbuf->buf, + cmdbuf->size); - DRM_DEBUG("via_pci_cmdbuffer, buf %p size %lu\n", cmdbuf.buf, - cmdbuf.size); - - ret = via_dispatch_pci_cmdbuffer(dev, &cmdbuf); + ret = via_dispatch_pci_cmdbuffer(dev, cmdbuf); if (ret) { return ret; } @@ -645,10 +633,9 @@ static void via_cmdbuf_reset(drm_via_private_t * dev_priv) * User interface to the space and lag functions. */ -static int via_cmdbuf_size(DRM_IOCTL_ARGS) +static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - drm_via_cmdbuf_size_t d_siz; + drm_via_cmdbuf_size_t *d_siz = data; int ret = 0; uint32_t tmp_size, count; drm_via_private_t *dev_priv; @@ -664,17 +651,13 @@ static int via_cmdbuf_size(DRM_IOCTL_ARGS) return -EFAULT; } - DRM_COPY_FROM_USER_IOCTL(d_siz, (drm_via_cmdbuf_size_t __user *) data, - sizeof(d_siz)); - - count = 1000000; - tmp_size = d_siz.size; - switch(d_siz.func) { + tmp_size = d_siz->size; + switch(d_siz->func) { case VIA_CMDBUF_SPACE: - while (((tmp_size = via_cmdbuf_space(dev_priv)) < d_siz.size) + while (((tmp_size = via_cmdbuf_space(dev_priv)) < d_siz->size) && count--) { - if (!d_siz.wait) { + if (!d_siz->wait) { break; } } @@ -684,9 +667,9 @@ static int via_cmdbuf_size(DRM_IOCTL_ARGS) } break; case VIA_CMDBUF_LAG: - while (((tmp_size = via_cmdbuf_lag(dev_priv)) > d_siz.size) + while (((tmp_size = via_cmdbuf_lag(dev_priv)) > d_siz->size) && count--) { - if (!d_siz.wait) { + if (!d_siz->wait) { break; } } @@ -698,41 +681,39 @@ static int via_cmdbuf_size(DRM_IOCTL_ARGS) default: ret = -EFAULT; } - d_siz.size = tmp_size; + d_siz->size = tmp_size; - DRM_COPY_TO_USER_IOCTL((drm_via_cmdbuf_size_t __user *) data, d_siz, - sizeof(d_siz)); return ret; } #ifndef VIA_HAVE_DMABLIT int -via_dma_blit_sync( DRM_IOCTL_ARGS ) { +via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_priv ) { DRM_ERROR("PCI DMA BitBlt is not implemented for this system.\n"); return -EINVAL; } int -via_dma_blit( DRM_IOCTL_ARGS ) { +via_dma_blit( struct drm_device *dev, void *data, struct drm_file *file_priv ) { DRM_ERROR("PCI DMA BitBlt is not implemented for this system.\n"); return -EINVAL; } #endif struct drm_ioctl_desc via_ioctls[] = { - [DRM_IOCTL_NR(DRM_VIA_ALLOCMEM)] = {via_mem_alloc, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_VIA_FREEMEM)] = {via_mem_free, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_VIA_AGP_INIT)] = {via_agp_init, DRM_AUTH|DRM_MASTER}, - [DRM_IOCTL_NR(DRM_VIA_FB_INIT)] = {via_fb_init, DRM_AUTH|DRM_MASTER}, - [DRM_IOCTL_NR(DRM_VIA_MAP_INIT)] = {via_map_init, DRM_AUTH|DRM_MASTER}, - [DRM_IOCTL_NR(DRM_VIA_DEC_FUTEX)] = {via_decoder_futex, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_VIA_DMA_INIT)] = {via_dma_init, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_VIA_CMDBUFFER)] = {via_cmdbuffer, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_VIA_FLUSH)] = {via_flush_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_VIA_PCICMD)] = {via_pci_cmdbuffer, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_VIA_CMDBUF_SIZE)] = {via_cmdbuf_size, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_VIA_WAIT_IRQ)] = {via_wait_irq, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_VIA_DMA_BLIT)] = {via_dma_blit, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_VIA_BLIT_SYNC)] = {via_dma_blit_sync, DRM_AUTH} + DRM_IOCTL_DEF(DRM_VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH), + DRM_IOCTL_DEF(DRM_VIA_FREEMEM, via_mem_free, DRM_AUTH), + DRM_IOCTL_DEF(DRM_VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER), + 
DRM_IOCTL_DEF(DRM_VIA_FB_INIT, via_fb_init, DRM_AUTH|DRM_MASTER), + DRM_IOCTL_DEF(DRM_VIA_MAP_INIT, via_map_init, DRM_AUTH|DRM_MASTER), + DRM_IOCTL_DEF(DRM_VIA_DEC_FUTEX, via_decoder_futex, DRM_AUTH), + DRM_IOCTL_DEF(DRM_VIA_DMA_INIT, via_dma_init, DRM_AUTH), + DRM_IOCTL_DEF(DRM_VIA_CMDBUFFER, via_cmdbuffer, DRM_AUTH), + DRM_IOCTL_DEF(DRM_VIA_FLUSH, via_flush_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_VIA_PCICMD, via_pci_cmdbuffer, DRM_AUTH), + DRM_IOCTL_DEF(DRM_VIA_CMDBUF_SIZE, via_cmdbuf_size, DRM_AUTH), + DRM_IOCTL_DEF(DRM_VIA_WAIT_IRQ, via_wait_irq, DRM_AUTH), + DRM_IOCTL_DEF(DRM_VIA_DMA_BLIT, via_dma_blit, DRM_AUTH), + DRM_IOCTL_DEF(DRM_VIA_BLIT_SYNC, via_dma_blit_sync, DRM_AUTH) }; int via_max_ioctl = DRM_ARRAY_SIZE(via_ioctls); diff --git a/shared-core/via_drv.h b/shared-core/via_drv.h index b7997942..15e65950 100644 --- a/shared-core/via_drv.h +++ b/shared-core/via_drv.h @@ -151,15 +151,15 @@ enum via_family { extern struct drm_ioctl_desc via_ioctls[]; extern int via_max_ioctl; -extern int via_fb_init(DRM_IOCTL_ARGS); -extern int via_mem_alloc(DRM_IOCTL_ARGS); -extern int via_mem_free(DRM_IOCTL_ARGS); -extern int via_agp_init(DRM_IOCTL_ARGS); -extern int via_map_init(DRM_IOCTL_ARGS); -extern int via_decoder_futex(DRM_IOCTL_ARGS); -extern int via_wait_irq(DRM_IOCTL_ARGS); -extern int via_dma_blit_sync( DRM_IOCTL_ARGS ); -extern int via_dma_blit( DRM_IOCTL_ARGS ); +extern int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int via_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int via_map_init(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_priv ); +extern int via_dma_blit( struct drm_device *dev, void *data, struct drm_file *file_priv ); extern int via_driver_load(struct drm_device *dev, unsigned long chipset); extern int via_driver_unload(struct drm_device *dev); diff --git a/shared-core/via_irq.c b/shared-core/via_irq.c index 68ee5226..475b6461 100644 --- a/shared-core/via_irq.c +++ b/shared-core/via_irq.c @@ -340,11 +340,9 @@ void via_driver_irq_uninstall(struct drm_device * dev) } } -int via_wait_irq(DRM_IOCTL_ARGS) +int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - drm_via_irqwait_t __user *argp = (void __user *)data; - drm_via_irqwait_t irqwait; + drm_via_irqwait_t *irqwait = data; struct timeval now; int ret = 0; drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; @@ -354,44 +352,42 @@ int via_wait_irq(DRM_IOCTL_ARGS) if (!dev->irq) return -EINVAL; - DRM_COPY_FROM_USER_IOCTL(irqwait, argp, sizeof(irqwait)); - if (irqwait.request.irq >= dev_priv->num_irqs) { + if (irqwait->request.irq >= dev_priv->num_irqs) { DRM_ERROR("%s Trying to wait on unknown irq %d\n", __FUNCTION__, - irqwait.request.irq); + irqwait->request.irq); return -EINVAL; } - cur_irq += irqwait.request.irq; + cur_irq += irqwait->request.irq; - switch (irqwait.request.type & ~VIA_IRQ_FLAGS_MASK) { + switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) { case VIA_IRQ_RELATIVE: - irqwait.request.sequence += 
atomic_read(&cur_irq->irq_received); - irqwait.request.type &= ~_DRM_VBLANK_RELATIVE; + irqwait->request.sequence += + atomic_read(&cur_irq->irq_received); + irqwait->request.type &= ~_DRM_VBLANK_RELATIVE; case VIA_IRQ_ABSOLUTE: break; default: return -EINVAL; } - if (irqwait.request.type & VIA_IRQ_SIGNAL) { + if (irqwait->request.type & VIA_IRQ_SIGNAL) { DRM_ERROR("%s Signals on Via IRQs not implemented yet.\n", __FUNCTION__); return -EINVAL; } - force_sequence = (irqwait.request.type & VIA_IRQ_FORCE_SEQUENCE); + force_sequence = (irqwait->request.type & VIA_IRQ_FORCE_SEQUENCE); - ret = via_driver_irq_wait(dev, irqwait.request.irq, force_sequence, - &irqwait.request.sequence); + ret = via_driver_irq_wait(dev, irqwait->request.irq, force_sequence, + &irqwait->request.sequence); #ifdef __linux__ do_gettimeofday(&now); #else microtime(&now); #endif - irqwait.reply.tval_sec = now.tv_sec; - irqwait.reply.tval_usec = now.tv_usec; - - DRM_COPY_TO_USER_IOCTL(argp, irqwait, sizeof(irqwait)); + irqwait->reply.tval_sec = now.tv_sec; + irqwait->reply.tval_usec = now.tv_usec; return ret; } diff --git a/shared-core/via_map.c b/shared-core/via_map.c index 1aed10f5..1623df68 100644 --- a/shared-core/via_map.c +++ b/shared-core/via_map.c @@ -91,19 +91,15 @@ int via_do_cleanup_map(struct drm_device * dev) } -int via_map_init(DRM_IOCTL_ARGS) +int via_map_init(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - drm_via_init_t init; + drm_via_init_t *init = data; DRM_DEBUG("%s\n", __FUNCTION__); - DRM_COPY_FROM_USER_IOCTL(init, (drm_via_init_t __user *) data, - sizeof(init)); - - switch (init.func) { + switch (init->func) { case VIA_INIT_MAP: - return via_do_init_map(dev, &init); + return via_do_init_map(dev, init); case VIA_CLEANUP_MAP: return via_do_cleanup_map(dev); } diff --git a/shared-core/via_mm.c b/shared-core/via_mm.c index 8f175a7d..45790dc2 100644 --- a/shared-core/via_mm.c +++ b/shared-core/via_mm.c @@ -72,17 +72,14 @@ static int del_alloc_set(int context, int type, unsigned long val) /* agp memory management */ static memHeap_t *AgpHeap = NULL; -int via_agp_init(DRM_IOCTL_ARGS) +int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv) { - drm_via_agp_t agp; + drm_via_agp_t *agp = data; - DRM_COPY_FROM_USER_IOCTL(agp, (drm_via_agp_t __user *) data, - sizeof(agp)); + AgpHeap = via_mmInit(agp->offset, agp->size); - AgpHeap = via_mmInit(agp.offset, agp.size); - - DRM_DEBUG("offset = %lu, size = %lu", (unsigned long)agp.offset, - (unsigned long)agp.size); + DRM_DEBUG("offset = %lu, size = %lu", (unsigned long)agp->offset, + (unsigned long)agp->size); return 0; } @@ -90,11 +87,9 @@ int via_agp_init(DRM_IOCTL_ARGS) /* fb memory management */ static memHeap_t *FBHeap = NULL; -int via_fb_init(DRM_IOCTL_ARGS) +int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv) { - drm_via_fb_t fb; - - DRM_COPY_FROM_USER_IOCTL(fb, (drm_via_fb_t __user *) data, sizeof(fb)); + drm_via_fb_t *fb = data; FBHeap = via_mmInit(fb.offset, fb.size); @@ -191,25 +186,18 @@ int via_final_context(struct drm_device *dev, int context) return 1; } -int via_mem_alloc(DRM_IOCTL_ARGS) +int via_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv) { - drm_via_mem_t mem; - - DRM_COPY_FROM_USER_IOCTL(mem, (drm_via_mem_t __user *) data, - sizeof(mem)); + drm_via_mem_t *mem = data; switch (mem.type) { case VIA_MEM_VIDEO: - if (via_fb_alloc(&mem) < 0) + if (via_fb_alloc(mem) < 0) return -EFAULT; - DRM_COPY_TO_USER_IOCTL((drm_via_mem_t __user *) 
data, mem, - sizeof(mem)); return 0; case VIA_MEM_AGP: - if (via_agp_alloc(&mem) < 0) + if (via_agp_alloc(mem) < 0) return -EFAULT; - DRM_COPY_TO_USER_IOCTL((drm_via_mem_t __user *) data, mem, - sizeof(mem)); return 0; } @@ -288,21 +276,18 @@ static int via_agp_alloc(drm_via_mem_t * mem) return retval; } -int via_mem_free(DRM_IOCTL_ARGS) +int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv) { - drm_via_mem_t mem; + drm_via_mem_t *mem = data; - DRM_COPY_FROM_USER_IOCTL(mem, (drm_via_mem_t __user *) data, - sizeof(mem)); - - switch (mem.type) { + switch (mem->type) { case VIA_MEM_VIDEO: - if (via_fb_free(&mem) == 0) + if (via_fb_free(mem) == 0) return 0; break; case VIA_MEM_AGP: - if (via_agp_free(&mem) == 0) + if (via_agp_free(mem) == 0) return 0; break; } @@ -356,7 +341,7 @@ static int via_agp_free(drm_via_mem_t * mem) retval = -1; } - DRM_DEBUG("free agp, free = %ld\n", agp.free); + DRM_DEBUG("free agp, free = %ld\n", agp.nfree); return retval; } diff --git a/shared-core/via_video.c b/shared-core/via_video.c index 300ac61b..c15e75b5 100644 --- a/shared-core/via_video.c +++ b/shared-core/via_video.c @@ -65,10 +65,9 @@ void via_release_futex(drm_via_private_t * dev_priv, int context) } } -int via_decoder_futex(DRM_IOCTL_ARGS) +int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - drm_via_futex_t fx; + drm_via_futex_t *fx = data; volatile int *lock; drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; drm_via_sarea_t *sAPriv = dev_priv->sarea_priv; @@ -76,21 +75,18 @@ int via_decoder_futex(DRM_IOCTL_ARGS) DRM_DEBUG("%s\n", __FUNCTION__); - DRM_COPY_FROM_USER_IOCTL(fx, (drm_via_futex_t __user *) data, - sizeof(fx)); - - if (fx.lock > VIA_NR_XVMC_LOCKS) + if (fx->lock > VIA_NR_XVMC_LOCKS) return -EFAULT; - lock = (volatile int *)XVMCLOCKPTR(sAPriv, fx.lock); + lock = (volatile int *)XVMCLOCKPTR(sAPriv, fx->lock); - switch (fx.func) { + switch (fx->func) { case VIA_FUTEX_WAIT: - DRM_WAIT_ON(ret, dev_priv->decoder_queue[fx.lock], - (fx.ms / 10) * (DRM_HZ / 100), *lock != fx.val); + DRM_WAIT_ON(ret, dev_priv->decoder_queue[fx->lock], + (fx->ms / 10) * (DRM_HZ / 100), *lock != fx->val); return ret; case VIA_FUTEX_WAKE: - DRM_WAKEUP(&(dev_priv->decoder_queue[fx.lock])); + DRM_WAKEUP(&(dev_priv->decoder_queue[fx->lock])); return 0; } return 0; From f68ad6d1abdce7d3c11cc2e90745c0d1e565fe77 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Sat, 21 Jul 2007 21:50:25 +1000 Subject: [PATCH 166/437] fix drm no-compile due to BSD :-) --- linux-core/drm_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index 85e3ba47..dba0a7ba 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -599,7 +599,7 @@ int drm_ioctl(struct inode *inode, struct file *filp, else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) ioctl = &drm_ioctls[nr]; else { - errno = -EINVAL; + retcode = -EINVAL; goto err_i1; } From b43b0b2b32a31bcb81042659ffcc95b8975e42cf Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Sat, 21 Jul 2007 22:11:41 +1000 Subject: [PATCH 167/437] fix missing brace placement for IOC_IN --- linux-core/drm_drv.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index dba0a7ba..8a675790 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -615,9 +615,10 @@ int drm_ioctl(struct inode *inode, struct file *filp, if (cmd & IOC_IN) { if (copy_from_user(kdata, (void __user 
*)arg, - _IOC_SIZE(cmd)) != 0) + _IOC_SIZE(cmd)) != 0) { retcode = -EACCES; - goto err_i1; + goto err_i1; + } } if (!func) { From 0844c46759b96d52c4952fceb96f7c6bb74b2ce7 Mon Sep 17 00:00:00 2001 From: Pekka Paalanen Date: Sat, 21 Jul 2007 23:13:25 +0300 Subject: [PATCH 168/437] Fix misc ioctl issues, makes Nouveau run. Debug print fix in drm_release(). Forgotten local variable init in drm_setversion(). Unnecessary put_user() in drm_addmap_ioctl(). ioctl->cmd check broken in drm_ioctl(); workaround. --- linux-core/drm_bufs.c | 3 +-- linux-core/drm_drv.c | 10 ++++++++-- linux-core/drm_fops.c | 2 +- linux-core/drm_ioctl.c | 2 +- 4 files changed, 11 insertions(+), 6 deletions(-) diff --git a/linux-core/drm_bufs.c b/linux-core/drm_bufs.c index e8864df0..665bc65d 100644 --- a/linux-core/drm_bufs.c +++ b/linux-core/drm_bufs.c @@ -343,8 +343,7 @@ int drm_addmap_ioctl(struct drm_device *dev, void *data, return err; /* avoid a warning on 64-bit, this casting isn't very nice, but the API is set so too late */ - if (put_user((void *)(unsigned long)maplist->user_token, &map->handle)) - return -EFAULT; + map->handle = (void *)(unsigned long)maplist->user_token; return 0; } diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index 8a675790..cc676bda 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -602,12 +602,18 @@ int drm_ioctl(struct inode *inode, struct file *filp, retcode = -EINVAL; goto err_i1; } - +#if 0 + /* + * This check is disabled, because driver private ioctl->cmd + * are not the ioctl commands with size and direction bits but + * just the indices. The DRM core ioctl->cmd are the proper ioctl + * commands. The drivers' ioctl tables need to be fixed. + */ if (ioctl->cmd != cmd) { retcode = -EINVAL; goto err_i1; } - +#endif func = ioctl->func; /* is there a local override? */ if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl) diff --git a/linux-core/drm_fops.c b/linux-core/drm_fops.c index 251ee5b5..ab5f4ca5 100644 --- a/linux-core/drm_fops.c +++ b/linux-core/drm_fops.c @@ -403,7 +403,7 @@ int drm_release(struct inode *inode, struct file *filp) */ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n", - current->pid, (long)old_encode_dev(dev), + current->pid, (long)old_encode_dev(file_priv->head->device), dev->open_count); if (dev->driver->reclaim_buffers_locked && dev->lock.hw_lock) { diff --git a/linux-core/drm_ioctl.c b/linux-core/drm_ioctl.c index 717e23c0..f3f757da 100644 --- a/linux-core/drm_ioctl.c +++ b/linux-core/drm_ioctl.c @@ -301,7 +301,7 @@ int drm_getstats(struct drm_device *dev, void *data, int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_set_version *sv = data; - int if_version, retcode; + int if_version, retcode = 0; if (sv->drm_di_major != -1) { if (sv->drm_di_major != DRM_IF_MAJOR || From 5d6fdd9d7924fde8ce62631e6bdce8d5fe33fc3d Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Sat, 21 Jul 2007 20:34:56 -0700 Subject: [PATCH 169/437] Clean up xgi_cmd_info and associated code. There were numerous unnecessary fields in xgi_cmd_info. The remaining fields had pretty crummy names. Cut out the cruft, and rename the rest. As a result, the unused parameter "triggerCounter" to triggerHWCommandList can be removed. 
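(For orientation, not part of the patch itself: after this cleanup xgi_cmd_info carries only the type, hardware address, size and debug id of the first batch. A caller of the submit ioctl would fill it roughly as sketched here; the field names follow the new struct, the values are placeholders.)

    /* Illustrative only -- names match the reworked xgi_cmd_info, values are made up. */
    struct xgi_cmd_info cmd = {
            .type    = BTYPE_3D,   /* enum xgi_batch_type, mapped to a begin command */
            .hw_addr = 0x100000,   /* GART address of the batch; >>4 goes into begin[2] */
            .size    = 0x40,       /* OR'd with BEGIN_LINK_ENABLE_MASK into begin[1] */
            .id      = 1,          /* masked by BEGIN_BEGIN_IDENTIFICATION_MASK into begin[0] */
    };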
--- linux-core/xgi_cmdlist.c | 36 +++++++++++++++--------------------- shared-core/xgi_drm.h | 11 ++++------- 2 files changed, 19 insertions(+), 28 deletions(-) diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index b93541f3..5c31fa27 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -35,8 +35,7 @@ struct xgi_cmdring_info s_cmdring; static void addFlush2D(struct xgi_info * info); static unsigned int get_batch_command(enum xgi_batch_type type); -static void triggerHWCommandList(struct xgi_info * info, - unsigned int triggerCounter); +static void triggerHWCommandList(struct xgi_info * info); static void xgi_cmdlist_reset(void); int xgi_cmdlist_initialize(struct xgi_info * info, size_t size) @@ -83,14 +82,14 @@ unsigned int get_batch_command(enum xgi_batch_type type) static void xgi_submit_cmdlist(struct xgi_info * info, const struct xgi_cmd_info * pCmdInfo) { - const unsigned int cmd = get_batch_command(pCmdInfo->_firstBeginType); + const unsigned int cmd = get_batch_command(pCmdInfo->type); u32 begin[4]; - begin[0] = (cmd << 24) | (BEGIN_VALID_MASK) | - (BEGIN_BEGIN_IDENTIFICATION_MASK & pCmdInfo->_curDebugID); - begin[1] = BEGIN_LINK_ENABLE_MASK | pCmdInfo->_firstSize; - begin[2] = pCmdInfo->_firstBeginAddr >> 4; + begin[0] = (cmd << 24) | BEGIN_VALID_MASK + | (BEGIN_BEGIN_IDENTIFICATION_MASK & pCmdInfo->id); + begin[1] = BEGIN_LINK_ENABLE_MASK | pCmdInfo->size; + begin[2] = pCmdInfo->hw_addr >> 4; begin[3] = 0; if (s_cmdring._lastBatchStartAddr == 0) { @@ -127,7 +126,7 @@ static void xgi_submit_cmdlist(struct xgi_info * info, DRM_INFO("s_cmdring._lastBatchStartAddr != 0\n"); - if (pCmdInfo->_firstBeginType == BTYPE_3D) { + if (pCmdInfo->type == BTYPE_3D) { addFlush2D(info); } @@ -141,10 +140,10 @@ static void xgi_submit_cmdlist(struct xgi_info * info, wmb(); lastBatchVirtAddr[0] = begin[0]; - triggerHWCommandList(info, pCmdInfo->_beginCount); + triggerHWCommandList(info); } - s_cmdring._lastBatchStartAddr = pCmdInfo->_lastBeginAddr; + s_cmdring._lastBatchStartAddr = pCmdInfo->hw_addr; DRM_INFO("%s: exit\n", __func__); } @@ -159,7 +158,7 @@ int xgi_submit_cmdlist_ioctl(DRM_IOCTL_ARGS) (struct xgi_cmd_info __user *) data, sizeof(cmd_list)); - if (cmd_list._firstBeginType > BTYPE_CTRL) { + if (cmd_list.type > BTYPE_CTRL) { return DRM_ERR(EINVAL); } @@ -234,18 +233,13 @@ void xgi_cmdlist_cleanup(struct xgi_info * info) } } -static void triggerHWCommandList(struct xgi_info * info, - unsigned int triggerCounter) +static void triggerHWCommandList(struct xgi_info * info) { static unsigned int s_triggerID = 1; - //Fix me, currently we just trigger one time - while (triggerCounter--) { - dwWriteReg(info->mmio_map, - BASE_3D_ENG + M2REG_PCI_TRIGGER_REGISTER_ADDRESS, - 0x05000000 + (0x0ffff & s_triggerID++)); - // xgi_waitfor_pci_idle(info); - } + dwWriteReg(info->mmio_map, + BASE_3D_ENG + M2REG_PCI_TRIGGER_REGISTER_ADDRESS, + 0x05000000 + (0x0ffff & s_triggerID++)); } @@ -284,7 +278,7 @@ static void addFlush2D(struct xgi_info * info) lastBatchVirtAddr[0] = (get_batch_command(BTYPE_CTRL) << 24) | (BEGIN_VALID_MASK); - triggerHWCommandList(info, 1); + triggerHWCommandList(info); s_cmdring._cmdRingOffset += 0x20; s_cmdring._lastBatchStartAddr = flushBatchHWAddr; diff --git a/shared-core/xgi_drm.h b/shared-core/xgi_drm.h index 66cb4efb..6f50d4e1 100644 --- a/shared-core/xgi_drm.h +++ b/shared-core/xgi_drm.h @@ -85,13 +85,10 @@ enum xgi_batch_type { }; struct xgi_cmd_info { - unsigned int _firstBeginType; - __u32 _firstBeginAddr; - __u32 _firstSize; - __u32 
_curDebugID; - __u32 _lastBeginAddr; - unsigned int _beginCount; - + __u32 type; + __u32 hw_addr; + __u32 size; + __u32 id; }; struct xgi_state_info { From 3265a61f895a1d35072984e9cdc71aad898647fa Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Sat, 21 Jul 2007 20:39:22 -0700 Subject: [PATCH 170/437] Make s_cmdring a field in the xgi_info structure instead of a global. --- linux-core/xgi_cmdlist.c | 54 +++++++++++++++++++--------------------- linux-core/xgi_cmdlist.h | 1 + linux-core/xgi_drv.h | 3 +++ 3 files changed, 30 insertions(+), 28 deletions(-) diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index 5c31fa27..a040fa15 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -31,12 +31,10 @@ #include "xgi_misc.h" #include "xgi_cmdlist.h" -struct xgi_cmdring_info s_cmdring; - static void addFlush2D(struct xgi_info * info); static unsigned int get_batch_command(enum xgi_batch_type type); static void triggerHWCommandList(struct xgi_info * info); -static void xgi_cmdlist_reset(void); +static void xgi_cmdlist_reset(struct xgi_info * info); int xgi_cmdlist_initialize(struct xgi_info * info, size_t size) { @@ -51,11 +49,11 @@ int xgi_cmdlist_initialize(struct xgi_info * info, size_t size) return err; } - s_cmdring._cmdRingSize = mem_alloc.size; - s_cmdring._cmdRingBuffer = mem_alloc.hw_addr; - s_cmdring._cmdRingAllocOffset = mem_alloc.offset; - s_cmdring._lastBatchStartAddr = 0; - s_cmdring._cmdRingOffset = 0; + info->cmdring._cmdRingSize = mem_alloc.size; + info->cmdring._cmdRingBuffer = mem_alloc.hw_addr; + info->cmdring._cmdRingAllocOffset = mem_alloc.offset; + info->cmdring._lastBatchStartAddr = 0; + info->cmdring._cmdRingOffset = 0; return 0; } @@ -92,7 +90,7 @@ static void xgi_submit_cmdlist(struct xgi_info * info, begin[2] = pCmdInfo->hw_addr >> 4; begin[3] = 0; - if (s_cmdring._lastBatchStartAddr == 0) { + if (info->cmdring._lastBatchStartAddr == 0) { const unsigned int portOffset = BASE_3D_ENG + (cmd << 2); @@ -124,7 +122,7 @@ static void xgi_submit_cmdlist(struct xgi_info * info, } else { u32 *lastBatchVirtAddr; - DRM_INFO("s_cmdring._lastBatchStartAddr != 0\n"); + DRM_INFO("info->cmdring._lastBatchStartAddr != 0\n"); if (pCmdInfo->type == BTYPE_3D) { addFlush2D(info); @@ -132,7 +130,7 @@ static void xgi_submit_cmdlist(struct xgi_info * info, lastBatchVirtAddr = xgi_find_pcie_virt(info, - s_cmdring._lastBatchStartAddr); + info->cmdring._lastBatchStartAddr); lastBatchVirtAddr[1] = begin[1]; lastBatchVirtAddr[2] = begin[2]; @@ -143,7 +141,7 @@ static void xgi_submit_cmdlist(struct xgi_info * info, triggerHWCommandList(info); } - s_cmdring._lastBatchStartAddr = pCmdInfo->hw_addr; + info->cmdring._lastBatchStartAddr = pCmdInfo->hw_addr; DRM_INFO("%s: exit\n", __func__); } @@ -188,7 +186,7 @@ int xgi_state_change(struct xgi_info * info, unsigned int to, // stop to received batch } else if ((from == STATE_CONSOLE) && (to == STATE_GRAPHIC)) { DRM_INFO("[kd] I see, now is to enterVT\n"); - xgi_cmdlist_reset(); + xgi_cmdlist_reset(info); } else if ((from == STATE_GRAPHIC) && ((to == STATE_LOGOUT) || (to == STATE_REBOOT) @@ -217,19 +215,19 @@ int xgi_state_change_ioctl(DRM_IOCTL_ARGS) } -void xgi_cmdlist_reset(void) +void xgi_cmdlist_reset(struct xgi_info * info) { - s_cmdring._lastBatchStartAddr = 0; - s_cmdring._cmdRingOffset = 0; + info->cmdring._lastBatchStartAddr = 0; + info->cmdring._cmdRingOffset = 0; } void xgi_cmdlist_cleanup(struct xgi_info * info) { - if (s_cmdring._cmdRingBuffer != 0) { - xgi_pcie_free(info, s_cmdring._cmdRingAllocOffset, NULL); - 
s_cmdring._cmdRingBuffer = 0; - s_cmdring._cmdRingOffset = 0; - s_cmdring._cmdRingSize = 0; + if (info->cmdring._cmdRingBuffer != 0) { + xgi_pcie_free(info, info->cmdring._cmdRingAllocOffset, NULL); + info->cmdring._cmdRingBuffer = 0; + info->cmdring._cmdRingOffset = 0; + info->cmdring._cmdRingSize = 0; } } @@ -250,11 +248,11 @@ static void addFlush2D(struct xgi_info * info) u32 *lastBatchVirtAddr; /* check buf is large enough to contain a new flush batch */ - if ((s_cmdring._cmdRingOffset + 0x20) >= s_cmdring._cmdRingSize) { - s_cmdring._cmdRingOffset = 0; + if ((info->cmdring._cmdRingOffset + 0x20) >= info->cmdring._cmdRingSize) { + info->cmdring._cmdRingOffset = 0; } - flushBatchHWAddr = s_cmdring._cmdRingBuffer + s_cmdring._cmdRingOffset; + flushBatchHWAddr = info->cmdring._cmdRingBuffer + info->cmdring._cmdRingOffset; flushBatchVirtAddr = xgi_find_pcie_virt(info, flushBatchHWAddr); /* not using memcpy for I assume the address is discrete */ @@ -267,9 +265,9 @@ static void addFlush2D(struct xgi_info * info) *(flushBatchVirtAddr + 6) = FLUSH_2D; *(flushBatchVirtAddr + 7) = FLUSH_2D; - // ASSERT(s_cmdring._lastBatchStartAddr != NULL); + // ASSERT(info->cmdring._lastBatchStartAddr != NULL); lastBatchVirtAddr = - xgi_find_pcie_virt(info, s_cmdring._lastBatchStartAddr); + xgi_find_pcie_virt(info, info->cmdring._lastBatchStartAddr); lastBatchVirtAddr[1] = BEGIN_LINK_ENABLE_MASK + 0x08; lastBatchVirtAddr[2] = flushBatchHWAddr >> 4; @@ -280,6 +278,6 @@ static void addFlush2D(struct xgi_info * info) triggerHWCommandList(info); - s_cmdring._cmdRingOffset += 0x20; - s_cmdring._lastBatchStartAddr = flushBatchHWAddr; + info->cmdring._cmdRingOffset += 0x20; + info->cmdring._lastBatchStartAddr = flushBatchHWAddr; } diff --git a/linux-core/xgi_cmdlist.h b/linux-core/xgi_cmdlist.h index 08029386..7f2c54ac 100644 --- a/linux-core/xgi_cmdlist.h +++ b/linux-core/xgi_cmdlist.h @@ -66,6 +66,7 @@ struct xgi_cmdring_info { u32 _cmdRingOffset; }; +struct xgi_info; extern int xgi_cmdlist_initialize(struct xgi_info * info, size_t size); extern int xgi_state_change(struct xgi_info * info, unsigned int to, diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 20965876..0aab04d8 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -42,6 +42,7 @@ #define DRIVER_MINOR 8 #define DRIVER_PATCHLEVEL 0 +#include "xgi_cmdlist.h" #include "xgi_drm.h" struct xgi_aperture { @@ -90,6 +91,8 @@ struct xgi_info { struct semaphore fb_sem; struct semaphore pcie_sem; + + struct xgi_cmdring_info cmdring; }; enum PcieOwner { From 1a0775760c0eecbb238f0e928b185c267c1c3783 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Sat, 21 Jul 2007 21:35:06 -0700 Subject: [PATCH 171/437] Rename and document fields of xgi_cmdring_info. 
--- linux-core/xgi_cmdlist.c | 71 ++++++++++++++++++---------------------- linux-core/xgi_cmdlist.h | 33 ++++++++++++++++--- 2 files changed, 59 insertions(+), 45 deletions(-) diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index a040fa15..53bada50 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -49,11 +49,12 @@ int xgi_cmdlist_initialize(struct xgi_info * info, size_t size) return err; } - info->cmdring._cmdRingSize = mem_alloc.size; - info->cmdring._cmdRingBuffer = mem_alloc.hw_addr; - info->cmdring._cmdRingAllocOffset = mem_alloc.offset; - info->cmdring._lastBatchStartAddr = 0; - info->cmdring._cmdRingOffset = 0; + info->cmdring.ptr = xgi_find_pcie_virt(info, mem_alloc.offset); + info->cmdring.size = mem_alloc.size; + info->cmdring.ring_hw_base = mem_alloc.hw_addr; + info->cmdring.ring_gart_base = mem_alloc.offset; + info->cmdring.last_ptr = NULL; + info->cmdring.ring_offset = 0; return 0; } @@ -90,7 +91,7 @@ static void xgi_submit_cmdlist(struct xgi_info * info, begin[2] = pCmdInfo->hw_addr >> 4; begin[3] = 0; - if (info->cmdring._lastBatchStartAddr == 0) { + if (info->cmdring.last_ptr == NULL) { const unsigned int portOffset = BASE_3D_ENG + (cmd << 2); @@ -120,28 +121,22 @@ static void xgi_submit_cmdlist(struct xgi_info * info, dwWriteReg(info->mmio_map, portOffset + 8, begin[2]); dwWriteReg(info->mmio_map, portOffset + 12, begin[3]); } else { - u32 *lastBatchVirtAddr; - - DRM_INFO("info->cmdring._lastBatchStartAddr != 0\n"); + DRM_INFO("info->cmdring.last_ptr != NULL\n"); if (pCmdInfo->type == BTYPE_3D) { addFlush2D(info); } - lastBatchVirtAddr = - xgi_find_pcie_virt(info, - info->cmdring._lastBatchStartAddr); - - lastBatchVirtAddr[1] = begin[1]; - lastBatchVirtAddr[2] = begin[2]; - lastBatchVirtAddr[3] = begin[3]; + info->cmdring.last_ptr[1] = begin[1]; + info->cmdring.last_ptr[2] = begin[2]; + info->cmdring.last_ptr[3] = begin[3]; wmb(); - lastBatchVirtAddr[0] = begin[0]; + info->cmdring.last_ptr[0] = begin[0]; triggerHWCommandList(info); } - info->cmdring._lastBatchStartAddr = pCmdInfo->hw_addr; + info->cmdring.last_ptr = xgi_find_pcie_virt(info, pCmdInfo->hw_addr); DRM_INFO("%s: exit\n", __func__); } @@ -217,17 +212,17 @@ int xgi_state_change_ioctl(DRM_IOCTL_ARGS) void xgi_cmdlist_reset(struct xgi_info * info) { - info->cmdring._lastBatchStartAddr = 0; - info->cmdring._cmdRingOffset = 0; + info->cmdring.last_ptr = NULL; + info->cmdring.ring_offset = 0; } void xgi_cmdlist_cleanup(struct xgi_info * info) { - if (info->cmdring._cmdRingBuffer != 0) { - xgi_pcie_free(info, info->cmdring._cmdRingAllocOffset, NULL); - info->cmdring._cmdRingBuffer = 0; - info->cmdring._cmdRingOffset = 0; - info->cmdring._cmdRingSize = 0; + if (info->cmdring.ring_hw_base != 0) { + xgi_pcie_free(info, info->cmdring.ring_gart_base, NULL); + info->cmdring.ring_hw_base = 0; + info->cmdring.ring_offset = 0; + info->cmdring.size = 0; } } @@ -245,15 +240,15 @@ static void addFlush2D(struct xgi_info * info) { u32 *flushBatchVirtAddr; u32 flushBatchHWAddr; - u32 *lastBatchVirtAddr; /* check buf is large enough to contain a new flush batch */ - if ((info->cmdring._cmdRingOffset + 0x20) >= info->cmdring._cmdRingSize) { - info->cmdring._cmdRingOffset = 0; + if ((info->cmdring.ring_offset + 0x20) >= info->cmdring.size) { + info->cmdring.ring_offset = 0; } - flushBatchHWAddr = info->cmdring._cmdRingBuffer + info->cmdring._cmdRingOffset; - flushBatchVirtAddr = xgi_find_pcie_virt(info, flushBatchHWAddr); + flushBatchHWAddr = info->cmdring.ring_hw_base + info->cmdring.ring_offset; 
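(After the rename the ring keeps both a hardware base and a kernel-space base pointer, so a byte offset into the ring can be resolved for the GPU and for the CPU without another xgi_find_pcie_virt() lookup. A sketch of that bookkeeping, paraphrasing the addFlush2D() change below:)

    u32  batch_hw   = info->cmdring.ring_hw_base + info->cmdring.ring_offset;
    u32 *batch_virt = info->cmdring.ptr + (info->cmdring.ring_offset / 4);  /* ptr is a u32 *, ring_offset is in bytes */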
+ flushBatchVirtAddr = info->cmdring.ptr + + (info->cmdring.ring_offset / 4); /* not using memcpy for I assume the address is discrete */ *(flushBatchVirtAddr + 0) = 0x10000000; @@ -265,19 +260,15 @@ static void addFlush2D(struct xgi_info * info) *(flushBatchVirtAddr + 6) = FLUSH_2D; *(flushBatchVirtAddr + 7) = FLUSH_2D; - // ASSERT(info->cmdring._lastBatchStartAddr != NULL); - lastBatchVirtAddr = - xgi_find_pcie_virt(info, info->cmdring._lastBatchStartAddr); - - lastBatchVirtAddr[1] = BEGIN_LINK_ENABLE_MASK + 0x08; - lastBatchVirtAddr[2] = flushBatchHWAddr >> 4; - lastBatchVirtAddr[3] = 0; + info->cmdring.last_ptr[1] = BEGIN_LINK_ENABLE_MASK + 0x08; + info->cmdring.last_ptr[2] = flushBatchHWAddr >> 4; + info->cmdring.last_ptr[3] = 0; wmb(); - lastBatchVirtAddr[0] = (get_batch_command(BTYPE_CTRL) << 24) + info->cmdring.last_ptr[0] = (get_batch_command(BTYPE_CTRL) << 24) | (BEGIN_VALID_MASK); triggerHWCommandList(info); - info->cmdring._cmdRingOffset += 0x20; - info->cmdring._lastBatchStartAddr = flushBatchHWAddr; + info->cmdring.ring_offset += 0x20; + info->cmdring.last_ptr = flushBatchVirtAddr; } diff --git a/linux-core/xgi_cmdlist.h b/linux-core/xgi_cmdlist.h index 7f2c54ac..5b444cf3 100644 --- a/linux-core/xgi_cmdlist.h +++ b/linux-core/xgi_cmdlist.h @@ -59,11 +59,34 @@ typedef enum { } CMD_SIZE; struct xgi_cmdring_info { - unsigned int _cmdRingSize; - u32 _cmdRingBuffer; - unsigned long _cmdRingAllocOffset; - u32 _lastBatchStartAddr; - u32 _cmdRingOffset; + /** + * Kernel space pointer to the base of the command ring. + */ + u32 * ptr; + + /** + * Size, in bytes, of the command ring. + */ + unsigned int size; + + /** + * Base address of the command ring from the hardware's PoV. + */ + unsigned int ring_hw_base; + + /** + * Offset, in bytes, from the base of PCI-e GART space to the start + * of the ring. + */ + unsigned long ring_gart_base; + + u32 * last_ptr; + + /** + * Offset, in bytes, from the start of the ring to the next available + * location to store a command. + */ + unsigned int ring_offset; }; struct xgi_info; From 877296ade051cd45c0c2e0354b9f6765f8030413 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Sat, 21 Jul 2007 21:36:11 -0700 Subject: [PATCH 172/437] xgi_mem_alloc::offset is a hardware offset, so it should be u32, not long. --- shared-core/xgi_drm.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/shared-core/xgi_drm.h b/shared-core/xgi_drm.h index 6f50d4e1..da2ea8b9 100644 --- a/shared-core/xgi_drm.h +++ b/shared-core/xgi_drm.h @@ -73,7 +73,7 @@ struct xgi_mem_alloc { /** * Offset of the allocation in the mapping. */ - unsigned long offset; + __u32 offset; }; enum xgi_batch_type { From 699207cf2fa0c5255365dd28eeb3dd760f362818 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Sat, 21 Jul 2007 21:37:45 -0700 Subject: [PATCH 173/437] Remove some extraneous debug messages. 
--- linux-core/xgi_cmdlist.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index 53bada50..98d80ef0 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -97,8 +97,6 @@ static void xgi_submit_cmdlist(struct xgi_info * info, /* Enable PCI Trigger Mode */ - DRM_INFO("Enable PCI Trigger Mode \n"); - dwWriteReg(info->mmio_map, BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS, (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | @@ -113,9 +111,6 @@ static void xgi_submit_cmdlist(struct xgi_info * info, /* Send PCI begin command */ - DRM_INFO("portOffset=%d, beginPort=%d\n", - portOffset, cmd << 2); - dwWriteReg(info->mmio_map, portOffset, begin[0]); dwWriteReg(info->mmio_map, portOffset + 4, begin[1]); dwWriteReg(info->mmio_map, portOffset + 8, begin[2]); From 94203840fe53edaf1556d1a0a8a27773c24a7fc5 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Sat, 21 Jul 2007 23:00:01 -0700 Subject: [PATCH 174/437] Bump version. --- linux-core/xgi_drv.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 0aab04d8..16102950 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -36,10 +36,10 @@ #define DRIVER_NAME "xgi" #define DRIVER_DESC "XGI XP5 / XP10 / XG47" -#define DRIVER_DATE "20070710" +#define DRIVER_DATE "20070721" #define DRIVER_MAJOR 0 -#define DRIVER_MINOR 8 +#define DRIVER_MINOR 9 #define DRIVER_PATCHLEVEL 0 #include "xgi_cmdlist.h" From 7e6d08f670a55d79ee037144aa29104e4e8fc700 Mon Sep 17 00:00:00 2001 From: Adrian Bunk Date: Mon, 23 Jul 2007 18:15:00 +1000 Subject: [PATCH 175/437] drm_rmmap_ioctl(): remove dead code This patch removes some obviously dead code spotted by the Coverity checker. Signed-off-by: Adrian Bunk --- linux-core/drm_bufs.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/linux-core/drm_bufs.c b/linux-core/drm_bufs.c index 665bc65d..f9987ca6 100644 --- a/linux-core/drm_bufs.c +++ b/linux-core/drm_bufs.c @@ -467,11 +467,6 @@ int drm_rmmap_ioctl(struct drm_device *dev, void *data, return -EINVAL; } - if (!map) { - mutex_unlock(&dev->struct_mutex); - return -EINVAL; - } - /* Register and framebuffer maps are permanent */ if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) { mutex_unlock(&dev->struct_mutex); From 2097d743f287d994bd001baedd39e3bc808999c6 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 23 Jul 2007 13:26:28 -0700 Subject: [PATCH 176/437] Eliminate XGI_CHECK_PCI_CONFIG. Based on review comments from airlied, XGI_CHECK_PCI_CONFIG is removed. He believes (and I tend to agree) that this is a largely unnecessary workaround for a bug elsewhere. --- linux-core/xgi_drv.c | 38 -------------------------------------- 1 file changed, 38 deletions(-) diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index bcb6946d..19a9a10d 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -139,40 +139,6 @@ MODULE_LICENSE("GPL and additional rights"); void xgi_kern_isr_bh(struct drm_device *dev); -/* - * verify access to pci config space wasn't disabled behind our back - * unfortunately, XFree86 enables/disables memory access in pci config space at - * various times (such as restoring initial pci config space settings during vt - * switches or when doing mulicard). As a result, all of our register accesses - * are garbage at this point. add a check to see if access was disabled and - * reenable any such access. 
- */ -#define XGI_CHECK_PCI_CONFIG(xgi) \ - xgi_check_pci_config(xgi, __LINE__) - -static inline void xgi_check_pci_config(struct xgi_info * info, int line) -{ - u16 cmd; - bool flag = 0; - - pci_read_config_word(info->dev->pdev, PCI_COMMAND, &cmd); - if (!(cmd & PCI_COMMAND_MASTER)) { - DRM_INFO("restoring bus mastering! (%d)\n", line); - cmd |= PCI_COMMAND_MASTER; - flag = 1; - } - - if (!(cmd & PCI_COMMAND_MEMORY)) { - DRM_INFO("restoring MEM access! (%d)\n", line); - cmd |= PCI_COMMAND_MEMORY; - flag = 1; - } - - if (flag) - pci_write_config_word(info->dev->pdev, PCI_COMMAND, cmd); -} - - int xgi_bootstrap(DRM_IOCTL_ARGS) { DRM_DEVICE; @@ -262,8 +228,6 @@ irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS) //DRM_INFO("xgi_kern_isr \n"); - //XGI_CHECK_PCI_CONFIG(info); - //xgi_dvi_irq_handler(info); if (need_to_run_bottom_half) { @@ -280,8 +244,6 @@ void xgi_kern_isr_bh(struct drm_device *dev) DRM_INFO("xgi_kern_isr_bh \n"); //xgi_dvi_irq_handler(info); - - XGI_CHECK_PCI_CONFIG(info); } int xgi_driver_load(struct drm_device *dev, unsigned long flags) From 03e932e32be6ae3de6994c6893c813a34623ad7d Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Mon, 23 Jul 2007 15:11:12 -0700 Subject: [PATCH 177/437] linux: Make DRM_IOCTL_GET_CLIENT return EINVAL when it can't find client #idx. Fixes the getclient test and dritest -c. --- linux-core/drm_ioctl.c | 25 +++++++++++-------------- 1 file changed, 11 insertions(+), 14 deletions(-) diff --git a/linux-core/drm_ioctl.c b/linux-core/drm_ioctl.c index f3f757da..9d52fd8a 100644 --- a/linux-core/drm_ioctl.c +++ b/linux-core/drm_ioctl.c @@ -229,26 +229,23 @@ int drm_getclient(struct drm_device *dev, void *data, idx = client->idx; mutex_lock(&dev->struct_mutex); - - if (list_empty(&dev->filelist)) { - mutex_unlock(&dev->struct_mutex); - return -EINVAL; - } i = 0; list_for_each_entry(pt, &dev->filelist, lhead) { - if (i++ >= idx) - break; - } + if (i++ >= idx) { + client->auth = pt->authenticated; + client->pid = pt->pid; + client->uid = pt->uid; + client->magic = pt->magic; + client->iocs = pt->ioctl_count; + mutex_unlock(&dev->struct_mutex); - client->auth = pt->authenticated; - client->pid = pt->pid; - client->uid = pt->uid; - client->magic = pt->magic; - client->iocs = pt->ioctl_count; + return 0; + } + } mutex_unlock(&dev->struct_mutex); - return 0; + return -EINVAL; } /** From 388a2c54eea7575a5b046da3df09f7a1c63551d6 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 23 Jul 2007 18:50:07 -0700 Subject: [PATCH 178/437] Minor log message clean up. 
--- linux-core/xgi_cmdlist.c | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index 98d80ef0..f5fc1b94 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -116,7 +116,7 @@ static void xgi_submit_cmdlist(struct xgi_info * info, dwWriteReg(info->mmio_map, portOffset + 8, begin[2]); dwWriteReg(info->mmio_map, portOffset + 12, begin[3]); } else { - DRM_INFO("info->cmdring.last_ptr != NULL\n"); + DRM_DEBUG("info->cmdring.last_ptr != NULL\n"); if (pCmdInfo->type == BTYPE_3D) { addFlush2D(info); @@ -132,7 +132,6 @@ static void xgi_submit_cmdlist(struct xgi_info * info, } info->cmdring.last_ptr = xgi_find_pcie_virt(info, pCmdInfo->hw_addr); - DRM_INFO("%s: exit\n", __func__); } @@ -172,19 +171,17 @@ int xgi_state_change(struct xgi_info * info, unsigned int to, #define STATE_SHUTDOWN 5 if ((from == STATE_GRAPHIC) && (to == STATE_CONSOLE)) { - DRM_INFO("[kd] I see, now is to leaveVT\n"); - // stop to received batch + DRM_INFO("Leaving graphical mode (probably VT switch)\n"); } else if ((from == STATE_CONSOLE) && (to == STATE_GRAPHIC)) { - DRM_INFO("[kd] I see, now is to enterVT\n"); + DRM_INFO("Entering graphical mode (probably VT switch)\n"); xgi_cmdlist_reset(info); } else if ((from == STATE_GRAPHIC) && ((to == STATE_LOGOUT) || (to == STATE_REBOOT) || (to == STATE_SHUTDOWN))) { - DRM_INFO("[kd] I see, not is to exit from X\n"); - // stop to received batch + DRM_INFO("Leaving graphical mode (probably X shutting down)\n"); } else { - DRM_ERROR("[kd] Should not happen\n"); + DRM_ERROR("Invalid state change.\n"); return DRM_ERR(EINVAL); } From 46214fc3979ed60b32289ade1b8efbba1c8bf732 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 23 Jul 2007 18:50:52 -0700 Subject: [PATCH 179/437] Minor log message clean up. --- linux-core/xgi_regs.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/linux-core/xgi_regs.h b/linux-core/xgi_regs.h index b211626a..f5311c1e 100644 --- a/linux-core/xgi_regs.h +++ b/linux-core/xgi_regs.h @@ -76,9 +76,10 @@ static inline u8 IN3CFB(struct drm_map * map, u8 index) */ static inline void dwWriteReg(struct drm_map * map, u32 addr, u32 data) { +#ifdef XGI_MMIO_DEBUG DRM_INFO("mmio_map->handle = 0x%p, addr = 0x%x, data = 0x%x\n", map->handle, addr, data); - +#endif DRM_WRITE32(map, addr, data); } From 887cb31ee9ec04e45829500f095aa4a3bc1095ea Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Tue, 24 Jul 2007 13:27:44 -0700 Subject: [PATCH 180/437] Fix bug preventing X server from restarting. The core DRM lastclose routine automatically destroys all mappings and releases SG memory. XP10 DRM and DDX assumed this data stayed around until module unload. xgi_bootstrap was reworked to recreate all these mappings. In addition, the drm_addmap for the GART backing store was moved into the kernel. This causes a change to the ioctl protocol and a version bump. 
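(In short: on last close the DRM core has already destroyed every mapping, so the driver only needs to forget its cached pointers, and the next bootstrap ioctl recreates whatever is missing via drm_addmap(). A minimal sketch of that handshake, paraphrasing the diff below rather than reproducing it:)

    static void xgi_driver_lastclose(drm_device_t *dev)
    {
            struct xgi_info *info = dev->dev_private;

            if (info != NULL) {
                    /* The maps are already gone; clear the stale pointers so
                     * xgi_bootstrap knows it must recreate them. */
                    info->mmio_map = NULL;
                    info->fb_map = NULL;
                    info->pcie_map = NULL;
            }
    }

    /* and in xgi_bootstrap(), each region is re-added only when absent: */
    if (info->mmio_map == NULL) {
            err = drm_addmap(dev, info->mmio.base, info->mmio.size,
                             _DRM_REGISTERS, _DRM_KERNEL, &info->mmio_map);
            if (err)
                    return err;
            xgi_enable_mmio(info);
    }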
--- linux-core/xgi_drv.c | 128 ++++++++++++++++++++++++++++-------------- linux-core/xgi_drv.h | 4 +- shared-core/xgi_drm.h | 4 +- 3 files changed, 90 insertions(+), 46 deletions(-) diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index 19a9a10d..11d6e950 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -64,6 +64,7 @@ static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); static int xgi_driver_load(struct drm_device *dev, unsigned long flags); static int xgi_driver_unload(struct drm_device *dev); static void xgi_driver_preclose(struct drm_device * dev, DRMFILE filp); +static void xgi_driver_lastclose(drm_device_t * dev); static irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS); @@ -75,6 +76,7 @@ static struct drm_driver driver = { .load = xgi_driver_load, .unload = xgi_driver_unload, .preclose = xgi_driver_preclose, + .lastclose = xgi_driver_lastclose, .dma_quiescent = NULL, .irq_preinstall = NULL, .irq_postinstall = NULL, @@ -144,26 +146,25 @@ int xgi_bootstrap(DRM_IOCTL_ARGS) DRM_DEVICE; struct xgi_info *info = dev->dev_private; struct xgi_bootstrap bs; + struct drm_map_list *maplist; int err; DRM_COPY_FROM_USER_IOCTL(bs, (struct xgi_bootstrap __user *) data, sizeof(bs)); - if (info->bootstrap_done) { - return 0; + if (info->mmio_map == NULL) { + err = drm_addmap(dev, info->mmio.base, info->mmio.size, + _DRM_REGISTERS, _DRM_KERNEL, + &info->mmio_map); + if (err) { + DRM_ERROR("Unable to map MMIO region: %d\n", err); + return err; + } + + xgi_enable_mmio(info); } - err = drm_addmap(dev, info->mmio.base, info->mmio.size, - _DRM_REGISTERS, _DRM_KERNEL, - &info->mmio_map); - if (err) { - DRM_ERROR("Unable to map MMIO region: %d\n", err); - return err; - } - - xgi_enable_mmio(info); - //xgi_enable_ge(info); info->fb.size = IN3CFB(info->mmio_map, 0x54) * 8 * 1024 * 1024; @@ -172,38 +173,64 @@ int xgi_bootstrap(DRM_IOCTL_ARGS) if ((info->fb.base == 0) || (info->fb.size == 0)) { - DRM_ERROR("frame buffer appears to be wrong: 0x%lx 0x%x\n", + DRM_ERROR("framebuffer appears to be wrong: 0x%lx 0x%x\n", (unsigned long) info->fb.base, info->fb.size); return DRM_ERR(EINVAL); } /* Init the resource manager */ - err = xgi_fb_heap_init(info); - if (err) { - DRM_ERROR("xgi_fb_heap_init() failed\n"); - return err; + if (!info->fb_heap.initialized) { + err = xgi_fb_heap_init(info); + if (err) { + DRM_ERROR("Unable to initialize FB heap.\n"); + return err; + } } - - info->pcie.size = bs.gart_size * (1024 * 1024); + info->pcie.size = bs.gart.size; /* Init the resource manager */ - err = xgi_pcie_heap_init(info); - if (err) { - DRM_ERROR("xgi_pcie_heap_init() failed\n"); - return err; + if (!info->pcie_heap.initialized) { + err = xgi_pcie_heap_init(info); + if (err) { + DRM_ERROR("Unable to initialize GART heap.\n"); + return err; + } + + /* Alloc 1M bytes for cmdbuffer which is flush2D batch array */ + err = xgi_cmdlist_initialize(info, 0x100000); + if (err) { + DRM_ERROR("xgi_cmdlist_initialize() failed\n"); + return err; + } } - /* Alloc 1M bytes for cmdbuffer which is flush2D batch array */ - err = xgi_cmdlist_initialize(info, 0x100000); - if (err) { - DRM_ERROR("xgi_cmdlist_initialize() failed\n"); - return err; + + if (info->pcie_map == NULL) { + err = drm_addmap(info->dev, 0, info->pcie.size, + _DRM_SCATTER_GATHER, _DRM_LOCKED, + & info->pcie_map); + if (err) { + DRM_ERROR("Could not add map for GART backing " + "store.\n"); + return err; + } } - info->bootstrap_done = 1; + + maplist = drm_find_matching_map(dev, info->pcie_map); + if (maplist == NULL) { + DRM_ERROR("Could 
not find GART backing store map.\n"); + return DRM_ERR(EINVAL); + } + + bs.gart = *info->pcie_map; + bs.gart.handle = (void *)(unsigned long) maplist->user_token; + DRM_COPY_TO_USER_IOCTL((struct xgi_bootstrap __user *) data, + bs, sizeof(bs)); + return 0; } @@ -217,6 +244,33 @@ void xgi_driver_preclose(struct drm_device * dev, DRMFILE filp) } +void xgi_driver_lastclose(drm_device_t * dev) +{ + struct xgi_info * info = dev->dev_private; + + if (info != NULL) { + /* The core DRM lastclose routine will destroy all of our + * mappings for us. NULL out the pointers here so that + * xgi_bootstrap can do the right thing. + */ + info->pcie_map = NULL; + info->mmio_map = NULL; + info->fb_map = NULL; + + xgi_cmdlist_cleanup(info); + + if (info->fb_heap.initialized) { + xgi_mem_heap_cleanup(&info->fb_heap); + } + + if (info->pcie_heap.initialized) { + xgi_mem_heap_cleanup(&info->pcie_heap); + xgi_pcie_lut_cleanup(info); + } + } +} + + /* * driver receives an interrupt if someone waiting, then hand it off. */ @@ -298,23 +352,13 @@ int xgi_driver_unload(struct drm_device *dev) { struct xgi_info * info = dev->dev_private; - xgi_cmdlist_cleanup(info); - if (info->fb_map != NULL) { - drm_rmmap(info->dev, info->fb_map); - } - - if (info->mmio_map != NULL) { - drm_rmmap(info->dev, info->mmio_map); - } - - xgi_mem_heap_cleanup(&info->fb_heap); - xgi_mem_heap_cleanup(&info->pcie_heap); - xgi_pcie_lut_cleanup(info); - if (xgi_mem_block_cache) { kmem_cache_destroy(xgi_mem_block_cache); xgi_mem_block_cache = NULL; } + drm_free(info, sizeof(*info), DRM_MEM_DRIVER); + dev->dev_private = NULL; + return 0; } diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 16102950..ae5fe07e 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -36,10 +36,10 @@ #define DRIVER_NAME "xgi" #define DRIVER_DESC "XGI XP5 / XP10 / XG47" -#define DRIVER_DATE "20070721" +#define DRIVER_DATE "20070723" #define DRIVER_MAJOR 0 -#define DRIVER_MINOR 9 +#define DRIVER_MINOR 10 #define DRIVER_PATCHLEVEL 0 #include "xgi_cmdlist.h" diff --git a/shared-core/xgi_drm.h b/shared-core/xgi_drm.h index da2ea8b9..adce7066 100644 --- a/shared-core/xgi_drm.h +++ b/shared-core/xgi_drm.h @@ -49,7 +49,7 @@ struct xgi_bootstrap { /** * Size of PCI-e GART range in megabytes. */ - unsigned int gart_size; + struct drm_map gart; }; @@ -113,7 +113,7 @@ struct xgi_state_info { #define DRM_XGI_TEST_RWINKERNEL 9 #define DRM_XGI_STATE_CHANGE 10 -#define XGI_IOCTL_BOOTSTRAP DRM_IOW(DRM_COMMAND_BASE + DRM_XGI_BOOTSTRAP, struct xgi_bootstrap) +#define XGI_IOCTL_BOOTSTRAP DRM_IOWR(DRM_COMMAND_BASE + DRM_XGI_BOOTSTRAP, struct xgi_bootstrap) #define XGI_IOCTL_FB_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_XGI_FB_ALLOC, struct xgi_mem_alloc) #define XGI_IOCTL_FB_FREE DRM_IOW(DRM_COMMAND_BASE + DRM_XGI_FB_FREE, __u32) From 2ef2997ee38ff359c331b6a3febf194bd46e4962 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Tue, 24 Jul 2007 13:29:29 -0700 Subject: [PATCH 181/437] Fix flags for serveral ioctls. 
--- linux-core/xgi_drv.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index 11d6e950..3b520850 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -42,7 +42,7 @@ static struct pci_device_id pciidlist[] = { static int xgi_bootstrap(DRM_IOCTL_ARGS); static drm_ioctl_desc_t xgi_ioctls[] = { - [DRM_IOCTL_NR(DRM_XGI_BOOTSTRAP)] = {xgi_bootstrap, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_XGI_BOOTSTRAP)] = {xgi_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, [DRM_IOCTL_NR(DRM_XGI_FB_ALLOC)] = {xgi_fb_alloc_ioctl, DRM_AUTH}, [DRM_IOCTL_NR(DRM_XGI_FB_FREE)] = {xgi_fb_free_ioctl, DRM_AUTH}, @@ -55,7 +55,7 @@ static drm_ioctl_desc_t xgi_ioctls[] = { [DRM_IOCTL_NR(DRM_XGI_DEBUG_INFO)] = {xgi_restore_registers_ioctl, DRM_AUTH}, [DRM_IOCTL_NR(DRM_XGI_SUBMIT_CMDLIST)] = {xgi_submit_cmdlist_ioctl, DRM_AUTH}, [DRM_IOCTL_NR(DRM_XGI_TEST_RWINKERNEL)] = {xgi_test_rwinkernel_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_XGI_STATE_CHANGE)] = {xgi_state_change_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_XGI_STATE_CHANGE)] = {xgi_state_change_ioctl, DRM_AUTH|DRM_MASTER}, }; static const int xgi_max_ioctl = DRM_ARRAY_SIZE(xgi_ioctls); From 8e64d2ae862d5fa02e23c68db6b55393e1f86005 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Tue, 24 Jul 2007 13:36:02 -0700 Subject: [PATCH 182/437] Fix license formatting. --- linux-core/xgi_cmdlist.c | 46 +++++++++++++++++++--------------------- linux-core/xgi_cmdlist.h | 46 +++++++++++++++++++--------------------- linux-core/xgi_drv.c | 46 +++++++++++++++++++--------------------- linux-core/xgi_drv.h | 46 +++++++++++++++++++--------------------- linux-core/xgi_fb.c | 46 +++++++++++++++++++--------------------- linux-core/xgi_misc.c | 46 +++++++++++++++++++--------------------- linux-core/xgi_misc.h | 44 ++++++++++++++++++-------------------- linux-core/xgi_pcie.c | 46 +++++++++++++++++++--------------------- linux-core/xgi_regs.h | 44 ++++++++++++++++++-------------------- 9 files changed, 196 insertions(+), 214 deletions(-) diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index f5fc1b94..e4f9dbcd 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -1,29 +1,27 @@ - /**************************************************************************** - * Copyright (C) 2003-2006 by XGI Technology, Taiwan. - * * - * All Rights Reserved. * - * * + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * + * All Rights Reserved. + * * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation on the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NON-INFRINGEMENT. 
IN NO EVENT SHALL XGI AND/OR - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. ***************************************************************************/ #include "xgi_drv.h" diff --git a/linux-core/xgi_cmdlist.h b/linux-core/xgi_cmdlist.h index 5b444cf3..604c9aac 100644 --- a/linux-core/xgi_cmdlist.h +++ b/linux-core/xgi_cmdlist.h @@ -1,29 +1,27 @@ - /**************************************************************************** - * Copyright (C) 2003-2006 by XGI Technology, Taiwan. - * * - * All Rights Reserved. * - * * + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * + * All Rights Reserved. + * * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation on the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. ***************************************************************************/ #ifndef _XGI_CMDLIST_H_ diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index 3b520850..2c3384b0 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -1,29 +1,27 @@ - /**************************************************************************** - * Copyright (C) 2003-2006 by XGI Technology, Taiwan. - * * - * All Rights Reserved. * - * * + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * + * All Rights Reserved. + * * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation on the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
***************************************************************************/ #include "drmP.h" diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index ae5fe07e..2061189a 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -1,29 +1,27 @@ - /**************************************************************************** - * Copyright (C) 2003-2006 by XGI Technology, Taiwan. - * * - * All Rights Reserved. * - * * + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * + * All Rights Reserved. + * * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation on the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. ***************************************************************************/ #ifndef _XGI_DRV_H_ diff --git a/linux-core/xgi_fb.c b/linux-core/xgi_fb.c index a5885198..3d3b2ae0 100644 --- a/linux-core/xgi_fb.c +++ b/linux-core/xgi_fb.c @@ -1,29 +1,27 @@ - /**************************************************************************** - * Copyright (C) 2003-2006 by XGI Technology, Taiwan. - * * - * All Rights Reserved. * - * * + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * + * All Rights Reserved. 
+ * * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation on the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. ***************************************************************************/ #include "xgi_drv.h" diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index 7f3d9d6e..5e8c3da8 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -1,29 +1,27 @@ - /**************************************************************************** - * Copyright (C) 2003-2006 by XGI Technology, Taiwan. - * * - * All Rights Reserved. * - * * + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * + * All Rights Reserved. + * * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation on the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. 
- * * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. ***************************************************************************/ #include "xgi_drv.h" diff --git a/linux-core/xgi_misc.h b/linux-core/xgi_misc.h index 10638b2d..af19a11a 100644 --- a/linux-core/xgi_misc.h +++ b/linux-core/xgi_misc.h @@ -1,29 +1,27 @@ - /**************************************************************************** * Copyright (C) 2003-2006 by XGI Technology, Taiwan. - * * - * All Rights Reserved. * - * * + * + * All Rights Reserved. + * * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation on the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
+ * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. ***************************************************************************/ #ifndef _XGI_MISC_H_ diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index 9dee888b..537e82f5 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -1,29 +1,27 @@ - /**************************************************************************** - * Copyright (C) 2003-2006 by XGI Technology, Taiwan. - * * - * All Rights Reserved. * - * * + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * + * All Rights Reserved. + * * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation on the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL + * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. ***************************************************************************/ #include "xgi_drv.h" diff --git a/linux-core/xgi_regs.h b/linux-core/xgi_regs.h index f5311c1e..34268a56 100644 --- a/linux-core/xgi_regs.h +++ b/linux-core/xgi_regs.h @@ -1,29 +1,27 @@ - /**************************************************************************** - * Copyright (C) 2003-2006 by XGI Technology, Taiwan. - * * - * All Rights Reserved. * - * * + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * + * All Rights Reserved. + * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation on the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. ***************************************************************************/ #ifndef _XGI_REGS_H_ From 75a68635a8f7b0d4fb31031832cc282a39a4a1e7 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Tue, 24 Jul 2007 15:53:50 -0700 Subject: [PATCH 183/437] Pass correct offset to xgi_find_pcie_virt. The wrong offset was being passed to xgi_find_pcie_virt. This would cause an oops in addFlush2D. 
--- linux-core/xgi_cmdlist.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index e4f9dbcd..8ba8dc75 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -47,7 +47,7 @@ int xgi_cmdlist_initialize(struct xgi_info * info, size_t size) return err; } - info->cmdring.ptr = xgi_find_pcie_virt(info, mem_alloc.offset); + info->cmdring.ptr = xgi_find_pcie_virt(info, mem_alloc.hw_offset); info->cmdring.size = mem_alloc.size; info->cmdring.ring_hw_base = mem_alloc.hw_addr; info->cmdring.ring_gart_base = mem_alloc.offset; From 2bafeb673f14b1e3799bf00817138c0b8211635e Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Tue, 24 Jul 2007 16:17:30 -0700 Subject: [PATCH 184/437] Fix typo on previous commit. Sigh... --- linux-core/xgi_cmdlist.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index 8ba8dc75..490e9f39 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -47,7 +47,7 @@ int xgi_cmdlist_initialize(struct xgi_info * info, size_t size) return err; } - info->cmdring.ptr = xgi_find_pcie_virt(info, mem_alloc.hw_offset); + info->cmdring.ptr = xgi_find_pcie_virt(info, mem_alloc.hw_addr); info->cmdring.size = mem_alloc.size; info->cmdring.ring_hw_base = mem_alloc.hw_addr; info->cmdring.ring_gart_base = mem_alloc.offset; From be3099f26547f48066bbdd7a36578b54da9170b4 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Sun, 22 Jul 2007 09:51:34 +0100 Subject: [PATCH 185/437] Fix copy'n'paste-o in FreeBSD drawable code. --- bsd-core/drm_drawable.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bsd-core/drm_drawable.c b/bsd-core/drm_drawable.c index 14a29407..7e038ab9 100644 --- a/bsd-core/drm_drawable.c +++ b/bsd-core/drm_drawable.c @@ -45,7 +45,7 @@ drm_drawable_compare(struct bsd_drm_drawable_info *a, { if (a->handle > b->handle) return 1; - if (a->handle > b->handle) + if (a->handle < b->handle) return -1; return 0; } From f2528cbc965858c6a7a81d659f9d5f4da290b5ae Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Wed, 25 Jul 2007 12:54:15 -0700 Subject: [PATCH 186/437] Improve the drawable test to use multiple drawables. --- tests/updatedraw.c | 63 ++++++++++++++++++++++++++++++---------------- 1 file changed, 42 insertions(+), 21 deletions(-) diff --git a/tests/updatedraw.c b/tests/updatedraw.c index 1186783a..2f22fef2 100644 --- a/tests/updatedraw.c +++ b/tests/updatedraw.c @@ -83,16 +83,10 @@ set_draw_cliprects_2(int fd, int drawable) assert(ret == 0); } -/** - * Tests drawable management: adding, removing, and updating the cliprects of - * drawables. - */ -int main(int argc, char **argv) +static int add_drawable(int fd) { drm_draw_t drawarg; - int fd, ret, drawable; - - fd = drm_open_any_master(); + int ret; /* Create a drawable. * IOCTL_ADD_DRAW is RDWR, though it should really just be RD @@ -100,27 +94,54 @@ int main(int argc, char **argv) drawarg.handle = 0; ret = ioctl(fd, DRM_IOCTL_ADD_DRAW, &drawarg); assert(ret == 0); - drawable = drawarg.handle; + return drawarg.handle; +} - /* Do a series of cliprect updates */ - set_draw_cliprects_empty(fd, drawable); - set_draw_cliprects_2(fd, drawable); - set_draw_cliprects_empty(fd, drawable); +static int rm_drawable(int fd, int drawable, int fail) +{ + drm_draw_t drawarg; + int ret; - /* Remove our drawable */ + /* Create a drawable. 
+ * IOCTL_ADD_DRAW is RDWR, though it should really just be RD + */ drawarg.handle = drawable; ret = ioctl(fd, DRM_IOCTL_RM_DRAW, &drawarg); - assert(ret == 0); - drawable = drawarg.handle; + if (!fail) + assert(ret == 0); + else + assert(ret == -1 && errno == EINVAL); + + return drawarg.handle; +} + +/** + * Tests drawable management: adding, removing, and updating the cliprects of + * drawables. + */ +int main(int argc, char **argv) +{ + int fd, ret, d1, d2; + + fd = drm_open_any_master(); + + d1 = add_drawable(fd); + d2 = add_drawable(fd); + /* Do a series of cliprect updates */ + set_draw_cliprects_empty(fd, d1); + set_draw_cliprects_empty(fd, d2); + set_draw_cliprects_2(fd, d1); + set_draw_cliprects_empty(fd, d1); + + /* Remove our drawables */ + rm_drawable(fd, d1, 0); + rm_drawable(fd, d2, 0); /* Check that removing an unknown drawable returns error */ - drawarg.handle = 0x7fffffff; - ret = ioctl(fd, DRM_IOCTL_RM_DRAW, &drawarg); - assert(ret == -1 && errno == EINVAL); - drawable = drawarg.handle; + rm_drawable(fd, 0x7fffffff, 1); /* Attempt to set cliprects on a nonexistent drawable */ - set_draw_cliprects_empty_fail(fd, drawable); + set_draw_cliprects_empty_fail(fd, d1); close(fd); return 0; From e2d00715cc1d3919aadd5066ef7355049f417bd2 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Thu, 26 Jul 2007 13:25:21 +1000 Subject: [PATCH 187/437] drm: fix size argument for copy to/from user --- linux-core/drm_bo.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 75d89e46..b47b657f 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1720,7 +1720,7 @@ int drm_bo_op_ioctl(struct drm_device *dev, void *data, struct drm_file *file_pr if (next != 0) { curuserarg = (void __user *)next; if (copy_from_user(&curarg, curuserarg, - sizeof(arg)) != 0) + sizeof(curarg)) != 0) return -EFAULT; arg = &curarg; } @@ -1768,7 +1768,7 @@ int drm_bo_op_ioctl(struct drm_device *dev, void *data, struct drm_file *file_pr arg->d.rep.bo_info = rep; if (arg != data) { if (copy_to_user(curuserarg, &curarg, - sizeof(arg)) != 0) + sizeof(curarg)) != 0) return -EFAULT; } } while (next != 0); From 4175dd818110bd10d6d09190d30c271e89202b18 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Thu, 26 Jul 2007 15:26:36 +1000 Subject: [PATCH 188/437] drm/bo: set the req pointer for each buffer to validate --- linux-core/drm_bo.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index b47b657f..467d03ff 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1729,7 +1729,7 @@ int drm_bo_op_ioctl(struct drm_device *dev, void *data, struct drm_file *file_pr next = arg->next; continue; } - + req = &arg->d.req; ret = 0; switch (req->op) { case drm_bo_validate: From cf2d569daca6954d11a796f4d110148ae2e0c827 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Thu, 26 Jul 2007 10:14:17 -0700 Subject: [PATCH 189/437] Replace NO_MOVE/NO_EVICT flags to buffer objects with an ioctl to set pinning. This cleans up the create/validate interfaces for this very uncommon path, and makes pinned object creation much easier to use for the X Server. 
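A minimal usage sketch of the new interface (illustrative only, not part of the patch): it assumes "fd" is an authenticated DRM master file descriptor holding the hardware lock, and "bo" is a buffer object the caller has already created. drmBOSetPin() returns 0 on success or a negative errno, and on success it refreshes the drmBO fields from the kernel's reply.

    /* Pin "bo" while the hardware needs it at a fixed location, then
     * release the pin.  Pinned buffers are neither evicted from nor
     * moved within their memory type.
     */
    static int with_pinned_buffer(int fd, drmBO *bo)
    {
        int ret;

        ret = drmBOSetPin(fd, bo, 1);   /* pin */
        if (ret)
            return ret;

        /* ... program the buffer's location into the hardware ... */

        return drmBOSetPin(fd, bo, 0);  /* unpin when finished */
    }

Note that the kernel side only requires the hardware lock for the pin case; unpinning is allowed without it.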
--- libdrm/xf86drm.c | 25 +++++- libdrm/xf86mm.h | 1 + linux-core/drm_bo.c | 169 +++++++++++++++++++++++++++------------ linux-core/drm_drv.c | 1 + linux-core/drm_objects.h | 3 +- shared-core/drm.h | 27 ++++--- 6 files changed, 165 insertions(+), 61 deletions(-) diff --git a/libdrm/xf86drm.c b/libdrm/xf86drm.c index 8cee4fbc..63242bce 100644 --- a/libdrm/xf86drm.c +++ b/libdrm/xf86drm.c @@ -2975,7 +2975,30 @@ int drmBOWaitIdle(int fd, drmBO *buf, unsigned hint) } return 0; } - + +int drmBOSetPin(int fd, drmBO *buf, int pin) +{ + struct drm_bo_set_pin_arg arg; + struct drm_bo_set_pin_req *req = &arg.d.req; + struct drm_bo_info_rep *rep = &arg.d.rep; + int ret = 0; + + memset(&arg, 0, sizeof(arg)); + req->handle = buf->handle; + req->pin = pin; + + do { + ret = ioctl(fd, DRM_IOCTL_BO_SET_PIN, &arg); + } while (ret && errno == EAGAIN); + + if (ret) + return -errno; + + drmBOCopyReply(rep, buf); + + return 0; +} + int drmBOBusy(int fd, drmBO *buf, int *busy) { if (!(buf->flags & DRM_BO_FLAG_SHAREABLE) && diff --git a/libdrm/xf86mm.h b/libdrm/xf86mm.h index d1e0b28f..d86644ca 100644 --- a/libdrm/xf86mm.h +++ b/libdrm/xf86mm.h @@ -201,6 +201,7 @@ extern int drmAddValidateItem(drmBOList *list, drmBO *buf, unsigned flags, extern int drmBOValidateList(int fd, drmBOList *list); extern int drmBOFenceList(int fd, drmBOList *list, unsigned fenceHandle); extern int drmBOWaitIdle(int fd, drmBO *buf, unsigned hint); +int drmBOSetPin(int fd, drmBO *buf, int pin); /* * Initialization functions. diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 75d89e46..53885a3e 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -80,8 +80,7 @@ void drm_bo_add_to_lru(struct drm_buffer_object * bo) DRM_ASSERT_LOCKED(&bo->dev->struct_mutex); - if (!(bo->mem.mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT)) - || bo->mem.mem_type != bo->pinned_mem_type) { + if (!bo->pinned || bo->mem.mem_type != bo->pinned_mem_type) { man = &bo->dev->bm.man[bo->mem.mem_type]; list_add_tail(&bo->lru, &man->lru); } else { @@ -733,7 +732,7 @@ static int drm_bo_mem_force_space(struct drm_device * dev, atomic_inc(&entry->usage); mutex_unlock(&dev->struct_mutex); mutex_lock(&entry->mutex); - BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT)); + BUG_ON(entry->pinned); ret = drm_bo_evict(entry, mem_type, no_wait); mutex_unlock(&entry->mutex); @@ -893,18 +892,6 @@ static int drm_bo_new_mask(struct drm_buffer_object * bo, DRM_ERROR("User buffers are not supported yet\n"); return -EINVAL; } - if (bo->type == drm_bo_type_fake && - !(new_mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))) { - DRM_ERROR("Fake buffers must be pinned.\n"); - return -EINVAL; - } - - if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) { - DRM_ERROR - ("DRM_BO_FLAG_NO_EVICT is only available to priviliged " - "processes\n"); - return -EPERM; - } new_props = new_mask & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_READ); @@ -1382,6 +1369,12 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo, return ret; } + if (bo->pinned && bo->pinned_mem_type != bo->mem.mem_type) { + DRM_ERROR("Attempt to validate pinned buffer into different memory " + "type\n"); + return -EINVAL; + } + /* * We're switching command submission mechanism, * or cannot simply rely on the hardware serializing for us. @@ -1425,37 +1418,6 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo, } } - /* - * Pinned buffers. 
- */ - - if (bo->mem.mask & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) { - bo->pinned_mem_type = bo->mem.mem_type; - mutex_lock(&dev->struct_mutex); - list_del_init(&bo->pinned_lru); - drm_bo_add_to_pinned_lru(bo); - - if (bo->pinned_node != bo->mem.mm_node) { - if (bo->pinned_node != NULL) - drm_mm_put_block(bo->pinned_node); - bo->pinned_node = bo->mem.mm_node; - } - - mutex_unlock(&dev->struct_mutex); - - } else if (bo->pinned_node != NULL) { - - mutex_lock(&dev->struct_mutex); - - if (bo->pinned_node != bo->mem.mm_node) - drm_mm_put_block(bo->pinned_node); - - list_del_init(&bo->pinned_lru); - bo->pinned_node = NULL; - mutex_unlock(&dev->struct_mutex); - - } - /* * We might need to add a TTM. */ @@ -1533,6 +1495,10 @@ static int drm_bo_handle_validate(struct drm_file *file_priv, return ret; } +/** + * Fills out the generic buffer object ioctl reply with the information for + * the BO with id of handle. + */ static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle, struct drm_bo_info_rep *rep) { @@ -1948,6 +1914,110 @@ int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file * return 0; } +/** + * Pins or unpins the given buffer object in the given memory area. + * + * Pinned buffers will not be evicted from or move within their memory area. + * Must be called with the hardware lock held for pinning. + */ +static int +drm_bo_set_pin(struct drm_device *dev, struct drm_buffer_object *bo, + int pin) +{ + int ret = 0; + + mutex_lock(&bo->mutex); + if (bo->pinned == pin) { + mutex_unlock(&bo->mutex); + return 0; + } + + if (pin) { + ret = drm_bo_wait_unfenced(bo, 0, 0); + if (ret) { + mutex_unlock(&bo->mutex); + return ret; + } + + /* Validate the buffer into its pinned location, with no pending + * fence. + */ + ret = drm_buffer_object_validate(bo, 0, 0, 0); + if (ret) { + mutex_unlock(&bo->mutex); + return ret; + } + + /* Add our buffer to the pinned list */ + bo->pinned_mem_type = bo->mem.mem_type; + mutex_lock(&dev->struct_mutex); + list_del_init(&bo->pinned_lru); + drm_bo_add_to_pinned_lru(bo); + + if (bo->pinned_node != bo->mem.mm_node) { + if (bo->pinned_node != NULL) + drm_mm_put_block(bo->pinned_node); + bo->pinned_node = bo->mem.mm_node; + } + + mutex_unlock(&dev->struct_mutex); + + } else { + mutex_lock(&dev->struct_mutex); + + /* Remove our buffer from the pinned list */ + if (bo->pinned_node != bo->mem.mm_node) + drm_mm_put_block(bo->pinned_node); + + list_del_init(&bo->pinned_lru); + bo->pinned_node = NULL; + mutex_unlock(&dev->struct_mutex); + } + bo->pinned = pin; + mutex_unlock(&bo->mutex); + return 0; +} + +int drm_bo_set_pin_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_bo_set_pin_arg *arg = data; + struct drm_bo_set_pin_req *req = &arg->d.req; + struct drm_bo_info_rep *rep = &arg->d.rep; + struct drm_buffer_object *bo; + int ret; + + if (!dev->bm.initialized) { + DRM_ERROR("Buffer object manager is not initialized.\n"); + return -EINVAL; + } + + if (req->pin < 0 || req->pin > 1) { + DRM_ERROR("Bad arguments to set_pin\n"); + return -EINVAL; + } + + if (req->pin) + LOCK_TEST_WITH_RETURN(dev, file_priv); + + mutex_lock(&dev->struct_mutex); + bo = drm_lookup_buffer_object(file_priv, req->handle, 1); + mutex_unlock(&dev->struct_mutex); + if (!bo) { + return -EINVAL; + } + + ret = drm_bo_set_pin(dev, bo, req->pin); + if (ret) { + drm_bo_usage_deref_unlocked(&bo); + return ret; + } + + drm_bo_fill_rep_arg(bo, rep); + drm_bo_usage_deref_unlocked(&bo); + + return 0; +} /** @@ -2009,11 +2079,10 @@ 
static int drm_bo_leave_list(struct drm_buffer_object * bo, mutex_unlock(&dev->struct_mutex); } - if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) { - DRM_ERROR("A DRM_BO_NO_EVICT buffer present at " + if (bo->pinned) { + DRM_ERROR("A pinned buffer was present at " "cleanup. Removing flag and evicting.\n"); - bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT; - bo->mem.mask &= ~DRM_BO_FLAG_NO_EVICT; + bo->pinned = 0; } if (bo->mem.mem_type == mem_type) diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index cc676bda..93dfcdb5 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -147,6 +147,7 @@ static struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_BO_OP, drm_bo_op_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_BO_INFO, drm_bo_info_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_BO_WAIT_IDLE, drm_bo_wait_idle_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_BO_SET_PIN, drm_bo_set_pin_ioctl, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY), }; #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index e5f2b69c..64f71651 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -362,6 +362,7 @@ struct drm_buffer_object { struct mutex mutex; /* For pinned buffers */ + int pinned; struct drm_mm_node *pinned_node; uint32_t pinned_mem_type; struct list_head pinned_lru; @@ -455,7 +456,7 @@ extern int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct d extern int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_bo_op_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); - +int drm_bo_set_pin_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); diff --git a/shared-core/drm.h b/shared-core/drm.h index db913b1f..57d1dbec 100644 --- a/shared-core/drm.h +++ b/shared-core/drm.h @@ -688,14 +688,6 @@ struct drm_fence_arg { * Can also be set in the buffer mask before validation. */ -/* - * Mask: Never evict this buffer. Not even with force. This type of buffer is only - * available to root and must be manually removed before buffer manager shutdown - * or lock. - * Flags: Acknowledge - */ -#define DRM_BO_FLAG_NO_EVICT (1ULL << 4) - /* * Mask: Require that the buffer is placed in mappable memory when validated. * If not set the buffer may or may not be in mappable memory when validated. @@ -813,6 +805,16 @@ struct drm_bo_op_req { struct drm_bo_info_req bo_req; }; +struct drm_bo_set_pin_req { + /** Buffer object ID */ + unsigned int handle; + /** + * - 0: Unpin the given buffer object. + * - 1: Pin the given buffer object. 
+ */ + unsigned int pin; +}; + /* * Reply flags */ @@ -878,6 +880,13 @@ struct drm_bo_op_arg { unsigned int pad64; }; +struct drm_bo_set_pin_arg { + union { + struct drm_bo_set_pin_req req; + struct drm_bo_info_rep rep; + } d; +}; + #define DRM_BO_MEM_LOCAL 0 #define DRM_BO_MEM_TT 1 #define DRM_BO_MEM_VRAM 2 @@ -993,7 +1002,7 @@ struct drm_mm_init_arg { #define DRM_IOCTL_BO_OP DRM_IOWR(0xd3, struct drm_bo_op_arg) #define DRM_IOCTL_BO_INFO DRM_IOWR(0xd4, struct drm_bo_reference_info_arg) #define DRM_IOCTL_BO_WAIT_IDLE DRM_IOWR(0xd5, struct drm_bo_map_wait_idle_arg) - +#define DRM_IOCTL_BO_SET_PIN DRM_IOWR(0xd6, struct drm_bo_set_pin_arg) /*@}*/ From f9c27aa50b715a7d21858f1ce9e4785120bd0c36 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Thu, 26 Jul 2007 11:17:41 -0700 Subject: [PATCH 190/437] Copy the important parts of object_validate into object_create(). This should let us allocate buffers without holding the hardware lock. While here, add DRM_DEBUG info for the drm_bo ioctls, so you can see something more specific than just the cmd value per ioctl. --- linux-core/drm_bo.c | 61 +++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 56 insertions(+), 5 deletions(-) diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 53885a3e..a2356c8a 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1565,6 +1565,7 @@ int drm_buffer_object_create(struct drm_device *dev, { struct drm_buffer_manager *bm = &dev->bm; struct drm_buffer_object *bo; + struct drm_bo_driver *driver = dev->driver->bo_driver; int ret = 0; unsigned long num_pages; @@ -1624,10 +1625,28 @@ int drm_buffer_object_create(struct drm_device *dev, if (ret) goto out_err; } - ret = drm_buffer_object_validate(bo, 0, 0, hint & DRM_BO_HINT_DONT_BLOCK); + + bo->fence_class = 0; + ret = driver->fence_type(bo, &bo->fence_type); + if (ret) { + DRM_ERROR("Driver did not support given buffer permissions\n"); + goto out_err; + } + + if (bo->type == drm_bo_type_fake) { + ret = drm_bo_check_fake(dev, &bo->mem); + if (ret) + goto out_err; + } + + ret = drm_bo_add_ttm(bo); if (ret) goto out_err; + mutex_lock(&dev->struct_mutex); + drm_bo_add_to_lru(bo); + mutex_unlock(&dev->struct_mutex); + mutex_unlock(&bo->mutex); *buf_obj = bo; return 0; @@ -1677,6 +1696,8 @@ int drm_bo_op_ioctl(struct drm_device *dev, void *data, struct drm_file *file_pr void __user *curuserarg = NULL; int ret; + DRM_DEBUG("drm_bo_op_ioctl\n"); + if (!dev->bm.initialized) { DRM_ERROR("Buffer object manager is not initialized.\n"); return -EINVAL; @@ -1749,14 +1770,15 @@ int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fil struct drm_buffer_object *entry; int ret = 0; + DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align, %d type\n", + (int)(req->size / 1024), req->page_alignment * 4, req->type); + if (!dev->bm.initialized) { DRM_ERROR("Buffer object manager is not initialized.\n"); return -EINVAL; } - - ret = drm_bo_lock_test(dev, file_priv); - if (ret) - goto out; + if (req->type == drm_bo_type_fake) + LOCK_TEST_WITH_RETURN(dev, file_priv); ret = drm_buffer_object_create(file_priv->head->dev, req->size, req->type, req->mask, @@ -1787,6 +1809,8 @@ int drm_bo_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *fi struct drm_user_object *uo; int ret = 0; + DRM_DEBUG("drm_bo_destroy_ioctl: buffer %d\n", arg->handle); + if (!dev->bm.initialized) { DRM_ERROR("Buffer object manager is not initialized.\n"); return -EINVAL; @@ -1810,6 +1834,9 @@ int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_p 
struct drm_bo_info_req *req = &arg->d.req; struct drm_bo_info_rep *rep = &arg->d.rep; int ret; + + DRM_DEBUG("drm_bo_map_ioctl: buffer %d\n", req->handle); + if (!dev->bm.initialized) { DRM_ERROR("Buffer object manager is not initialized.\n"); return -EINVAL; @@ -1827,6 +1854,9 @@ int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file { struct drm_bo_handle_arg *arg = data; int ret; + + DRM_DEBUG("drm_bo_unmap_ioctl: buffer %d\n", arg->handle); + if (!dev->bm.initialized) { DRM_ERROR("Buffer object manager is not initialized.\n"); return -EINVAL; @@ -1845,6 +1875,8 @@ int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file * struct drm_user_object *uo; int ret; + DRM_DEBUG("drm_bo_reference_ioctl: buffer %d\n", req->handle); + if (!dev->bm.initialized) { DRM_ERROR("Buffer object manager is not initialized.\n"); return -EINVAL; @@ -1867,6 +1899,8 @@ int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file struct drm_bo_handle_arg *arg = data; int ret = 0; + DRM_DEBUG("drm_bo_unreference_ioctl: buffer %d\n", arg->handle); + if (!dev->bm.initialized) { DRM_ERROR("Buffer object manager is not initialized.\n"); return -EINVAL; @@ -1883,6 +1917,8 @@ int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_ struct drm_bo_info_rep *rep = &arg->d.rep; int ret; + DRM_DEBUG("drm_bo_info_ioctl: buffer %d\n", req->handle); + if (!dev->bm.initialized) { DRM_ERROR("Buffer object manager is not initialized.\n"); return -EINVAL; @@ -1901,6 +1937,9 @@ int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file * struct drm_bo_info_req *req = &arg->d.req; struct drm_bo_info_rep *rep = &arg->d.rep; int ret; + + DRM_DEBUG("drm_bo_wait_idle_ioctl: buffer %d\n", req->handle); + if (!dev->bm.initialized) { DRM_ERROR("Buffer object manager is not initialized.\n"); return -EINVAL; @@ -1987,6 +2026,9 @@ int drm_bo_set_pin_ioctl(struct drm_device *dev, void *data, struct drm_buffer_object *bo; int ret; + DRM_DEBUG("drm_bo_set_pin_ioctl: buffer %d, pin %d\n", + req->handle, req->pin); + if (!dev->bm.initialized) { DRM_ERROR("Buffer object manager is not initialized.\n"); return -EINVAL; @@ -2395,6 +2437,9 @@ int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_ struct drm_bo_driver *driver = dev->driver->bo_driver; int ret; + DRM_DEBUG("drm_mm_init_ioctl: type %d, 0x%08llx offset, %dkb\n", + arg->mem_type, arg->p_offset * PAGE_SIZE, (int)(arg->p_size * 4)); + if (!driver) { DRM_ERROR("Buffer objects are not supported by this driver\n"); return -EINVAL; @@ -2449,6 +2494,8 @@ int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *f struct drm_bo_driver *driver = dev->driver->bo_driver; int ret; + DRM_DEBUG("drm_mm_takedown_ioctl: %d type\n", arg->mem_type); + if (!driver) { DRM_ERROR("Buffer objects are not supported by this driver\n"); return -EINVAL; @@ -2486,6 +2533,8 @@ int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_ struct drm_bo_driver *driver = dev->driver->bo_driver; int ret; + DRM_DEBUG("drm_mm_lock_ioctl: %d type\n", arg->mem_type); + if (!driver) { DRM_ERROR("Buffer objects are not supported by this driver\n"); return -EINVAL; @@ -2508,6 +2557,8 @@ int drm_mm_unlock_ioctl(struct drm_device *dev, void *data, struct drm_file *fil struct drm_bo_driver *driver = dev->driver->bo_driver; int ret; + DRM_DEBUG("drm_mm_unlock_ioctl\n"); + if (!driver) { DRM_ERROR("Buffer objects are not supported by this driver\n"); return 
-EINVAL; From 3c8ebd94e48589711f44d23e85d713a1ed980f37 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Thu, 26 Jul 2007 11:26:12 -0700 Subject: [PATCH 191/437] debug print ioctl return value as -integer rather than fffffwhatever. --- linux-core/drm_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index 93dfcdb5..816b8a20 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -648,7 +648,7 @@ int drm_ioctl(struct inode *inode, struct file *filp, err_i1: atomic_dec(&dev->ioctl_count); if (retcode) - DRM_DEBUG("ret = %x\n", retcode); + DRM_DEBUG("ret = %d\n", retcode); return retcode; } EXPORT_SYMBOL(drm_ioctl); From b89cc0346500d9875d4acebc611db8f9ee3463f7 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Thu, 26 Jul 2007 16:58:59 -0700 Subject: [PATCH 192/437] Eliminate unnecessary (and now wrong) call gto drm_sg_free. --- linux-core/xgi_pcie.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index 537e82f5..dc5a50b8 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -121,10 +121,6 @@ static int xgi_pcie_lut_init(struct xgi_info * info) void xgi_pcie_lut_cleanup(struct xgi_info * info) { - if (info->dev->sg) { - drm_sg_free(info->dev, info->dev->sg->handle); - } - if (info->lut_handle) { drm_pci_free(info->dev, info->lut_handle); info->lut_handle = NULL; From c37ed9eca57a42b98cc67ca98dbf5135f5ab7aba Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Thu, 26 Jul 2007 17:01:16 -0700 Subject: [PATCH 193/437] Eliminate use of DRM_ERR. --- linux-core/xgi_cmdlist.c | 4 ++-- linux-core/xgi_drv.c | 10 +++++----- linux-core/xgi_fb.c | 8 ++++---- linux-core/xgi_pcie.c | 10 +++++----- 4 files changed, 16 insertions(+), 16 deletions(-) diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index 490e9f39..10ee9764 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -144,7 +144,7 @@ int xgi_submit_cmdlist_ioctl(DRM_IOCTL_ARGS) sizeof(cmd_list)); if (cmd_list.type > BTYPE_CTRL) { - return DRM_ERR(EINVAL); + return -EINVAL; } xgi_submit_cmdlist(info, &cmd_list); @@ -180,7 +180,7 @@ int xgi_state_change(struct xgi_info * info, unsigned int to, DRM_INFO("Leaving graphical mode (probably X shutting down)\n"); } else { DRM_ERROR("Invalid state change.\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } return 0; diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index 2c3384b0..ec87df0a 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -173,7 +173,7 @@ int xgi_bootstrap(DRM_IOCTL_ARGS) if ((info->fb.base == 0) || (info->fb.size == 0)) { DRM_ERROR("framebuffer appears to be wrong: 0x%lx 0x%x\n", (unsigned long) info->fb.base, info->fb.size); - return DRM_ERR(EINVAL); + return -EINVAL; } @@ -221,7 +221,7 @@ int xgi_bootstrap(DRM_IOCTL_ARGS) maplist = drm_find_matching_map(dev, info->pcie_map); if (maplist == NULL) { DRM_ERROR("Could not find GART backing store map.\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } bs.gart = *info->pcie_map; @@ -303,7 +303,7 @@ int xgi_driver_load(struct drm_device *dev, unsigned long flags) struct xgi_info *info = drm_alloc(sizeof(*info), DRM_MEM_DRIVER); if (!info) - return DRM_ERR(ENOMEM); + return -ENOMEM; (void) memset(info, 0, sizeof(*info)); dev->dev_private = info; @@ -322,7 +322,7 @@ int xgi_driver_load(struct drm_device *dev, unsigned long flags) if ((info->mmio.base == 0) || (info->mmio.size == 0)) { DRM_ERROR("mmio appears to be wrong: 0x%lx 0x%x\n", (unsigned long) 
info->mmio.base, info->mmio.size); - return DRM_ERR(EINVAL); + return -EINVAL; } @@ -339,7 +339,7 @@ int xgi_driver_load(struct drm_device *dev, unsigned long flags) SLAB_HWCACHE_ALIGN, NULL, NULL); if (xgi_mem_block_cache == NULL) { - return DRM_ERR(ENOMEM); + return -ENOMEM; } diff --git a/linux-core/xgi_fb.c b/linux-core/xgi_fb.c index 3d3b2ae0..10343c13 100644 --- a/linux-core/xgi_fb.c +++ b/linux-core/xgi_fb.c @@ -45,7 +45,7 @@ int xgi_mem_heap_init(struct xgi_mem_heap *heap, unsigned int start, block = kmem_cache_alloc(xgi_mem_block_cache, GFP_KERNEL); if (!block) { - return DRM_ERR(ENOMEM); + return -ENOMEM; } block->offset = start; @@ -189,11 +189,11 @@ int xgi_mem_free(struct xgi_mem_heap * heap, unsigned long offset, if (&block->list == &heap->used_list) { DRM_ERROR("can't find block: 0x%lx to free!\n", offset); - return DRM_ERR(ENOENT); + return -ENOENT; } if (block->filp != filp) { - return DRM_ERR(EPERM); + return -EPERM; } used_block = block; @@ -265,7 +265,7 @@ int xgi_fb_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, alloc->location = XGI_MEMLOC_LOCAL; alloc->size = 0; DRM_ERROR("Video RAM allocation failed\n"); - return DRM_ERR(ENOMEM); + return -ENOMEM; } else { DRM_INFO("Video RAM allocation succeeded: 0x%p\n", (char *)block->offset); diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index dc5a50b8..4c369a2a 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -85,7 +85,7 @@ static int xgi_pcie_lut_init(struct xgi_info * info) DMA_31BIT_MASK); if (info->lut_handle == NULL) { DRM_ERROR("cannot allocate PCIE lut page!\n"); - return DRM_ERR(ENOMEM); + return -ENOMEM; } lut = info->lut_handle->vaddr; @@ -97,7 +97,7 @@ static int xgi_pcie_lut_init(struct xgi_info * info) DMA_BIDIRECTIONAL); if (dma_mapping_error(info->dev->sg->busaddr[i])) { DRM_ERROR("cannot map GART backing store for DMA!\n"); - return DRM_ERR(-(info->dev->sg->busaddr[i])); + return info->dev->sg->busaddr[i]; } lut[i] = info->dev->sg->busaddr[i]; @@ -184,7 +184,7 @@ int xgi_pcie_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, alloc->location = XGI_MEMLOC_INVALID; alloc->size = 0; DRM_ERROR("PCIE RAM allocation failed\n"); - return DRM_ERR(ENOMEM); + return -ENOMEM; } else { DRM_INFO("PCIE RAM allocation succeeded: offset = 0x%lx\n", block->offset); @@ -325,7 +325,7 @@ int xgi_test_rwinkernel_ioctl(DRM_IOCTL_ARGS) DRM_INFO("input GE HW addr is 0x%x\n", address); if (address == 0) { - return DRM_ERR(EFAULT); + return -EFAULT; } virtaddr = (u32 *)xgi_find_pcie_virt(info, address); @@ -337,7 +337,7 @@ int xgi_test_rwinkernel_ioctl(DRM_IOCTL_ARGS) *virtaddr = 0x00f00fff; DRM_INFO("modified [virtaddr] = 0x%x\n", *virtaddr); } else { - return DRM_ERR(EFAULT); + return -EFAULT; } return 0; From f01026eae69e81ae16a69a014ba3bcfb286fc7a4 Mon Sep 17 00:00:00 2001 From: Arthur Huillet Date: Fri, 27 Jul 2007 15:48:04 +0200 Subject: [PATCH 194/437] nouveau: creating notifier in PCI memory for PCIGART --- shared-core/nouveau_notifier.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/shared-core/nouveau_notifier.c b/shared-core/nouveau_notifier.c index 24a306e8..7a982ba4 100644 --- a/shared-core/nouveau_notifier.c +++ b/shared-core/nouveau_notifier.c @@ -41,10 +41,13 @@ nouveau_notifier_init_channel(struct drm_device *dev, int channel, if (dev_priv->agp_heap && dev_priv->gart_info.type != NOUVEAU_GART_SGDMA) flags = NOUVEAU_MEM_AGP | NOUVEAU_MEM_FB_ACCEPTABLE; + else if ( dev_priv->pci_heap ) + flags = NOUVEAU_MEM_PCI; else flags = NOUVEAU_MEM_FB; flags |= 
NOUVEAU_MEM_MAPPED; +DRM_DEBUG("Allocating notifier block in %d\n", flags); chan->notifier_block = nouveau_mem_alloc(dev, 0, PAGE_SIZE, flags, file_priv); if (!chan->notifier_block) @@ -102,6 +105,8 @@ nouveau_notifier_alloc(struct drm_device *dev, int channel, uint32_t handle, target = NV_DMA_TARGET_VIDMEM; } else if (chan->notifier_block->flags & NOUVEAU_MEM_AGP) { target = NV_DMA_TARGET_AGP; + } else if (chan->notifier_block->flags & NOUVEAU_MEM_PCI) { + target = NV_DMA_TARGET_PCI_NONLINEAR; } else { DRM_ERROR("Bad DMA target, flags 0x%08x!\n", chan->notifier_block->flags); From cd51f131389297f923798daef6c734ba93f4422b Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Fri, 27 Jul 2007 15:45:59 -0700 Subject: [PATCH 195/437] Convert to new ioctl interface between core DRM and device-specific module. --- linux-core/xgi_cmdlist.c | 36 ++++++++------------------- linux-core/xgi_drv.c | 53 ++++++++++++++++++---------------------- linux-core/xgi_drv.h | 46 ++++++++++++++++++++-------------- linux-core/xgi_fb.c | 42 +++++++++++-------------------- linux-core/xgi_misc.c | 14 +++++------ linux-core/xgi_pcie.c | 52 ++++++++++++++------------------------- 6 files changed, 100 insertions(+), 143 deletions(-) diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index 10ee9764..1d0ee754 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -76,9 +76,12 @@ unsigned int get_batch_command(enum xgi_batch_type type) } -static void xgi_submit_cmdlist(struct xgi_info * info, - const struct xgi_cmd_info * pCmdInfo) +int xgi_submit_cmdlist(struct drm_device * dev, void * data, + struct drm_file * filp) { + struct xgi_info *const info = dev->dev_private; + const struct xgi_cmd_info *const pCmdInfo = + (struct xgi_cmd_info *) data; const unsigned int cmd = get_batch_command(pCmdInfo->type); u32 begin[4]; @@ -130,24 +133,6 @@ static void xgi_submit_cmdlist(struct xgi_info * info, } info->cmdring.last_ptr = xgi_find_pcie_virt(info, pCmdInfo->hw_addr); -} - - -int xgi_submit_cmdlist_ioctl(DRM_IOCTL_ARGS) -{ - DRM_DEVICE; - struct xgi_cmd_info cmd_list; - struct xgi_info *info = dev->dev_private; - - DRM_COPY_FROM_USER_IOCTL(cmd_list, - (struct xgi_cmd_info __user *) data, - sizeof(cmd_list)); - - if (cmd_list.type > BTYPE_CTRL) { - return -EINVAL; - } - - xgi_submit_cmdlist(info, &cmd_list); return 0; } @@ -187,16 +172,15 @@ int xgi_state_change(struct xgi_info * info, unsigned int to, } -int xgi_state_change_ioctl(DRM_IOCTL_ARGS) +int xgi_state_change_ioctl(struct drm_device * dev, void * data, + struct drm_file * filp) { - DRM_DEVICE; - struct xgi_state_info state; + struct xgi_state_info *const state = + (struct xgi_state_info *) data; struct xgi_info *info = dev->dev_private; - DRM_COPY_FROM_USER_IOCTL(state, (struct xgi_state_info __user *) data, - sizeof(state)); - return xgi_state_change(info, state._toState, state._fromState); + return xgi_state_change(info, state->_toState, state->_fromState); } diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index ec87df0a..0b094a31 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -37,23 +37,23 @@ static struct pci_device_id pciidlist[] = { xgi_PCI_IDS }; -static int xgi_bootstrap(DRM_IOCTL_ARGS); +static int xgi_bootstrap(struct drm_device *, void *, struct drm_file *); -static drm_ioctl_desc_t xgi_ioctls[] = { - [DRM_IOCTL_NR(DRM_XGI_BOOTSTRAP)] = {xgi_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, +static struct drm_ioctl_desc xgi_ioctls[] = { + DRM_IOCTL_DEF(DRM_XGI_BOOTSTRAP, xgi_bootstrap, 
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - [DRM_IOCTL_NR(DRM_XGI_FB_ALLOC)] = {xgi_fb_alloc_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_XGI_FB_FREE)] = {xgi_fb_free_ioctl, DRM_AUTH}, + DRM_IOCTL_DEF(DRM_XGI_FB_ALLOC, xgi_fb_alloc_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_XGI_FB_FREE, xgi_fb_free_ioctl, DRM_AUTH), - [DRM_IOCTL_NR(DRM_XGI_PCIE_ALLOC)] = {xgi_pcie_alloc_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_XGI_PCIE_FREE)] = {xgi_pcie_free_ioctl, DRM_AUTH}, + DRM_IOCTL_DEF(DRM_XGI_PCIE_ALLOC, xgi_pcie_alloc_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_XGI_PCIE_FREE, xgi_pcie_free_ioctl, DRM_AUTH), - [DRM_IOCTL_NR(DRM_XGI_GE_RESET)] = {xgi_ge_reset_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_XGI_DUMP_REGISTER)] = {xgi_dump_register_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_XGI_DEBUG_INFO)] = {xgi_restore_registers_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_XGI_SUBMIT_CMDLIST)] = {xgi_submit_cmdlist_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_XGI_TEST_RWINKERNEL)] = {xgi_test_rwinkernel_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_XGI_STATE_CHANGE)] = {xgi_state_change_ioctl, DRM_AUTH|DRM_MASTER}, + DRM_IOCTL_DEF(DRM_XGI_GE_RESET, xgi_ge_reset_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_XGI_DUMP_REGISTER, xgi_dump_register_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_XGI_DEBUG_INFO, xgi_restore_registers_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_XGI_SUBMIT_CMDLIST, xgi_submit_cmdlist, DRM_AUTH), + DRM_IOCTL_DEF(DRM_XGI_TEST_RWINKERNEL, xgi_test_rwinkernel_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_XGI_STATE_CHANGE, xgi_state_change_ioctl, DRM_AUTH|DRM_MASTER), }; static const int xgi_max_ioctl = DRM_ARRAY_SIZE(xgi_ioctls); @@ -61,8 +61,9 @@ static const int xgi_max_ioctl = DRM_ARRAY_SIZE(xgi_ioctls); static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); static int xgi_driver_load(struct drm_device *dev, unsigned long flags); static int xgi_driver_unload(struct drm_device *dev); -static void xgi_driver_preclose(struct drm_device * dev, DRMFILE filp); -static void xgi_driver_lastclose(drm_device_t * dev); +static void xgi_driver_preclose(struct drm_device * dev, + struct drm_file * filp); +static void xgi_driver_lastclose(struct drm_device * dev); static irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS); @@ -139,18 +140,15 @@ MODULE_LICENSE("GPL and additional rights"); void xgi_kern_isr_bh(struct drm_device *dev); -int xgi_bootstrap(DRM_IOCTL_ARGS) +int xgi_bootstrap(struct drm_device * dev, void * data, + struct drm_file * filp) { - DRM_DEVICE; struct xgi_info *info = dev->dev_private; - struct xgi_bootstrap bs; + struct xgi_bootstrap * bs = (struct xgi_bootstrap *) data; struct drm_map_list *maplist; int err; - DRM_COPY_FROM_USER_IOCTL(bs, (struct xgi_bootstrap __user *) data, - sizeof(bs)); - if (info->mmio_map == NULL) { err = drm_addmap(dev, info->mmio.base, info->mmio.size, _DRM_REGISTERS, _DRM_KERNEL, @@ -187,7 +185,7 @@ int xgi_bootstrap(DRM_IOCTL_ARGS) } - info->pcie.size = bs.gart.size; + info->pcie.size = bs->gart.size; /* Init the resource manager */ if (!info->pcie_heap.initialized) { @@ -224,16 +222,13 @@ int xgi_bootstrap(DRM_IOCTL_ARGS) return -EINVAL; } - bs.gart = *info->pcie_map; - bs.gart.handle = (void *)(unsigned long) maplist->user_token; - DRM_COPY_TO_USER_IOCTL((struct xgi_bootstrap __user *) data, - bs, sizeof(bs)); - + bs->gart = *info->pcie_map; + bs->gart.handle = (void *)(unsigned long) maplist->user_token; return 0; } -void xgi_driver_preclose(struct drm_device * dev, DRMFILE filp) +void xgi_driver_preclose(struct drm_device * dev, struct drm_file * filp) { struct xgi_info * info = dev->dev_private; @@ -242,7 +237,7 @@ void 
xgi_driver_preclose(struct drm_device * dev, DRMFILE filp) } -void xgi_driver_lastclose(drm_device_t * dev) +void xgi_driver_lastclose(struct drm_device * dev) { struct xgi_info * info = dev->dev_private; diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 2061189a..8dec1fa1 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -52,7 +52,7 @@ struct xgi_mem_block { struct list_head list; unsigned long offset; unsigned long size; - DRMFILE filp; + struct drm_file * filp; unsigned int owner; }; @@ -111,7 +111,7 @@ extern struct kmem_cache *xgi_mem_block_cache; extern struct xgi_mem_block *xgi_mem_alloc(struct xgi_mem_heap * heap, unsigned long size, enum PcieOwner owner); extern int xgi_mem_free(struct xgi_mem_heap * heap, unsigned long offset, - DRMFILE filp); + struct drm_file * filp); extern int xgi_mem_heap_init(struct xgi_mem_heap * heap, unsigned int start, unsigned int end); extern void xgi_mem_heap_cleanup(struct xgi_mem_heap * heap); @@ -119,34 +119,44 @@ extern void xgi_mem_heap_cleanup(struct xgi_mem_heap * heap); extern int xgi_fb_heap_init(struct xgi_info * info); extern int xgi_fb_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, - DRMFILE filp); + struct drm_file * filp); extern int xgi_fb_free(struct xgi_info * info, unsigned long offset, - DRMFILE filp); + struct drm_file * filp); extern int xgi_pcie_heap_init(struct xgi_info * info); extern void xgi_pcie_lut_cleanup(struct xgi_info * info); extern int xgi_pcie_alloc(struct xgi_info * info, - struct xgi_mem_alloc * alloc, DRMFILE filp); + struct xgi_mem_alloc * alloc, struct drm_file * filp); extern int xgi_pcie_free(struct xgi_info * info, unsigned long offset, - DRMFILE filp); + struct drm_file * filp); extern void *xgi_find_pcie_virt(struct xgi_info * info, u32 address); -extern void xgi_pcie_free_all(struct xgi_info *, DRMFILE); -extern void xgi_fb_free_all(struct xgi_info *, DRMFILE); +extern void xgi_pcie_free_all(struct xgi_info *, struct drm_file *); +extern void xgi_fb_free_all(struct xgi_info *, struct drm_file *); -extern int xgi_fb_alloc_ioctl(DRM_IOCTL_ARGS); -extern int xgi_fb_free_ioctl(DRM_IOCTL_ARGS); -extern int xgi_pcie_alloc_ioctl(DRM_IOCTL_ARGS); -extern int xgi_pcie_free_ioctl(DRM_IOCTL_ARGS); -extern int xgi_ge_reset_ioctl(DRM_IOCTL_ARGS); -extern int xgi_dump_register_ioctl(DRM_IOCTL_ARGS); -extern int xgi_restore_registers_ioctl(DRM_IOCTL_ARGS); -extern int xgi_submit_cmdlist_ioctl(DRM_IOCTL_ARGS); -extern int xgi_test_rwinkernel_ioctl(DRM_IOCTL_ARGS); -extern int xgi_state_change_ioctl(DRM_IOCTL_ARGS); +extern int xgi_fb_alloc_ioctl(struct drm_device * dev, void * data, + struct drm_file * filp); +extern int xgi_fb_free_ioctl(struct drm_device * dev, void * data, + struct drm_file * filp); +extern int xgi_pcie_alloc_ioctl(struct drm_device * dev, void * data, + struct drm_file * filp); +extern int xgi_pcie_free_ioctl(struct drm_device * dev, void * data, + struct drm_file * filp); +extern int xgi_ge_reset_ioctl(struct drm_device * dev, void * data, + struct drm_file * filp); +extern int xgi_dump_register_ioctl(struct drm_device * dev, void * data, + struct drm_file * filp); +extern int xgi_restore_registers_ioctl(struct drm_device * dev, void * data, + struct drm_file * filp); +extern int xgi_submit_cmdlist(struct drm_device * dev, void * data, + struct drm_file * filp); +extern int xgi_test_rwinkernel_ioctl(struct drm_device * dev, void * data, + struct drm_file * filp); +extern int xgi_state_change_ioctl(struct drm_device * dev, void * data, + struct drm_file * 
filp); #endif diff --git a/linux-core/xgi_fb.c b/linux-core/xgi_fb.c index 10343c13..9c60a874 100644 --- a/linux-core/xgi_fb.c +++ b/linux-core/xgi_fb.c @@ -94,7 +94,7 @@ struct xgi_mem_block *xgi_mem_new_node(void) block->offset = 0; block->size = 0; block->owner = PCIE_INVALID; - block->filp = (DRMFILE) -1; + block->filp = (struct drm_file *) -1; return block; } @@ -173,7 +173,7 @@ struct xgi_mem_block *xgi_mem_alloc(struct xgi_mem_heap * heap, } int xgi_mem_free(struct xgi_mem_heap * heap, unsigned long offset, - DRMFILE filp) + struct drm_file * filp) { struct xgi_mem_block *used_block = NULL, *block; struct xgi_mem_block *prev, *next; @@ -246,7 +246,7 @@ int xgi_mem_free(struct xgi_mem_heap * heap, unsigned long offset, int xgi_fb_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, - DRMFILE filp) + struct drm_file * filp) { struct xgi_mem_block *block; @@ -282,29 +282,19 @@ int xgi_fb_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, } -int xgi_fb_alloc_ioctl(DRM_IOCTL_ARGS) +int xgi_fb_alloc_ioctl(struct drm_device * dev, void * data, + struct drm_file * filp) { - DRM_DEVICE; - struct xgi_mem_alloc alloc; + struct xgi_mem_alloc *alloc = + (struct xgi_mem_alloc *) data; struct xgi_info *info = dev->dev_private; - int err; - DRM_COPY_FROM_USER_IOCTL(alloc, (struct xgi_mem_alloc __user *) data, - sizeof(alloc)); - - err = xgi_fb_alloc(info, & alloc, filp); - if (err) { - return err; - } - - DRM_COPY_TO_USER_IOCTL((struct xgi_mem_alloc __user *) data, - alloc, sizeof(alloc)); - - return 0; + return xgi_fb_alloc(info, alloc, filp); } -int xgi_fb_free(struct xgi_info * info, unsigned long offset, DRMFILE filp) +int xgi_fb_free(struct xgi_info * info, unsigned long offset, + struct drm_file * filp) { int err = 0; @@ -320,16 +310,12 @@ int xgi_fb_free(struct xgi_info * info, unsigned long offset, DRMFILE filp) } -int xgi_fb_free_ioctl(DRM_IOCTL_ARGS) +int xgi_fb_free_ioctl(struct drm_device * dev, void * data, + struct drm_file * filp) { - DRM_DEVICE; struct xgi_info *info = dev->dev_private; - u32 offset; - DRM_COPY_FROM_USER_IOCTL(offset, (unsigned long __user *) data, - sizeof(offset)); - - return xgi_fb_free(info, offset, filp); + return xgi_fb_free(info, *(u32 *) data, filp); } @@ -342,7 +328,7 @@ int xgi_fb_heap_init(struct xgi_info * info) /** * Free all blocks associated with a particular file handle. 
*/ -void xgi_fb_free_all(struct xgi_info * info, DRMFILE filp) +void xgi_fb_free_all(struct xgi_info * info, struct drm_file * filp) { if (!info->fb_heap.initialized) { return; diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index 5e8c3da8..6c029782 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -27,9 +27,9 @@ #include "xgi_drv.h" #include "xgi_regs.h" -int xgi_ge_reset_ioctl(DRM_IOCTL_ARGS) +int xgi_ge_reset_ioctl(struct drm_device * dev, void * data, + struct drm_file * filp) { - DRM_DEVICE; struct xgi_info *info = dev->dev_private; xgi_disable_ge(info); @@ -436,25 +436,23 @@ void xgi_dump_register(struct xgi_info * info) } -int xgi_dump_register_ioctl(DRM_IOCTL_ARGS) +int xgi_dump_register_ioctl(struct drm_device * dev, void * data, + struct drm_file * filp) { - DRM_DEVICE; struct xgi_info *info = dev->dev_private; xgi_dump_register(info); - return 0; } -int xgi_restore_registers_ioctl(DRM_IOCTL_ARGS) +int xgi_restore_registers_ioctl(struct drm_device * dev, void * data, + struct drm_file * filp) { - DRM_DEVICE; struct xgi_info *info = dev->dev_private; OUT3X5B(info->mmio_map, 0x13, 0); OUT3X5B(info->mmio_map, 0x8b, 2); - return 0; } diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index 4c369a2a..b91471b8 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -33,7 +33,7 @@ static struct xgi_mem_block *xgi_pcie_cmdlist_block = NULL; static struct xgi_mem_block *xgi_pcie_scratchpad_block = NULL; static int xgi_pcie_free_locked(struct xgi_info * info, - unsigned long offset, DRMFILE filp); + unsigned long offset, struct drm_file * filp); static int xgi_pcie_lut_init(struct xgi_info * info) { @@ -148,7 +148,7 @@ int xgi_pcie_heap_init(struct xgi_info * info) int xgi_pcie_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, - DRMFILE filp) + struct drm_file * filp) { struct xgi_mem_block *block; @@ -199,32 +199,21 @@ int xgi_pcie_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, } -int xgi_pcie_alloc_ioctl(DRM_IOCTL_ARGS) +int xgi_pcie_alloc_ioctl(struct drm_device * dev, void * data, + struct drm_file * filp) { - DRM_DEVICE; - struct xgi_mem_alloc alloc; + struct xgi_mem_alloc *const alloc = + (struct xgi_mem_alloc *) data; struct xgi_info *info = dev->dev_private; - int err; - DRM_COPY_FROM_USER_IOCTL(alloc, (struct xgi_mem_alloc __user *) data, - sizeof(alloc)); - - err = xgi_pcie_alloc(info, & alloc, filp); - if (err) { - return err; - } - - DRM_COPY_TO_USER_IOCTL((struct xgi_mem_alloc __user *) data, - alloc, sizeof(alloc)); - - return 0; + return xgi_pcie_alloc(info, alloc, filp); } /** * Free all blocks associated with a particular file handle. 
*/ -void xgi_pcie_free_all(struct xgi_info * info, DRMFILE filp) +void xgi_pcie_free_all(struct xgi_info * info, struct drm_file * filp) { if (!info->pcie_heap.initialized) { return; @@ -252,8 +241,8 @@ void xgi_pcie_free_all(struct xgi_info * info, DRMFILE filp) } -int xgi_pcie_free_locked(struct xgi_info * info, - unsigned long offset, DRMFILE filp) +int xgi_pcie_free_locked(struct xgi_info * info, unsigned long offset, + struct drm_file * filp) { const bool isvertex = (xgi_pcie_vertex_block && (xgi_pcie_vertex_block->offset == offset)); @@ -266,7 +255,8 @@ int xgi_pcie_free_locked(struct xgi_info * info, } -int xgi_pcie_free(struct xgi_info * info, unsigned long offset, DRMFILE filp) +int xgi_pcie_free(struct xgi_info * info, unsigned long offset, + struct drm_file * filp) { int err; @@ -282,16 +272,12 @@ int xgi_pcie_free(struct xgi_info * info, unsigned long offset, DRMFILE filp) } -int xgi_pcie_free_ioctl(DRM_IOCTL_ARGS) +int xgi_pcie_free_ioctl(struct drm_device * dev, void * data, + struct drm_file * filp) { - DRM_DEVICE; struct xgi_info *info = dev->dev_private; - u32 offset; - DRM_COPY_FROM_USER_IOCTL(offset, (unsigned long __user *) data, - sizeof(offset)); - - return xgi_pcie_free(info, offset, filp); + return xgi_pcie_free(info, *(u32 *) data, filp); } @@ -312,15 +298,13 @@ void *xgi_find_pcie_virt(struct xgi_info * info, u32 address) /* address -- GE hw address */ -int xgi_test_rwinkernel_ioctl(DRM_IOCTL_ARGS) +int xgi_test_rwinkernel_ioctl(struct drm_device * dev, void * data, + struct drm_file * filp) { - DRM_DEVICE; struct xgi_info *info = dev->dev_private; - u32 address; + u32 address = *(u32 *) data; u32 *virtaddr = 0; - DRM_COPY_FROM_USER_IOCTL(address, (unsigned long __user *) data, - sizeof(address)); DRM_INFO("input GE HW addr is 0x%x\n", address); From 2ac80e79e424aa6577e556b2df01caea9e480852 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 30 Jul 2007 09:59:19 -0700 Subject: [PATCH 196/437] Use OUT3C5B macro instead of assuming little-endian byte order. --- linux-core/xgi_regs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/linux-core/xgi_regs.h b/linux-core/xgi_regs.h index 34268a56..b3a47f8e 100644 --- a/linux-core/xgi_regs.h +++ b/linux-core/xgi_regs.h @@ -130,7 +130,7 @@ static inline void xgi_enable_ge(struct xgi_info * info) int wait = 0; // Enable GE - DRM_WRITE16(info->mmio_map, 0x3C4, 0x9211); + OUT3C5B(info->mmio_map, 0x11, 0x92); // Save and close dynamic gating bOld3cf2a = IN3CFB(info->mmio_map, 0x2a); From 01628a430d476f5875270d7137fc083ba85cef90 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 30 Jul 2007 10:02:46 -0700 Subject: [PATCH 197/437] Use DRM_READ/DRM_WRITE macros instead of directly accessing MMIO space. 
--- linux-core/xgi_misc.c | 119 ++++++++++++++++++++++++------------------ 1 file changed, 68 insertions(+), 51 deletions(-) diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index 6c029782..c75a5841 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -27,6 +27,8 @@ #include "xgi_drv.h" #include "xgi_regs.h" +#include + int xgi_ge_reset_ioctl(struct drm_device * dev, void * data, struct drm_file * filp) { @@ -46,47 +48,43 @@ int xgi_ge_reset_ioctl(struct drm_device * dev, void * data, static unsigned int s_invalid_begin = 0; -static bool xgi_validate_signal(volatile u8 *mmio_vbase) +static bool xgi_validate_signal(struct drm_map * map) { - volatile u32 *const ge_3d_status = - (volatile u32 *)(mmio_vbase + 0x2800); - const u32 old_ge_status = ge_3d_status[0x00]; - - if (old_ge_status & 0x001c0000) { + if (DRM_READ32(map, 0x2800) & 0x001c0000) { u16 check; /* Check Read back status */ - *(mmio_vbase + 0x235c) = 0x80; - check = *((volatile u16 *)(mmio_vbase + 0x2360)); + DRM_WRITE8(map, 0x235c, 0x80); + check = DRM_READ16(map, 0x2360); if ((check & 0x3f) != ((check & 0x3f00) >> 8)) { return FALSE; } /* Check RO channel */ - *(mmio_vbase + 0x235c) = 0x83; - check = *((volatile u16 *)(mmio_vbase + 0x2360)); + DRM_WRITE8(map, 0x235c, 0x83); + check = DRM_READ16(map, 0x2360); if ((check & 0x0f) != ((check & 0xf0) >> 4)) { return FALSE; } /* Check RW channel */ - *(mmio_vbase + 0x235c) = 0x88; - check = *((volatile u16 *)(mmio_vbase + 0x2360)); + DRM_WRITE8(map, 0x235c, 0x88); + check = DRM_READ16(map, 0x2360); if ((check & 0x0f) != ((check & 0xf0) >> 4)) { return FALSE; } /* Check RO channel outstanding */ - *(mmio_vbase + 0x235c) = 0x8f; - check = *((volatile u16 *)(mmio_vbase + 0x2360)); + DRM_WRITE8(map, 0x235c, 0x8f); + check = DRM_READ16(map, 0x2360); if (0 != (check & 0x3ff)) { return FALSE; } /* Check RW channel outstanding */ - *(mmio_vbase + 0x235c) = 0x90; - check = *((volatile u16 *)(mmio_vbase + 0x2360)); + DRM_WRITE8(map, 0x235c, 0x90); + check = DRM_READ16(map, 0x2360); if (0 != (check & 0x3ff)) { return FALSE; } @@ -98,14 +96,12 @@ static bool xgi_validate_signal(volatile u8 *mmio_vbase) } -static void xgi_ge_hang_reset(volatile u8 *mmio_vbase) +static void xgi_ge_hang_reset(struct drm_map * map) { - volatile u32 *const ge_3d_status = - (volatile u32 *)(mmio_vbase + 0x2800); int time_out = 0xffff; - *(mmio_vbase + 0xb057) = 8; - while (0 != (ge_3d_status[0x00] & 0xf0000000)) { + DRM_WRITE8(map, 0xb057, 8); + while (0 != (DRM_READ32(map, 0x2800) & 0xf0000000)) { while (0 != ((--time_out) & 0xfff)) /* empty */ ; @@ -116,57 +112,53 @@ static void xgi_ge_hang_reset(volatile u8 *mmio_vbase) u8 old_36; DRM_INFO("Can not reset back 0x%x!\n", - ge_3d_status[0x00]); + DRM_READ32(map, 0x2800)); - *(mmio_vbase + 0xb057) = 0; + DRM_WRITE8(map, 0xb057, 0); /* Have to use 3x5.36 to reset. 
*/ /* Save and close dynamic gating */ - old_3ce = *(mmio_vbase + 0x3ce); - *(mmio_vbase + 0x3ce) = 0x2a; - old_3cf = *(mmio_vbase + 0x3cf); - *(mmio_vbase + 0x3cf) = old_3cf & 0xfe; + old_3ce = DRM_READ8(map, 0x3ce); + DRM_WRITE8(map, 0x3ce, 0x2a); + old_3cf = DRM_READ8(map, 0x3cf); + DRM_WRITE8(map, 0x3cf, old_3cf & 0xfe); /* Reset GE */ - old_index = *(mmio_vbase + 0x3d4); - *(mmio_vbase + 0x3d4) = 0x36; - old_36 = *(mmio_vbase + 0x3d5); - *(mmio_vbase + 0x3d5) = old_36 | 0x10; - + old_index = DRM_READ8(map, 0x3d4); + DRM_WRITE8(map, 0x3d4, 0x36); + old_36 = DRM_READ8(map, 0x3d5); + DRM_WRITE8(map, 0x3d5, old_36 | 0x10); + while (0 != ((--time_out) & 0xfff)) /* empty */ ; - *(mmio_vbase + 0x3d5) = old_36; - *(mmio_vbase + 0x3d4) = old_index; + DRM_WRITE8(map, 0x3d5, old_36); + DRM_WRITE8(map, 0x3d4, old_index); /* Restore dynamic gating */ - *(mmio_vbase + 0x3cf) = old_3cf; - *(mmio_vbase + 0x3ce) = old_3ce; + DRM_WRITE8(map, 0x3cf, old_3cf); + DRM_WRITE8(map, 0x3ce, old_3ce); break; } } - *(mmio_vbase + 0xb057) = 0; + DRM_WRITE8(map, 0xb057, 0); } bool xgi_ge_irq_handler(struct xgi_info * info) { - volatile u8 *const mmio_vbase = info->mmio_map->handle; - volatile u32 *const ge_3d_status = - (volatile u32 *)(mmio_vbase + 0x2800); - const u32 int_status = ge_3d_status[4]; + const u32 int_status = DRM_READ32(info->mmio_map, 0x2810); bool is_support_auto_reset = FALSE; /* Check GE on/off */ if (0 == (0xffffc0f0 & int_status)) { - u32 old_pcie_cmd_fetch_Addr = ge_3d_status[0x0a]; - if (0 != (0x1000 & int_status)) { /* We got GE stall interrupt. */ - ge_3d_status[0x04] = int_status | 0x04000000; + DRM_WRITE32(info->mmio_map, 0x2810, + int_status | 0x04000000); if (is_support_auto_reset) { static cycles_t last_tick; @@ -174,7 +166,7 @@ bool xgi_ge_irq_handler(struct xgi_info * info) /* OE II is busy. */ - if (!xgi_validate_signal(mmio_vbase)) { + if (!xgi_validate_signal(info->mmio_map)) { /* Nothing but skip. */ } else if (0 == continue_int_count++) { last_tick = get_cycles(); @@ -189,13 +181,14 @@ bool xgi_ge_irq_handler(struct xgi_info * info) /* GE Hung up, need reset. */ DRM_INFO("Reset GE!\n"); - xgi_ge_hang_reset(mmio_vbase); + xgi_ge_hang_reset(info->mmio_map); } } } } else if (0 != (0x1 & int_status)) { s_invalid_begin++; - ge_3d_status[0x04] = (int_status & ~0x01) | 0x04000000; + DRM_WRITE32(info->mmio_map, 0x2810, + (int_status & ~0x01) | 0x04000000); } return TRUE; @@ -456,14 +449,38 @@ int xgi_restore_registers_ioctl(struct drm_device * dev, void * data, return 0; } + +#define WHOLD_GE_STATUS 0x2800 + +/* Test everything except the "whole GE busy" bit, the "master engine busy" + * bit, and the reserved bits [26:21]. 
+ */ +#define IDLE_MASK ~((1U<<31) | (1U<<28) | (0x3f<<21)) + void xgi_waitfor_pci_idle(struct xgi_info * info) { -#define WHOLD_GE_STATUS 0x2800 -#define IDLE_MASK ~0x90200000 + unsigned int idleCount = 0; + u32 old_status = 0; + unsigned int same_count = 0; - int idleCount = 0; while (idleCount < 5) { - if (DRM_READ32(info->mmio_map, WHOLD_GE_STATUS) & IDLE_MASK) { + const u32 status = DRM_READ32(info->mmio_map, WHOLD_GE_STATUS) + & IDLE_MASK; + + if (status == old_status) { + same_count++; + + if ((same_count % 100) == 0) { + DRM_ERROR("GE status stuck at 0x%08x for %u iterations!\n", + old_status, same_count); + } + } else { + old_status = status; + same_count = 0; + } + + if (status != 0) { + msleep(1); idleCount = 0; } else { idleCount++; From 2fc697a7d270d57463eb5a16a0c65bd8e14c9893 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 30 Jul 2007 10:20:15 -0700 Subject: [PATCH 198/437] Fix GE shut-down sequence. When the GE is shut down, an empty command packet without a begin-link must be sent. After this command is sent, wait for the hardware to go idle. Finally, turn off the GE and disable MMIO. --- linux-core/xgi_cmdlist.c | 62 ++++++++++++++++++++++++++-------------- linux-core/xgi_drv.c | 8 ++++-- 2 files changed, 47 insertions(+), 23 deletions(-) diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index 1d0ee754..4bb147c4 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -29,7 +29,7 @@ #include "xgi_misc.h" #include "xgi_cmdlist.h" -static void addFlush2D(struct xgi_info * info); +static void xgi_emit_flush(struct xgi_info * info, bool link); static unsigned int get_batch_command(enum xgi_batch_type type); static void triggerHWCommandList(struct xgi_info * info); static void xgi_cmdlist_reset(struct xgi_info * info); @@ -120,7 +120,7 @@ int xgi_submit_cmdlist(struct drm_device * dev, void * data, DRM_DEBUG("info->cmdring.last_ptr != NULL\n"); if (pCmdInfo->type == BTYPE_3D) { - addFlush2D(info); + xgi_emit_flush(info, TRUE); } info->cmdring.last_ptr[1] = begin[1]; @@ -190,9 +190,18 @@ void xgi_cmdlist_reset(struct xgi_info * info) info->cmdring.ring_offset = 0; } + void xgi_cmdlist_cleanup(struct xgi_info * info) { if (info->cmdring.ring_hw_base != 0) { + /* If command lists have been issued, terminate the command + * list chain with a flush command. + */ + if (info->cmdring.last_ptr != NULL) { + xgi_emit_flush(info, FALSE); + xgi_waitfor_pci_idle(info); + } + xgi_pcie_free(info, info->cmdring.ring_gart_base, NULL); info->cmdring.ring_hw_base = 0; info->cmdring.ring_offset = 0; @@ -210,32 +219,43 @@ static void triggerHWCommandList(struct xgi_info * info) } -static void addFlush2D(struct xgi_info * info) +/** + * Emit a flush to the CRTL command stream. + * @info XGI info structure + * @link Emit (or don't emit) link information at start of flush command. + * + * This function assumes info->cmdring.ptr is non-NULL. + */ +static void xgi_emit_flush(struct xgi_info * info, bool link) { - u32 *flushBatchVirtAddr; - u32 flushBatchHWAddr; + static const u32 flush_command[8] = { + (0x10 << 24), + BEGIN_LINK_ENABLE_MASK | (0x00004), + 0x00000000, 0x00000000, + + /* Flush everything with the default 32 clock delay. + */ + 0x003fffff, 0x003fffff, 0x003fffff, 0x003fffff + }; + const unsigned int base = (link) ? 
0 : 4; + const unsigned int flush_size = (8 - base) * sizeof(u32); + u32 *batch_addr; + u32 hw_addr; /* check buf is large enough to contain a new flush batch */ - if ((info->cmdring.ring_offset + 0x20) >= info->cmdring.size) { + if ((info->cmdring.ring_offset + flush_size) >= info->cmdring.size) { info->cmdring.ring_offset = 0; } - flushBatchHWAddr = info->cmdring.ring_hw_base + info->cmdring.ring_offset; - flushBatchVirtAddr = info->cmdring.ptr + hw_addr = info->cmdring.ring_hw_base + + info->cmdring.ring_offset; + batch_addr = info->cmdring.ptr + (info->cmdring.ring_offset / 4); - /* not using memcpy for I assume the address is discrete */ - *(flushBatchVirtAddr + 0) = 0x10000000; - *(flushBatchVirtAddr + 1) = 0x80000004; /* size = 0x04 dwords */ - *(flushBatchVirtAddr + 2) = 0x00000000; - *(flushBatchVirtAddr + 3) = 0x00000000; - *(flushBatchVirtAddr + 4) = FLUSH_2D; - *(flushBatchVirtAddr + 5) = FLUSH_2D; - *(flushBatchVirtAddr + 6) = FLUSH_2D; - *(flushBatchVirtAddr + 7) = FLUSH_2D; + (void) memcpy(batch_addr, & flush_command[base], flush_size); - info->cmdring.last_ptr[1] = BEGIN_LINK_ENABLE_MASK + 0x08; - info->cmdring.last_ptr[2] = flushBatchHWAddr >> 4; + info->cmdring.last_ptr[1] = BEGIN_LINK_ENABLE_MASK | (flush_size / 4); + info->cmdring.last_ptr[2] = hw_addr >> 4; info->cmdring.last_ptr[3] = 0; wmb(); info->cmdring.last_ptr[0] = (get_batch_command(BTYPE_CTRL) << 24) @@ -243,6 +263,6 @@ static void addFlush2D(struct xgi_info * info) triggerHWCommandList(info); - info->cmdring.ring_offset += 0x20; - info->cmdring.last_ptr = flushBatchVirtAddr; + info->cmdring.ring_offset += flush_size; + info->cmdring.last_ptr = (link) ? batch_addr : NULL; } diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index 0b094a31..201062ee 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -242,6 +242,12 @@ void xgi_driver_lastclose(struct drm_device * dev) struct xgi_info * info = dev->dev_private; if (info != NULL) { + if (info->mmio_map != NULL) { + xgi_cmdlist_cleanup(info); + xgi_disable_ge(info); + xgi_disable_mmio(info); + } + /* The core DRM lastclose routine will destroy all of our * mappings for us. NULL out the pointers here so that * xgi_bootstrap can do the right thing. @@ -250,8 +256,6 @@ void xgi_driver_lastclose(struct drm_device * dev) info->mmio_map = NULL; info->fb_map = NULL; - xgi_cmdlist_cleanup(info); - if (info->fb_heap.initialized) { xgi_mem_heap_cleanup(&info->fb_heap); } From 08919d8a70558dc61c430be5ed6e4a2bed7429b2 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 30 Jul 2007 12:01:52 -0700 Subject: [PATCH 199/437] Move additional GE initialization into the kernel. This code comes directly from the X server. --- linux-core/xgi_drv.c | 52 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index 201062ee..33b3a51d 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -138,6 +138,57 @@ MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL and additional rights"); +void xgi_engine_init(struct xgi_info * info) +{ + u8 temp; + + + OUT3C5B(info->mmio_map, 0x11, 0x92); + + /* -------> copy from OT2D + * PCI Retry Control Register. + * disable PCI read retry & enable write retry in mem. (10xx xxxx)b + */ + temp = IN3X5B(info->mmio_map, 0x55); + OUT3X5B(info->mmio_map, 0x55, (temp & 0xbf) | 0x80); + + xgi_enable_ge(info); + + /* Enable linear addressing of the card. 
*/ + temp = IN3X5B(info->mmio_map, 0x21); + OUT3X5B(info->mmio_map, 0x21, temp | 0x20); + + /* Enable 32-bit internal data path */ + temp = IN3X5B(info->mmio_map, 0x2A); + OUT3X5B(info->mmio_map, 0x2A, temp | 0x40); + + /* Enable PCI burst write ,disable burst read and enable MMIO. */ + /* + * 0x3D4.39 Enable PCI burst write, disable burst read and enable MMIO. + * 7 ---- Pixel Data Format 1: big endian 0: little endian + * 6 5 4 3---- Memory Data with Big Endian Format, BE[3:0]# with Big Endian Format + * 2 ---- PCI Burst Write Enable + * 1 ---- PCI Burst Read Enable + * 0 ---- MMIO Control + */ + temp = IN3X5B(info->mmio_map, 0x39); + OUT3X5B(info->mmio_map, 0x39, (temp | 0x05) & 0xfd); + + /* enable GEIO decode */ + /* temp = IN3X5B(info->mmio_map, 0x29); + * OUT3X5B(info->mmio_map, 0x29, temp | 0x08); + */ + + /* Enable graphic engine I/O PCI retry function*/ + /* temp = IN3X5B(info->mmio_map, 0x62); + * OUT3X5B(info->mmio_map, 0x62, temp | 0x50); + */ + + /* protect all register except which protected by 3c5.0e.7 */ + /* OUT3C5B(info->mmio_map, 0x11, 0x87); */ +} + + void xgi_kern_isr_bh(struct drm_device *dev); int xgi_bootstrap(struct drm_device * dev, void * data, @@ -159,6 +210,7 @@ int xgi_bootstrap(struct drm_device * dev, void * data, } xgi_enable_mmio(info); + xgi_engine_init(info); } From 283eaa25594347267df4e6e5eedbb9d17bb3682c Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 31 Jul 2007 09:22:45 +1000 Subject: [PATCH 200/437] drm: fix fencing refcount error This extra increase was causing fence leaks on my system, due to create/user add already increasing it twice no need for a 3rd go. --- linux-core/drm_fence.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c index c4f7da15..fdb2a4db 100644 --- a/linux-core/drm_fence.c +++ b/linux-core/drm_fence.c @@ -597,7 +597,6 @@ int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file * * usage > 0. No need to lock dev->struct_mutex; */ - atomic_inc(&fence->usage); arg->handle = fence->base.hash.key; read_lock_irqsave(&fm->lock, flags); @@ -830,7 +829,7 @@ int drm_fence_buffers_ioctl(struct drm_device *dev, void *data, struct drm_file DRM_FENCE_FLAG_SHAREABLE); if (ret) return ret; - atomic_inc(&fence->usage); + arg->handle = fence->base.hash.key; read_lock_irqsave(&fm->lock, flags); From c395d27a725f170645704bfc0d27b1e935b53c83 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Wed, 25 Jul 2007 14:32:15 +1000 Subject: [PATCH 201/437] drm/fence: shut up lockdep --- linux-core/drm_fence.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c index fdb2a4db..2f16f7ef 100644 --- a/linux-core/drm_fence.c +++ b/linux-core/drm_fence.c @@ -520,9 +520,10 @@ void drm_fence_manager_init(struct drm_device * dev) struct drm_fence_class_manager *class; struct drm_fence_driver *fed = dev->driver->fence_driver; int i; + unsigned long flags; rwlock_init(&fm->lock); - write_lock(&fm->lock); + write_lock_irqsave(&fm->lock, flags); fm->initialized = 0; if (!fed) goto out_unlock; @@ -541,7 +542,7 @@ void drm_fence_manager_init(struct drm_device * dev) atomic_set(&fm->count, 0); out_unlock: - write_unlock(&fm->lock); + write_unlock_irqrestore(&fm->lock, flags); } void drm_fence_manager_takedown(struct drm_device * dev) From f83000c8b388f18f677238b9342fd6a7e262394b Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Tue, 31 Jul 2007 17:27:00 -0700 Subject: [PATCH 202/437] Refactor register dumping code. 
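The refactor below folds six nearly identical printk loops into three small helpers: dump_reg_header() prints the column header, dump_reg() walks a flat MMIO range, and dump_indexed_reg() walks an indexed VGA register bank. The indexed variant relies on the standard convention that the index port sits one byte below the data port (0x3c4/0x3c5, 0x3d4/0x3d5, 0x3ce/0x3cf): write the index, then read the data. A minimal sketch of that access pattern, using the same DRM accessors as the patch (xgi_read_indexed() is an illustrative helper, not something the patch adds):

/* Read one register from an indexed bank whose data port is at
 * data_port and whose index port is at data_port - 1. */
static u8 xgi_read_indexed(struct xgi_info *info, unsigned int data_port,
			   u8 index)
{
	DRM_WRITE8(info->mmio_map, data_port - 1, index);
	return DRM_READ8(info->mmio_map, data_port);
}

dump_indexed_reg(info, 0x3d5) is then just this read repeated for indices 0x00-0xff, printed sixteen per row.
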
--- linux-core/xgi_misc.c | 225 +++++++++++------------------------------- 1 file changed, 56 insertions(+), 169 deletions(-) diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index c75a5841..84d1d4f2 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -254,178 +254,65 @@ bool xgi_dvi_irq_handler(struct xgi_info * info) } +static void dump_reg_header(unsigned regbase) +{ + printk("\n=====xgi_dump_register========0x%x===============\n", + regbase); + printk(" 0 1 2 3 4 5 6 7 8 9 a b c d e f\n"); +} + + +static void dump_indexed_reg(struct xgi_info * info, unsigned regbase) +{ + unsigned i, j; + u8 temp; + + + dump_reg_header(regbase); + for (i = 0; i < 0x10; i++) { + printk("%1x ", i); + + for (j = 0; j < 0x10; j++) { + DRM_WRITE8(info->mmio_map, regbase - 1, + (i * 0x10) + j); + temp = DRM_READ8(info->mmio_map, regbase); + printk("%3x", temp); + } + printk("\n"); + } +} + + +static void dump_reg(struct xgi_info * info, unsigned regbase, unsigned range) +{ + unsigned i, j; + + + dump_reg_header(regbase); + for (i = 0; i < range; i++) { + printk("%1x ", i); + + for (j = 0; j < 0x10; j++) { + u8 temp = DRM_READ8(info->mmio_map, + regbase + (i * 0x10) + j); + printk("%3x", temp); + } + printk("\n"); + } +} + + void xgi_dump_register(struct xgi_info * info) { - int i, j; - unsigned char temp; + dump_indexed_reg(info, 0x3c5); + dump_indexed_reg(info, 0x3d5); + dump_indexed_reg(info, 0x3cf); - // 0x3C5 - printk("\r\n=====xgi_dump_register========0x%x===============\r\n", - 0x3C5); - - for (i = 0; i < 0x10; i++) { - if (i == 0) { - printk("%5x", i); - } else { - printk("%3x", i); - } - } - printk("\r\n"); - - for (i = 0; i < 0x10; i++) { - printk("%1x ", i); - - for (j = 0; j < 0x10; j++) { - temp = IN3C5B(info->mmio_map, i * 0x10 + j); - printk("%3x", temp); - } - printk("\r\n"); - } - - // 0x3D5 - printk("\r\n====xgi_dump_register=========0x%x===============\r\n", - 0x3D5); - for (i = 0; i < 0x10; i++) { - if (i == 0) { - printk("%5x", i); - } else { - printk("%3x", i); - } - } - printk("\r\n"); - - for (i = 0; i < 0x10; i++) { - printk("%1x ", i); - - for (j = 0; j < 0x10; j++) { - temp = IN3X5B(info->mmio_map, i * 0x10 + j); - printk("%3x", temp); - } - printk("\r\n"); - } - - // 0x3CF - printk("\r\n=========xgi_dump_register====0x%x===============\r\n", - 0x3CF); - for (i = 0; i < 0x10; i++) { - if (i == 0) { - printk("%5x", i); - } else { - printk("%3x", i); - } - } - printk("\r\n"); - - for (i = 0; i < 0x10; i++) { - printk("%1x ", i); - - for (j = 0; j < 0x10; j++) { - temp = IN3CFB(info->mmio_map, i * 0x10 + j); - printk("%3x", temp); - } - printk("\r\n"); - } - - printk("\r\n=====xgi_dump_register======0x%x===============\r\n", - 0xB000); - for (i = 0; i < 0x10; i++) { - if (i == 0) { - printk("%5x", i); - } else { - printk("%3x", i); - } - } - printk("\r\n"); - - for (i = 0; i < 0x5; i++) { - printk("%1x ", i); - - for (j = 0; j < 0x10; j++) { - temp = DRM_READ8(info->mmio_map, 0xB000 + i * 0x10 + j); - printk("%3x", temp); - } - printk("\r\n"); - } - - printk("\r\n==================0x%x===============\r\n", 0x2200); - for (i = 0; i < 0x10; i++) { - if (i == 0) { - printk("%5x", i); - } else { - printk("%3x", i); - } - } - printk("\r\n"); - - for (i = 0; i < 0xB; i++) { - printk("%1x ", i); - - for (j = 0; j < 0x10; j++) { - temp = DRM_READ8(info->mmio_map, 0x2200 + i * 0x10 + j); - printk("%3x", temp); - } - printk("\r\n"); - } - - printk("\r\n==================0x%x===============\r\n", 0x2300); - for (i = 0; i < 0x10; i++) { - if (i == 0) { - printk("%5x", i); - 
} else { - printk("%3x", i); - } - } - printk("\r\n"); - - for (i = 0; i < 0x7; i++) { - printk("%1x ", i); - - for (j = 0; j < 0x10; j++) { - temp = DRM_READ8(info->mmio_map, 0x2300 + i * 0x10 + j); - printk("%3x", temp); - } - printk("\r\n"); - } - - printk("\r\n==================0x%x===============\r\n", 0x2400); - for (i = 0; i < 0x10; i++) { - if (i == 0) { - printk("%5x", i); - } else { - printk("%3x", i); - } - } - printk("\r\n"); - - for (i = 0; i < 0x10; i++) { - printk("%1x ", i); - - for (j = 0; j < 0x10; j++) { - temp = DRM_READ8(info->mmio_map, 0x2400 + i * 0x10 + j); - printk("%3x", temp); - } - printk("\r\n"); - } - - printk("\r\n==================0x%x===============\r\n", 0x2800); - for (i = 0; i < 0x10; i++) { - if (i == 0) { - printk("%5x", i); - } else { - printk("%3x", i); - } - } - printk("\r\n"); - - for (i = 0; i < 0x10; i++) { - printk("%1x ", i); - - for (j = 0; j < 0x10; j++) { - temp = DRM_READ8(info->mmio_map, 0x2800 + i * 0x10 + j); - printk("%3x", temp); - } - printk("\r\n"); - } + dump_reg(info, 0xB000, 0x05); + dump_reg(info, 0x2200, 0x0B); + dump_reg(info, 0x2300, 0x07); + dump_reg(info, 0x2400, 0x10); + dump_reg(info, 0x2800, 0x10); } From 7602e4f8a67d777437502672b4f74d9b990535ce Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Thu, 2 Aug 2007 19:13:45 +1000 Subject: [PATCH 203/437] drm: add unlocked ioctl code path - not used yet --- linux-core/drmP.h | 2 ++ linux-core/drm_drv.c | 8 +++++++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/linux-core/drmP.h b/linux-core/drmP.h index 2b7e0a44..a61efcff 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -911,6 +911,8 @@ extern void drm_exit(struct drm_driver *driver); extern void drm_cleanup_pci(struct pci_dev *pdev); extern int drm_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); +extern long drm_unlocked_ioctl(struct file *filp, + unsigned int cmd, unsigned long arg); extern long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index cc676bda..bb15987e 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -573,6 +573,12 @@ static int drm_version(struct drm_device *dev, void *data, */ int drm_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) +{ + return drm_unlocked_ioctl(filp, cmd, arg); +} +EXPORT_SYMBOL(drm_ioctl); + +long drm_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct drm_file *file_priv = filp->private_data; struct drm_device *dev = file_priv->head->dev; @@ -650,7 +656,7 @@ err_i1: DRM_DEBUG("ret = %x\n", retcode); return retcode; } -EXPORT_SYMBOL(drm_ioctl); +EXPORT_SYMBOL(drm_unlocked_ioctl); drm_local_map_t *drm_getsarea(struct drm_device *dev) { From 405c48b857a967c1174b27a5db975668e1d6a9f8 Mon Sep 17 00:00:00 2001 From: Patrice Mandin Date: Thu, 2 Aug 2007 20:06:37 +0200 Subject: [PATCH 204/437] Add libdrm source dir, to build tests from a different build dir --- tests/Makefile.am | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/Makefile.am b/tests/Makefile.am index 3b97fb79..38a07a35 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -1,5 +1,6 @@ AM_CFLAGS = \ - -I $(top_srcdir)/shared-core + -I $(top_srcdir)/shared-core \ + -I $(top_srcdir)/libdrm noinst_PROGRAMS = \ dristat \ @@ -20,4 +21,5 @@ TESTS = openclose \ updatedraw EXTRA_PROGRAMS = $(TESTS) -CLEANFILES = $(EXTRA_PROGRAMS) $(EXTRA_LTLIBRARIES) \ No newline at end of file +CLEANFILES = 
$(EXTRA_PROGRAMS) $(EXTRA_LTLIBRARIES) + From cf4f1a85af69c2c2e5ba9c822d30863f16ce6821 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Thu, 2 Aug 2007 13:51:55 -0700 Subject: [PATCH 205/437] Add a couple of doxygen comments from reading the code. --- linux-core/drm_bo.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index a2356c8a..4ce5f480 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -705,6 +705,10 @@ static int drm_bo_evict(struct drm_buffer_object * bo, unsigned mem_type, return ret; } +/** + * Repeatedly evict memory from the LRU for @mem_type until we create enough + * space, or we've evicted everything and there isn't enough space. + */ static int drm_bo_mem_force_space(struct drm_device * dev, struct drm_bo_mem_reg * mem, uint32_t mem_type, int no_wait) @@ -791,6 +795,14 @@ static int drm_bo_mt_compatible(struct drm_mem_type_manager * man, return 1; } +/** + * Creates space for memory region @mem according to its type. + * + * This function first searches for free space in compatible memory types in + * the priority order defined by the driver. If free space isn't found, then + * drm_bo_mem_force_space is attempted in priority order to evict and find + * space. + */ int drm_bo_mem_space(struct drm_buffer_object * bo, struct drm_bo_mem_reg * mem, int no_wait) { @@ -2406,8 +2418,7 @@ int drm_bo_driver_init(struct drm_device * dev) * Initialize the system memory buffer type. * Other types need to be driver / IOCTL initialized. */ - - ret = drm_bo_init_mm(dev, 0, 0, 0); + ret = drm_bo_init_mm(dev, DRM_BO_MEM_LOCAL, 0, 0); if (ret) goto out_unlock; From 3a0bc518e35c62bb9c64c9105f836584d949653f Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Thu, 2 Aug 2007 14:08:04 -0700 Subject: [PATCH 206/437] Remove the pinned buffer from the LRU when pinning. Also, be a little safer with setting the pinned flag within the struct lock. I'm not 100% sure if this is required, but it seems like it might be. --- linux-core/Makefile | 1 + linux-core/drm_bo.c | 12 ++++++++---- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/linux-core/Makefile b/linux-core/Makefile index 1758777c..46c821a4 100644 --- a/linux-core/Makefile +++ b/linux-core/Makefile @@ -268,6 +268,7 @@ PAGE_AGP := $(shell cat $(LINUXDIR)/include/asm/agp.h 2>/dev/null | \ ifneq ($(PAGE_AGP),0) EXTRA_CFLAGS += -DHAVE_PAGE_AGP endif +EXTRA_CFLAGS += -g -O0 # Start with all modules turned off. CONFIG_DRM_GAMMA := n diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 4ce5f480..53fb5afc 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1990,8 +1990,8 @@ drm_bo_set_pin(struct drm_device *dev, struct drm_buffer_object *bo, return ret; } - /* Validate the buffer into its pinned location, with no pending - * fence. + /* Validate the buffer into its pinned location, with no + * pending fence. 
*/ ret = drm_buffer_object_validate(bo, 0, 0, 0); if (ret) { @@ -1999,9 +1999,12 @@ drm_bo_set_pin(struct drm_device *dev, struct drm_buffer_object *bo, return ret; } - /* Add our buffer to the pinned list */ + /* Pull the buffer off of the LRU and add it to the pinned + * list + */ bo->pinned_mem_type = bo->mem.mem_type; mutex_lock(&dev->struct_mutex); + list_del_init(&bo->lru); list_del_init(&bo->pinned_lru); drm_bo_add_to_pinned_lru(bo); @@ -2011,6 +2014,7 @@ drm_bo_set_pin(struct drm_device *dev, struct drm_buffer_object *bo, bo->pinned_node = bo->mem.mm_node; } + bo->pinned = pin; mutex_unlock(&dev->struct_mutex); } else { @@ -2022,9 +2026,9 @@ drm_bo_set_pin(struct drm_device *dev, struct drm_buffer_object *bo, list_del_init(&bo->pinned_lru); bo->pinned_node = NULL; + bo->pinned = pin; mutex_unlock(&dev->struct_mutex); } - bo->pinned = pin; mutex_unlock(&bo->mutex); return 0; } From 2453ba19b6f9956ea5d412a66d5d33c8a8b301b2 Mon Sep 17 00:00:00 2001 From: Patrice Mandin Date: Fri, 3 Aug 2007 23:06:39 +0200 Subject: [PATCH 207/437] nouveau:nv10: fill and use load,save graph context functions --- shared-core/nv10_graph.c | 55 +++++++++++++++++++--------------------- 1 file changed, 26 insertions(+), 29 deletions(-) diff --git a/shared-core/nv10_graph.c b/shared-core/nv10_graph.c index 930fcbdf..ce1cbfa7 100644 --- a/shared-core/nv10_graph.c +++ b/shared-core/nv10_graph.c @@ -544,24 +544,42 @@ static int nv10_graph_ctx_regs_find_offset(struct drm_device *dev, int reg) return -1; } -static void restore_ctx_regs(struct drm_device *dev, int channel) +int nv10_graph_load_context(struct drm_device *dev, int channel) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo *fifo = dev_priv->fifos[channel]; int i, j; + for (i = 0; i < sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0]); i++) NV_WRITE(nv10_graph_ctx_regs[i], fifo->pgraph_ctx[i]); if (dev_priv->chipset>=0x17) { for (j = 0; j < sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0]); i++,j++) NV_WRITE(nv17_graph_ctx_regs[j], fifo->pgraph_ctx[i]); } - nouveau_wait_for_idle(dev); + + return 0; +} + +int nv10_graph_save_context(struct drm_device *dev, int channel) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_fifo *fifo = dev_priv->fifos[channel]; + int i, j; + + for (i = 0; i < sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0]); i++) + fifo->pgraph_ctx[i] = NV_READ(nv10_graph_ctx_regs[i]); + if (dev_priv->chipset>=0x17) { + for (j = 0; j < sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0]); i++,j++) + fifo->pgraph_ctx[i] = NV_READ(nv17_graph_ctx_regs[j]); + } + + return 0; } void nouveau_nv10_context_switch(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - int channel, channel_old, i, j; + int channel, channel_old; channel=NV_READ(NV03_PFIFO_CACHE1_PUSH1)&(nouveau_fifo_number(dev)-1); channel_old = (NV_READ(NV10_PGRAPH_CTX_USER) >> 24) & (nouveau_fifo_number(dev)-1); @@ -574,14 +592,7 @@ void nouveau_nv10_context_switch(struct drm_device *dev) NV_WRITE(NV_PFIFO_CACH1_PUL1, 0x00000000); NV_WRITE(NV_PFIFO_CACHES, 0x00000000); #endif - - // save PGRAPH context - for (i = 0; i < sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0]); i++) - dev_priv->fifos[channel_old]->pgraph_ctx[i] = NV_READ(nv10_graph_ctx_regs[i]); - if (dev_priv->chipset>=0x17) { - for (j = 0; j < sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0]); i++,j++) - dev_priv->fifos[channel_old]->pgraph_ctx[i] = NV_READ(nv17_graph_ctx_regs[j]); - } 
+ nv10_graph_save_context(dev, channel_old); nouveau_wait_for_idle(dev); @@ -589,15 +600,12 @@ void nouveau_nv10_context_switch(struct drm_device *dev) NV_WRITE(NV10_PGRAPH_CTX_USER, (NV_READ(NV10_PGRAPH_CTX_USER) & 0xffffff) | (0x1f << 24)); nouveau_wait_for_idle(dev); - // restore PGRAPH context -#if 1 - restore_ctx_regs(dev, channel); -#endif + + nv10_graph_load_context(dev, channel); NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10010100); NV_WRITE(NV10_PGRAPH_CTX_USER, channel << 24); NV_WRITE(NV10_PGRAPH_FFINTFC_ST2, NV_READ(NV10_PGRAPH_FFINTFC_ST2)&0xCFFFFFFF); - #if 0 NV_WRITE(NV_PFIFO_CACH1_PUL0, 0x00000001); NV_WRITE(NV_PFIFO_CACH1_PUL1, 0x00000001); @@ -611,6 +619,7 @@ void nouveau_nv10_context_switch(struct drm_device *dev) if (offset > 0) \ fifo->pgraph_ctx[offset] = val; \ } while (0) + int nv10_graph_create_context(struct drm_device *dev, int channel) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo *fifo = dev_priv->fifos[channel]; @@ -654,7 +663,7 @@ int nv10_graph_create_context(struct drm_device *dev, int channel) { /* for the first channel init the regs */ if (dev_priv->fifo_alloc_count == 0) - restore_ctx_regs(dev, channel); + nv10_graph_load_context(dev, channel); //XXX should be saved/restored for each fifo @@ -667,18 +676,6 @@ void nv10_graph_destroy_context(struct drm_device *dev, int channel) { } -int nv10_graph_load_context(struct drm_device *dev, int channel) -{ - DRM_ERROR("stub!\n"); - return 0; -} - -int nv10_graph_save_context(struct drm_device *dev, int channel) -{ - DRM_ERROR("stub!\n"); - return 0; -} - int nv10_graph_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; int i; From beaa0c9a28b30a6ba3292184d04875b6a597e433 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 6 Aug 2007 03:40:43 +1000 Subject: [PATCH 208/437] nouveau: Pass channel struct around instead of channel id. 
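The series below replaces the (struct drm_device *, int channel) calling convention with a single struct nouveau_channel * that carries back-pointers to its device and its own id, so engine hooks and helpers no longer have to index dev_priv->fifos[] themselves. A sketch of the shape this gives callees (example_load_context() is illustrative only; the real hook signatures are in the nouveau_drv.h hunks that follow):

static int example_load_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;

	/* The device and the channel id both come from the one
	 * argument; no dev_priv->fifos[channel] lookup in the callee. */
	DRM_DEBUG("loading context for channel %d of %d\n",
		  chan->id, nouveau_fifo_number(dev));
	return 0;
}
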
--- linux-core/nouveau_sgdma.c | 2 +- shared-core/nouveau_drv.h | 391 ++++++++++++++++++--------------- shared-core/nouveau_fifo.c | 104 +++++---- shared-core/nouveau_notifier.c | 42 ++-- shared-core/nouveau_object.c | 150 ++++++------- shared-core/nouveau_state.c | 7 +- shared-core/nv04_fifo.c | 29 ++- shared-core/nv04_graph.c | 15 +- shared-core/nv10_fifo.c | 27 ++- shared-core/nv10_graph.c | 49 +++-- shared-core/nv20_graph.c | 68 +++--- shared-core/nv30_graph.c | 35 ++- shared-core/nv40_fifo.c | 24 +- shared-core/nv40_graph.c | 26 +-- shared-core/nv50_fifo.c | 41 ++-- shared-core/nv50_graph.c | 27 ++- 16 files changed, 516 insertions(+), 521 deletions(-) diff --git a/linux-core/nouveau_sgdma.c b/linux-core/nouveau_sgdma.c index 0ddac952..6393a469 100644 --- a/linux-core/nouveau_sgdma.c +++ b/linux-core/nouveau_sgdma.c @@ -211,7 +211,7 @@ nouveau_sgdma_init(struct drm_device *dev) obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8; } - if ((ret = nouveau_gpuobj_new(dev, -1, obj_size, 16, + if ((ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16, NVOBJ_FLAG_ALLOW_NO_REFS | NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, &gpuobj))) { diff --git a/shared-core/nouveau_drv.h b/shared-core/nouveau_drv.h index dd323a0b..8ec91898 100644 --- a/shared-core/nouveau_drv.h +++ b/shared-core/nouveau_drv.h @@ -92,8 +92,11 @@ struct nouveau_gpuobj_ref { int handle; }; -struct nouveau_fifo +struct nouveau_channel { + struct drm_device *dev; + int id; + /* owner of this fifo */ struct drm_file *file_priv; /* mapping of the fifo itself */ @@ -136,57 +139,64 @@ struct nouveau_config { } cmdbuf; }; -struct nouveau_engine_func { - struct { - void *priv; +struct nouveau_instmem_engine { + void *priv; - int (*init)(struct drm_device *dev); - void (*takedown)(struct drm_device *dev); + int (*init)(struct drm_device *dev); + void (*takedown)(struct drm_device *dev); - int (*populate)(struct drm_device *, struct nouveau_gpuobj *, - uint32_t *size); - void (*clear)(struct drm_device *, struct nouveau_gpuobj *); - int (*bind)(struct drm_device *, struct nouveau_gpuobj *); - int (*unbind)(struct drm_device *, struct nouveau_gpuobj *); - } instmem; + int (*populate)(struct drm_device *, struct nouveau_gpuobj *, + uint32_t *size); + void (*clear)(struct drm_device *, struct nouveau_gpuobj *); + int (*bind)(struct drm_device *, struct nouveau_gpuobj *); + int (*unbind)(struct drm_device *, struct nouveau_gpuobj *); +}; - struct { - int (*init)(struct drm_device *dev); - void (*takedown)(struct drm_device *dev); - } mc; +struct nouveau_mc_engine { + int (*init)(struct drm_device *dev); + void (*takedown)(struct drm_device *dev); +}; - struct { - int (*init)(struct drm_device *dev); - uint64_t (*read)(struct drm_device *dev); - void (*takedown)(struct drm_device *dev); - } timer; +struct nouveau_timer_engine { + int (*init)(struct drm_device *dev); + void (*takedown)(struct drm_device *dev); + uint64_t (*read)(struct drm_device *dev); +}; - struct { - int (*init)(struct drm_device *dev); - void (*takedown)(struct drm_device *dev); - } fb; +struct nouveau_fb_engine { + int (*init)(struct drm_device *dev); + void (*takedown)(struct drm_device *dev); +}; - struct { - int (*init)(struct drm_device *); - void (*takedown)(struct drm_device *); +struct nouveau_fifo_engine { + void *priv; - int (*create_context)(struct drm_device *, int channel); - void (*destroy_context)(struct drm_device *, int channel); - int (*load_context)(struct drm_device *, int channel); - int (*save_context)(struct drm_device *, int channel); - } graph; + 
int (*init)(struct drm_device *); + void (*takedown)(struct drm_device *); - struct { - void *priv; + int (*create_context)(struct nouveau_channel *); + void (*destroy_context)(struct nouveau_channel *); + int (*load_context)(struct nouveau_channel *); + int (*save_context)(struct nouveau_channel *); +}; - int (*init)(struct drm_device *); - void (*takedown)(struct drm_device *); +struct nouveau_pgraph_engine { + int (*init)(struct drm_device *); + void (*takedown)(struct drm_device *); - int (*create_context)(struct drm_device *, int channel); - void (*destroy_context)(struct drm_device *, int channel); - int (*load_context)(struct drm_device *, int channel); - int (*save_context)(struct drm_device *, int channel); - } fifo; + int (*create_context)(struct nouveau_channel *); + void (*destroy_context)(struct nouveau_channel *); + int (*load_context)(struct nouveau_channel *); + int (*save_context)(struct nouveau_channel *); +}; + +struct nouveau_engine { + struct nouveau_instmem_engine instmem; + struct nouveau_mc_engine mc; + struct nouveau_timer_engine timer; + struct nouveau_fb_engine fb; + struct nouveau_pgraph_engine graph; + struct nouveau_fifo_engine fifo; }; struct drm_nouveau_private { @@ -207,9 +217,9 @@ struct drm_nouveau_private { drm_local_map_t *ramin; /* NV40 onwards */ int fifo_alloc_count; - struct nouveau_fifo *fifos[NV_MAX_FIFO_NUMBER]; + struct nouveau_channel *fifos[NV_MAX_FIFO_NUMBER]; - struct nouveau_engine_func Engine; + struct nouveau_engine Engine; /* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */ struct nouveau_gpuobj *ramht; @@ -262,93 +272,108 @@ struct drm_nouveau_private { struct nouveau_gpuobj *gpuobj_all; }; +#define NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(id,cl,ch) do { \ + struct drm_nouveau_private *nv = dev->dev_private; \ + if (!nouveau_fifo_owner(dev, (cl), (id))) { \ + DRM_ERROR("pid %d doesn't own channel %d\n", \ + DRM_CURRENTPID, (id)); \ + return -EPERM; \ + } \ + (ch) = nv->fifos[(id)]; \ +} while(0) + /* nouveau_state.c */ -extern void nouveau_preclose(struct drm_device * dev, - struct drm_file *file_priv); -extern int nouveau_load(struct drm_device *dev, unsigned long flags); -extern int nouveau_firstopen(struct drm_device *dev); -extern void nouveau_lastclose(struct drm_device *dev); -extern int nouveau_unload(struct drm_device *dev); -extern int nouveau_ioctl_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv); -extern int nouveau_ioctl_setparam(struct drm_device *dev, void *data, struct drm_file *file_priv); -extern void nouveau_wait_for_idle(struct drm_device *dev); -extern int nouveau_ioctl_card_init(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern void nouveau_preclose(struct drm_device *dev, struct drm_file *); +extern int nouveau_load(struct drm_device *, unsigned long flags); +extern int nouveau_firstopen(struct drm_device *); +extern void nouveau_lastclose(struct drm_device *); +extern int nouveau_unload(struct drm_device *); +extern int nouveau_ioctl_getparam(struct drm_device *, void *data, + struct drm_file *); +extern int nouveau_ioctl_setparam(struct drm_device *, void *data, + struct drm_file *); +extern void nouveau_wait_for_idle(struct drm_device *); +extern int nouveau_ioctl_card_init(struct drm_device *, void *data, + struct drm_file *); /* nouveau_mem.c */ -extern int nouveau_mem_init_heap(struct mem_block **, - uint64_t start, uint64_t size); +extern int nouveau_mem_init_heap(struct mem_block **, uint64_t start, + uint64_t size); extern struct mem_block 
*nouveau_mem_alloc_block(struct mem_block *, uint64_t size, int align2, - struct drm_file *file_priv); -extern void nouveau_mem_takedown(struct mem_block **heap); -extern void nouveau_mem_free_block(struct mem_block *); -extern uint64_t nouveau_mem_fb_amount(struct drm_device *dev); -extern void nouveau_mem_release(struct drm_file *file_priv, - struct mem_block *heap); -extern int nouveau_ioctl_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv); -extern int nouveau_ioctl_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv); -extern struct mem_block* nouveau_mem_alloc(struct drm_device *dev, + struct drm_file *); +extern void nouveau_mem_takedown(struct mem_block **heap); +extern void nouveau_mem_free_block(struct mem_block *); +extern uint64_t nouveau_mem_fb_amount(struct drm_device *); +extern void nouveau_mem_release(struct drm_file *, struct mem_block *heap); +extern int nouveau_ioctl_mem_alloc(struct drm_device *, void *data, + struct drm_file *); +extern int nouveau_ioctl_mem_free(struct drm_device *, void *data, + struct drm_file *); +extern struct mem_block* nouveau_mem_alloc(struct drm_device *, int alignment, uint64_t size, - int flags, - struct drm_file *file_priv); -extern void nouveau_mem_free(struct drm_device* dev, struct mem_block*); -extern int nouveau_mem_init(struct drm_device *dev); -extern void nouveau_mem_close(struct drm_device *dev); + int flags, struct drm_file *); +extern void nouveau_mem_free(struct drm_device *dev, struct mem_block*); +extern int nouveau_mem_init(struct drm_device *); +extern void nouveau_mem_close(struct drm_device *); /* nouveau_notifier.c */ -extern int nouveau_notifier_init_channel(struct drm_device *, int channel, - struct drm_file *file_priv); -extern void nouveau_notifier_takedown_channel(struct drm_device *, int channel); -extern int nouveau_notifier_alloc(struct drm_device *, int channel, - uint32_t handle, int cout, uint32_t *offset); -extern int nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int nouveau_notifier_init_channel(struct nouveau_channel *); +extern void nouveau_notifier_takedown_channel(struct nouveau_channel *); +extern int nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle, + int cout, uint32_t *offset); +extern int nouveau_ioctl_notifier_alloc(struct drm_device *, void *data, + struct drm_file *); /* nouveau_fifo.c */ -extern int nouveau_fifo_init(struct drm_device *dev); -extern int nouveau_fifo_number(struct drm_device *dev); -extern int nouveau_fifo_ctx_size(struct drm_device *dev); -extern void nouveau_fifo_cleanup(struct drm_device *dev, - struct drm_file *file_priv); -extern int nouveau_fifo_owner(struct drm_device *dev, - struct drm_file *file_priv, int channel); -extern void nouveau_fifo_free(struct drm_device *dev, int channel); +extern int nouveau_fifo_init(struct drm_device *); +extern int nouveau_fifo_number(struct drm_device *); +extern int nouveau_fifo_ctx_size(struct drm_device *); +extern void nouveau_fifo_cleanup(struct drm_device *, struct drm_file *); +extern int nouveau_fifo_owner(struct drm_device *, struct drm_file *, + int channel); +extern void nouveau_fifo_free(struct nouveau_channel *); /* nouveau_object.c */ -extern void nouveau_gpuobj_takedown(struct drm_device *dev); -extern int nouveau_gpuobj_channel_init(struct drm_device *, int channel, +extern void nouveau_gpuobj_takedown(struct drm_device *); +extern int nouveau_gpuobj_channel_init(struct nouveau_channel *, uint32_t vram_h, 
uint32_t tt_h); -extern void nouveau_gpuobj_channel_takedown(struct drm_device *, int channel); -extern int nouveau_gpuobj_new(struct drm_device *, int channel, int size, int align, - uint32_t flags, struct nouveau_gpuobj **); +extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *); +extern int nouveau_gpuobj_new(struct drm_device *, struct nouveau_channel *, + int size, int align, uint32_t flags, + struct nouveau_gpuobj **); extern int nouveau_gpuobj_del(struct drm_device *, struct nouveau_gpuobj **); -extern int nouveau_gpuobj_ref_add(struct drm_device *, int channel, uint32_t handle, - struct nouveau_gpuobj *, +extern int nouveau_gpuobj_ref_add(struct drm_device *, struct nouveau_channel *, + uint32_t handle, struct nouveau_gpuobj *, struct nouveau_gpuobj_ref **); -extern int nouveau_gpuobj_ref_del(struct drm_device *, struct nouveau_gpuobj_ref **); -extern int nouveau_gpuobj_new_ref(struct drm_device *, int chan_obj, int chan_ref, +extern int nouveau_gpuobj_ref_del(struct drm_device *, + struct nouveau_gpuobj_ref **); +extern int nouveau_gpuobj_new_ref(struct drm_device *, + struct nouveau_channel *alloc_chan, + struct nouveau_channel *ref_chan, uint32_t handle, int size, int align, uint32_t flags, struct nouveau_gpuobj_ref **); extern int nouveau_gpuobj_new_fake(struct drm_device *, uint32_t offset, uint32_t size, uint32_t flags, - struct nouveau_gpuobj**, + struct nouveau_gpuobj **, struct nouveau_gpuobj_ref**); -extern int nouveau_gpuobj_dma_new(struct drm_device *, int channel, int class, - uint64_t offset, uint64_t size, - int access, int target, - struct nouveau_gpuobj **); -extern int nouveau_gpuobj_gart_dma_new(struct drm_device *, int channel, +extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class, + uint64_t offset, uint64_t size, int access, + int target, struct nouveau_gpuobj **); +extern int nouveau_gpuobj_gart_dma_new(struct nouveau_channel *, uint64_t offset, uint64_t size, int access, struct nouveau_gpuobj **, uint32_t *o_ret); -extern int nouveau_gpuobj_gr_new(struct drm_device *, int channel, int class, +extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, int class, struct nouveau_gpuobj **); -extern int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data, + struct drm_file *); /* nouveau_irq.c */ extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS); -extern void nouveau_irq_preinstall(struct drm_device*); -extern void nouveau_irq_postinstall(struct drm_device*); -extern void nouveau_irq_uninstall(struct drm_device*); +extern void nouveau_irq_preinstall(struct drm_device *); +extern void nouveau_irq_postinstall(struct drm_device *); +extern void nouveau_irq_uninstall(struct drm_device *); /* nouveau_sgdma.c */ extern int nouveau_sgdma_init(struct drm_device *); @@ -358,131 +383,131 @@ extern int nouveau_sgdma_nottm_hack_init(struct drm_device *); extern void nouveau_sgdma_nottm_hack_takedown(struct drm_device *); /* nv04_fb.c */ -extern int nv04_fb_init(struct drm_device *dev); -extern void nv04_fb_takedown(struct drm_device *dev); +extern int nv04_fb_init(struct drm_device *); +extern void nv04_fb_takedown(struct drm_device *); /* nv10_fb.c */ -extern int nv10_fb_init(struct drm_device *dev); -extern void nv10_fb_takedown(struct drm_device *dev); +extern int nv10_fb_init(struct drm_device *); +extern void nv10_fb_takedown(struct drm_device *); /* nv40_fb.c */ -extern int nv40_fb_init(struct drm_device *dev); -extern void 
nv40_fb_takedown(struct drm_device *dev); +extern int nv40_fb_init(struct drm_device *); +extern void nv40_fb_takedown(struct drm_device *); /* nv04_fifo.c */ -extern int nv04_fifo_create_context(struct drm_device *dev, int channel); -extern void nv04_fifo_destroy_context(struct drm_device *dev, int channel); -extern int nv04_fifo_load_context(struct drm_device *dev, int channel); -extern int nv04_fifo_save_context(struct drm_device *dev, int channel); +extern int nv04_fifo_create_context(struct nouveau_channel *); +extern void nv04_fifo_destroy_context(struct nouveau_channel *); +extern int nv04_fifo_load_context(struct nouveau_channel *); +extern int nv04_fifo_save_context(struct nouveau_channel *); /* nv10_fifo.c */ -extern int nv10_fifo_create_context(struct drm_device *dev, int channel); -extern void nv10_fifo_destroy_context(struct drm_device *dev, int channel); -extern int nv10_fifo_load_context(struct drm_device *dev, int channel); -extern int nv10_fifo_save_context(struct drm_device *dev, int channel); +extern int nv10_fifo_create_context(struct nouveau_channel *); +extern void nv10_fifo_destroy_context(struct nouveau_channel *); +extern int nv10_fifo_load_context(struct nouveau_channel *); +extern int nv10_fifo_save_context(struct nouveau_channel *); /* nv40_fifo.c */ -extern int nv40_fifo_create_context(struct drm_device *, int channel); -extern void nv40_fifo_destroy_context(struct drm_device *, int channel); -extern int nv40_fifo_load_context(struct drm_device *, int channel); -extern int nv40_fifo_save_context(struct drm_device *, int channel); +extern int nv40_fifo_create_context(struct nouveau_channel *); +extern void nv40_fifo_destroy_context(struct nouveau_channel *); +extern int nv40_fifo_load_context(struct nouveau_channel *); +extern int nv40_fifo_save_context(struct nouveau_channel *); /* nv50_fifo.c */ extern int nv50_fifo_init(struct drm_device *); extern void nv50_fifo_takedown(struct drm_device *); -extern int nv50_fifo_create_context(struct drm_device *, int channel); -extern void nv50_fifo_destroy_context(struct drm_device *, int channel); -extern int nv50_fifo_load_context(struct drm_device *, int channel); -extern int nv50_fifo_save_context(struct drm_device *, int channel); +extern int nv50_fifo_create_context(struct nouveau_channel *); +extern void nv50_fifo_destroy_context(struct nouveau_channel *); +extern int nv50_fifo_load_context(struct nouveau_channel *); +extern int nv50_fifo_save_context(struct nouveau_channel *); /* nv04_graph.c */ -extern void nouveau_nv04_context_switch(struct drm_device *dev); -extern int nv04_graph_init(struct drm_device *dev); -extern void nv04_graph_takedown(struct drm_device *dev); -extern int nv04_graph_create_context(struct drm_device *dev, int channel); -extern void nv04_graph_destroy_context(struct drm_device *dev, int channel); -extern int nv04_graph_load_context(struct drm_device *dev, int channel); -extern int nv04_graph_save_context(struct drm_device *dev, int channel); +extern void nouveau_nv04_context_switch(struct drm_device *); +extern int nv04_graph_init(struct drm_device *); +extern void nv04_graph_takedown(struct drm_device *); +extern int nv04_graph_create_context(struct nouveau_channel *); +extern void nv04_graph_destroy_context(struct nouveau_channel *); +extern int nv04_graph_load_context(struct nouveau_channel *); +extern int nv04_graph_save_context(struct nouveau_channel *); /* nv10_graph.c */ -extern void nouveau_nv10_context_switch(struct drm_device *dev); -extern int nv10_graph_init(struct drm_device 
*dev); -extern void nv10_graph_takedown(struct drm_device *dev); -extern int nv10_graph_create_context(struct drm_device *dev, int channel); -extern void nv10_graph_destroy_context(struct drm_device *dev, int channel); -extern int nv10_graph_load_context(struct drm_device *dev, int channel); -extern int nv10_graph_save_context(struct drm_device *dev, int channel); +extern void nouveau_nv10_context_switch(struct drm_device *); +extern int nv10_graph_init(struct drm_device *); +extern void nv10_graph_takedown(struct drm_device *); +extern int nv10_graph_create_context(struct nouveau_channel *); +extern void nv10_graph_destroy_context(struct nouveau_channel *); +extern int nv10_graph_load_context(struct nouveau_channel *); +extern int nv10_graph_save_context(struct nouveau_channel *); /* nv20_graph.c */ -extern void nouveau_nv20_context_switch(struct drm_device *dev); -extern int nv20_graph_init(struct drm_device *dev); -extern void nv20_graph_takedown(struct drm_device *dev); -extern int nv20_graph_create_context(struct drm_device *dev, int channel); -extern void nv20_graph_destroy_context(struct drm_device *dev, int channel); -extern int nv20_graph_load_context(struct drm_device *dev, int channel); -extern int nv20_graph_save_context(struct drm_device *dev, int channel); +extern void nouveau_nv20_context_switch(struct drm_device *); +extern int nv20_graph_init(struct drm_device *); +extern void nv20_graph_takedown(struct drm_device *); +extern int nv20_graph_create_context(struct nouveau_channel *); +extern void nv20_graph_destroy_context(struct nouveau_channel *); +extern int nv20_graph_load_context(struct nouveau_channel *); +extern int nv20_graph_save_context(struct nouveau_channel *); /* nv30_graph.c */ -extern int nv30_graph_init(struct drm_device *dev); -extern void nv30_graph_takedown(struct drm_device *dev); -extern int nv30_graph_create_context(struct drm_device *, int channel); -extern void nv30_graph_destroy_context(struct drm_device *, int channel); -extern int nv30_graph_load_context(struct drm_device *, int channel); -extern int nv30_graph_save_context(struct drm_device *, int channel); +extern int nv30_graph_init(struct drm_device *); +extern void nv30_graph_takedown(struct drm_device *); +extern int nv30_graph_create_context(struct nouveau_channel *); +extern void nv30_graph_destroy_context(struct nouveau_channel *); +extern int nv30_graph_load_context(struct nouveau_channel *); +extern int nv30_graph_save_context(struct nouveau_channel *); /* nv40_graph.c */ extern int nv40_graph_init(struct drm_device *); extern void nv40_graph_takedown(struct drm_device *); -extern int nv40_graph_create_context(struct drm_device *, int channel); -extern void nv40_graph_destroy_context(struct drm_device *, int channel); -extern int nv40_graph_load_context(struct drm_device *, int channel); -extern int nv40_graph_save_context(struct drm_device *, int channel); +extern int nv40_graph_create_context(struct nouveau_channel *); +extern void nv40_graph_destroy_context(struct nouveau_channel *); +extern int nv40_graph_load_context(struct nouveau_channel *); +extern int nv40_graph_save_context(struct nouveau_channel *); /* nv50_graph.c */ extern int nv50_graph_init(struct drm_device *); extern void nv50_graph_takedown(struct drm_device *); -extern int nv50_graph_create_context(struct drm_device *, int channel); -extern void nv50_graph_destroy_context(struct drm_device *, int channel); -extern int nv50_graph_load_context(struct drm_device *, int channel); -extern int nv50_graph_save_context(struct 
drm_device *, int channel); +extern int nv50_graph_create_context(struct nouveau_channel *); +extern void nv50_graph_destroy_context(struct nouveau_channel *); +extern int nv50_graph_load_context(struct nouveau_channel *); +extern int nv50_graph_save_context(struct nouveau_channel *); /* nv04_instmem.c */ -extern int nv04_instmem_init(struct drm_device *dev); -extern void nv04_instmem_takedown(struct drm_device *dev); -extern int nv04_instmem_populate(struct drm_device*, struct nouveau_gpuobj*, +extern int nv04_instmem_init(struct drm_device *); +extern void nv04_instmem_takedown(struct drm_device *); +extern int nv04_instmem_populate(struct drm_device *, struct nouveau_gpuobj *, uint32_t *size); -extern void nv04_instmem_clear(struct drm_device*, struct nouveau_gpuobj*); -extern int nv04_instmem_bind(struct drm_device*, struct nouveau_gpuobj*); -extern int nv04_instmem_unbind(struct drm_device*, struct nouveau_gpuobj*); +extern void nv04_instmem_clear(struct drm_device *, struct nouveau_gpuobj *); +extern int nv04_instmem_bind(struct drm_device *, struct nouveau_gpuobj *); +extern int nv04_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *); /* nv50_instmem.c */ -extern int nv50_instmem_init(struct drm_device *dev); -extern void nv50_instmem_takedown(struct drm_device *dev); -extern int nv50_instmem_populate(struct drm_device*, struct nouveau_gpuobj*, +extern int nv50_instmem_init(struct drm_device *); +extern void nv50_instmem_takedown(struct drm_device *); +extern int nv50_instmem_populate(struct drm_device *, struct nouveau_gpuobj *, uint32_t *size); -extern void nv50_instmem_clear(struct drm_device*, struct nouveau_gpuobj*); -extern int nv50_instmem_bind(struct drm_device*, struct nouveau_gpuobj*); -extern int nv50_instmem_unbind(struct drm_device*, struct nouveau_gpuobj*); +extern void nv50_instmem_clear(struct drm_device *, struct nouveau_gpuobj *); +extern int nv50_instmem_bind(struct drm_device *, struct nouveau_gpuobj *); +extern int nv50_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *); /* nv04_mc.c */ -extern int nv04_mc_init(struct drm_device *dev); -extern void nv04_mc_takedown(struct drm_device *dev); +extern int nv04_mc_init(struct drm_device *); +extern void nv04_mc_takedown(struct drm_device *); /* nv40_mc.c */ -extern int nv40_mc_init(struct drm_device *dev); -extern void nv40_mc_takedown(struct drm_device *dev); +extern int nv40_mc_init(struct drm_device *); +extern void nv40_mc_takedown(struct drm_device *); /* nv50_mc.c */ -extern int nv50_mc_init(struct drm_device *dev); -extern void nv50_mc_takedown(struct drm_device *dev); +extern int nv50_mc_init(struct drm_device *); +extern void nv50_mc_takedown(struct drm_device *); /* nv04_timer.c */ -extern int nv04_timer_init(struct drm_device *dev); -extern uint64_t nv04_timer_read(struct drm_device *dev); -extern void nv04_timer_takedown(struct drm_device *dev); +extern int nv04_timer_init(struct drm_device *); +extern uint64_t nv04_timer_read(struct drm_device *); +extern void nv04_timer_takedown(struct drm_device *); extern long nouveau_compat_ioctl(struct file *file, unsigned int cmd, - unsigned long arg); + unsigned long arg); #if defined(__powerpc__) #define NV_READ(reg) in_be32((void __iomem *)(dev_priv->mmio)->handle + (reg) ) diff --git a/shared-core/nouveau_fifo.c b/shared-core/nouveau_fifo.c index e5d3ab3c..c7ce1d8d 100644 --- a/shared-core/nouveau_fifo.c +++ b/shared-core/nouveau_fifo.c @@ -186,10 +186,10 @@ int nouveau_fifo_init(struct drm_device *dev) } static int 
-nouveau_fifo_cmdbuf_alloc(struct drm_device *dev, int channel) +nouveau_fifo_cmdbuf_alloc(struct nouveau_channel *chan) { + struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = dev_priv->fifos[channel]; struct nouveau_config *config = &dev_priv->config; struct mem_block *cb; int cb_min_size = max(NV03_FIFO_SIZE,PAGE_SIZE); @@ -211,37 +211,34 @@ nouveau_fifo_cmdbuf_alloc(struct drm_device *dev, int channel) } if (cb->flags & NOUVEAU_MEM_AGP) { - ret = nouveau_gpuobj_gart_dma_new(dev, channel, - cb->start, cb->size, + ret = nouveau_gpuobj_gart_dma_new(chan, cb->start, cb->size, NV_DMA_ACCESS_RO, &pushbuf, &chan->pushbuf_base); } else if (cb->flags & NOUVEAU_MEM_PCI) { - ret = nouveau_gpuobj_dma_new(dev, channel, - NV_CLASS_DMA_IN_MEMORY, + ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, cb->start, cb->size, NV_DMA_ACCESS_RO, NV_DMA_TARGET_PCI_NONLINEAR, &pushbuf); chan->pushbuf_base = 0; } else if (dev_priv->card_type != NV_04) { - ret = nouveau_gpuobj_dma_new - (dev, channel, NV_CLASS_DMA_IN_MEMORY, - cb->start, - cb->size, NV_DMA_ACCESS_RO, NV_DMA_TARGET_VIDMEM, - &pushbuf); + ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, + cb->start, cb->size, + NV_DMA_ACCESS_RO, + NV_DMA_TARGET_VIDMEM, &pushbuf); chan->pushbuf_base = 0; } else { /* NV04 cmdbuf hack, from original ddx.. not sure of it's * exact reason for existing :) PCI access to cmdbuf in * VRAM. */ - ret = nouveau_gpuobj_dma_new - (dev, channel, NV_CLASS_DMA_IN_MEMORY, - cb->start + drm_get_resource_start(dev, 1), - cb->size, NV_DMA_ACCESS_RO, - NV_DMA_TARGET_PCI, &pushbuf); + ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, + cb->start + + drm_get_resource_start(dev, 1), + cb->size, NV_DMA_ACCESS_RO, + NV_DMA_TARGET_PCI, &pushbuf); chan->pushbuf_base = 0; } @@ -251,7 +248,7 @@ nouveau_fifo_cmdbuf_alloc(struct drm_device *dev, int channel) return ret; } - if ((ret = nouveau_gpuobj_ref_add(dev, channel, 0, pushbuf, + if ((ret = nouveau_gpuobj_ref_add(dev, chan, 0, pushbuf, &chan->pushbuf))) { DRM_ERROR("Error referencing push buffer ctxdma: %d\n", ret); if (pushbuf != dev_priv->gart_info.sg_ctxdma) @@ -270,8 +267,8 @@ int nouveau_fifo_alloc(struct drm_device *dev, int *chan_ret, { int ret; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_engine_func *engine = &dev_priv->Engine; - struct nouveau_fifo *chan; + struct nouveau_engine *engine = &dev_priv->Engine; + struct nouveau_channel *chan; int channel; /* @@ -293,34 +290,36 @@ int nouveau_fifo_alloc(struct drm_device *dev, int *chan_ret, return -EINVAL; (*chan_ret) = channel; - dev_priv->fifos[channel] = drm_calloc(1, sizeof(struct nouveau_fifo), + dev_priv->fifos[channel] = drm_calloc(1, sizeof(struct nouveau_channel), DRM_MEM_DRIVER); if (!dev_priv->fifos[channel]) return -ENOMEM; dev_priv->fifo_alloc_count++; chan = dev_priv->fifos[channel]; + chan->dev = dev; + chan->id = channel; chan->file_priv = file_priv; DRM_INFO("Allocating FIFO number %d\n", channel); /* Setup channel's default objects */ - ret = nouveau_gpuobj_channel_init(dev, channel, vram_handle, tt_handle); + ret = nouveau_gpuobj_channel_init(chan, vram_handle, tt_handle); if (ret) { - nouveau_fifo_free(dev, channel); + nouveau_fifo_free(chan); return ret; } /* allocate a command buffer, and create a dma object for the gpu */ - ret = nouveau_fifo_cmdbuf_alloc(dev, channel); + ret = nouveau_fifo_cmdbuf_alloc(chan); if (ret) { - nouveau_fifo_free(dev, channel); + nouveau_fifo_free(chan); return 
ret;
 	}
 
 	/* Allocate space for per-channel fixed notifier memory */
-	ret = nouveau_notifier_init_channel(dev, channel, file_priv);
+	ret = nouveau_notifier_init_channel(chan);
 	if (ret) {
-		nouveau_fifo_free(dev, channel);
+		nouveau_fifo_free(chan);
 		return ret;
 	}
 
@@ -333,16 +332,16 @@ int nouveau_fifo_alloc(struct drm_device *dev, int *chan_ret,
 	NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000);
 
 	/* Create a graphics context for new channel */
-	ret = engine->graph.create_context(dev, channel);
+	ret = engine->graph.create_context(chan);
 	if (ret) {
-		nouveau_fifo_free(dev, channel);
+		nouveau_fifo_free(chan);
 		return ret;
 	}
 
 	/* Construct inital RAMFC for new channel */
-	ret = engine->fifo.create_context(dev, channel);
+	ret = engine->fifo.create_context(chan);
 	if (ret) {
-		nouveau_fifo_free(dev, channel);
+		nouveau_fifo_free(chan);
 		return ret;
 	}
 
@@ -359,15 +358,15 @@ int nouveau_fifo_alloc(struct drm_device *dev, int *chan_ret,
 	 * other case, the GPU will handle this when it switches contexts.
 	 */
 	if (dev_priv->fifo_alloc_count == 1) {
-		ret = engine->fifo.load_context(dev, channel);
+		ret = engine->fifo.load_context(chan);
 		if (ret) {
-			nouveau_fifo_free(dev, channel);
+			nouveau_fifo_free(chan);
 			return ret;
 		}
 
-		ret = engine->graph.load_context(dev, channel);
+		ret = engine->graph.load_context(chan);
 		if (ret) {
-			nouveau_fifo_free(dev, channel);
+			nouveau_fifo_free(chan);
 			return ret;
 		}
 
@@ -399,28 +398,23 @@ int nouveau_fifo_alloc(struct drm_device *dev, int *chan_ret,
 }
 
 /* stops a fifo */
-void nouveau_fifo_free(struct drm_device *dev, int channel)
+void nouveau_fifo_free(struct nouveau_channel *chan)
 {
+	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_engine_func *engine = &dev_priv->Engine;
-	struct nouveau_fifo *chan = dev_priv->fifos[channel];
+	struct nouveau_engine *engine = &dev_priv->Engine;
 
-	if (!chan) {
-		DRM_ERROR("Freeing non-existant channel %d\n", channel);
-		return;
-	}
-
-	DRM_INFO("%s: freeing fifo %d\n", __func__, channel);
+	DRM_INFO("%s: freeing fifo %d\n", __func__, chan->id);
 
 	/* disable the fifo caches */
 	NV_WRITE(NV03_PFIFO_CACHES, 0x00000000);
 
 	// FIXME XXX needs more code
 
-	engine->fifo.destroy_context(dev, channel);
+	engine->fifo.destroy_context(chan);
 
 	/* Cleanup PGRAPH state */
-	engine->graph.destroy_context(dev, channel);
+	engine->graph.destroy_context(chan);
 
 	/* reenable the fifo caches */
 	NV_WRITE(NV03_PFIFO_CACHES, 0x00000001);
@@ -432,12 +426,12 @@ void nouveau_fifo_free(struct drm_device *dev, int channel)
 		chan->pushbuf_mem = NULL;
 	}
 
-	nouveau_notifier_takedown_channel(dev, channel);
+	nouveau_notifier_takedown_channel(chan);
 
 	/* Destroy objects belonging to the channel */
-	nouveau_gpuobj_channel_takedown(dev, channel);
+	nouveau_gpuobj_channel_takedown(chan);
 
-	dev_priv->fifos[channel] = NULL;
+	dev_priv->fifos[chan->id] = NULL;
 	dev_priv->fifo_alloc_count--;
 	drm_free(chan, sizeof(*chan), DRM_MEM_DRIVER);
 }
@@ -445,14 +439,16 @@ void nouveau_fifo_free(struct drm_device *dev, int channel)
 /* cleanups all the fifos from file_priv */
 void nouveau_fifo_cleanup(struct drm_device *dev, struct drm_file *file_priv)
 {
-	int i;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int i;
 
 	DRM_DEBUG("clearing FIFO enables from file_priv\n");
-	for(i=0;i<nouveau_fifo_number(dev);i++)
-		if (dev_priv->fifos[i] &&
-		    dev_priv->fifos[i]->file_priv==file_priv)
-			nouveau_fifo_free(dev,i);
+	for(i = 0; i < nouveau_fifo_number(dev); i++) {
+		struct nouveau_channel *chan = dev_priv->fifos[i];
+
+		if (chan && chan->file_priv == file_priv)
+			nouveau_fifo_free(chan);
+	}
} int @@ -477,7 +473,7 @@ static int nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data, struct d struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_fifo_alloc *init = data; struct drm_map_list *entry; - struct nouveau_fifo *chan; + struct nouveau_channel *chan; int res; if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0) diff --git a/shared-core/nouveau_notifier.c b/shared-core/nouveau_notifier.c index 7a982ba4..b1090587 100644 --- a/shared-core/nouveau_notifier.c +++ b/shared-core/nouveau_notifier.c @@ -30,11 +30,10 @@ #include "nouveau_drv.h" int -nouveau_notifier_init_channel(struct drm_device *dev, int channel, - struct drm_file *file_priv) +nouveau_notifier_init_channel(struct nouveau_channel *chan) { + struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = dev_priv->fifos[channel]; int flags, ret; /*TODO: PCI notifier blocks */ @@ -47,9 +46,9 @@ nouveau_notifier_init_channel(struct drm_device *dev, int channel, flags = NOUVEAU_MEM_FB; flags |= NOUVEAU_MEM_MAPPED; -DRM_DEBUG("Allocating notifier block in %d\n", flags); + DRM_DEBUG("Allocating notifier block in %d\n", flags); chan->notifier_block = nouveau_mem_alloc(dev, 0, PAGE_SIZE, flags, - file_priv); + (struct drm_file *)-2); if (!chan->notifier_block) return -ENOMEM; @@ -62,25 +61,23 @@ DRM_DEBUG("Allocating notifier block in %d\n", flags); } void -nouveau_notifier_takedown_channel(struct drm_device *dev, int channel) +nouveau_notifier_takedown_channel(struct nouveau_channel *chan) { - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = dev_priv->fifos[channel]; + struct drm_device *dev = chan->dev; if (chan->notifier_block) { nouveau_mem_free(dev, chan->notifier_block); chan->notifier_block = NULL; } - /*XXX: heap destroy */ + nouveau_mem_takedown(&chan->notifier_heap); } int -nouveau_notifier_alloc(struct drm_device *dev, int channel, uint32_t handle, +nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, int count, uint32_t *b_offset) { - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = dev_priv->fifos[channel]; + struct drm_device *dev = chan->dev; struct nouveau_gpuobj *nobj = NULL; struct mem_block *mem; uint32_t offset; @@ -88,14 +85,14 @@ nouveau_notifier_alloc(struct drm_device *dev, int channel, uint32_t handle, if (!chan->notifier_heap) { DRM_ERROR("Channel %d doesn't have a notifier heap!\n", - channel); + chan->id); return -EINVAL; } mem = nouveau_mem_alloc_block(chan->notifier_heap, 32, 0, chan->file_priv); if (!mem) { - DRM_ERROR("Channel %d notifier block full\n", channel); + DRM_ERROR("Channel %d notifier block full\n", chan->id); return -ENOMEM; } mem->flags = NOUVEAU_MEM_NOTIFIER; @@ -113,7 +110,7 @@ nouveau_notifier_alloc(struct drm_device *dev, int channel, uint32_t handle, return -EINVAL; } - if ((ret = nouveau_gpuobj_dma_new(dev, channel, NV_CLASS_DMA_IN_MEMORY, + if ((ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, offset, mem->size, NV_DMA_ACCESS_RW, target, &nobj))) { nouveau_mem_free_block(mem); @@ -121,7 +118,7 @@ nouveau_notifier_alloc(struct drm_device *dev, int channel, uint32_t handle, return ret; } - if ((ret = nouveau_gpuobj_ref_add(dev, channel, handle, nobj, NULL))) { + if ((ret = nouveau_gpuobj_ref_add(dev, chan, handle, nobj, NULL))) { nouveau_gpuobj_del(dev, &nobj); nouveau_mem_free_block(mem); DRM_ERROR("Error referencing notifier ctxdma: %d\n", ret); @@ -133,19 +130,16 @@ 
nouveau_notifier_alloc(struct drm_device *dev, int channel, uint32_t handle, } int -nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv) +nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data, + struct drm_file *file_priv) { struct drm_nouveau_notifier_alloc *na = data; + struct nouveau_channel *chan; int ret; - if (!nouveau_fifo_owner(dev, file_priv, na->channel)) { - DRM_ERROR("pid %d doesn't own channel %d\n", - DRM_CURRENTPID, na->channel); - return -EPERM; - } + NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(na->channel, file_priv, chan); - ret = nouveau_notifier_alloc(dev, na->channel, na->handle, - na->count, &na->offset); + ret = nouveau_notifier_alloc(chan, na->handle, na->count, &na->offset); if (ret) return ret; diff --git a/shared-core/nouveau_object.c b/shared-core/nouveau_object.c index e8b12bb7..274bb2a7 100644 --- a/shared-core/nouveau_object.c +++ b/shared-core/nouveau_object.c @@ -100,7 +100,7 @@ static int nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) { struct drm_nouveau_private *dev_priv=dev->dev_private; - struct nouveau_fifo *chan = dev_priv->fifos[ref->channel]; + struct nouveau_channel *chan = dev_priv->fifos[ref->channel]; struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL; struct nouveau_gpuobj *gpuobj = ref->gpuobj; uint32_t ctx, co, ho; @@ -149,7 +149,7 @@ static void nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = dev_priv->fifos[ref->channel]; + struct nouveau_channel *chan = dev_priv->fifos[ref->channel]; struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL; uint32_t co, ho; @@ -180,34 +180,28 @@ nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) } int -nouveau_gpuobj_new(struct drm_device *dev, int channel, int size, int align, - uint32_t flags, struct nouveau_gpuobj **gpuobj_ret) +nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, + int size, int align, uint32_t flags, + struct nouveau_gpuobj **gpuobj_ret) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_engine_func *engine = &dev_priv->Engine; - struct nouveau_fifo *chan = NULL; + struct nouveau_engine *engine = &dev_priv->Engine; struct nouveau_gpuobj *gpuobj; struct mem_block *pramin = NULL; int ret; DRM_DEBUG("ch%d size=%d align=%d flags=0x%08x\n", - channel, size, align, flags); + chan ? chan->id : -1, size, align, flags); if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL) return -EINVAL; - if (channel >= 0) { - if (channel > nouveau_fifo_number(dev)) - return -EINVAL; - chan = dev_priv->fifos[channel]; - } - gpuobj = drm_calloc(1, sizeof(*gpuobj), DRM_MEM_DRIVER); if (!gpuobj) return -ENOMEM; DRM_DEBUG("gpuobj %p\n", gpuobj); gpuobj->flags = flags; - gpuobj->im_channel = channel; + gpuobj->im_channel = chan ? chan->id : -1; /* Choose between global instmem heap, and per-channel private * instmem heap. On dev_private; - struct nouveau_engine_func *engine = &dev_priv->Engine; + struct nouveau_engine *engine = &dev_priv->Engine; struct nouveau_gpuobj *gpuobj; DRM_DEBUG("gpuobj %p\n", pgpuobj ? 
*pgpuobj : NULL); @@ -325,7 +319,8 @@ int nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj) } static int -nouveau_gpuobj_instance_get(struct drm_device *dev, int channel, +nouveau_gpuobj_instance_get(struct drm_device *dev, + struct nouveau_channel *chan, struct nouveau_gpuobj *gpuobj, uint32_t *inst) { struct drm_nouveau_private *dev_priv = dev->dev_private; @@ -337,15 +332,15 @@ nouveau_gpuobj_instance_get(struct drm_device *dev, int channel, return 0; } - if ((channel > 0) && gpuobj->im_channel != channel) { + if (chan && gpuobj->im_channel != chan->id) { DRM_ERROR("Channel mismatch: obj %d, ref %d\n", - gpuobj->im_channel, channel); + gpuobj->im_channel, chan->id); return -EINVAL; } /* NV50 channel-local instance */ - if (channel > 0) { - cpramin = dev_priv->fifos[channel]->ramin->gpuobj; + if (chan > 0) { + cpramin = chan->ramin->gpuobj; *inst = gpuobj->im_pramin->start - cpramin->im_pramin->start; return 0; } @@ -371,29 +366,25 @@ nouveau_gpuobj_instance_get(struct drm_device *dev, int channel, } int -nouveau_gpuobj_ref_add(struct drm_device *dev, int channel, uint32_t handle, - struct nouveau_gpuobj *gpuobj, struct nouveau_gpuobj_ref **ref_ret) +nouveau_gpuobj_ref_add(struct drm_device *dev, struct nouveau_channel *chan, + uint32_t handle, struct nouveau_gpuobj *gpuobj, + struct nouveau_gpuobj_ref **ref_ret) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = NULL; struct nouveau_gpuobj_ref *ref; uint32_t instance; int ret; - DRM_DEBUG("ch%d h=0x%08x gpuobj=%p\n", channel, handle, gpuobj); + DRM_DEBUG("ch%d h=0x%08x gpuobj=%p\n", + chan ? chan->id : -1, handle, gpuobj); if (!dev_priv || !gpuobj || (ref_ret && *ref_ret != NULL)) return -EINVAL; - if (channel >= 0) { - if (channel > nouveau_fifo_number(dev)) - return -EINVAL; - chan = dev_priv->fifos[channel]; - } else - if (!ref_ret) + if (!chan && !ref_ret) return -EINVAL; - ret = nouveau_gpuobj_instance_get(dev, channel, gpuobj, &instance); + ret = nouveau_gpuobj_instance_get(dev, chan, gpuobj, &instance); if (ret) return ret; @@ -401,7 +392,7 @@ nouveau_gpuobj_ref_add(struct drm_device *dev, int channel, uint32_t handle, if (!ref) return -ENOMEM; ref->gpuobj = gpuobj; - ref->channel = channel; + ref->channel = chan ? 
chan->id : -1; ref->instance = instance; if (!ref_ret) { @@ -452,8 +443,9 @@ int nouveau_gpuobj_ref_del(struct drm_device *dev, struct nouveau_gpuobj_ref **p } int -nouveau_gpuobj_new_ref(struct drm_device *dev, int oc, int rc, uint32_t handle, - int size, int align, uint32_t flags, +nouveau_gpuobj_new_ref(struct drm_device *dev, + struct nouveau_channel *oc, struct nouveau_channel *rc, + uint32_t handle, int size, int align, uint32_t flags, struct nouveau_gpuobj_ref **ref) { struct nouveau_gpuobj *gpuobj = NULL; @@ -471,11 +463,9 @@ nouveau_gpuobj_new_ref(struct drm_device *dev, int oc, int rc, uint32_t handle, } static int -nouveau_gpuobj_ref_find(struct drm_device *dev, int channel, uint32_t handle, +nouveau_gpuobj_ref_find(struct nouveau_channel *chan, uint32_t handle, struct nouveau_gpuobj_ref **ref_ret) { - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = dev_priv->fifos[channel]; struct nouveau_gpuobj_ref *ref = chan->ramht_refs; while (ref) { @@ -524,7 +514,7 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t offset, uint32_t size, } if (pref) { - if ((i = nouveau_gpuobj_ref_add(dev, -1, 0, gpuobj, pref))) { + if ((i = nouveau_gpuobj_ref_add(dev, NULL, 0, gpuobj, pref))) { nouveau_gpuobj_del(dev, &gpuobj); return i; } @@ -577,10 +567,11 @@ nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class) to it that can be used to set up context objects. */ int -nouveau_gpuobj_dma_new(struct drm_device *dev, int channel, int class, - uint64_t offset, uint64_t size, int access, int target, - struct nouveau_gpuobj **gpuobj) +nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, + uint64_t offset, uint64_t size, int access, + int target, struct nouveau_gpuobj **gpuobj) { + struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; int ret; uint32_t is_scatter_gather = 0; @@ -591,7 +582,7 @@ nouveau_gpuobj_dma_new(struct drm_device *dev, int channel, int class, DRM_DEBUG("ch%d class=0x%04x offset=0x%llx size=0x%llx\n", - channel, class, offset, size); + chan->id, class, offset, size); DRM_DEBUG("access=%d target=%d\n", access, target); switch (target) { @@ -608,7 +599,7 @@ nouveau_gpuobj_dma_new(struct drm_device *dev, int channel, int class, break; } - ret = nouveau_gpuobj_new(dev, channel, + ret = nouveau_gpuobj_new(dev, chan, is_scatter_gather ? ((page_count << 2) + 12) : nouveau_gpuobj_class_instmem_size(dev, class), 16, NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, @@ -711,19 +702,19 @@ nouveau_gpuobj_dma_new(struct drm_device *dev, int channel, int class, } int -nouveau_gpuobj_gart_dma_new(struct drm_device *dev, int channel, +nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan, uint64_t offset, uint64_t size, int access, struct nouveau_gpuobj **gpuobj, uint32_t *o_ret) { + struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; int ret; if (dev_priv->gart_info.type == NOUVEAU_GART_AGP || (dev_priv->card_type >= NV_50 && dev_priv->gart_info.type == NOUVEAU_GART_SGDMA)) { - ret = nouveau_gpuobj_dma_new(dev, channel, - NV_CLASS_DMA_IN_MEMORY, + ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, offset, size, access, NV_DMA_TARGET_AGP, gpuobj); if (o_ret) @@ -798,15 +789,16 @@ nouveau_gpuobj_gart_dma_new(struct drm_device *dev, int channel, set to 0? 
*/ int -nouveau_gpuobj_gr_new(struct drm_device *dev, int channel, int class, +nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class, struct nouveau_gpuobj **gpuobj) { + struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; int ret; - DRM_DEBUG("ch%d class=0x%04x\n", channel, class); + DRM_DEBUG("ch%d class=0x%04x\n", chan->id, class); - ret = nouveau_gpuobj_new(dev, channel, + ret = nouveau_gpuobj_new(dev, chan, nouveau_gpuobj_class_instmem_size(dev, class), 16, NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, @@ -847,14 +839,14 @@ nouveau_gpuobj_gr_new(struct drm_device *dev, int channel, int class, } static int -nouveau_gpuobj_channel_init_pramin(struct drm_device *dev, int channel) +nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan) { + struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = dev_priv->fifos[channel]; struct nouveau_gpuobj *pramin = NULL; int size, base, ret; - DRM_DEBUG("ch%d\n", channel); + DRM_DEBUG("ch%d\n", chan->id); /* Base amount for object storage (4KiB enough?) */ size = 0x1000; @@ -876,8 +868,8 @@ nouveau_gpuobj_channel_init_pramin(struct drm_device *dev, int channel) } DRM_DEBUG("ch%d PRAMIN size: 0x%08x bytes, base alloc=0x%08x\n", - channel, size, base); - ret = nouveau_gpuobj_new_ref(dev, -1, -1, 0, size, 0x1000, 0, + chan->id, size, base); + ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, size, 0x1000, 0, &chan->ramin); if (ret) { DRM_ERROR("Error allocating channel PRAMIN: %d\n", ret); @@ -897,21 +889,21 @@ nouveau_gpuobj_channel_init_pramin(struct drm_device *dev, int channel) } int -nouveau_gpuobj_channel_init(struct drm_device *dev, int channel, +nouveau_gpuobj_channel_init(struct nouveau_channel *chan, uint32_t vram_h, uint32_t tt_h) { + struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = dev_priv->fifos[channel]; struct nouveau_gpuobj *vram = NULL, *tt = NULL; int ret, i; - DRM_DEBUG("ch%d vram=0x%08x tt=0x%08x\n", channel, vram_h, tt_h); + DRM_DEBUG("ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h); /* Reserve a block of PRAMIN for the channel *XXX: maybe on card_type == NV_50) { - ret = nouveau_gpuobj_channel_init_pramin(dev, channel); + ret = nouveau_gpuobj_channel_init_pramin(chan); if (ret) return ret; } @@ -930,7 +922,7 @@ nouveau_gpuobj_channel_init(struct drm_device *dev, int channel, INSTANCE_WR(chan->vm_pd, (i+4)/4, 0xdeadcafe); } - if ((ret = nouveau_gpuobj_ref_add(dev, -1, 0, + if ((ret = nouveau_gpuobj_ref_add(dev, NULL, 0, dev_priv->gart_info.sg_ctxdma, &chan->vm_gart_pt))) return ret; @@ -941,12 +933,12 @@ nouveau_gpuobj_channel_init(struct drm_device *dev, int channel, /* RAMHT */ if (dev_priv->card_type < NV_50) { - ret = nouveau_gpuobj_ref_add(dev, -1, 0, dev_priv->ramht, + ret = nouveau_gpuobj_ref_add(dev, NULL, 0, dev_priv->ramht, &chan->ramht); if (ret) return ret; } else { - ret = nouveau_gpuobj_new_ref(dev, channel, channel, 0, + ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 0x8000, 16, NVOBJ_FLAG_ZERO_ALLOC, &chan->ramht); @@ -955,7 +947,7 @@ nouveau_gpuobj_channel_init(struct drm_device *dev, int channel, } /* VRAM ctxdma */ - if ((ret = nouveau_gpuobj_dma_new(dev, channel, NV_CLASS_DMA_IN_MEMORY, + if ((ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, dev_priv->fb_available_size, NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM, &vram))) { @@ -963,20 +955,19 @@ nouveau_gpuobj_channel_init(struct drm_device *dev, int 
channel, return ret; } - if ((ret = nouveau_gpuobj_ref_add(dev, channel, vram_h, vram, NULL))) { + if ((ret = nouveau_gpuobj_ref_add(dev, chan, vram_h, vram, NULL))) { DRM_ERROR("Error referencing VRAM ctxdma: %d\n", ret); return ret; } /* TT memory ctxdma */ if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) { - ret = nouveau_gpuobj_gart_dma_new(dev, channel, 0, + ret = nouveau_gpuobj_gart_dma_new(chan, 0, dev_priv->gart_info.aper_size, NV_DMA_ACCESS_RW, &tt, NULL); } else if (dev_priv->pci_heap) { - ret = nouveau_gpuobj_dma_new(dev, channel, - NV_CLASS_DMA_IN_MEMORY, + ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, dev->sg->pages * PAGE_SIZE, NV_DMA_ACCESS_RW, NV_DMA_TARGET_PCI_NONLINEAR, &tt); @@ -990,7 +981,7 @@ nouveau_gpuobj_channel_init(struct drm_device *dev, int channel, return ret; } - ret = nouveau_gpuobj_ref_add(dev, channel, tt_h, tt, NULL); + ret = nouveau_gpuobj_ref_add(dev, chan, tt_h, tt, NULL); if (ret) { DRM_ERROR("Error referencing TT ctxdma: %d\n", ret); return ret; @@ -1000,13 +991,12 @@ nouveau_gpuobj_channel_init(struct drm_device *dev, int channel, } void -nouveau_gpuobj_channel_takedown(struct drm_device *dev, int channel) +nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan) { - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = dev_priv->fifos[channel]; + struct drm_device *dev = chan->dev; struct nouveau_gpuobj_ref *ref; - DRM_DEBUG("ch%d\n", channel); + DRM_DEBUG("ch%d\n", chan->id); while ((ref = chan->ramht_refs)) { chan->ramht_refs = ref->next; @@ -1024,35 +1014,33 @@ nouveau_gpuobj_channel_takedown(struct drm_device *dev, int channel) } -int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv) +int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data, + struct drm_file *file_priv) { + struct nouveau_channel *chan; struct drm_nouveau_grobj_alloc *init = data; struct nouveau_gpuobj *gr = NULL; int ret; - if (!nouveau_fifo_owner(dev, file_priv, init->channel)) { - DRM_ERROR("pid %d doesn't own channel %d\n", - DRM_CURRENTPID, init->channel); - return -EINVAL; - } + NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan); //FIXME: check args, only allow trusted objects to be created if (init->handle == ~0) return -EINVAL; - if (nouveau_gpuobj_ref_find(dev, init->channel, init->handle, NULL) == + + if (nouveau_gpuobj_ref_find(chan, init->handle, NULL) == 0) return -EEXIST; - ret = nouveau_gpuobj_gr_new(dev, init->channel, init->class, &gr); + ret = nouveau_gpuobj_gr_new(chan, init->class, &gr); if (ret) { DRM_ERROR("Error creating gr object: %d (%d/0x%08x)\n", ret, init->channel, init->handle); return ret; } - if ((ret = nouveau_gpuobj_ref_add(dev, init->channel, init->handle, - gr, NULL))) { + if ((ret = nouveau_gpuobj_ref_add(dev, chan, init->handle, gr, NULL))) { DRM_ERROR("Error referencing gr object: %d (%d/0x%08x\n)", ret, init->channel, init->handle); nouveau_gpuobj_del(dev, &gr); diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c index f45f2783..26ba8fbf 100644 --- a/shared-core/nouveau_state.c +++ b/shared-core/nouveau_state.c @@ -93,7 +93,7 @@ static uint64_t nouveau_stub_timer_read(struct drm_device *dev) { return 0; } static int nouveau_init_engine_ptrs(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_engine_func *engine = &dev_priv->Engine; + struct nouveau_engine *engine = &dev_priv->Engine; switch (dev_priv->chipset & 0xf0) { case 0x00: @@ -270,7 +270,7 @@ 
static int nouveau_init_engine_ptrs(struct drm_device *dev) static int nouveau_card_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_engine_func *engine; + struct nouveau_engine *engine; int ret; /* Map any PCI resources we need on the card */ @@ -332,7 +332,7 @@ static int nouveau_card_init(struct drm_device *dev) static void nouveau_card_takedown(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_engine_func *engine = &dev_priv->Engine; + struct nouveau_engine *engine = &dev_priv->Engine; if (dev_priv->init_state != NOUVEAU_CARD_INIT_DOWN) { engine->fifo.takedown(dev); @@ -526,6 +526,7 @@ void nouveau_wait_for_idle(struct drm_device *dev) uint32_t status; do { uint32_t pmc_e = NV_READ(NV03_PMC_ENABLE); + (void)pmc_e; status = NV_READ(NV04_PGRAPH_STATUS); if (!status) break; diff --git a/shared-core/nv04_fifo.c b/shared-core/nv04_fifo.c index 564efd0b..4d61f4fe 100644 --- a/shared-core/nv04_fifo.c +++ b/shared-core/nv04_fifo.c @@ -36,13 +36,13 @@ #define NV04_RAMFC__SIZE 32 int -nv04_fifo_create_context(struct drm_device *dev, int channel) +nv04_fifo_create_context(struct nouveau_channel *chan) { + struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = dev_priv->fifos[channel]; int ret; - if ((ret = nouveau_gpuobj_new_fake(dev, NV04_RAMFC(channel), + if ((ret = nouveau_gpuobj_new_fake(dev, NV04_RAMFC(chan->id), NV04_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, @@ -62,30 +62,29 @@ nv04_fifo_create_context(struct drm_device *dev, int channel) 0)); /* enable the fifo dma operation */ - NV_WRITE(NV04_PFIFO_MODE,NV_READ(NV04_PFIFO_MODE)|(1<id)); return 0; } void -nv04_fifo_destroy_context(struct drm_device *dev, int channel) +nv04_fifo_destroy_context(struct nouveau_channel *chan) { + struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = dev_priv->fifos[channel]; + + NV_WRITE(NV04_PFIFO_MODE, NV_READ(NV04_PFIFO_MODE)&~(1<id)); - NV_WRITE(NV04_PFIFO_MODE, NV_READ(NV04_PFIFO_MODE)&~(1<ramfc) - nouveau_gpuobj_ref_del(dev, &chan->ramfc); + nouveau_gpuobj_ref_del(dev, &chan->ramfc); } int -nv04_fifo_load_context(struct drm_device *dev, int channel) +nv04_fifo_load_context(struct nouveau_channel *chan) { + struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = dev_priv->fifos[channel]; uint32_t tmp; - NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, (1<<8) | channel); + NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, (1<<8) | chan->id); NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET, RAMFC_RD(DMA_GET)); NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT, RAMFC_RD(DMA_PUT)); @@ -107,10 +106,10 @@ nv04_fifo_load_context(struct drm_device *dev, int channel) } int -nv04_fifo_save_context(struct drm_device *dev, int channel) +nv04_fifo_save_context(struct nouveau_channel *chan) { + struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = dev_priv->fifos[channel]; uint32_t tmp; RAMFC_WR(DMA_PUT, NV04_PFIFO_CACHE1_DMA_PUT); diff --git a/shared-core/nv04_graph.c b/shared-core/nv04_graph.c index e35e3071..b2ea7804 100644 --- a/shared-core/nv04_graph.c +++ b/shared-core/nv04_graph.c @@ -336,14 +336,13 @@ void nouveau_nv04_context_switch(struct drm_device *dev) NV_WRITE(NV04_PGRAPH_FIFO,0x1); } -int nv04_graph_create_context(struct drm_device *dev, int channel) { - struct 
drm_nouveau_private *dev_priv = dev->dev_private; - DRM_DEBUG("nv04_graph_context_create %d\n", channel); +int nv04_graph_create_context(struct nouveau_channel *chan) { + DRM_DEBUG("nv04_graph_context_create %d\n", chan->id); - memset(dev_priv->fifos[channel]->pgraph_ctx, 0, sizeof(dev_priv->fifos[channel]->pgraph_ctx)); + memset(chan->pgraph_ctx, 0, sizeof(chan->pgraph_ctx)); //dev_priv->fifos[channel].pgraph_ctx_user = channel << 24; - dev_priv->fifos[channel]->pgraph_ctx[0] = 0x0001ffff; + chan->pgraph_ctx[0] = 0x0001ffff; /* is it really needed ??? */ //dev_priv->fifos[channel].pgraph_ctx[1] = NV_READ(NV_PGRAPH_DEBUG_4); //dev_priv->fifos[channel].pgraph_ctx[2] = NV_READ(0x004006b0); @@ -351,17 +350,17 @@ int nv04_graph_create_context(struct drm_device *dev, int channel) { return 0; } -void nv04_graph_destroy_context(struct drm_device *dev, int channel) +void nv04_graph_destroy_context(struct nouveau_channel *chan) { } -int nv04_graph_load_context(struct drm_device *dev, int channel) +int nv04_graph_load_context(struct nouveau_channel *chan) { DRM_ERROR("stub!\n"); return 0; } -int nv04_graph_save_context(struct drm_device *dev, int channel) +int nv04_graph_save_context(struct nouveau_channel *chan) { DRM_ERROR("stub!\n"); return 0; diff --git a/shared-core/nv10_fifo.c b/shared-core/nv10_fifo.c index 7b9c665b..47af0ff0 100644 --- a/shared-core/nv10_fifo.c +++ b/shared-core/nv10_fifo.c @@ -37,13 +37,13 @@ #define NV10_RAMFC__SIZE ((dev_priv->chipset) >= 0x17 ? 64 : 32) int -nv10_fifo_create_context(struct drm_device *dev, int channel) +nv10_fifo_create_context(struct nouveau_channel *chan) { + struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = dev_priv->fifos[channel]; int ret; - if ((ret = nouveau_gpuobj_new_fake(dev, NV10_RAMFC(channel), + if ((ret = nouveau_gpuobj_new_fake(dev, NV10_RAMFC(chan->id), NV10_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, @@ -65,30 +65,29 @@ nv10_fifo_create_context(struct drm_device *dev, int channel) 0); /* enable the fifo dma operation */ - NV_WRITE(NV04_PFIFO_MODE,NV_READ(NV04_PFIFO_MODE)|(1<id)); return 0; } void -nv10_fifo_destroy_context(struct drm_device *dev, int channel) +nv10_fifo_destroy_context(struct nouveau_channel *chan) { + struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = dev_priv->fifos[channel]; - NV_WRITE(NV04_PFIFO_MODE, NV_READ(NV04_PFIFO_MODE)&~(1<id)); - if (chan->ramfc) - nouveau_gpuobj_ref_del(dev, &chan->ramfc); + nouveau_gpuobj_ref_del(dev, &chan->ramfc); } int -nv10_fifo_load_context(struct drm_device *dev, int channel) +nv10_fifo_load_context(struct nouveau_channel *chan) { + struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = dev_priv->fifos[channel]; uint32_t tmp; - NV_WRITE(NV03_PFIFO_CACHE1_PUSH1 , 0x00000100 | channel); + NV_WRITE(NV03_PFIFO_CACHE1_PUSH1 , 0x00000100 | chan->id); NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET , RAMFC_RD(DMA_GET)); NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT , RAMFC_RD(DMA_PUT)); @@ -124,10 +123,10 @@ nv10_fifo_load_context(struct drm_device *dev, int channel) } int -nv10_fifo_save_context(struct drm_device *dev, int channel) +nv10_fifo_save_context(struct nouveau_channel *chan) { + struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = dev_priv->fifos[channel]; uint32_t tmp; RAMFC_WR(DMA_PUT , 
NV_READ(NV04_PFIFO_CACHE1_DMA_PUT)); diff --git a/shared-core/nv10_graph.c b/shared-core/nv10_graph.c index ce1cbfa7..a2df2d71 100644 --- a/shared-core/nv10_graph.c +++ b/shared-core/nv10_graph.c @@ -544,33 +544,33 @@ static int nv10_graph_ctx_regs_find_offset(struct drm_device *dev, int reg) return -1; } -int nv10_graph_load_context(struct drm_device *dev, int channel) +int nv10_graph_load_context(struct nouveau_channel *chan) { + struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_fifo *fifo = dev_priv->fifos[channel]; int i, j; for (i = 0; i < sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0]); i++) - NV_WRITE(nv10_graph_ctx_regs[i], fifo->pgraph_ctx[i]); + NV_WRITE(nv10_graph_ctx_regs[i], chan->pgraph_ctx[i]); if (dev_priv->chipset>=0x17) { for (j = 0; j < sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0]); i++,j++) - NV_WRITE(nv17_graph_ctx_regs[j], fifo->pgraph_ctx[i]); + NV_WRITE(nv17_graph_ctx_regs[j], chan->pgraph_ctx[i]); } return 0; } -int nv10_graph_save_context(struct drm_device *dev, int channel) +int nv10_graph_save_context(struct nouveau_channel *chan) { + struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_fifo *fifo = dev_priv->fifos[channel]; int i, j; for (i = 0; i < sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0]); i++) - fifo->pgraph_ctx[i] = NV_READ(nv10_graph_ctx_regs[i]); + chan->pgraph_ctx[i] = NV_READ(nv10_graph_ctx_regs[i]); if (dev_priv->chipset>=0x17) { for (j = 0; j < sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0]); i++,j++) - fifo->pgraph_ctx[i] = NV_READ(nv17_graph_ctx_regs[j]); + chan->pgraph_ctx[i] = NV_READ(nv17_graph_ctx_regs[j]); } return 0; @@ -579,12 +579,17 @@ int nv10_graph_save_context(struct drm_device *dev, int channel) void nouveau_nv10_context_switch(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - int channel, channel_old; + struct nouveau_channel *next, *last; + int chid; - channel=NV_READ(NV03_PFIFO_CACHE1_PUSH1)&(nouveau_fifo_number(dev)-1); - channel_old = (NV_READ(NV10_PGRAPH_CTX_USER) >> 24) & (nouveau_fifo_number(dev)-1); + chid = NV_READ(NV03_PFIFO_CACHE1_PUSH1)&(nouveau_fifo_number(dev)-1); + next = dev_priv->fifos[chid]; - DRM_INFO("NV: PGRAPH context switch interrupt channel %x -> %x\n",channel_old, channel); + chid = (NV_READ(NV10_PGRAPH_CTX_USER) >> 24) & (nouveau_fifo_number(dev)-1); + last = dev_priv->fifos[chid]; + + DRM_INFO("NV: PGRAPH context switch interrupt channel %x -> %x\n", + last->id, next->id); NV_WRITE(NV04_PGRAPH_FIFO,0x0); #if 0 @@ -592,7 +597,7 @@ void nouveau_nv10_context_switch(struct drm_device *dev) NV_WRITE(NV_PFIFO_CACH1_PUL1, 0x00000000); NV_WRITE(NV_PFIFO_CACHES, 0x00000000); #endif - nv10_graph_save_context(dev, channel_old); + nv10_graph_save_context(last); nouveau_wait_for_idle(dev); @@ -601,10 +606,10 @@ void nouveau_nv10_context_switch(struct drm_device *dev) nouveau_wait_for_idle(dev); - nv10_graph_load_context(dev, channel); + nv10_graph_load_context(next); NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10010100); - NV_WRITE(NV10_PGRAPH_CTX_USER, channel << 24); + NV_WRITE(NV10_PGRAPH_CTX_USER, next->id << 24); NV_WRITE(NV10_PGRAPH_FFINTFC_ST2, NV_READ(NV10_PGRAPH_FFINTFC_ST2)&0xCFFFFFFF); #if 0 NV_WRITE(NV_PFIFO_CACH1_PUL0, 0x00000001); @@ -617,17 +622,17 @@ void nouveau_nv10_context_switch(struct drm_device *dev) #define NV_WRITE_CTX(reg, val) do { \ int offset = nv10_graph_ctx_regs_find_offset(dev, reg); \ if (offset > 
0) \ - fifo->pgraph_ctx[offset] = val; \ + chan->pgraph_ctx[offset] = val; \ } while (0) -int nv10_graph_create_context(struct drm_device *dev, int channel) { +int nv10_graph_create_context(struct nouveau_channel *chan) { + struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_fifo *fifo = dev_priv->fifos[channel]; uint32_t tmp, vramsz; - DRM_DEBUG("nv10_graph_context_create %d\n", channel); + DRM_DEBUG("nv10_graph_context_create %d\n", chan->id); - memset(fifo->pgraph_ctx, 0, sizeof(fifo->pgraph_ctx)); + memset(chan->pgraph_ctx, 0, sizeof(chan->pgraph_ctx)); /* per channel init from ddx */ tmp = NV_READ(NV10_PGRAPH_SURFACE) & 0x0007ff00; @@ -663,7 +668,7 @@ int nv10_graph_create_context(struct drm_device *dev, int channel) { /* for the first channel init the regs */ if (dev_priv->fifo_alloc_count == 0) - nv10_graph_load_context(dev, channel); + nv10_graph_load_context(chan); //XXX should be saved/restored for each fifo @@ -672,7 +677,7 @@ int nv10_graph_create_context(struct drm_device *dev, int channel) { return 0; } -void nv10_graph_destroy_context(struct drm_device *dev, int channel) +void nv10_graph_destroy_context(struct nouveau_channel *chan) { } diff --git a/shared-core/nv20_graph.c b/shared-core/nv20_graph.c index 1670c527..d397390f 100644 --- a/shared-core/nv20_graph.c +++ b/shared-core/nv20_graph.c @@ -29,39 +29,36 @@ #define NV20_GRCTX_SIZE (3529*4) -int nv20_graph_create_context(struct drm_device *dev, int channel) { - struct drm_nouveau_private *dev_priv = - (struct drm_nouveau_private *)dev->dev_private; - struct nouveau_fifo *chan = dev_priv->fifos[channel]; +int nv20_graph_create_context(struct nouveau_channel *chan) { + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; unsigned int ctx_size = NV20_GRCTX_SIZE; int ret; - if ((ret = nouveau_gpuobj_new_ref(dev, channel, -1, 0, ctx_size, 16, + if ((ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, ctx_size, 16, NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin_grctx))) return ret; /* Initialise default context values */ - INSTANCE_WR(chan->ramin_grctx->gpuobj, 10, channel<<24); /* CTX_USER */ + INSTANCE_WR(chan->ramin_grctx->gpuobj, 10, chan->id<<24); /* CTX_USER */ - INSTANCE_WR(dev_priv->ctx_table->gpuobj, channel, + INSTANCE_WR(dev_priv->ctx_table->gpuobj, chan->id, chan->ramin_grctx->instance >> 4); return 0; } -void nv20_graph_destroy_context(struct drm_device *dev, int channel) { +void nv20_graph_destroy_context(struct nouveau_channel *chan) { + struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = dev_priv->fifos[channel]; - if (chan->ramin_grctx) - nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx); + nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx); - INSTANCE_WR(dev_priv->ctx_table->gpuobj, channel, 0); + INSTANCE_WR(dev_priv->ctx_table->gpuobj, chan->id, 0); } static void nv20_graph_rdi(struct drm_device *dev) { - struct drm_nouveau_private *dev_priv = - (struct drm_nouveau_private *)dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; int i; NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x2c80000); @@ -73,13 +70,12 @@ static void nv20_graph_rdi(struct drm_device *dev) { /* Save current context (from PGRAPH) into the channel's context */ -int nv20_graph_save_context(struct drm_device *dev, int channel) { - struct drm_nouveau_private *dev_priv = - (struct drm_nouveau_private *)dev->dev_private; - struct nouveau_fifo *chan = dev_priv->fifos[channel]; +int 
nv20_graph_save_context(struct nouveau_channel *chan) { + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; uint32_t instance; - instance = INSTANCE_RD(dev_priv->ctx_table->gpuobj, channel); + instance = INSTANCE_RD(dev_priv->ctx_table->gpuobj, chan->id); if (!instance) { return -EINVAL; } @@ -94,20 +90,19 @@ int nv20_graph_save_context(struct drm_device *dev, int channel) { /* Restore the context for a specific channel into PGRAPH */ -int nv20_graph_load_context(struct drm_device *dev, int channel) { - struct drm_nouveau_private *dev_priv = - (struct drm_nouveau_private *)dev->dev_private; - struct nouveau_fifo *chan = dev_priv->fifos[channel]; +int nv20_graph_load_context(struct nouveau_channel *chan) { + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; uint32_t instance; - instance = INSTANCE_RD(dev_priv->ctx_table->gpuobj, channel); + instance = INSTANCE_RD(dev_priv->ctx_table->gpuobj, chan->id); if (!instance) { return -EINVAL; } if (instance != (chan->ramin_grctx->instance >> 4)) DRM_ERROR("nv20_graph_load_context_current : bad instance\n"); - NV_WRITE(NV10_PGRAPH_CTX_USER, channel << 24); + NV_WRITE(NV10_PGRAPH_CTX_USER, chan->id << 24); NV_WRITE(NV10_PGRAPH_CHANNEL_CTX_SIZE, instance); NV_WRITE(NV10_PGRAPH_CHANNEL_CTX_POINTER, 1 /* restore ctx */); return 0; @@ -116,27 +111,32 @@ int nv20_graph_load_context(struct drm_device *dev, int channel) { void nouveau_nv20_context_switch(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - int channel, channel_old; + struct nouveau_channel *next, *last; + int chid; - channel=NV_READ(NV03_PFIFO_CACHE1_PUSH1)&(nouveau_fifo_number(dev)-1); - channel_old = (NV_READ(NV10_PGRAPH_CTX_USER) >> 24) & (nouveau_fifo_number(dev)-1); + chid = NV_READ(NV03_PFIFO_CACHE1_PUSH1)&(nouveau_fifo_number(dev)-1); + next = dev_priv->fifos[chid]; - DRM_DEBUG("NV: PGRAPH context switch interrupt channel %x -> %x\n",channel_old, channel); + chid = (NV_READ(NV10_PGRAPH_CTX_USER) >> 24) & (nouveau_fifo_number(dev)-1); + last = dev_priv->fifos[chid]; + + DRM_DEBUG("NV: PGRAPH context switch interrupt channel %x -> %x\n", + last->id, next->id); NV_WRITE(NV04_PGRAPH_FIFO,0x0); - nv20_graph_save_context(dev, channel_old); + nv20_graph_save_context(last); nouveau_wait_for_idle(dev); NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10000000); - nv20_graph_load_context(dev, channel); + nv20_graph_load_context(next); nouveau_wait_for_idle(dev); - if ((NV_READ(NV10_PGRAPH_CTX_USER) >> 24) != channel) - DRM_ERROR("nouveau_nv20_context_switch : wrong channel restored %x %x!!!\n", channel, NV_READ(NV10_PGRAPH_CTX_USER) >> 24); + if ((NV_READ(NV10_PGRAPH_CTX_USER) >> 24) != next->id) + DRM_ERROR("nouveau_nv20_context_switch : wrong channel restored %x %x!!!\n", next->id, NV_READ(NV10_PGRAPH_CTX_USER) >> 24); NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10010100); NV_WRITE(NV10_PGRAPH_FFINTFC_ST2, NV_READ(NV10_PGRAPH_FFINTFC_ST2)&0xCFFFFFFF); @@ -157,7 +157,7 @@ int nv20_graph_init(struct drm_device *dev) { /* Create Context Pointer Table */ dev_priv->ctx_table_size = 32 * 4; - if ((ret = nouveau_gpuobj_new_ref(dev, -1, -1, 0, + if ((ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, dev_priv->ctx_table_size, 16, NVOBJ_FLAG_ZERO_ALLOC, &dev_priv->ctx_table))) diff --git a/shared-core/nv30_graph.c b/shared-core/nv30_graph.c index 4ed2e2ba..c605c84e 100644 --- a/shared-core/nv30_graph.c +++ b/shared-core/nv30_graph.c @@ -100,11 +100,10 @@ static void nv30_graph_context_init(struct 
drm_device *dev, struct nouveau_gpuob } -int nv30_graph_create_context(struct drm_device *dev, int channel) +int nv30_graph_create_context(struct nouveau_channel *chan) { - struct drm_nouveau_private *dev_priv = - (struct drm_nouveau_private *)dev->dev_private; - struct nouveau_fifo *chan = dev_priv->fifos[channel]; + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; void (*ctx_init)(struct drm_device *, struct nouveau_gpuobj *); unsigned int ctx_size; int ret; @@ -116,7 +115,7 @@ int nv30_graph_create_context(struct drm_device *dev, int channel) break; } - if ((ret = nouveau_gpuobj_new_ref(dev, channel, -1, 0, ctx_size, 16, + if ((ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, ctx_size, 16, NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin_grctx))) return ret; @@ -124,23 +123,22 @@ int nv30_graph_create_context(struct drm_device *dev, int channel) /* Initialise default context values */ ctx_init(dev, chan->ramin_grctx->gpuobj); - INSTANCE_WR(chan->ramin_grctx->gpuobj, 10, channel<<24); /* CTX_USER */ - INSTANCE_WR(dev_priv->ctx_table->gpuobj, channel, + INSTANCE_WR(chan->ramin_grctx->gpuobj, 10, chan->id<<24); /* CTX_USER */ + INSTANCE_WR(dev_priv->ctx_table->gpuobj, chan->id, chan->ramin_grctx->instance >> 4); return 0; } -void nv30_graph_destroy_context(struct drm_device *dev, int channel) +void nv30_graph_destroy_context(struct nouveau_channel *chan) { - struct drm_nouveau_private *dev_priv = - (struct drm_nouveau_private *)dev->dev_private; - struct nouveau_fifo *chan = dev_priv->fifos[channel]; + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; if (chan->ramin_grctx) nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx); - INSTANCE_WR(dev_priv->ctx_table->gpuobj, channel, 0); + INSTANCE_WR(dev_priv->ctx_table->gpuobj, chan->id, 0); } static int @@ -161,10 +159,10 @@ nouveau_graph_wait_idle(struct drm_device *dev) return 0; } -int nv30_graph_load_context(struct drm_device *dev, int channel) +int nv30_graph_load_context(struct nouveau_channel *chan) { + struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = dev_priv->fifos[channel]; uint32_t inst; if (!chan->ramin_grctx) @@ -178,10 +176,10 @@ int nv30_graph_load_context(struct drm_device *dev, int channel) return nouveau_graph_wait_idle(dev); } -int nv30_graph_save_context(struct drm_device *dev, int channel) +int nv30_graph_save_context(struct nouveau_channel *chan) { + struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = dev_priv->fifos[channel]; uint32_t inst; if (!chan->ramin_grctx) @@ -197,8 +195,7 @@ int nv30_graph_save_context(struct drm_device *dev, int channel) int nv30_graph_init(struct drm_device *dev) { - struct drm_nouveau_private *dev_priv = - (struct drm_nouveau_private *)dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; uint32_t vramsz, tmp; int ret, i; @@ -209,7 +206,7 @@ int nv30_graph_init(struct drm_device *dev) /* Create Context Pointer Table */ dev_priv->ctx_table_size = 32 * 4; - if ((ret = nouveau_gpuobj_new_ref(dev, -1, -1, 0, + if ((ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, dev_priv->ctx_table_size, 16, NVOBJ_FLAG_ZERO_ALLOC, &dev_priv->ctx_table))) diff --git a/shared-core/nv40_fifo.c b/shared-core/nv40_fifo.c index ecb1d21e..f04c2882 100644 --- a/shared-core/nv40_fifo.c +++ b/shared-core/nv40_fifo.c @@ -37,13 +37,13 @@ #define NV40_RAMFC__SIZE 128 int 
-nv40_fifo_create_context(struct drm_device *dev, int channel) +nv40_fifo_create_context(struct nouveau_channel *chan) { + struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = dev_priv->fifos[channel]; int ret; - if ((ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(channel), + if ((ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(chan->id), NV40_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, @@ -68,27 +68,27 @@ nv40_fifo_create_context(struct drm_device *dev, int channel) RAMFC_WR(DMA_TIMESLICE , 0x0001FFFF); /* enable the fifo dma operation */ - NV_WRITE(NV04_PFIFO_MODE,NV_READ(NV04_PFIFO_MODE)|(1<id)); return 0; } void -nv40_fifo_destroy_context(struct drm_device *dev, int channel) +nv40_fifo_destroy_context(struct nouveau_channel *chan) { + struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = dev_priv->fifos[channel]; - NV_WRITE(NV04_PFIFO_MODE, NV_READ(NV04_PFIFO_MODE)&~(1<id)); if (chan->ramfc) nouveau_gpuobj_ref_del(dev, &chan->ramfc); } int -nv40_fifo_load_context(struct drm_device *dev, int channel) +nv40_fifo_load_context(struct nouveau_channel *chan) { + struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = dev_priv->fifos[channel]; uint32_t tmp, tmp2; NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET , RAMFC_RD(DMA_GET)); @@ -135,7 +135,7 @@ nv40_fifo_load_context(struct drm_device *dev, int channel) NV_WRITE(NV04_PFIFO_DMA_TIMESLICE, tmp); /* Set channel active, and in DMA mode */ - NV_WRITE(NV03_PFIFO_CACHE1_PUSH1 , 0x00010000 | channel); + NV_WRITE(NV03_PFIFO_CACHE1_PUSH1 , 0x00010000 | chan->id); /* Reset DMA_CTL_AT_INFO to INVALID */ tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_CTL) & ~(1<<31); NV_WRITE(NV04_PFIFO_CACHE1_DMA_CTL, tmp); @@ -144,10 +144,10 @@ nv40_fifo_load_context(struct drm_device *dev, int channel) } int -nv40_fifo_save_context(struct drm_device *dev, int channel) +nv40_fifo_save_context(struct nouveau_channel *chan) { + struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = dev_priv->fifos[channel]; uint32_t tmp; RAMFC_WR(DMA_PUT , NV_READ(NV04_PFIFO_CACHE1_DMA_PUT)); diff --git a/shared-core/nv40_graph.c b/shared-core/nv40_graph.c index 441dbae7..c79b63cc 100644 --- a/shared-core/nv40_graph.c +++ b/shared-core/nv40_graph.c @@ -1224,11 +1224,10 @@ nv4e_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) } int -nv40_graph_create_context(struct drm_device *dev, int channel) +nv40_graph_create_context(struct nouveau_channel *chan) { - struct drm_nouveau_private *dev_priv = - (struct drm_nouveau_private *)dev->dev_private; - struct nouveau_fifo *chan = dev_priv->fifos[channel]; + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; void (*ctx_init)(struct drm_device *, struct nouveau_gpuobj *); unsigned int ctx_size; int ret; @@ -1272,7 +1271,7 @@ nv40_graph_create_context(struct drm_device *dev, int channel) break; } - if ((ret = nouveau_gpuobj_new_ref(dev, channel, -1, 0, ctx_size, 16, + if ((ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, ctx_size, 16, NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin_grctx))) return ret; @@ -1284,13 +1283,9 @@ nv40_graph_create_context(struct drm_device *dev, int channel) } void -nv40_graph_destroy_context(struct drm_device *dev, int channel) +nv40_graph_destroy_context(struct nouveau_channel *chan) { - struct 
drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = dev_priv->fifos[channel]; - - if (chan->ramin_grctx) - nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx); + nouveau_gpuobj_ref_del(chan->dev, &chan->ramin_grctx); } static int @@ -1327,10 +1322,9 @@ nv40_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save) *XXX: fails sometimes, not sure why.. */ int -nv40_graph_save_context(struct drm_device *dev, int channel) +nv40_graph_save_context(struct nouveau_channel *chan) { - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = dev_priv->fifos[channel]; + struct drm_device *dev = chan->dev; uint32_t inst; if (!chan->ramin_grctx) @@ -1344,10 +1338,10 @@ nv40_graph_save_context(struct drm_device *dev, int channel) * XXX: fails sometimes.. not sure why */ int -nv40_graph_load_context(struct drm_device *dev, int channel) +nv40_graph_load_context(struct nouveau_channel *chan) { + struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = dev_priv->fifos[channel]; uint32_t inst; int ret; diff --git a/shared-core/nv50_fifo.c b/shared-core/nv50_fifo.c index f7b98220..a5e79260 100644 --- a/shared-core/nv50_fifo.c +++ b/shared-core/nv50_fifo.c @@ -63,7 +63,7 @@ static int nv50_fifo_channel_enable(struct drm_device *dev, int channel) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = dev_priv->fifos[channel]; + struct nouveau_channel *chan = dev_priv->fifos[channel]; DRM_DEBUG("ch%d\n", channel); @@ -150,7 +150,7 @@ nv50_fifo_init_regs(struct drm_device *dev) DRM_DEBUG("\n"); - if ((ret = nouveau_gpuobj_new_ref(dev, -1, -1, 0, 0x1000, + if ((ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 0x1000, 0x1000, NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, @@ -191,7 +191,7 @@ nv50_fifo_init(struct drm_device *dev) nv50_fifo_init_reset(dev); - if ((ret = nouveau_gpuobj_new_ref(dev, -1, -1, 0, (128+2)*4, 0x1000, + if ((ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, (128+2)*4, 0x1000, NVOBJ_FLAG_ZERO_ALLOC, &priv->thingo))) { DRM_ERROR("error creating thingo: %d\n", ret); @@ -225,14 +225,14 @@ nv50_fifo_takedown(struct drm_device *dev) } int -nv50_fifo_create_context(struct drm_device *dev, int channel) +nv50_fifo_create_context(struct nouveau_channel *chan) { + struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = dev_priv->fifos[channel]; struct nouveau_gpuobj *ramfc = NULL; int ret; - DRM_DEBUG("ch%d\n", channel); + DRM_DEBUG("ch%d\n", chan->id); if (IS_G80) { uint32_t ramfc_offset = chan->ramin->gpuobj->im_pramin->start; @@ -242,7 +242,7 @@ nv50_fifo_create_context(struct drm_device *dev, int channel) &ramfc, &chan->ramfc))) return ret; } else { - if ((ret = nouveau_gpuobj_new_ref(dev, channel, -1, 0, 0x100, + if ((ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 0x100, 256, NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, @@ -266,15 +266,15 @@ nv50_fifo_create_context(struct drm_device *dev, int channel) INSTANCE_WR(ramfc, 0x4c/4, chan->pushbuf_mem->size - 1); if (!IS_G80) { - INSTANCE_WR(chan->ramin->gpuobj, 0, channel); + INSTANCE_WR(chan->ramin->gpuobj, 0, chan->id); INSTANCE_WR(chan->ramin->gpuobj, 1, chan->ramfc->instance); INSTANCE_WR(ramfc, 0x88/4, 0x3d520); /* some vram addy >> 10 */ INSTANCE_WR(ramfc, 0x98/4, chan->ramin->instance >> 12); } - if ((ret = nv50_fifo_channel_enable(dev, channel))) { - DRM_ERROR("error enabling ch%d: %d\n", channel, ret); + if 
((ret = nv50_fifo_channel_enable(dev, chan->id))) { + DRM_ERROR("error enabling ch%d: %d\n", chan->id, ret); nouveau_gpuobj_ref_del(dev, &chan->ramfc); return ret; } @@ -283,25 +283,24 @@ nv50_fifo_create_context(struct drm_device *dev, int channel) } void -nv50_fifo_destroy_context(struct drm_device *dev, int channel) +nv50_fifo_destroy_context(struct nouveau_channel *chan) { - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = dev_priv->fifos[channel]; + struct drm_device *dev = chan->dev; - DRM_DEBUG("ch%d\n", channel); + DRM_DEBUG("ch%d\n", chan->id); - nv50_fifo_channel_disable(dev, channel, 0); + nv50_fifo_channel_disable(dev, chan->id, 0); nouveau_gpuobj_ref_del(dev, &chan->ramfc); } int -nv50_fifo_load_context(struct drm_device *dev, int channel) +nv50_fifo_load_context(struct nouveau_channel *chan) { + struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = dev_priv->fifos[channel]; struct nouveau_gpuobj *ramfc = chan->ramfc->gpuobj; - DRM_DEBUG("ch%d\n", channel); + DRM_DEBUG("ch%d\n", chan->id); /*XXX: incomplete, only touches the regs that NV does */ @@ -319,14 +318,14 @@ nv50_fifo_load_context(struct drm_device *dev, int channel) NV_WRITE(0x3410, INSTANCE_RD(ramfc, 0x98/4)); } - NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, channel | (1<<16)); + NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16)); return 0; } int -nv50_fifo_save_context(struct drm_device *dev, int channel) +nv50_fifo_save_context(struct nouveau_channel *chan) { - DRM_DEBUG("ch%d\n", channel); + DRM_DEBUG("ch%d\n", chan->id); DRM_ERROR("stub!\n"); return 0; } diff --git a/shared-core/nv50_graph.c b/shared-core/nv50_graph.c index 8df5df25..59c8cfeb 100644 --- a/shared-core/nv50_graph.c +++ b/shared-core/nv50_graph.c @@ -188,17 +188,17 @@ nv50_graph_takedown(struct drm_device *dev) } int -nv50_graph_create_context(struct drm_device *dev, int channel) +nv50_graph_create_context(struct nouveau_channel *chan) { + struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = dev_priv->fifos[channel]; struct nouveau_gpuobj *ramin = chan->ramin->gpuobj; int grctx_size = 0x60000, hdr; int ret; - DRM_DEBUG("ch%d\n", channel); + DRM_DEBUG("ch%d\n", chan->id); - if ((ret = nouveau_gpuobj_new_ref(dev, channel, -1, 0, + if ((ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, grctx_size, 0x1000, NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, @@ -218,13 +218,13 @@ nv50_graph_create_context(struct drm_device *dev, int channel) } void -nv50_graph_destroy_context(struct drm_device *dev, int channel) +nv50_graph_destroy_context(struct nouveau_channel *chan) { + struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = dev_priv->fifos[channel]; int i, hdr; - DRM_DEBUG("ch%d\n", channel); + DRM_DEBUG("ch%d\n", chan->id); hdr = IS_G80 ? 
0x200 : 0x20; for (i=hdr; idev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = dev_priv->fifos[channel]; uint32_t inst = ((chan->ramin->instance >> 12) | (1<<31)); int ret; (void)ret; - DRM_DEBUG("ch%d\n", channel); + DRM_DEBUG("ch%d\n", chan->id); #if 0 if ((ret = nv50_graph_transfer_context(dev, inst, 0))) @@ -288,13 +288,12 @@ nv50_graph_load_context(struct drm_device *dev, int channel) } int -nv50_graph_save_context(struct drm_device *dev, int channel) +nv50_graph_save_context(struct nouveau_channel *chan) { - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = dev_priv->fifos[channel]; + struct drm_device *dev = chan->dev; uint32_t inst = ((chan->ramin->instance >> 12) | (1<<31)); - DRM_DEBUG("ch%d\n", channel); + DRM_DEBUG("ch%d\n", chan->id); return nv50_graph_transfer_context(dev, inst, 1); } From 97770db72040dc032130413e0cdabc1777560a75 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 6 Aug 2007 21:45:18 +1000 Subject: [PATCH 209/437] nouveau: Various internal and external API changes 1. DRM_NOUVEAU_GPUOBJ_FREE Used to free GPU objects. The obvious usage case is for Gr objects, but notifiers can also be destroyed in the same way. GPU objects gain a destructor method and private data fields with this change, so other specialised cases (like notifiers) can be implemented on top of gpuobjs. 2. DRM_NOUVEAU_CHANNEL_FREE 3. DRM_NOUVEAU_CARD_INIT Ideally we'd do init during module load, but this isn't currently possible. Doing init during firstopen() is bad as X has a love of opening/closing the DRM many times during startup. Once the modesetting-101 branch is merged this can go away. IRQs are enabled in nouveau_card_init() now, rather than having the X server call drmCtlInstHandler(). We'll need this for when we give the kernel module its own channel. 4. DRM_NOUVEAU_GETPARAM Add CHIPSET_ID value, which will return the chipset id derived from NV_PMC_BOOT_0. 4. Use list_* in a few places, rather than home-brewed stuff. --- linux-core/drmP.h | 1 + linux-core/drm_irq.c | 3 +- shared-core/nouveau_drm.h | 33 +++++++---- shared-core/nouveau_drv.h | 30 ++++++++-- shared-core/nouveau_fifo.c | 51 +++++++++++------ shared-core/nouveau_mem.c | 8 +-- shared-core/nouveau_notifier.c | 17 +++++- shared-core/nouveau_object.c | 101 ++++++++++++++++++++++++--------- shared-core/nouveau_state.c | 45 +++++++++------ shared-core/nv04_instmem.c | 10 ---- 10 files changed, 205 insertions(+), 94 deletions(-) diff --git a/linux-core/drmP.h b/linux-core/drmP.h index a61efcff..aa562225 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -1075,6 +1075,7 @@ extern void drm_core_reclaim_buffers(struct drm_device *dev, extern int drm_control(struct drm_device *dev, void *data, struct drm_file *file_priv); extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS); +extern int drm_irq_install(struct drm_device *dev); extern int drm_irq_uninstall(struct drm_device *dev); extern void drm_driver_irq_preinstall(struct drm_device *dev); extern void drm_driver_irq_postinstall(struct drm_device *dev); diff --git a/linux-core/drm_irq.c b/linux-core/drm_irq.c index fe4316e0..25166b6f 100644 --- a/linux-core/drm_irq.c +++ b/linux-core/drm_irq.c @@ -80,7 +80,7 @@ int drm_irq_by_busid(struct drm_device *dev, void *data, * \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions * before and after the installation. 
*/ -static int drm_irq_install(struct drm_device * dev) +int drm_irq_install(struct drm_device * dev) { int ret; unsigned long sh_flags = 0; @@ -140,6 +140,7 @@ static int drm_irq_install(struct drm_device * dev) return 0; } +EXPORT_SYMBOL(drm_irq_install); /** * Uninstall the IRQ handler. diff --git a/shared-core/nouveau_drm.h b/shared-core/nouveau_drm.h index 4016f004..bfc9bd4b 100644 --- a/shared-core/nouveau_drm.h +++ b/shared-core/nouveau_drm.h @@ -25,9 +25,9 @@ #ifndef __NOUVEAU_DRM_H__ #define __NOUVEAU_DRM_H__ -#define NOUVEAU_DRM_HEADER_PATCHLEVEL 9 +#define NOUVEAU_DRM_HEADER_PATCHLEVEL 10 -struct drm_nouveau_fifo_alloc { +struct drm_nouveau_channel_alloc { uint32_t fb_ctxdma_handle; uint32_t tt_ctxdma_handle; @@ -44,6 +44,10 @@ struct drm_nouveau_fifo_alloc { int notifier_size; }; +struct drm_nouveau_channel_free { + int channel; +}; + struct drm_nouveau_grobj_alloc { int channel; uint32_t handle; @@ -53,7 +57,7 @@ struct drm_nouveau_grobj_alloc { #define NOUVEAU_MEM_ACCESS_RO 1 #define NOUVEAU_MEM_ACCESS_WO 2 #define NOUVEAU_MEM_ACCESS_RW 3 -struct drm_nouveau_notifier_alloc { +struct drm_nouveau_notifierobj_alloc { int channel; uint32_t handle; int count; @@ -61,6 +65,11 @@ struct drm_nouveau_notifier_alloc { uint32_t offset; }; +struct drm_nouveau_gpuobj_free { + int channel; + uint32_t handle; +}; + #define NOUVEAU_MEM_FB 0x00000001 #define NOUVEAU_MEM_AGP 0x00000002 #define NOUVEAU_MEM_FB_ACCEPTABLE 0x00000004 @@ -95,6 +104,7 @@ struct drm_nouveau_mem_free { #define NOUVEAU_GETPARAM_FB_SIZE 8 #define NOUVEAU_GETPARAM_AGP_SIZE 9 #define NOUVEAU_GETPARAM_PCI_PHYSICAL 10 +#define NOUVEAU_GETPARAM_CHIPSET_ID 11 struct drm_nouveau_getparam { uint64_t param; uint64_t value; @@ -141,13 +151,16 @@ struct drm_nouveau_sarea { unsigned int nbox; }; -#define DRM_NOUVEAU_FIFO_ALLOC 0x00 -#define DRM_NOUVEAU_GROBJ_ALLOC 0x01 -#define DRM_NOUVEAU_NOTIFIER_ALLOC 0x02 -#define DRM_NOUVEAU_MEM_ALLOC 0x03 -#define DRM_NOUVEAU_MEM_FREE 0x04 -#define DRM_NOUVEAU_GETPARAM 0x05 -#define DRM_NOUVEAU_SETPARAM 0x06 +#define DRM_NOUVEAU_CARD_INIT 0x00 +#define DRM_NOUVEAU_GETPARAM 0x01 +#define DRM_NOUVEAU_SETPARAM 0x02 +#define DRM_NOUVEAU_CHANNEL_ALLOC 0x03 +#define DRM_NOUVEAU_CHANNEL_FREE 0x04 +#define DRM_NOUVEAU_GROBJ_ALLOC 0x05 +#define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC 0x06 +#define DRM_NOUVEAU_GPUOBJ_FREE 0x07 +#define DRM_NOUVEAU_MEM_ALLOC 0x08 +#define DRM_NOUVEAU_MEM_FREE 0x09 #endif /* __NOUVEAU_DRM_H__ */ diff --git a/shared-core/nouveau_drv.h b/shared-core/nouveau_drv.h index 8ec91898..0b173b76 100644 --- a/shared-core/nouveau_drv.h +++ b/shared-core/nouveau_drv.h @@ -34,7 +34,7 @@ #define DRIVER_MAJOR 0 #define DRIVER_MINOR 0 -#define DRIVER_PATCHLEVEL 9 +#define DRIVER_PATCHLEVEL 10 #define NOUVEAU_FAMILY 0x0000FFFF #define NOUVEAU_FLAGS 0xFFFF0000 @@ -67,8 +67,7 @@ enum nouveau_flags { #define NVOBJ_FLAG_ZERO_FREE (1 << 2) #define NVOBJ_FLAG_FAKE (1 << 3) struct nouveau_gpuobj { - struct nouveau_gpuobj *next; - struct nouveau_gpuobj *prev; + struct list_head list; int im_channel; struct mem_block *im_pramin; @@ -80,10 +79,13 @@ struct nouveau_gpuobj { uint32_t engine; uint32_t class; + + void (*dtor)(struct drm_device *, struct nouveau_gpuobj *); + void *priv; }; struct nouveau_gpuobj_ref { - struct nouveau_gpuobj_ref *next; + struct list_head list; struct nouveau_gpuobj *gpuobj; uint32_t instance; @@ -129,7 +131,7 @@ struct nouveau_channel struct nouveau_gpuobj_ref *ramin; /* Private instmem */ struct mem_block *ramin_heap; /* Private PRAMIN heap */ struct nouveau_gpuobj_ref *ramht; /* 
Hash table */ - struct nouveau_gpuobj_ref *ramht_refs; /* Objects referenced by RAMHT */ + struct list_head ramht_refs; /* Objects referenced by RAMHT */ }; struct nouveau_config { @@ -269,9 +271,17 @@ struct drm_nouveau_private { struct nouveau_config config; - struct nouveau_gpuobj *gpuobj_all; + struct list_head gpuobj_list; }; +#define NOUVEAU_CHECK_INITIALISED_WITH_RETURN do { \ + struct drm_nouveau_private *nv = dev->dev_private; \ + if (nv->init_state != NOUVEAU_CARD_INIT_DONE) { \ + DRM_ERROR("called without init\n"); \ + return -EINVAL; \ + } \ +} while(0) + #define NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(id,cl,ch) do { \ struct drm_nouveau_private *nv = dev->dev_private; \ if (!nouveau_fifo_owner(dev, (cl), (id))) { \ @@ -293,6 +303,7 @@ extern int nouveau_ioctl_getparam(struct drm_device *, void *data, extern int nouveau_ioctl_setparam(struct drm_device *, void *data, struct drm_file *); extern void nouveau_wait_for_idle(struct drm_device *); +extern int nouveau_card_init(struct drm_device *); extern int nouveau_ioctl_card_init(struct drm_device *, void *data, struct drm_file *); @@ -324,6 +335,8 @@ extern int nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle, int cout, uint32_t *offset); extern int nouveau_ioctl_notifier_alloc(struct drm_device *, void *data, struct drm_file *); +extern int nouveau_ioctl_notifier_free(struct drm_device *, void *data, + struct drm_file *); /* nouveau_fifo.c */ extern int nouveau_fifo_init(struct drm_device *); @@ -335,6 +348,7 @@ extern int nouveau_fifo_owner(struct drm_device *, struct drm_file *, extern void nouveau_fifo_free(struct nouveau_channel *); /* nouveau_object.c */ +extern int nouveau_gpuobj_init(struct drm_device *); extern void nouveau_gpuobj_takedown(struct drm_device *); extern int nouveau_gpuobj_channel_init(struct nouveau_channel *, uint32_t vram_h, uint32_t tt_h); @@ -348,6 +362,8 @@ extern int nouveau_gpuobj_ref_add(struct drm_device *, struct nouveau_channel *, struct nouveau_gpuobj_ref **); extern int nouveau_gpuobj_ref_del(struct drm_device *, struct nouveau_gpuobj_ref **); +extern int nouveau_gpuobj_ref_find(struct nouveau_channel *, uint32_t handle, + struct nouveau_gpuobj_ref **ref_ret); extern int nouveau_gpuobj_new_ref(struct drm_device *, struct nouveau_channel *alloc_chan, struct nouveau_channel *ref_chan, @@ -368,6 +384,8 @@ extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, int class, struct nouveau_gpuobj **); extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data, struct drm_file *); +extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data, + struct drm_file *); /* nouveau_irq.c */ extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS); diff --git a/shared-core/nouveau_fifo.c b/shared-core/nouveau_fifo.c index c7ce1d8d..152b669a 100644 --- a/shared-core/nouveau_fifo.c +++ b/shared-core/nouveau_fifo.c @@ -302,6 +302,13 @@ int nouveau_fifo_alloc(struct drm_device *dev, int *chan_ret, DRM_INFO("Allocating FIFO number %d\n", channel); + /* Allocate space for per-channel fixed notifier memory */ + ret = nouveau_notifier_init_channel(chan); + if (ret) { + nouveau_fifo_free(chan); + return ret; + } + /* Setup channel's default objects */ ret = nouveau_gpuobj_channel_init(chan, vram_handle, tt_handle); if (ret) { @@ -316,13 +323,6 @@ int nouveau_fifo_alloc(struct drm_device *dev, int *chan_ret, return ret; } - /* Allocate space for per-channel fixed notifier memory */ - ret = nouveau_notifier_init_channel(chan); - if (ret) { - nouveau_fifo_free(chan); - return ret; - } - 
nouveau_wait_for_idle(dev); /* disable the fifo caches */ @@ -426,11 +426,11 @@ void nouveau_fifo_free(struct nouveau_channel *chan) chan->pushbuf_mem = NULL; } - nouveau_notifier_takedown_channel(chan); - /* Destroy objects belonging to the channel */ nouveau_gpuobj_channel_takedown(chan); + nouveau_notifier_takedown_channel(chan); + dev_priv->fifos[chan->id] = NULL; dev_priv->fifo_alloc_count--; drm_free(chan, sizeof(*chan), DRM_MEM_DRIVER); @@ -468,14 +468,17 @@ nouveau_fifo_owner(struct drm_device *dev, struct drm_file *file_priv, * ioctls wrapping the functions ***********************************/ -static int nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv) +static int nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data, + struct drm_file *file_priv) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct drm_nouveau_fifo_alloc *init = data; + struct drm_nouveau_channel_alloc *init = data; struct drm_map_list *entry; struct nouveau_channel *chan; int res; + NOUVEAU_CHECK_INITIALISED_WITH_RETURN; + if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0) return -EINVAL; @@ -519,18 +522,34 @@ static int nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data, struct d return 0; } +static int nouveau_ioctl_fifo_free(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_nouveau_channel_free *cfree = data; + struct nouveau_channel *chan; + + NOUVEAU_CHECK_INITIALISED_WITH_RETURN; + NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(cfree->channel, file_priv, chan); + + nouveau_fifo_free(chan); + return 0; +} + /*********************************** * finally, the ioctl table ***********************************/ struct drm_ioctl_desc nouveau_ioctls[] = { - DRM_IOCTL_DEF(DRM_NOUVEAU_FIFO_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH), - DRM_IOCTL_DEF(DRM_NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH), - DRM_IOCTL_DEF(DRM_NOUVEAU_NOTIFIER_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH), - DRM_IOCTL_DEF(DRM_NOUVEAU_MEM_ALLOC, nouveau_ioctl_mem_alloc, DRM_AUTH), - DRM_IOCTL_DEF(DRM_NOUVEAU_MEM_FREE, nouveau_ioctl_mem_free, DRM_AUTH), + DRM_IOCTL_DEF(DRM_NOUVEAU_CARD_INIT, nouveau_ioctl_card_init, DRM_AUTH), DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH), DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH), + DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH), + DRM_IOCTL_DEF(DRM_NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH), + DRM_IOCTL_DEF(DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH), + DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH), + DRM_IOCTL_DEF(DRM_NOUVEAU_MEM_ALLOC, nouveau_ioctl_mem_alloc, DRM_AUTH), + DRM_IOCTL_DEF(DRM_NOUVEAU_MEM_FREE, nouveau_ioctl_mem_free, DRM_AUTH), }; int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls); diff --git a/shared-core/nouveau_mem.c b/shared-core/nouveau_mem.c index a7044c94..981af8a6 100644 --- a/shared-core/nouveau_mem.c +++ b/shared-core/nouveau_mem.c @@ -549,14 +549,10 @@ void nouveau_mem_free(struct drm_device* dev, struct mem_block* block) int nouveau_ioctl_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv) { - struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_mem_alloc *alloc = data; struct mem_block *block; - if (!dev_priv) { - DRM_ERROR("%s called with no 
initialization\n", __FUNCTION__); - return -EINVAL; - } + NOUVEAU_CHECK_INITIALISED_WITH_RETURN; block=nouveau_mem_alloc(dev, alloc->alignment, alloc->size, alloc->flags, file_priv); @@ -575,6 +571,8 @@ int nouveau_ioctl_mem_free(struct drm_device *dev, void *data, struct drm_file * struct drm_nouveau_mem_free *memfree = data; struct mem_block *block; + NOUVEAU_CHECK_INITIALISED_WITH_RETURN; + block=NULL; if (memfree->flags & NOUVEAU_MEM_FB) block = find_block(dev_priv->fb_heap, memfree->offset); diff --git a/shared-core/nouveau_notifier.c b/shared-core/nouveau_notifier.c index b1090587..31547aae 100644 --- a/shared-core/nouveau_notifier.c +++ b/shared-core/nouveau_notifier.c @@ -73,6 +73,16 @@ nouveau_notifier_takedown_channel(struct nouveau_channel *chan) nouveau_mem_takedown(&chan->notifier_heap); } +static void +nouveau_notifier_gpuobj_dtor(struct drm_device *dev, + struct nouveau_gpuobj *gpuobj) +{ + DRM_DEBUG("\n"); + + if (gpuobj->priv) + nouveau_mem_free_block(gpuobj->priv); +} + int nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, int count, uint32_t *b_offset) @@ -90,7 +100,7 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, } mem = nouveau_mem_alloc_block(chan->notifier_heap, 32, 0, - chan->file_priv); + (struct drm_file *)-2); if (!mem) { DRM_ERROR("Channel %d notifier block full\n", chan->id); return -ENOMEM; @@ -117,6 +127,8 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, DRM_ERROR("Error creating notifier ctxdma: %d\n", ret); return ret; } + nobj->dtor = nouveau_notifier_gpuobj_dtor; + nobj->priv = mem; if ((ret = nouveau_gpuobj_ref_add(dev, chan, handle, nobj, NULL))) { nouveau_gpuobj_del(dev, &nobj); @@ -133,10 +145,11 @@ int nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv) { - struct drm_nouveau_notifier_alloc *na = data; + struct drm_nouveau_notifierobj_alloc *na = data; struct nouveau_channel *chan; int ret; + NOUVEAU_CHECK_INITIALISED_WITH_RETURN; NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(na->channel, file_priv, chan); ret = nouveau_notifier_alloc(chan, na->handle, na->count, &na->offset); diff --git a/shared-core/nouveau_object.c b/shared-core/nouveau_object.c index 274bb2a7..22ad23cd 100644 --- a/shared-core/nouveau_object.c +++ b/shared-core/nouveau_object.c @@ -131,6 +131,8 @@ nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) ref->channel, co, ref->handle, ctx); INSTANCE_WR(ramht, (co + 0)/4, ref->handle); INSTANCE_WR(ramht, (co + 4)/4, ctx); + + list_add_tail(&ref->list, &chan->ramht_refs); return 0; } DRM_DEBUG("collision ch%d 0x%08x: h=0x%08x\n", @@ -167,6 +169,8 @@ nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) INSTANCE_RD(ramht, (co + 4))); INSTANCE_WR(ramht, (co + 0)/4, 0x00000000); INSTANCE_WR(ramht, (co + 4)/4, 0x00000000); + + list_del(&ref->list); return; } @@ -203,6 +207,8 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, gpuobj->flags = flags; gpuobj->im_channel = chan ? chan->id : -1; + list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); + /* Choose between global instmem heap, and per-channel private * instmem heap. 
On gpuobj_all) { - gpuobj->next = dev_priv->gpuobj_all; - gpuobj->next->prev = gpuobj; - } - dev_priv->gpuobj_all = gpuobj; - *gpuobj_ret = gpuobj; return 0; } -void nouveau_gpuobj_takedown(struct drm_device *dev) +int +nouveau_gpuobj_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + int ret; + + INIT_LIST_HEAD(&dev_priv->gpuobj_list); + + if (dev_priv->card_type < NV_50) { + if ((ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramht_offset, + dev_priv->ramht_size, + NVOBJ_FLAG_ZERO_ALLOC | + NVOBJ_FLAG_ALLOW_NO_REFS, + &dev_priv->ramht, NULL))) + return ret; + } + + return 0; +} + +void +nouveau_gpuobj_takedown(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_gpuobj *gpuobj = NULL; + struct list_head *entry, *tmp; DRM_DEBUG("\n"); - while ((gpuobj = dev_priv->gpuobj_all)) { + nouveau_gpuobj_del(dev, &dev_priv->ramht); + + list_for_each_safe(entry, tmp, &dev_priv->gpuobj_list) { + gpuobj = list_entry(entry, struct nouveau_gpuobj, list); + DRM_ERROR("gpuobj %p still exists at takedown, refs=%d\n", gpuobj, gpuobj->refcount); gpuobj->refcount = 0; @@ -279,7 +305,8 @@ void nouveau_gpuobj_takedown(struct drm_device *dev) } } -int nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj) +int +nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_engine *engine = &dev_priv->Engine; @@ -296,6 +323,9 @@ int nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj) return -EINVAL; } + if (gpuobj->dtor) + gpuobj->dtor(dev, gpuobj); + engine->instmem.clear(dev, gpuobj); if (gpuobj->im_pramin) { @@ -306,12 +336,7 @@ int nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj) nouveau_mem_free_block(gpuobj->im_pramin); } - if (gpuobj->next) - gpuobj->next->prev = gpuobj->prev; - if (gpuobj->prev) - gpuobj->prev->next = gpuobj->next; - else - dev_priv->gpuobj_all = gpuobj->next; + list_del(&gpuobj->list); *pgpuobj = NULL; drm_free(gpuobj, sizeof(*gpuobj), DRM_MEM_DRIVER); @@ -403,9 +428,6 @@ nouveau_gpuobj_ref_add(struct drm_device *dev, struct nouveau_channel *chan, drm_free(ref, sizeof(*ref), DRM_MEM_DRIVER); return ret; } - - ref->next = chan->ramht_refs; - chan->ramht_refs = ref; } else { ref->handle = ~0; *ref_ret = ref; @@ -462,19 +484,21 @@ nouveau_gpuobj_new_ref(struct drm_device *dev, return 0; } -static int +int nouveau_gpuobj_ref_find(struct nouveau_channel *chan, uint32_t handle, struct nouveau_gpuobj_ref **ref_ret) { - struct nouveau_gpuobj_ref *ref = chan->ramht_refs; + struct nouveau_gpuobj_ref *ref; + struct list_head *entry, *tmp; + + list_for_each_safe(entry, tmp, &chan->ramht_refs) { + ref = list_entry(entry, struct nouveau_gpuobj_ref, list); - while (ref) { if (ref->handle == handle) { if (ref_ret) *ref_ret = ref; return 0; } - ref = ref->next; } return -EINVAL; @@ -499,6 +523,8 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t offset, uint32_t size, gpuobj->im_channel = -1; gpuobj->flags = flags | NVOBJ_FLAG_FAKE; + list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); + gpuobj->im_pramin = drm_calloc(1, sizeof(struct mem_block), DRM_MEM_DRIVER); if (!gpuobj->im_pramin) { @@ -897,6 +923,8 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_gpuobj *vram = NULL, *tt = NULL; int ret, i; + INIT_LIST_HEAD(&chan->ramht_refs); + DRM_DEBUG("ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h); /* Reserve a block of 
PRAMIN for the channel @@ -994,14 +1022,17 @@ void nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; + struct list_head *entry, *tmp; struct nouveau_gpuobj_ref *ref; DRM_DEBUG("ch%d\n", chan->id); - while ((ref = chan->ramht_refs)) { - chan->ramht_refs = ref->next; + list_for_each_safe(entry, tmp, &chan->ramht_refs) { + ref = list_entry(entry, struct nouveau_gpuobj_ref, list); + nouveau_gpuobj_ref_del(dev, &ref); } + nouveau_gpuobj_ref_del(dev, &chan->ramht); nouveau_gpuobj_del(dev, &chan->vm_pd); @@ -1022,6 +1053,7 @@ int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data, struct nouveau_gpuobj *gr = NULL; int ret; + NOUVEAU_CHECK_INITIALISED_WITH_RETURN; NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan); //FIXME: check args, only allow trusted objects to be created @@ -1029,8 +1061,7 @@ int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data, if (init->handle == ~0) return -EINVAL; - if (nouveau_gpuobj_ref_find(chan, init->handle, NULL) == - 0) + if (nouveau_gpuobj_ref_find(chan, init->handle, NULL) == 0) return -EEXIST; ret = nouveau_gpuobj_gr_new(chan, init->class, &gr); @@ -1050,3 +1081,21 @@ int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data, return 0; } +int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_nouveau_gpuobj_free *objfree = data; + struct nouveau_gpuobj_ref *ref; + struct nouveau_channel *chan; + int ret; + + NOUVEAU_CHECK_INITIALISED_WITH_RETURN; + NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan); + + if ((ret = nouveau_gpuobj_ref_find(chan, objfree->handle, &ref))) + return ret; + nouveau_gpuobj_ref_del(dev, &ref); + + return 0; +} + diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c index 26ba8fbf..4fb53291 100644 --- a/shared-core/nouveau_state.c +++ b/shared-core/nouveau_state.c @@ -267,12 +267,16 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) return 0; } -static int nouveau_card_init(struct drm_device *dev) +int +nouveau_card_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_engine *engine; int ret; + if (dev_priv->init_state == NOUVEAU_CARD_INIT_DONE) + return 0; + /* Map any PCI resources we need on the card */ ret = nouveau_init_card_mappings(dev); if (ret) return ret; @@ -290,6 +294,9 @@ static int nouveau_card_init(struct drm_device *dev) engine = &dev_priv->Engine; dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED; + ret = drm_irq_install(dev); + if (ret) return ret; + /* Initialise instance memory, must happen before mem_init so we * know exactly how much VRAM we're able to use for "normal" * purposes. @@ -301,6 +308,9 @@ static int nouveau_card_init(struct drm_device *dev) ret = nouveau_mem_init(dev); if (ret) return ret; + ret = nouveau_gpuobj_init(dev); + if (ret) return ret; + /* Parse BIOS tables / Run init tables? */ /* PMC */ @@ -349,6 +359,8 @@ static void nouveau_card_takedown(struct drm_device *dev) nouveau_mem_close(dev); engine->instmem.takedown(dev); + drm_irq_uninstall(dev); + dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN; } } @@ -368,14 +380,6 @@ void nouveau_preclose(struct drm_device *dev, struct drm_file *file_priv) /* first module load, setup the mmio/fb mapping */ int nouveau_firstopen(struct drm_device *dev) { - int ret; - - ret = nouveau_card_init(dev); - if (ret) { - DRM_ERROR("nouveau_card_init() failed! 
(%d)\n", ret); - return ret; - } - return 0; } @@ -395,15 +399,6 @@ int nouveau_load(struct drm_device *dev, unsigned long flags) dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN; dev->dev_private = (void *)dev_priv; - -#if 0 - ret = nouveau_card_init(dev); - if (ret) { - DRM_ERROR("nouveau_card_init() failed! (%d)\n", ret); - return ret; - } -#endif - return 0; } @@ -427,12 +422,24 @@ int nouveau_unload(struct drm_device *dev) return 0; } +int +nouveau_ioctl_card_init(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + return nouveau_card_init(dev); +} + int nouveau_ioctl_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_getparam *getparam = data; + NOUVEAU_CHECK_INITIALISED_WITH_RETURN; + switch (getparam->param) { + case NOUVEAU_GETPARAM_CHIPSET_ID: + getparam->value = dev_priv->chipset; + break; case NOUVEAU_GETPARAM_PCI_VENDOR: getparam->value=dev->pci_vendor; break; @@ -481,6 +488,8 @@ int nouveau_ioctl_setparam(struct drm_device *dev, void *data, struct drm_file * struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_setparam *setparam = data; + NOUVEAU_CHECK_INITIALISED_WITH_RETURN; + switch (setparam->param) { case NOUVEAU_SETPARAM_CMDBUF_LOCATION: switch (setparam->value) { diff --git a/shared-core/nv04_instmem.c b/shared-core/nv04_instmem.c index 35b20abd..36aa6200 100644 --- a/shared-core/nv04_instmem.c +++ b/shared-core/nv04_instmem.c @@ -93,13 +93,6 @@ int nv04_instmem_init(struct drm_device *dev) nv04_instmem_determine_amount(dev); nv04_instmem_configure_fixed_tables(dev); - if ((ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramht_offset, - dev_priv->ramht_size, - NVOBJ_FLAG_ZERO_ALLOC | - NVOBJ_FLAG_ALLOW_NO_REFS, - &dev_priv->ramht, NULL))) - return ret; - /* Create a heap to manage RAMIN allocations, we don't allocate * the space that was reserved for RAMHT/FC/RO. */ @@ -117,9 +110,6 @@ int nv04_instmem_init(struct drm_device *dev) void nv04_instmem_takedown(struct drm_device *dev) { - struct drm_nouveau_private *dev_priv = dev->dev_private; - - nouveau_gpuobj_del(dev, &dev_priv->ramht); } int From 51f24be578025e3f1eae859288adf5232afc898d Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 6 Aug 2007 21:46:55 +1000 Subject: [PATCH 210/437] nouveau: Determine trapped channel id from active grctx on >=NV40 --- shared-core/nouveau_irq.c | 59 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 58 insertions(+), 1 deletion(-) diff --git a/shared-core/nouveau_irq.c b/shared-core/nouveau_irq.c index f7baf89e..2ee77d83 100644 --- a/shared-core/nouveau_irq.c +++ b/shared-core/nouveau_irq.c @@ -301,6 +301,61 @@ nouveau_print_bitfield_names(uint32_t value, printk(" (unknown bits 0x%08x)", value); } +static int +nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + int channel; + + if (dev_priv->card_type < NV_40) { + channel = (NV_READ(0x400704) >> 20) & 0x1f; + } else + if (dev_priv->card_type < NV_50) { + uint32_t cur_grctx = (NV_READ(0x40032C) & 0xfffff) << 4; + + /* 0x400704 *sometimes* contains a sensible channel ID, but + * mostly not.. for now lookup which channel owns the active + * PGRAPH context. Probably a better way, but this'll do + * for now. 
+ */ + for (channel = 0; channel < 32; channel++) { + if (dev_priv->fifos[channel] == NULL) + continue; + if (cur_grctx == + dev_priv->fifos[channel]->ramin_grctx->instance) + break; + } + if (channel == 32) { + DRM_ERROR("AIII, unable to determine active channel " + "from PGRAPH context 0x%08x\n", cur_grctx); + return -EINVAL; + } + } else { + uint32_t cur_grctx = (NV_READ(0x40032C) & 0xfffff) << 12; + + for (channel = 0; channel < 128; channel++) { + if (dev_priv->fifos[channel] == NULL) + continue; + if (cur_grctx == + dev_priv->fifos[channel]->ramin_grctx->instance) + break; + } + if (channel == 128) { + DRM_ERROR("AIII, unable to determine active channel " + "from PGRAPH context 0x%08x\n", cur_grctx); + return -EINVAL; + } + } + + if (channel > nouveau_fifo_number(dev) || + dev_priv->fifos[channel] == NULL) { + DRM_ERROR("AIII, invalid/inactive channel id %d\n", channel); + return -EINVAL; + } + + return 0; +} + static void nouveau_graph_dump_trap_info(struct drm_device *dev) { @@ -310,8 +365,10 @@ nouveau_graph_dump_trap_info(struct drm_device *dev) uint32_t method, subc, data; uint32_t nsource, nstatus; + if (nouveau_graph_trapped_channel(dev, &channel)) + channel = -1; + address = NV_READ(0x400704); - channel = (address >> 20) & 0x1F; subc = (address >> 16) & 0x7; method = address & 0x1FFC; data = NV_READ(0x400708); From cf04641bc61c8bc18101713a8d95ef98e6afae7f Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 6 Aug 2007 22:05:31 +1000 Subject: [PATCH 211/437] nouveau: Give DRM its own gpu channel If your card doesn't have working context switching, it is now broken. --- linux-core/Makefile.kernel | 2 +- linux-core/nouveau_sgdma.c | 2 +- shared-core/nouveau_drv.h | 26 ++++++++++++++++++++++++++ shared-core/nouveau_fifo.c | 14 +++++++------- shared-core/nouveau_state.c | 9 +++++++++ shared-core/nv04_instmem.c | 1 - 6 files changed, 44 insertions(+), 10 deletions(-) diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel index 5aa589cd..3d00cbe6 100644 --- a/linux-core/Makefile.kernel +++ b/linux-core/Makefile.kernel @@ -22,7 +22,7 @@ i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \ i915_buffer.o nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \ nouveau_object.o nouveau_irq.o nouveau_notifier.o \ - nouveau_sgdma.o \ + nouveau_sgdma.o nouveau_dma.o \ nv04_timer.o \ nv04_mc.o nv40_mc.o nv50_mc.o \ nv04_fb.o nv10_fb.o nv40_fb.o \ diff --git a/linux-core/nouveau_sgdma.c b/linux-core/nouveau_sgdma.c index 6393a469..df970d11 100644 --- a/linux-core/nouveau_sgdma.c +++ b/linux-core/nouveau_sgdma.c @@ -69,7 +69,7 @@ nouveau_sgdma_clear(struct drm_ttm_backend *be) if (nvbe->is_bound) be->func->unbind(be); - for (d = 0; d < nvbe->pages_populated; d--) { + for (d = 0; d < nvbe->pages_populated; d++) { pci_unmap_page(nvbe->dev->pdev, nvbe->pagelist[d], NV_CTXDMA_PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); diff --git a/shared-core/nouveau_drv.h b/shared-core/nouveau_drv.h index 0b173b76..10f9149e 100644 --- a/shared-core/nouveau_drv.h +++ b/shared-core/nouveau_drv.h @@ -134,6 +134,22 @@ struct nouveau_channel struct list_head ramht_refs; /* Objects referenced by RAMHT */ }; +struct nouveau_drm_channel { + struct nouveau_channel *chan; + + /* DMA state */ + int max, put, cur, free; + int push_free; + volatile uint32_t *pushbuf; + + /* Notifiers */ + uint32_t notify0_offset; + + /* Buffer moves */ + uint32_t m2mf_dma_source; + uint32_t m2mf_dma_destin; +}; + struct nouveau_config { struct { int location; @@ -222,6 +238,7 @@ struct 
drm_nouveau_private { struct nouveau_channel *fifos[NV_MAX_FIFO_NUMBER]; struct nouveau_engine Engine; + struct nouveau_drm_channel channel; /* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */ struct nouveau_gpuobj *ramht; @@ -345,6 +362,10 @@ extern int nouveau_fifo_ctx_size(struct drm_device *); extern void nouveau_fifo_cleanup(struct drm_device *, struct drm_file *); extern int nouveau_fifo_owner(struct drm_device *, struct drm_file *, int channel); +extern int nouveau_fifo_alloc(struct drm_device *dev, + struct nouveau_channel **chan, + struct drm_file *file_priv, + uint32_t fb_ctxdma, uint32_t tt_ctxdma); extern void nouveau_fifo_free(struct nouveau_channel *); /* nouveau_object.c */ @@ -400,6 +421,11 @@ extern struct drm_ttm_backend *nouveau_sgdma_init_ttm(struct drm_device *); extern int nouveau_sgdma_nottm_hack_init(struct drm_device *); extern void nouveau_sgdma_nottm_hack_takedown(struct drm_device *); +/* nouveau_dma.c */ +extern int nouveau_dma_channel_init(struct drm_device *); +extern void nouveau_dma_channel_takedown(struct drm_device *); +extern int nouveau_dma_wait(struct drm_device *, int size); + /* nv04_fb.c */ extern int nv04_fb_init(struct drm_device *); extern void nv04_fb_takedown(struct drm_device *); diff --git a/shared-core/nouveau_fifo.c b/shared-core/nouveau_fifo.c index 152b669a..823801f9 100644 --- a/shared-core/nouveau_fifo.c +++ b/shared-core/nouveau_fifo.c @@ -261,9 +261,10 @@ nouveau_fifo_cmdbuf_alloc(struct nouveau_channel *chan) } /* allocates and initializes a fifo for user space consumption */ -int nouveau_fifo_alloc(struct drm_device *dev, int *chan_ret, - struct drm_file *file_priv, - uint32_t vram_handle, uint32_t tt_handle) +int +nouveau_fifo_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, + struct drm_file *file_priv, + uint32_t vram_handle, uint32_t tt_handle) { int ret; struct drm_nouveau_private *dev_priv = dev->dev_private; @@ -288,7 +289,6 @@ int nouveau_fifo_alloc(struct drm_device *dev, int *chan_ret, /* no more fifos. you lost. */ if (channel==nouveau_fifo_number(dev)) return -EINVAL; - (*chan_ret) = channel; dev_priv->fifos[channel] = drm_calloc(1, sizeof(struct nouveau_channel), DRM_MEM_DRIVER); @@ -394,6 +394,7 @@ int nouveau_fifo_alloc(struct drm_device *dev, int *chan_ret, NV_WRITE(NV03_PFIFO_CACHES, 1); DRM_INFO("%s: initialised FIFO %d\n", __func__, channel); + *chan_ret = chan; return 0; } @@ -482,13 +483,12 @@ static int nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data, if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0) return -EINVAL; - res = nouveau_fifo_alloc(dev, &init->channel, file_priv, + res = nouveau_fifo_alloc(dev, &chan, file_priv, init->fb_ctxdma_handle, init->tt_ctxdma_handle); if (res) return res; - chan = dev_priv->fifos[init->channel]; - + init->channel = chan->id; init->put_base = chan->pushbuf_base; /* make the fifo available to user space */ diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c index 4fb53291..9dab34cc 100644 --- a/shared-core/nouveau_state.c +++ b/shared-core/nouveau_state.c @@ -274,6 +274,8 @@ nouveau_card_init(struct drm_device *dev) struct nouveau_engine *engine; int ret; + DRM_DEBUG("prev state = %d\n", dev_priv->init_state); + if (dev_priv->init_state == NOUVEAU_CARD_INIT_DONE) return 0; @@ -335,6 +337,9 @@ nouveau_card_init(struct drm_device *dev) /* what about PVIDEO/PCRTC/PRAMDAC etc? 
*/ + ret = nouveau_dma_channel_init(dev); + if (ret) return ret; + dev_priv->init_state = NOUVEAU_CARD_INIT_DONE; return 0; } @@ -344,7 +349,11 @@ static void nouveau_card_takedown(struct drm_device *dev) struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_engine *engine = &dev_priv->Engine; + DRM_DEBUG("prev state = %d\n", dev_priv->init_state); + if (dev_priv->init_state != NOUVEAU_CARD_INIT_DOWN) { + nouveau_dma_channel_takedown(dev); + engine->fifo.takedown(dev); engine->graph.takedown(dev); engine->fb.takedown(dev); diff --git a/shared-core/nv04_instmem.c b/shared-core/nv04_instmem.c index 36aa6200..5e0f6f4e 100644 --- a/shared-core/nv04_instmem.c +++ b/shared-core/nv04_instmem.c @@ -129,7 +129,6 @@ nv04_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) if (gpuobj && gpuobj->im_backing) { if (gpuobj->im_bound) dev_priv->Engine.instmem.unbind(dev, gpuobj); - nouveau_mem_free(dev, gpuobj->im_backing); gpuobj->im_backing = NULL; } } From 7a0a812ea42d80eed89b7b9993eae42c7c1b1613 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 6 Aug 2007 22:06:52 +1000 Subject: [PATCH 212/437] nouveau: Remove PGRAPH_SURFACE hack, it wont work now anyway. Need to find another way of doing this, ideally someone'd hunt down which object/method controls it! The Xv blit adaptor is likely now broken on cards that have pNv->WaitVSyncPossible enabled. --- shared-core/nouveau_fifo.c | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/shared-core/nouveau_fifo.c b/shared-core/nouveau_fifo.c index 823801f9..fd21d2f3 100644 --- a/shared-core/nouveau_fifo.c +++ b/shared-core/nouveau_fifo.c @@ -369,19 +369,6 @@ nouveau_fifo_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, nouveau_fifo_free(chan); return ret; } - - /* Temporary hack, to avoid breaking Xv on cards where the - * initial context value for 0x400710 doesn't have these bits - * set. Proper fix would be to find which object+method is - * responsible for modifying this state. - */ - if (dev_priv->chipset >= 0x10 && dev_priv->chipset < 0x50) { - uint32_t tmp; - tmp = NV_READ(NV10_PGRAPH_SURFACE) & 0x0007ff00; - NV_WRITE(NV10_PGRAPH_SURFACE, tmp); - tmp = NV_READ(NV10_PGRAPH_SURFACE) | 0x00020100; - NV_WRITE(NV10_PGRAPH_SURFACE, tmp); - } } NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, From 92084c6e056a738308ff65f3fcd7411fd7d2995a Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 6 Aug 2007 22:11:18 +1000 Subject: [PATCH 213/437] Export some useful ttm functions to drivers. --- linux-core/drm_bo.c | 10 ++++++---- linux-core/drm_bo_move.c | 1 + linux-core/drm_objects.h | 16 ++++++++++++++++ 3 files changed, 23 insertions(+), 4 deletions(-) diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 467d03ff..4c2b1541 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -517,7 +517,7 @@ static void drm_bo_base_deref_locked(struct drm_file * file_priv, drm_bo_usage_deref_locked(&bo); } -static void drm_bo_usage_deref_unlocked(struct drm_buffer_object ** bo) +void drm_bo_usage_deref_unlocked(struct drm_buffer_object ** bo) { struct drm_buffer_object *tmp_bo = *bo; struct drm_device *dev = tmp_bo->dev; @@ -530,6 +530,7 @@ static void drm_bo_usage_deref_unlocked(struct drm_buffer_object ** bo) mutex_unlock(&dev->struct_mutex); } } +EXPORT_SYMBOL(drm_bo_usage_deref_unlocked); /* * Note. 
The caller has to register (if applicable) @@ -1672,10 +1673,10 @@ int drm_buffer_object_create(struct drm_device *dev, drm_bo_usage_deref_unlocked(&bo); return ret; } +EXPORT_SYMBOL(drm_buffer_object_create); -static int drm_bo_add_user_object(struct drm_file *file_priv, - struct drm_buffer_object *bo, - int shareable) +int drm_bo_add_user_object(struct drm_file *file_priv, + struct drm_buffer_object *bo, int shareable) { struct drm_device *dev = file_priv->head->dev; int ret; @@ -1694,6 +1695,7 @@ static int drm_bo_add_user_object(struct drm_file *file_priv, mutex_unlock(&dev->struct_mutex); return ret; } +EXPORT_SYMBOL(drm_bo_add_user_object); static int drm_bo_lock_test(struct drm_device * dev, struct drm_file *file_priv) { diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c index 5e21173c..1a613916 100644 --- a/linux-core/drm_bo_move.c +++ b/linux-core/drm_bo_move.c @@ -128,6 +128,7 @@ int drm_mem_reg_ioremap(struct drm_device * dev, struct drm_bo_mem_reg * mem, *virtual = addr; return 0; } +EXPORT_SYMBOL(drm_mem_reg_ioremap); /** * \c Unmap mapping obtained using drm_bo_ioremap diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index e5f2b69c..e34fdbc4 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -483,6 +483,17 @@ extern int drm_bo_mem_space(struct drm_buffer_object * bo, struct drm_bo_mem_reg * mem, int no_wait); extern int drm_bo_move_buffer(struct drm_buffer_object * bo, uint32_t new_mem_flags, int no_wait, int move_unfenced); +extern int drm_buffer_object_create(struct drm_device *dev, unsigned long size, + enum drm_bo_type type, uint64_t mask, + uint32_t hint, uint32_t page_alignment, + unsigned long buffer_start, + struct drm_buffer_object **bo); +extern int drm_bo_init_mm(struct drm_device *dev, unsigned type, + unsigned long p_offset, unsigned long p_size); +extern int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type); +extern int drm_bo_add_user_object(struct drm_file *file_priv, + struct drm_buffer_object *bo, int sharable); +extern void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo); /* * Buffer object memory move helpers. @@ -502,6 +513,11 @@ extern int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo, uint32_t fence_flags, struct drm_bo_mem_reg * new_mem); +extern int drm_mem_reg_ioremap(struct drm_device *dev, + struct drm_bo_mem_reg *mem, void **virtual); +extern void drm_mem_reg_iounmap(struct drm_device *dev, + struct drm_bo_mem_reg *mem, void *virtual); + #ifdef CONFIG_DEBUG_MUTEXES #define DRM_ASSERT_LOCKED(_mutex) \ BUG_ON(!mutex_is_locked(_mutex) || \ From 8d5a8ebc316028f14666697cff33daddbe384bcd Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 6 Aug 2007 22:32:36 +1000 Subject: [PATCH 214/437] nouveau: ouch, add nouveau_dma.[ch] files.. 
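For reference, a minimal sketch of how the push-buffer macros added below in
nouveau_dma.h (BEGIN_RING / OUT_RING / FIRE_RING) are meant to be used from the
DRM's internal channel. The surrounding function is hypothetical and only
illustrates the calling convention; the macros expect dev, dev_priv and dchan
to be in scope, exactly as in nouveau_dma_channel_init() below.

	/* hypothetical caller, not part of this patch */
	static void example_m2mf_nop(struct drm_device *dev)
	{
		struct drm_nouveau_private *dev_priv = dev->dev_private;
		struct nouveau_drm_channel *dchan = &dev_priv->channel;

		/* one-method packet on the M2MF subchannel */
		BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (0);
		/* update PUT so PFIFO starts fetching the new commands */
		FIRE_RING ();
	}
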
--- linux-core/nouveau_dma.c | 1 + linux-core/nouveau_dma.h | 1 + shared-core/nouveau_dma.c | 168 ++++++++++++++++++++++++++++++++++++++ shared-core/nouveau_dma.h | 98 ++++++++++++++++++++++ 4 files changed, 268 insertions(+) create mode 120000 linux-core/nouveau_dma.c create mode 120000 linux-core/nouveau_dma.h create mode 100644 shared-core/nouveau_dma.c create mode 100644 shared-core/nouveau_dma.h diff --git a/linux-core/nouveau_dma.c b/linux-core/nouveau_dma.c new file mode 120000 index 00000000..f8e0bdc3 --- /dev/null +++ b/linux-core/nouveau_dma.c @@ -0,0 +1 @@ +../shared-core/nouveau_dma.c \ No newline at end of file diff --git a/linux-core/nouveau_dma.h b/linux-core/nouveau_dma.h new file mode 120000 index 00000000..a545e387 --- /dev/null +++ b/linux-core/nouveau_dma.h @@ -0,0 +1 @@ +../shared-core/nouveau_dma.h \ No newline at end of file diff --git a/shared-core/nouveau_dma.c b/shared-core/nouveau_dma.c new file mode 100644 index 00000000..ce5b6299 --- /dev/null +++ b/shared-core/nouveau_dma.c @@ -0,0 +1,168 @@ +/* + * Copyright (C) 2007 Ben Skeggs. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include "drmP.h" +#include "drm.h" +#include "nouveau_drv.h" +#include "nouveau_dma.h" + +#define SKIPS 8 + +int +nouveau_dma_channel_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_drm_channel *dchan = &dev_priv->channel; + struct nouveau_gpuobj *gpuobj = NULL; + int grclass, ret, i; + + DRM_DEBUG("\n"); + + /* Allocate channel */ + ret = nouveau_fifo_alloc(dev, &dchan->chan, (struct drm_file *)-2, + NvDmaFB, NvDmaTT); + if (ret) { + DRM_ERROR("Error allocating GPU channel: %d\n", ret); + return ret; + } + DRM_DEBUG("Using FIFO channel %d\n", dchan->chan->id); + + /* Map push buffer */ + drm_core_ioremap(dchan->chan->pushbuf_mem->map, dev); + if (!dchan->chan->pushbuf_mem->map->handle) { + DRM_ERROR("Failed to ioremap push buffer\n"); + return -EINVAL; + } + dchan->pushbuf = (void*)dchan->chan->pushbuf_mem->map->handle; + + /* Initialise DMA vars */ + dchan->max = (dchan->chan->pushbuf_mem->size >> 2) - 2; + dchan->put = dchan->chan->pushbuf_base >> 2; + dchan->cur = dchan->put; + dchan->free = dchan->max - dchan->cur; + + /* Insert NOPS for SKIPS */ + dchan->free -= SKIPS; + dchan->push_free = SKIPS; + for (i=0; ichan, NvNotify0, 1, + &dchan->notify0_offset))) { + DRM_ERROR("Error allocating NvNotify0: %d\n", ret); + return ret; + } + + /* We use NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */ + if (dev_priv->card_type < NV_50) grclass = NV_MEMORY_TO_MEMORY_FORMAT; + else grclass = NV50_MEMORY_TO_MEMORY_FORMAT; + if ((ret = nouveau_gpuobj_gr_new(dchan->chan, grclass, &gpuobj))) { + DRM_ERROR("Error creating NvM2MF: %d\n", ret); + return ret; + } + + if ((ret = nouveau_gpuobj_ref_add(dev, dchan->chan, NvM2MF, + gpuobj, NULL))) { + DRM_ERROR("Error referencing NvM2MF: %d\n", ret); + return ret; + } + dchan->m2mf_dma_source = NvDmaFB; + dchan->m2mf_dma_destin = NvDmaFB; + + BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NAME, 1); + OUT_RING (NvM2MF); + BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_NOTIFY, 1); + OUT_RING (NvNotify0); + BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_SOURCE, 2); + OUT_RING (dchan->m2mf_dma_source); + OUT_RING (dchan->m2mf_dma_destin); + FIRE_RING(); + + return 0; +} + +void +nouveau_dma_channel_takedown(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_drm_channel *dchan = &dev_priv->channel; + + DRM_DEBUG("\n"); + + if (dchan->chan) { + nouveau_fifo_free(dchan->chan); + dchan->chan = NULL; + } +} + +#define RING_SKIPS 8 + +#define READ_GET() ((NV_READ(NV03_FIFO_REGS_DMAGET(dchan->chan->id)) - \ + dchan->chan->pushbuf_base) >> 2) +#define WRITE_PUT(val) do { \ + NV_WRITE(NV03_FIFO_REGS_DMAPUT(dchan->chan->id), \ + ((val) << 2) + dchan->chan->pushbuf_base); \ +} while(0) + +int +nouveau_dma_wait(struct drm_device *dev, int size) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_drm_channel *dchan = &dev_priv->channel; + uint32_t get; + + while (dchan->free < size) { + get = READ_GET(); + + if (dchan->put >= get) { + dchan->free = dchan->max - dchan->cur; + + if (dchan->free < size) { + dchan->push_free = 1; + OUT_RING(0x20000000|dchan->chan->pushbuf_base); + if (get <= RING_SKIPS) { + /*corner case - will be idle*/ + if (dchan->put <= RING_SKIPS) + WRITE_PUT(RING_SKIPS + 1); + + do { + get = READ_GET(); + } while (get <= RING_SKIPS); + } + + WRITE_PUT(RING_SKIPS); + dchan->cur = dchan->put = RING_SKIPS; + dchan->free = get - (RING_SKIPS + 1); + } + } else { + dchan->free = get - 
dchan->cur - 1; + } + } + + return 0; +} + diff --git a/shared-core/nouveau_dma.h b/shared-core/nouveau_dma.h new file mode 100644 index 00000000..5e51c1c4 --- /dev/null +++ b/shared-core/nouveau_dma.h @@ -0,0 +1,98 @@ +/* + * Copyright (C) 2007 Ben Skeggs. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __NOUVEAU_DMA_H__ +#define __NOUVEAU_DMA_H__ + +typedef enum { + NvSubM2MF = 0, +} nouveau_subchannel_id_t; + +typedef enum { + NvM2MF = 0x80039001, + NvDmaFB = 0x8003d001, + NvDmaTT = 0x8003d002, + NvNotify0 = 0x8003d003 +} nouveau_object_handle_t; + +#define NV_MEMORY_TO_MEMORY_FORMAT 0x00000039 +#define NV_MEMORY_TO_MEMORY_FORMAT_NAME 0x00000000 +#define NV_MEMORY_TO_MEMORY_FORMAT_SET_REF 0x00000050 +#define NV_MEMORY_TO_MEMORY_FORMAT_NOP 0x00000100 +#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY 0x00000104 +#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY_STYLE_WRITE 0x00000000 +#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY_STYLE_WRITE_LE_AWAKEN 0x00000001 +#define NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_NOTIFY 0x00000180 +#define NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_SOURCE 0x00000184 +#define NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN 0x0000030c + +#define NV50_MEMORY_TO_MEMORY_FORMAT 0x00005039 +#define NV50_MEMORY_TO_MEMORY_FORMAT_UNK200 0x00000200 +#define NV50_MEMORY_TO_MEMORY_FORMAT_UNK21C 0x0000021c +#define NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN_HIGH 0x00000238 +#define NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_OUT_HIGH 0x0000023c + +#define BEGIN_RING(subc, mthd, cnt) do { \ + int push_size = (cnt) + 1; \ + if (dchan->push_free) { \ + DRM_ERROR("prior packet incomplete: %d\n", dchan->push_free); \ + break; \ + } \ + if (dchan->free < push_size) { \ + if (nouveau_dma_wait(dev, push_size)) { \ + DRM_ERROR("FIFO timeout\n"); \ + break; \ + } \ + } \ + dchan->free -= push_size; \ + dchan->push_free = push_size; \ + OUT_RING(((cnt)<<18) | ((subc)<<15) | mthd); \ +} while(0) + +#define OUT_RING(data) do { \ + if (dchan->push_free == 0) { \ + DRM_ERROR("no space left in packet\n"); \ + break; \ + } \ + dchan->pushbuf[dchan->cur++] = (data); \ + dchan->push_free--; \ +} while(0) + +#define FIRE_RING() do { \ + if (dchan->push_free) { \ + DRM_ERROR("packet incomplete: %d\n", dchan->push_free); \ + break; \ + } \ + if (dchan->cur != dchan->put) { \ + DRM_MEMORYBARRIER(); \ + dchan->put = dchan->cur; \ + NV_WRITE(NV03_FIFO_REGS_DMAPUT(dchan->chan->id), \ + (dchan->put<<2)); \ + } \ +} while(0) + +#endif 
+ From ac24f328ec8954f78b1025db716abdd5b25b3dd9 Mon Sep 17 00:00:00 2001 From: Stephane Marchesin Date: Mon, 6 Aug 2007 17:14:26 +0200 Subject: [PATCH 215/437] nouveau: Bump PCI GART to 16MB --- shared-core/nouveau_mem.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/shared-core/nouveau_mem.c b/shared-core/nouveau_mem.c index 981af8a6..419522f4 100644 --- a/shared-core/nouveau_mem.c +++ b/shared-core/nouveau_mem.c @@ -411,7 +411,7 @@ int nouveau_mem_init(struct drm_device *dev) struct drm_scatter_gather sgreq; DRM_DEBUG("Allocating sg memory for PCI DMA\n"); - sgreq.size = 4 << 20; //4MB of PCI scatter-gather zone + sgreq.size = 16 << 20; //4MB of PCI scatter-gather zone if (drm_sg_alloc(dev, &sgreq)) { DRM_ERROR("Unable to allocate 4MB of scatter-gather" From 66f5232d9393f6886d8fd1a60b2d75cd009b972c Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 7 Aug 2007 01:51:46 +1000 Subject: [PATCH 216/437] nouveau: Init global gpuobj list early, unbreaks sgdma code. --- shared-core/nouveau_object.c | 2 -- shared-core/nouveau_state.c | 2 ++ 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/shared-core/nouveau_object.c b/shared-core/nouveau_object.c index 22ad23cd..bb096531 100644 --- a/shared-core/nouveau_object.c +++ b/shared-core/nouveau_object.c @@ -270,8 +270,6 @@ nouveau_gpuobj_init(struct drm_device *dev) struct drm_nouveau_private *dev_priv = dev->dev_private; int ret; - INIT_LIST_HEAD(&dev_priv->gpuobj_list); - if (dev_priv->card_type < NV_50) { if ((ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramht_offset, dev_priv->ramht_size, diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c index 9dab34cc..a23d6001 100644 --- a/shared-core/nouveau_state.c +++ b/shared-core/nouveau_state.c @@ -299,6 +299,8 @@ nouveau_card_init(struct drm_device *dev) ret = drm_irq_install(dev); if (ret) return ret; + INIT_LIST_HEAD(&dev_priv->gpuobj_list); + /* Initialise instance memory, must happen before mem_init so we * know exactly how much VRAM we're able to use for "normal" * purposes. From 997a9a738ec26cf0ef2c7dee5e30bb53bd11bf6c Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 6 Aug 2007 15:31:34 -0700 Subject: [PATCH 217/437] Eliminate allocation "owner" usage. 
--- linux-core/xgi_cmdlist.c | 1 - linux-core/xgi_drv.h | 22 +++------------------- linux-core/xgi_fb.c | 7 ++----- linux-core/xgi_pcie.c | 26 +------------------------- 4 files changed, 6 insertions(+), 50 deletions(-) diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index 4bb147c4..e0ca31f1 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -38,7 +38,6 @@ int xgi_cmdlist_initialize(struct xgi_info * info, size_t size) { struct xgi_mem_alloc mem_alloc = { .size = size, - .owner = PCIE_2D, }; int err; diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 8dec1fa1..f771517d 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -34,11 +34,11 @@ #define DRIVER_NAME "xgi" #define DRIVER_DESC "XGI XP5 / XP10 / XG47" -#define DRIVER_DATE "20070723" +#define DRIVER_DATE "20070806" #define DRIVER_MAJOR 0 #define DRIVER_MINOR 10 -#define DRIVER_PATCHLEVEL 0 +#define DRIVER_PATCHLEVEL 1 #include "xgi_cmdlist.h" #include "xgi_drm.h" @@ -53,8 +53,6 @@ struct xgi_mem_block { unsigned long offset; unsigned long size; struct drm_file * filp; - - unsigned int owner; }; struct xgi_mem_heap { @@ -93,23 +91,9 @@ struct xgi_info { struct xgi_cmdring_info cmdring; }; -enum PcieOwner { - PCIE_2D = 0, - /* - PCIE_3D should not begin with 1, - 2D alloc pcie memory will use owner 1. - */ - PCIE_3D = 11, /*vetex buf */ - PCIE_3D_CMDLIST = 12, - PCIE_3D_SCRATCHPAD = 13, - PCIE_3D_TEXTURE = 14, - PCIE_INVALID = 0x7fffffff -}; - - extern struct kmem_cache *xgi_mem_block_cache; extern struct xgi_mem_block *xgi_mem_alloc(struct xgi_mem_heap * heap, - unsigned long size, enum PcieOwner owner); + unsigned long size); extern int xgi_mem_free(struct xgi_mem_heap * heap, unsigned long offset, struct drm_file * filp); extern int xgi_mem_heap_init(struct xgi_mem_heap * heap, unsigned int start, diff --git a/linux-core/xgi_fb.c b/linux-core/xgi_fb.c index 9c60a874..bbdebb57 100644 --- a/linux-core/xgi_fb.c +++ b/linux-core/xgi_fb.c @@ -93,7 +93,6 @@ struct xgi_mem_block *xgi_mem_new_node(void) block->offset = 0; block->size = 0; - block->owner = PCIE_INVALID; block->filp = (struct drm_file *) -1; return block; @@ -101,8 +100,7 @@ struct xgi_mem_block *xgi_mem_new_node(void) struct xgi_mem_block *xgi_mem_alloc(struct xgi_mem_heap * heap, - unsigned long originalSize, - enum PcieOwner owner) + unsigned long originalSize) { struct xgi_mem_block *block, *free_block, *used_block; unsigned long size = (originalSize + PAGE_SIZE - 1) & PAGE_MASK; @@ -167,7 +165,6 @@ struct xgi_mem_block *xgi_mem_alloc(struct xgi_mem_heap * heap, heap->max_freesize -= size; list_add(&used_block->list, &heap->used_list); - used_block->owner = owner; return (used_block); } @@ -258,7 +255,7 @@ int xgi_fb_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, ("Video RAM allocation on front buffer successfully! 
\n"); } else { down(&info->fb_sem); - block = xgi_mem_alloc(&info->fb_heap, alloc->size, PCIE_2D); + block = xgi_mem_alloc(&info->fb_heap, alloc->size); up(&info->fb_sem); if (block == NULL) { diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index b91471b8..f66ffee9 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -153,31 +153,7 @@ int xgi_pcie_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, struct xgi_mem_block *block; down(&info->pcie_sem); - if ((alloc->owner == PCIE_3D) && (xgi_pcie_vertex_block)) { - DRM_INFO("PCIE Vertex has been created, return directly.\n"); - block = xgi_pcie_vertex_block; - } - else if ((alloc->owner == PCIE_3D_CMDLIST) && (xgi_pcie_cmdlist_block)) { - DRM_INFO("PCIE Cmdlist has been created, return directly.\n"); - block = xgi_pcie_cmdlist_block; - } - else if ((alloc->owner == PCIE_3D_SCRATCHPAD) && (xgi_pcie_scratchpad_block)) { - DRM_INFO("PCIE Scratchpad has been created, return directly.\n"); - block = xgi_pcie_scratchpad_block; - } - else { - block = xgi_mem_alloc(&info->pcie_heap, alloc->size, alloc->owner); - - if (alloc->owner == PCIE_3D) { - xgi_pcie_vertex_block = block; - } - else if (alloc->owner == PCIE_3D_CMDLIST) { - xgi_pcie_cmdlist_block = block; - } - else if (alloc->owner == PCIE_3D_SCRATCHPAD) { - xgi_pcie_scratchpad_block = block; - } - } + block = xgi_mem_alloc(&info->pcie_heap, alloc->size); up(&info->pcie_sem); if (block == NULL) { From 78e9c1a93d00097895bc77d9ac90da1945021804 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 6 Aug 2007 15:37:56 -0700 Subject: [PATCH 218/437] Eliminate special-case handling of framebuffer (fake) allocation. --- linux-core/xgi_drv.h | 2 +- linux-core/xgi_fb.c | 49 ++++++++++++++++---------------------------- 2 files changed, 19 insertions(+), 32 deletions(-) diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index f771517d..8a144fda 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -38,7 +38,7 @@ #define DRIVER_MAJOR 0 #define DRIVER_MINOR 10 -#define DRIVER_PATCHLEVEL 1 +#define DRIVER_PATCHLEVEL 2 #include "xgi_cmdlist.h" #include "xgi_drm.h" diff --git a/linux-core/xgi_fb.c b/linux-core/xgi_fb.c index bbdebb57..b27b6b20 100644 --- a/linux-core/xgi_fb.c +++ b/linux-core/xgi_fb.c @@ -247,32 +247,23 @@ int xgi_fb_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, { struct xgi_mem_block *block; - if (alloc->is_front) { - alloc->location = XGI_MEMLOC_LOCAL; - alloc->offset = 0; - alloc->hw_addr = 0; - DRM_INFO - ("Video RAM allocation on front buffer successfully! 
\n"); + down(&info->fb_sem); + block = xgi_mem_alloc(&info->fb_heap, alloc->size); + up(&info->fb_sem); + + if (block == NULL) { + alloc->size = 0; + DRM_ERROR("Video RAM allocation failed\n"); + return -ENOMEM; } else { - down(&info->fb_sem); - block = xgi_mem_alloc(&info->fb_heap, alloc->size); - up(&info->fb_sem); + DRM_INFO("Video RAM allocation succeeded: 0x%p\n", + (char *)block->offset); + alloc->location = XGI_MEMLOC_LOCAL; + alloc->size = block->size; + alloc->offset = block->offset; + alloc->hw_addr = block->offset; - if (block == NULL) { - alloc->location = XGI_MEMLOC_LOCAL; - alloc->size = 0; - DRM_ERROR("Video RAM allocation failed\n"); - return -ENOMEM; - } else { - DRM_INFO("Video RAM allocation succeeded: 0x%p\n", - (char *)block->offset); - alloc->location = XGI_MEMLOC_LOCAL; - alloc->size = block->size; - alloc->offset = block->offset; - alloc->hw_addr = block->offset; - - block->filp = filp; - } + block->filp = filp; } return 0; @@ -295,13 +286,9 @@ int xgi_fb_free(struct xgi_info * info, unsigned long offset, { int err = 0; - if (offset == 0) { - DRM_INFO("free onscreen frame buffer successfully !\n"); - } else { - down(&info->fb_sem); - err = xgi_mem_free(&info->fb_heap, offset, filp); - up(&info->fb_sem); - } + down(&info->fb_sem); + err = xgi_mem_free(&info->fb_heap, offset, filp); + up(&info->fb_sem); return err; } From a6fb93a150f90ada9af6760b52d34716497f744f Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 6 Aug 2007 15:43:51 -0700 Subject: [PATCH 219/437] Finish removing allocation "owner" infrastructure. --- linux-core/xgi_pcie.c | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index f66ffee9..be6915e8 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -28,10 +28,6 @@ #include "xgi_regs.h" #include "xgi_misc.h" -static struct xgi_mem_block *xgi_pcie_vertex_block = NULL; -static struct xgi_mem_block *xgi_pcie_cmdlist_block = NULL; -static struct xgi_mem_block *xgi_pcie_scratchpad_block = NULL; - static int xgi_pcie_free_locked(struct xgi_info * info, unsigned long offset, struct drm_file * filp); @@ -220,14 +216,7 @@ void xgi_pcie_free_all(struct xgi_info * info, struct drm_file * filp) int xgi_pcie_free_locked(struct xgi_info * info, unsigned long offset, struct drm_file * filp) { - const bool isvertex = (xgi_pcie_vertex_block - && (xgi_pcie_vertex_block->offset == offset)); - int err = xgi_mem_free(&info->pcie_heap, offset, filp); - - if (!err && isvertex) - xgi_pcie_vertex_block = NULL; - - return err; + return xgi_mem_free(&info->pcie_heap, offset, filp); } From d749cc9ae8c50157a1588369222a591410002c26 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Mon, 6 Aug 2007 15:45:37 -0700 Subject: [PATCH 220/437] Initialize the AGP structure's base address at init rather than enable. Not all drivers call enable (intel), but they would still like to use this member in driver code. 
--- linux-core/drm_agpsupport.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/linux-core/drm_agpsupport.c b/linux-core/drm_agpsupport.c index df54360d..4618823c 100644 --- a/linux-core/drm_agpsupport.c +++ b/linux-core/drm_agpsupport.c @@ -183,7 +183,6 @@ int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode) #else agp_enable(dev->agp->bridge, mode.mode); #endif - dev->agp->base = dev->agp->agp_info.aper_base; dev->agp->enabled = 1; return 0; } @@ -441,6 +440,7 @@ struct drm_agp_head *drm_agp_init(struct drm_device *dev) INIT_LIST_HEAD(&head->memory); head->cant_use_aperture = head->agp_info.cant_use_aperture; head->page_mask = head->agp_info.page_mask; + head->base = head->agp_info.aper_base; return head; } From 5362cc723e6605c31d152eb22ee3dc40c9e3f56b Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 6 Aug 2007 15:52:06 -0700 Subject: [PATCH 221/437] Eliminate unnecessary function xgi_pcie_free_locked. --- linux-core/xgi_pcie.c | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index be6915e8..df49615a 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -28,9 +28,6 @@ #include "xgi_regs.h" #include "xgi_misc.h" -static int xgi_pcie_free_locked(struct xgi_info * info, - unsigned long offset, struct drm_file * filp); - static int xgi_pcie_lut_init(struct xgi_info * info) { u8 temp = 0; @@ -206,27 +203,20 @@ void xgi_pcie_free_all(struct xgi_info * info, struct drm_file * filp) break; } - (void) xgi_pcie_free_locked(info, block->offset, filp); + (void) xgi_mem_free(&info->pcie_heap, block->offset, filp); } while(1); up(&info->pcie_sem); } -int xgi_pcie_free_locked(struct xgi_info * info, unsigned long offset, - struct drm_file * filp) -{ - return xgi_mem_free(&info->pcie_heap, offset, filp); -} - - int xgi_pcie_free(struct xgi_info * info, unsigned long offset, struct drm_file * filp) { int err; down(&info->pcie_sem); - err = xgi_pcie_free_locked(info, offset, filp); + err = xgi_mem_free(&info->pcie_heap, offset, filp); up(&info->pcie_sem); if (err) { From f96bff9e213a950ab910832908d30e732435e628 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 6 Aug 2007 16:09:05 -0700 Subject: [PATCH 222/437] Unify infrastructure for allocating (not yet freeing) on-card / GART memory. 
--- linux-core/xgi_cmdlist.c | 3 ++- linux-core/xgi_drv.h | 9 ++------- linux-core/xgi_fb.c | 18 ++++++++++++------ linux-core/xgi_pcie.c | 31 ++----------------------------- 4 files changed, 18 insertions(+), 43 deletions(-) diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index e0ca31f1..33155827 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -37,11 +37,12 @@ static void xgi_cmdlist_reset(struct xgi_info * info); int xgi_cmdlist_initialize(struct xgi_info * info, size_t size) { struct xgi_mem_alloc mem_alloc = { + .location = XGI_MEMLOC_NON_LOCAL, .size = size, }; int err; - err = xgi_pcie_alloc(info, &mem_alloc, 0); + err = xgi_alloc(info, &mem_alloc, 0); if (err) { return err; } diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 8a144fda..48c4b42c 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -38,7 +38,7 @@ #define DRIVER_MAJOR 0 #define DRIVER_MINOR 10 -#define DRIVER_PATCHLEVEL 2 +#define DRIVER_PATCHLEVEL 3 #include "xgi_cmdlist.h" #include "xgi_drm.h" @@ -92,8 +92,6 @@ struct xgi_info { }; extern struct kmem_cache *xgi_mem_block_cache; -extern struct xgi_mem_block *xgi_mem_alloc(struct xgi_mem_heap * heap, - unsigned long size); extern int xgi_mem_free(struct xgi_mem_heap * heap, unsigned long offset, struct drm_file * filp); extern int xgi_mem_heap_init(struct xgi_mem_heap * heap, unsigned int start, @@ -102,7 +100,7 @@ extern void xgi_mem_heap_cleanup(struct xgi_mem_heap * heap); extern int xgi_fb_heap_init(struct xgi_info * info); -extern int xgi_fb_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, +extern int xgi_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, struct drm_file * filp); extern int xgi_fb_free(struct xgi_info * info, unsigned long offset, @@ -111,9 +109,6 @@ extern int xgi_fb_free(struct xgi_info * info, unsigned long offset, extern int xgi_pcie_heap_init(struct xgi_info * info); extern void xgi_pcie_lut_cleanup(struct xgi_info * info); -extern int xgi_pcie_alloc(struct xgi_info * info, - struct xgi_mem_alloc * alloc, struct drm_file * filp); - extern int xgi_pcie_free(struct xgi_info * info, unsigned long offset, struct drm_file * filp); diff --git a/linux-core/xgi_fb.c b/linux-core/xgi_fb.c index b27b6b20..1d5dc22b 100644 --- a/linux-core/xgi_fb.c +++ b/linux-core/xgi_fb.c @@ -99,8 +99,8 @@ struct xgi_mem_block *xgi_mem_new_node(void) } -struct xgi_mem_block *xgi_mem_alloc(struct xgi_mem_heap * heap, - unsigned long originalSize) +static struct xgi_mem_block *xgi_mem_alloc(struct xgi_mem_heap * heap, + unsigned long originalSize) { struct xgi_mem_block *block, *free_block, *used_block; unsigned long size = (originalSize + PAGE_SIZE - 1) & PAGE_MASK; @@ -242,13 +242,15 @@ int xgi_mem_free(struct xgi_mem_heap * heap, unsigned long offset, } -int xgi_fb_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, +int xgi_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, struct drm_file * filp) { struct xgi_mem_block *block; down(&info->fb_sem); - block = xgi_mem_alloc(&info->fb_heap, alloc->size); + block = xgi_mem_alloc((alloc->location == XGI_MEMLOC_LOCAL) + ? 
&info->fb_heap : &info->pcie_heap, + alloc->size); up(&info->fb_sem); if (block == NULL) { @@ -258,11 +260,14 @@ int xgi_fb_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, } else { DRM_INFO("Video RAM allocation succeeded: 0x%p\n", (char *)block->offset); - alloc->location = XGI_MEMLOC_LOCAL; alloc->size = block->size; alloc->offset = block->offset; alloc->hw_addr = block->offset; + if (alloc->location == XGI_MEMLOC_NON_LOCAL) { + alloc->hw_addr += info->pcie.base; + } + block->filp = filp; } @@ -277,7 +282,8 @@ int xgi_fb_alloc_ioctl(struct drm_device * dev, void * data, (struct xgi_mem_alloc *) data; struct xgi_info *info = dev->dev_private; - return xgi_fb_alloc(info, alloc, filp); + alloc->location = XGI_MEMLOC_LOCAL; + return xgi_alloc(info, alloc, filp); } diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index df49615a..c0d424f5 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -140,34 +140,6 @@ int xgi_pcie_heap_init(struct xgi_info * info) } -int xgi_pcie_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, - struct drm_file * filp) -{ - struct xgi_mem_block *block; - - down(&info->pcie_sem); - block = xgi_mem_alloc(&info->pcie_heap, alloc->size); - up(&info->pcie_sem); - - if (block == NULL) { - alloc->location = XGI_MEMLOC_INVALID; - alloc->size = 0; - DRM_ERROR("PCIE RAM allocation failed\n"); - return -ENOMEM; - } else { - DRM_INFO("PCIE RAM allocation succeeded: offset = 0x%lx\n", - block->offset); - alloc->location = XGI_MEMLOC_NON_LOCAL; - alloc->size = block->size; - alloc->hw_addr = block->offset + info->pcie.base; - alloc->offset = block->offset; - - block->filp = filp; - return 0; - } -} - - int xgi_pcie_alloc_ioctl(struct drm_device * dev, void * data, struct drm_file * filp) { @@ -175,7 +147,8 @@ int xgi_pcie_alloc_ioctl(struct drm_device * dev, void * data, (struct xgi_mem_alloc *) data; struct xgi_info *info = dev->dev_private; - return xgi_pcie_alloc(info, alloc, filp); + alloc->location = XGI_MEMLOC_NON_LOCAL; + return xgi_alloc(info, alloc, filp); } From 90907c59152f628d6f0efea4927a06e547f4a3c7 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 6 Aug 2007 16:17:23 -0700 Subject: [PATCH 223/437] Replace per-heap semaphores with drm_device::struct_mutex. 
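A minimal sketch of the locking pattern this commit converges on, reusing the xgi_info field names from the hunks below; the helper name is illustrative only. Both heaps are now guarded by the single drm_device::struct_mutex, so a path that touches on-card and GART memory together needs one critical section instead of two per-heap semaphores.

    /* Illustrative only: both heaps share the device mutex after this change. */
    static void example_touch_both_heaps(struct xgi_info *info)
    {
        mutex_lock(&info->dev->struct_mutex);
        /* ... operate on info->fb_heap ... */
        /* ... operate on info->pcie_heap ... */
        mutex_unlock(&info->dev->struct_mutex);
    }
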
--- linux-core/xgi_drv.c | 3 --- linux-core/xgi_drv.h | 3 --- linux-core/xgi_fb.c | 12 ++++++------ linux-core/xgi_pcie.c | 8 ++++---- 4 files changed, 10 insertions(+), 16 deletions(-) diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index 33b3a51d..e98fd608 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -360,9 +360,6 @@ int xgi_driver_load(struct drm_device *dev, unsigned long flags) dev->dev_private = info; info->dev = dev; - sema_init(&info->fb_sem, 1); - sema_init(&info->pcie_sem, 1); - info->mmio.base = drm_get_resource_start(dev, 1); info->mmio.size = drm_get_resource_len(dev, 1); diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 48c4b42c..384381c7 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -85,9 +85,6 @@ struct xgi_info { struct xgi_mem_heap fb_heap; struct xgi_mem_heap pcie_heap; - struct semaphore fb_sem; - struct semaphore pcie_sem; - struct xgi_cmdring_info cmdring; }; diff --git a/linux-core/xgi_fb.c b/linux-core/xgi_fb.c index 1d5dc22b..373c45dd 100644 --- a/linux-core/xgi_fb.c +++ b/linux-core/xgi_fb.c @@ -247,11 +247,11 @@ int xgi_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, { struct xgi_mem_block *block; - down(&info->fb_sem); + mutex_lock(&info->dev->struct_mutex); block = xgi_mem_alloc((alloc->location == XGI_MEMLOC_LOCAL) ? &info->fb_heap : &info->pcie_heap, alloc->size); - up(&info->fb_sem); + mutex_unlock(&info->dev->struct_mutex); if (block == NULL) { alloc->size = 0; @@ -292,9 +292,9 @@ int xgi_fb_free(struct xgi_info * info, unsigned long offset, { int err = 0; - down(&info->fb_sem); + mutex_lock(&info->dev->struct_mutex); err = xgi_mem_free(&info->fb_heap, offset, filp); - up(&info->fb_sem); + mutex_unlock(&info->dev->struct_mutex); return err; } @@ -324,7 +324,7 @@ void xgi_fb_free_all(struct xgi_info * info, struct drm_file * filp) return; } - down(&info->fb_sem); + mutex_lock(&info->dev->struct_mutex); do { struct xgi_mem_block *block; @@ -342,5 +342,5 @@ void xgi_fb_free_all(struct xgi_info * info, struct drm_file * filp) (void) xgi_mem_free(&info->fb_heap, block->offset, filp); } while(1); - up(&info->fb_sem); + mutex_unlock(&info->dev->struct_mutex); } diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index c0d424f5..883fbe7e 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -161,7 +161,7 @@ void xgi_pcie_free_all(struct xgi_info * info, struct drm_file * filp) return; } - down(&info->pcie_sem); + mutex_lock(&info->dev->struct_mutex); do { struct xgi_mem_block *block; @@ -179,7 +179,7 @@ void xgi_pcie_free_all(struct xgi_info * info, struct drm_file * filp) (void) xgi_mem_free(&info->pcie_heap, block->offset, filp); } while(1); - up(&info->pcie_sem); + mutex_unlock(&info->dev->struct_mutex); } @@ -188,9 +188,9 @@ int xgi_pcie_free(struct xgi_info * info, unsigned long offset, { int err; - down(&info->pcie_sem); + mutex_lock(&info->dev->struct_mutex); err = xgi_mem_free(&info->pcie_heap, offset, filp); - up(&info->pcie_sem); + mutex_unlock(&info->dev->struct_mutex); if (err) { DRM_ERROR("xgi_pcie_free() failed at base 0x%lx\n", offset); From f3072becda3a2d5fe587f20e155d4d4f9ace60a2 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 6 Aug 2007 16:35:07 -0700 Subject: [PATCH 224/437] Refactor xgi_(fb|pcie)_free_all into xgi_free_all. 
--- linux-core/xgi_drv.c | 6 ++++-- linux-core/xgi_drv.h | 6 +++--- linux-core/xgi_fb.c | 14 ++++++-------- linux-core/xgi_pcie.c | 31 ------------------------------- 4 files changed, 13 insertions(+), 44 deletions(-) diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index e98fd608..f6e7b550 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -284,8 +284,10 @@ void xgi_driver_preclose(struct drm_device * dev, struct drm_file * filp) { struct xgi_info * info = dev->dev_private; - xgi_pcie_free_all(info, filp); - xgi_fb_free_all(info, filp); + mutex_lock(&info->dev->struct_mutex); + xgi_free_all(info, &info->pcie_heap, filp); + xgi_free_all(info, &info->fb_heap, filp); + mutex_unlock(&info->dev->struct_mutex); } diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 384381c7..79276b70 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -38,7 +38,7 @@ #define DRIVER_MAJOR 0 #define DRIVER_MINOR 10 -#define DRIVER_PATCHLEVEL 3 +#define DRIVER_PATCHLEVEL 4 #include "xgi_cmdlist.h" #include "xgi_drm.h" @@ -111,8 +111,8 @@ extern int xgi_pcie_free(struct xgi_info * info, unsigned long offset, extern void *xgi_find_pcie_virt(struct xgi_info * info, u32 address); -extern void xgi_pcie_free_all(struct xgi_info *, struct drm_file *); -extern void xgi_fb_free_all(struct xgi_info *, struct drm_file *); +extern void xgi_free_all(struct xgi_info *, struct xgi_mem_heap *, + struct drm_file *); extern int xgi_fb_alloc_ioctl(struct drm_device * dev, void * data, struct drm_file * filp); diff --git a/linux-core/xgi_fb.c b/linux-core/xgi_fb.c index 373c45dd..f8341a67 100644 --- a/linux-core/xgi_fb.c +++ b/linux-core/xgi_fb.c @@ -318,29 +318,27 @@ int xgi_fb_heap_init(struct xgi_info * info) /** * Free all blocks associated with a particular file handle. */ -void xgi_fb_free_all(struct xgi_info * info, struct drm_file * filp) +void xgi_free_all(struct xgi_info * info, struct xgi_mem_heap * heap, + struct drm_file * filp) { - if (!info->fb_heap.initialized) { + if (!heap->initialized) { return; } - mutex_lock(&info->dev->struct_mutex); do { struct xgi_mem_block *block; - list_for_each_entry(block, &info->fb_heap.used_list, list) { + list_for_each_entry(block, &heap->used_list, list) { if (block->filp == filp) { break; } } - if (&block->list == &info->fb_heap.used_list) { + if (&block->list == &heap->used_list) { break; } - (void) xgi_mem_free(&info->fb_heap, block->offset, filp); + (void) xgi_mem_free(heap, block->offset, filp); } while(1); - - mutex_unlock(&info->dev->struct_mutex); } diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index 883fbe7e..b2edf3b1 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -152,37 +152,6 @@ int xgi_pcie_alloc_ioctl(struct drm_device * dev, void * data, } -/** - * Free all blocks associated with a particular file handle. 
- */ -void xgi_pcie_free_all(struct xgi_info * info, struct drm_file * filp) -{ - if (!info->pcie_heap.initialized) { - return; - } - - mutex_lock(&info->dev->struct_mutex); - - do { - struct xgi_mem_block *block; - - list_for_each_entry(block, &info->pcie_heap.used_list, list) { - if (block->filp == filp) { - break; - } - } - - if (&block->list == &info->pcie_heap.used_list) { - break; - } - - (void) xgi_mem_free(&info->pcie_heap, block->offset, filp); - } while(1); - - mutex_unlock(&info->dev->struct_mutex); -} - - int xgi_pcie_free(struct xgi_info * info, unsigned long offset, struct drm_file * filp) { From 6718198897ef9e275506d3fcb497641e1b09d3b1 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 6 Aug 2007 16:56:20 -0700 Subject: [PATCH 225/437] Release client memory in reclaim_buffers_idlelocked instead of preclose. --- linux-core/xgi_drv.c | 34 ++++++++++++++++++++-------------- 1 file changed, 20 insertions(+), 14 deletions(-) diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index f6e7b550..b6fea437 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -61,9 +61,9 @@ static const int xgi_max_ioctl = DRM_ARRAY_SIZE(xgi_ioctls); static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); static int xgi_driver_load(struct drm_device *dev, unsigned long flags); static int xgi_driver_unload(struct drm_device *dev); -static void xgi_driver_preclose(struct drm_device * dev, - struct drm_file * filp); static void xgi_driver_lastclose(struct drm_device * dev); +static void xgi_reclaim_buffers_locked(struct drm_device * dev, + struct drm_file * filp); static irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS); @@ -74,7 +74,6 @@ static struct drm_driver driver = { .dev_priv_size = sizeof(struct xgi_info), .load = xgi_driver_load, .unload = xgi_driver_unload, - .preclose = xgi_driver_preclose, .lastclose = xgi_driver_lastclose, .dma_quiescent = NULL, .irq_preinstall = NULL, @@ -82,6 +81,7 @@ static struct drm_driver driver = { .irq_uninstall = NULL, .irq_handler = xgi_kern_isr, .reclaim_buffers = drm_core_reclaim_buffers, + .reclaim_buffers_idlelocked = xgi_reclaim_buffers_locked, .get_map_ofs = drm_core_get_map_ofs, .get_reg_ofs = drm_core_get_reg_ofs, .ioctls = xgi_ioctls, @@ -280,17 +280,6 @@ int xgi_bootstrap(struct drm_device * dev, void * data, } -void xgi_driver_preclose(struct drm_device * dev, struct drm_file * filp) -{ - struct xgi_info * info = dev->dev_private; - - mutex_lock(&info->dev->struct_mutex); - xgi_free_all(info, &info->pcie_heap, filp); - xgi_free_all(info, &info->fb_heap, filp); - mutex_unlock(&info->dev->struct_mutex); -} - - void xgi_driver_lastclose(struct drm_device * dev) { struct xgi_info * info = dev->dev_private; @@ -322,6 +311,23 @@ void xgi_driver_lastclose(struct drm_device * dev) } +void xgi_reclaim_buffers_locked(struct drm_device * dev, + struct drm_file * filp) +{ + struct xgi_info * info = dev->dev_private; + + mutex_lock(&info->dev->struct_mutex); + if (dev->driver->dma_quiescent) { + dev->driver->dma_quiescent(dev); + } + + xgi_free_all(info, &info->pcie_heap, filp); + xgi_free_all(info, &info->fb_heap, filp); + mutex_unlock(&info->dev->struct_mutex); + return; +} + + /* * driver receives an interrupt if someone waiting, then hand it off. */ From f7ba02b7458823627097a2320bf9befa84fc9c76 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 6 Aug 2007 17:27:15 -0700 Subject: [PATCH 226/437] Unify infrastructure for freeing on-card / GART memory. 
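A minimal sketch of the handle encoding this commit introduces, using the XGI_MEMLOC_* constants and the masks from the xgi_free() hunk below; the helper is illustrative, not part of the driver. The pool selector is packed into the two low bits of the value handed to xgi_free(), which masks them back off to recover the block offset.

    /* Illustrative encode/decode of the unified free handle. */
    static void example_handle_roundtrip(unsigned long block_offset)
    {
        unsigned long index  = XGI_MEMLOC_NON_LOCAL | block_offset; /* caller side */
        unsigned pool        = index & 0x03;   /* xgi_free(): which heap */
        unsigned long offset = index & ~0x03;  /* xgi_free(): real offset */

        (void) pool;
        (void) offset;
    }
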
--- linux-core/xgi_cmdlist.c | 4 +++- linux-core/xgi_drv.h | 9 ++------- linux-core/xgi_fb.c | 15 +++++++++------ linux-core/xgi_pcie.c | 19 +------------------ 4 files changed, 15 insertions(+), 32 deletions(-) diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index 33155827..e1653021 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -202,7 +202,9 @@ void xgi_cmdlist_cleanup(struct xgi_info * info) xgi_waitfor_pci_idle(info); } - xgi_pcie_free(info, info->cmdring.ring_gart_base, NULL); + xgi_free(info, (XGI_MEMLOC_NON_LOCAL + | info->cmdring.ring_gart_base), + NULL); info->cmdring.ring_hw_base = 0; info->cmdring.ring_offset = 0; info->cmdring.size = 0; diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 79276b70..a7740ceb 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -38,7 +38,7 @@ #define DRIVER_MAJOR 0 #define DRIVER_MINOR 10 -#define DRIVER_PATCHLEVEL 4 +#define DRIVER_PATCHLEVEL 5 #include "xgi_cmdlist.h" #include "xgi_drm.h" @@ -89,8 +89,6 @@ struct xgi_info { }; extern struct kmem_cache *xgi_mem_block_cache; -extern int xgi_mem_free(struct xgi_mem_heap * heap, unsigned long offset, - struct drm_file * filp); extern int xgi_mem_heap_init(struct xgi_mem_heap * heap, unsigned int start, unsigned int end); extern void xgi_mem_heap_cleanup(struct xgi_mem_heap * heap); @@ -100,15 +98,12 @@ extern int xgi_fb_heap_init(struct xgi_info * info); extern int xgi_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, struct drm_file * filp); -extern int xgi_fb_free(struct xgi_info * info, unsigned long offset, +extern int xgi_free(struct xgi_info * info, unsigned long index, struct drm_file * filp); extern int xgi_pcie_heap_init(struct xgi_info * info); extern void xgi_pcie_lut_cleanup(struct xgi_info * info); -extern int xgi_pcie_free(struct xgi_info * info, unsigned long offset, - struct drm_file * filp); - extern void *xgi_find_pcie_virt(struct xgi_info * info, u32 address); extern void xgi_free_all(struct xgi_info *, struct xgi_mem_heap *, diff --git a/linux-core/xgi_fb.c b/linux-core/xgi_fb.c index f8341a67..26e6fc4e 100644 --- a/linux-core/xgi_fb.c +++ b/linux-core/xgi_fb.c @@ -169,8 +169,8 @@ static struct xgi_mem_block *xgi_mem_alloc(struct xgi_mem_heap * heap, return (used_block); } -int xgi_mem_free(struct xgi_mem_heap * heap, unsigned long offset, - struct drm_file * filp) +static int xgi_mem_free(struct xgi_mem_heap * heap, unsigned long offset, + struct drm_file * filp) { struct xgi_mem_block *used_block = NULL, *block; struct xgi_mem_block *prev, *next; @@ -287,13 +287,16 @@ int xgi_fb_alloc_ioctl(struct drm_device * dev, void * data, } -int xgi_fb_free(struct xgi_info * info, unsigned long offset, - struct drm_file * filp) +int xgi_free(struct xgi_info * info, unsigned long index, + struct drm_file * filp) { int err = 0; + const unsigned heap = index & 0x03; mutex_lock(&info->dev->struct_mutex); - err = xgi_mem_free(&info->fb_heap, offset, filp); + err = xgi_mem_free((heap == XGI_MEMLOC_NON_LOCAL) + ? 
&info->pcie_heap : &info->fb_heap, + (index & ~0x03), filp); mutex_unlock(&info->dev->struct_mutex); return err; @@ -305,7 +308,7 @@ int xgi_fb_free_ioctl(struct drm_device * dev, void * data, { struct xgi_info *info = dev->dev_private; - return xgi_fb_free(info, *(u32 *) data, filp); + return xgi_free(info, XGI_MEMLOC_LOCAL | *(u32 *) data, filp); } diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index b2edf3b1..281223b3 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -152,29 +152,12 @@ int xgi_pcie_alloc_ioctl(struct drm_device * dev, void * data, } -int xgi_pcie_free(struct xgi_info * info, unsigned long offset, - struct drm_file * filp) -{ - int err; - - mutex_lock(&info->dev->struct_mutex); - err = xgi_mem_free(&info->pcie_heap, offset, filp); - mutex_unlock(&info->dev->struct_mutex); - - if (err) { - DRM_ERROR("xgi_pcie_free() failed at base 0x%lx\n", offset); - } - - return err; -} - - int xgi_pcie_free_ioctl(struct drm_device * dev, void * data, struct drm_file * filp) { struct xgi_info *info = dev->dev_private; - return xgi_pcie_free(info, *(u32 *) data, filp); + return xgi_free(info, XGI_MEMLOC_NON_LOCAL | *(u32 *) data, filp); } From a4759b85139dd8d81de25e170777309b770f5316 Mon Sep 17 00:00:00 2001 From: Matthieu Castet Date: Tue, 7 Aug 2007 23:09:44 +0200 Subject: [PATCH 227/437] nouveau : fix enable irq (in the previous code all irq were masked by engine init after irq_postinstall) --- shared-core/nouveau_irq.c | 3 +++ shared-core/nouveau_state.c | 9 ++++++--- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/shared-core/nouveau_irq.c b/shared-core/nouveau_irq.c index 2ee77d83..84a37040 100644 --- a/shared-core/nouveau_irq.c +++ b/shared-core/nouveau_irq.c @@ -39,6 +39,9 @@ void nouveau_irq_preinstall(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; + /* TODO this should be removed as this stuff is done in + * engine.*init + */ DRM_DEBUG("IRQ: preinst\n"); diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c index a23d6001..e80e77a5 100644 --- a/shared-core/nouveau_state.c +++ b/shared-core/nouveau_state.c @@ -296,9 +296,6 @@ nouveau_card_init(struct drm_device *dev) engine = &dev_priv->Engine; dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED; - ret = drm_irq_install(dev); - if (ret) return ret; - INIT_LIST_HEAD(&dev_priv->gpuobj_list); /* Initialise instance memory, must happen before mem_init so we @@ -337,6 +334,12 @@ nouveau_card_init(struct drm_device *dev) ret = engine->fifo.init(dev); if (ret) return ret; + /* this call irq_preinstall, register irq handler and + * call irq_postinstall + */ + ret = drm_irq_install(dev); + if (ret) return ret; + /* what about PVIDEO/PCRTC/PRAMDAC etc? */ ret = nouveau_dma_channel_init(dev); From 4ad487190d5b79947c65e238330506db6b77e523 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 8 Aug 2007 10:42:12 +1000 Subject: [PATCH 228/437] nouveau: enable/disable engine-specific interrupts in _init()/_takedown() All interrupts are still masked by PMC until init is finished. 
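A minimal sketch of the per-engine pattern these hunks move to, using NV_WRITE and the PFIFO register names that appear in the patch; the function name is illustrative. Each engine's init acknowledges anything pending and then unmasks its own interrupt-enable register, while the top-level PMC mask keeps everything silent until card init has finished.

    /* Illustrative engine-level interrupt enable (mirrors nouveau_fifo_init below). */
    static void example_engine_init_intr(struct drm_device *dev)
    {
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        NV_WRITE(NV03_PFIFO_INTR_0, 0xFFFFFFFF);    /* ack pending bits */
        NV_WRITE(NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF); /* unmask this engine */
    }
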
--- shared-core/nouveau_fifo.c | 4 ++ shared-core/nouveau_irq.c | 81 -------------------------------------- shared-core/nv04_graph.c | 4 ++ shared-core/nv04_mc.c | 2 - shared-core/nv10_graph.c | 2 +- shared-core/nv20_graph.c | 2 +- shared-core/nv30_graph.c | 2 +- shared-core/nv40_graph.c | 2 +- shared-core/nv40_mc.c | 2 - shared-core/nv50_fifo.c | 12 ++++++ shared-core/nv50_graph.c | 12 +++++- shared-core/nv50_mc.c | 1 + 12 files changed, 36 insertions(+), 90 deletions(-) diff --git a/shared-core/nouveau_fifo.c b/shared-core/nouveau_fifo.c index fd21d2f3..f9677514 100644 --- a/shared-core/nouveau_fifo.c +++ b/shared-core/nouveau_fifo.c @@ -120,6 +120,10 @@ int nouveau_fifo_init(struct drm_device *dev) NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) | NV_PMC_ENABLE_PFIFO); + /* Enable PFIFO error reporting */ + NV_WRITE(NV03_PFIFO_INTR_0, 0xFFFFFFFF); + NV_WRITE(NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF); + NV_WRITE(NV03_PFIFO_CACHES, 0x00000000); ret = nouveau_fifo_instmem_configure(dev); diff --git a/shared-core/nouveau_irq.c b/shared-core/nouveau_irq.c index 84a37040..f110340b 100644 --- a/shared-core/nouveau_irq.c +++ b/shared-core/nouveau_irq.c @@ -39,37 +39,7 @@ void nouveau_irq_preinstall(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - /* TODO this should be removed as this stuff is done in - * engine.*init - */ - DRM_DEBUG("IRQ: preinst\n"); - - if (!dev_priv) { - DRM_ERROR("AIII, no dev_priv\n"); - return; - } - if (!dev_priv->mmio) { - DRM_ERROR("AIII, no dev_priv->mmio\n"); - return; - } - - /* Disable/Clear PFIFO interrupts */ - NV_WRITE(NV03_PFIFO_INTR_EN_0, 0); - NV_WRITE(NV03_PFIFO_INTR_0, 0xFFFFFFFF); - /* Disable/Clear PGRAPH interrupts */ - if (dev_priv->card_typedev_private; - if (!dev_priv) { - DRM_ERROR("AIII, no dev_priv\n"); - return; - } - if (!dev_priv->mmio) { - DRM_ERROR("AIII, no dev_priv->mmio\n"); - return; - } - - DRM_DEBUG("IRQ: postinst\n"); - - /* Enable PFIFO error reporting */ - NV_WRITE(NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF); - NV_WRITE(NV03_PFIFO_INTR_0, 0xFFFFFFFF); - - /* Enable PGRAPH interrupts */ - if (dev_priv->card_typedev_private; - if (!dev_priv) { - DRM_ERROR("AIII, no dev_priv\n"); - return; - } - if (!dev_priv->mmio) { - DRM_ERROR("AIII, no dev_priv->mmio\n"); - return; - } - - DRM_DEBUG("IRQ: uninst\n"); - - /* Disable PFIFO interrupts */ - NV_WRITE(NV03_PFIFO_INTR_EN_0, 0); - /* Disable PGRAPH interrupts */ - if (dev_priv->card_typectx_table->instance >> 4); - NV_WRITE(NV03_PGRAPH_INTR_EN, 0x00000000); NV_WRITE(NV03_PGRAPH_INTR , 0xFFFFFFFF); + NV_WRITE(NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); NV_WRITE(NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF); NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x00000000); diff --git a/shared-core/nv40_graph.c b/shared-core/nv40_graph.c index c79b63cc..c1464bc2 100644 --- a/shared-core/nv40_graph.c +++ b/shared-core/nv40_graph.c @@ -1636,8 +1636,8 @@ nv40_graph_init(struct drm_device *dev) /* No context present currently */ NV_WRITE(NV40_PGRAPH_CTXCTL_CUR, 0x00000000); - NV_WRITE(NV03_PGRAPH_INTR_EN, 0x00000000); NV_WRITE(NV03_PGRAPH_INTR , 0xFFFFFFFF); + NV_WRITE(NV40_PGRAPH_INTR_EN, 0xFFFFFFFF); NV_WRITE(NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF); NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x00000000); diff --git a/shared-core/nv40_mc.c b/shared-core/nv40_mc.c index 8bb6b083..c7db9023 100644 --- a/shared-core/nv40_mc.c +++ b/shared-core/nv40_mc.c @@ -14,8 +14,6 @@ nv40_mc_init(struct drm_device *dev) */ NV_WRITE(NV03_PMC_ENABLE, 0xFFFFFFFF); - NV_WRITE(NV03_PMC_INTR_EN_0, 0); - switch (dev_priv->chipset) { case 0x44: case 0x46: /* G72 
*/ diff --git a/shared-core/nv50_fifo.c b/shared-core/nv50_fifo.c index a5e79260..f915d332 100644 --- a/shared-core/nv50_fifo.c +++ b/shared-core/nv50_fifo.c @@ -119,6 +119,17 @@ nv50_fifo_init_reset(struct drm_device *dev) NV_WRITE(NV03_PMC_ENABLE, pmc_e | NV_PMC_ENABLE_PFIFO); } +static void +nv50_fifo_init_intr(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + DRM_DEBUG("\n"); + + NV_WRITE(NV03_PFIFO_INTR_0, 0xFFFFFFFF); + NV_WRITE(NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF); +} + static void nv50_fifo_init_context_table(struct drm_device *dev) { @@ -190,6 +201,7 @@ nv50_fifo_init(struct drm_device *dev) dev_priv->Engine.fifo.priv = priv; nv50_fifo_init_reset(dev); + nv50_fifo_init_intr(dev); if ((ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, (128+2)*4, 0x1000, NVOBJ_FLAG_ZERO_ALLOC, diff --git a/shared-core/nv50_graph.c b/shared-core/nv50_graph.c index 59c8cfeb..f98fe601 100644 --- a/shared-core/nv50_graph.c +++ b/shared-core/nv50_graph.c @@ -44,6 +44,16 @@ nv50_graph_init_reset(struct drm_device *dev) NV_WRITE(NV03_PMC_ENABLE, pmc_e | NV_PMC_ENABLE_PGRAPH); } +static void +nv50_graph_init_intr(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + DRM_DEBUG("\n"); + NV_WRITE(NV03_PGRAPH_INTR, 0xffffffff); + NV_WRITE(NV40_PGRAPH_INTR_EN, 0xffffffff); +} + static void nv50_graph_init_regs__nv(struct drm_device *dev) { @@ -59,7 +69,6 @@ nv50_graph_init_regs__nv(struct drm_device *dev) NV_WRITE(0x402000, 0xc0000000); NV_WRITE(0x400108, 0xffffffff); - NV_WRITE(0x400100, 0xffffffff); NV_WRITE(0x400824, 0x00004000); NV_WRITE(0x400500, 0x00010001); @@ -174,6 +183,7 @@ nv50_graph_init(struct drm_device *dev) DRM_DEBUG("\n"); nv50_graph_init_reset(dev); + nv50_graph_init_intr(dev); nv50_graph_init_regs__nv(dev); nv50_graph_init_regs(dev); nv50_graph_init_ctxctl(dev); diff --git a/shared-core/nv50_mc.c b/shared-core/nv50_mc.c index 952dea9f..b111826b 100644 --- a/shared-core/nv50_mc.c +++ b/shared-core/nv50_mc.c @@ -34,6 +34,7 @@ nv50_mc_init(struct drm_device *dev) struct drm_nouveau_private *dev_priv = dev->dev_private; NV_WRITE(NV03_PMC_ENABLE, 0xFFFFFFFF); + return 0; } From 296050eee6ca7b496e8702ceca9628de803d79f8 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 8 Aug 2007 13:01:29 +1000 Subject: [PATCH 229/437] nouveau/nv50: hack up initial channel context from current state We really should be providing static values like the nv40 PGRAPH code does, however, this will do for now to keep X at least working. 
--- shared-core/nv50_graph.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/shared-core/nv50_graph.c b/shared-core/nv50_graph.c index f98fe601..e5bbf65e 100644 --- a/shared-core/nv50_graph.c +++ b/shared-core/nv50_graph.c @@ -202,6 +202,7 @@ nv50_graph_create_context(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_engine *engine = &dev_priv->Engine; struct nouveau_gpuobj *ramin = chan->ramin->gpuobj; int grctx_size = 0x60000, hdr; int ret; @@ -224,6 +225,11 @@ nv50_graph_create_context(struct nouveau_channel *chan) INSTANCE_WR(ramin, (hdr + 0x10)/4, 0); INSTANCE_WR(ramin, (hdr + 0x14)/4, 0x00010000); + if ((ret = engine->graph.load_context(chan))) { + DRM_ERROR("Error hacking up initial context: %d\n", ret); + return ret; + } + return 0; } From 40f21563564332786ca2b9ffc7d7ba9c7e6f7f1a Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 8 Aug 2007 16:11:28 +1000 Subject: [PATCH 230/437] nouveau: return channel id --- shared-core/nouveau_irq.c | 1 + 1 file changed, 1 insertion(+) diff --git a/shared-core/nouveau_irq.c b/shared-core/nouveau_irq.c index f110340b..03c466de 100644 --- a/shared-core/nouveau_irq.c +++ b/shared-core/nouveau_irq.c @@ -275,6 +275,7 @@ nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret) return -EINVAL; } + *channel_ret = channel; return 0; } From 05633ca3708f48cfbbb77518da4e791d7e1613c2 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 8 Aug 2007 16:37:55 +1000 Subject: [PATCH 231/437] nouveau: Always allocate drm's push buffer in VRAM Fixes #11868 --- shared-core/nouveau_dma.c | 11 +++++- shared-core/nouveau_drv.h | 1 + shared-core/nouveau_fifo.c | 78 ++++++++++++++++++++------------------ 3 files changed, 53 insertions(+), 37 deletions(-) diff --git a/shared-core/nouveau_dma.c b/shared-core/nouveau_dma.c index ce5b6299..ab502e6a 100644 --- a/shared-core/nouveau_dma.c +++ b/shared-core/nouveau_dma.c @@ -37,13 +37,22 @@ nouveau_dma_channel_init(struct drm_device *dev) struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_drm_channel *dchan = &dev_priv->channel; struct nouveau_gpuobj *gpuobj = NULL; + struct mem_block *pushbuf; int grclass, ret, i; DRM_DEBUG("\n"); + pushbuf = nouveau_mem_alloc(dev, 0, 0x8000, + NOUVEAU_MEM_FB | NOUVEAU_MEM_MAPPED, + (struct drm_file *)-2); + if (!pushbuf) { + DRM_ERROR("Failed to allocate DMA push buffer\n"); + return -ENOMEM; + } + /* Allocate channel */ ret = nouveau_fifo_alloc(dev, &dchan->chan, (struct drm_file *)-2, - NvDmaFB, NvDmaTT); + pushbuf, NvDmaFB, NvDmaTT); if (ret) { DRM_ERROR("Error allocating GPU channel: %d\n", ret); return ret; diff --git a/shared-core/nouveau_drv.h b/shared-core/nouveau_drv.h index 10f9149e..572df46e 100644 --- a/shared-core/nouveau_drv.h +++ b/shared-core/nouveau_drv.h @@ -365,6 +365,7 @@ extern int nouveau_fifo_owner(struct drm_device *, struct drm_file *, extern int nouveau_fifo_alloc(struct drm_device *dev, struct nouveau_channel **chan, struct drm_file *file_priv, + struct mem_block *pushbuf, uint32_t fb_ctxdma, uint32_t tt_ctxdma); extern void nouveau_fifo_free(struct nouveau_channel *); diff --git a/shared-core/nouveau_fifo.c b/shared-core/nouveau_fifo.c index f9677514..22bced14 100644 --- a/shared-core/nouveau_fifo.c +++ b/shared-core/nouveau_fifo.c @@ -190,46 +190,30 @@ int nouveau_fifo_init(struct drm_device *dev) } static int -nouveau_fifo_cmdbuf_alloc(struct nouveau_channel *chan) +nouveau_fifo_pushbuf_ctxdma_init(struct nouveau_channel *chan) 
{ struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_config *config = &dev_priv->config; - struct mem_block *cb; - int cb_min_size = max(NV03_FIFO_SIZE,PAGE_SIZE); + struct mem_block *pb = chan->pushbuf_mem; struct nouveau_gpuobj *pushbuf = NULL; int ret; - /* Defaults for unconfigured values */ - if (!config->cmdbuf.location) - config->cmdbuf.location = NOUVEAU_MEM_FB; - if (!config->cmdbuf.size || config->cmdbuf.size < cb_min_size) - config->cmdbuf.size = cb_min_size; - - cb = nouveau_mem_alloc(dev, 0, config->cmdbuf.size, - config->cmdbuf.location | NOUVEAU_MEM_MAPPED, - (struct drm_file *)-2); - if (!cb) { - DRM_ERROR("Couldn't allocate DMA command buffer.\n"); - return -ENOMEM; - } - - if (cb->flags & NOUVEAU_MEM_AGP) { - ret = nouveau_gpuobj_gart_dma_new(chan, cb->start, cb->size, + if (pb->flags & NOUVEAU_MEM_AGP) { + ret = nouveau_gpuobj_gart_dma_new(chan, pb->start, pb->size, NV_DMA_ACCESS_RO, &pushbuf, &chan->pushbuf_base); } else - if (cb->flags & NOUVEAU_MEM_PCI) { + if (pb->flags & NOUVEAU_MEM_PCI) { ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, - cb->start, cb->size, + pb->start, pb->size, NV_DMA_ACCESS_RO, NV_DMA_TARGET_PCI_NONLINEAR, &pushbuf); chan->pushbuf_base = 0; } else if (dev_priv->card_type != NV_04) { ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, - cb->start, cb->size, + pb->start, pb->size, NV_DMA_ACCESS_RO, NV_DMA_TARGET_VIDMEM, &pushbuf); chan->pushbuf_base = 0; @@ -239,19 +223,13 @@ nouveau_fifo_cmdbuf_alloc(struct nouveau_channel *chan) * VRAM. */ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, - cb->start + + pb->start + drm_get_resource_start(dev, 1), - cb->size, NV_DMA_ACCESS_RO, + pb->size, NV_DMA_ACCESS_RO, NV_DMA_TARGET_PCI, &pushbuf); chan->pushbuf_base = 0; } - if (ret) { - nouveau_mem_free(dev, cb); - DRM_ERROR("Error creating push buffer ctxdma: %d\n", ret); - return ret; - } - if ((ret = nouveau_gpuobj_ref_add(dev, chan, 0, pushbuf, &chan->pushbuf))) { DRM_ERROR("Error referencing push buffer ctxdma: %d\n", ret); @@ -260,14 +238,36 @@ nouveau_fifo_cmdbuf_alloc(struct nouveau_channel *chan) return ret; } - chan->pushbuf_mem = cb; return 0; } +static struct mem_block * +nouveau_fifo_user_pushbuf_alloc(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_config *config = &dev_priv->config; + struct mem_block *pb; + int pb_min_size = max(NV03_FIFO_SIZE,PAGE_SIZE); + + /* Defaults for unconfigured values */ + if (!config->cmdbuf.location) + config->cmdbuf.location = NOUVEAU_MEM_FB; + if (!config->cmdbuf.size || config->cmdbuf.size < pb_min_size) + config->cmdbuf.size = pb_min_size; + + pb = nouveau_mem_alloc(dev, 0, config->cmdbuf.size, + config->cmdbuf.location | NOUVEAU_MEM_MAPPED, + (struct drm_file *)-2); + if (!pb) + DRM_ERROR("Couldn't allocate DMA push buffer.\n"); + + return pb; +} + /* allocates and initializes a fifo for user space consumption */ int nouveau_fifo_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, - struct drm_file *file_priv, + struct drm_file *file_priv, struct mem_block *pushbuf, uint32_t vram_handle, uint32_t tt_handle) { int ret; @@ -303,6 +303,7 @@ nouveau_fifo_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, chan->dev = dev; chan->id = channel; chan->file_priv = file_priv; + chan->pushbuf_mem = pushbuf; DRM_INFO("Allocating FIFO number %d\n", channel); @@ -320,8 +321,8 @@ nouveau_fifo_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, 
return ret; } - /* allocate a command buffer, and create a dma object for the gpu */ - ret = nouveau_fifo_cmdbuf_alloc(chan); + /* Create a dma object for the push buffer */ + ret = nouveau_fifo_pushbuf_ctxdma_init(chan); if (ret) { nouveau_fifo_free(chan); return ret; @@ -467,6 +468,7 @@ static int nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data, struct drm_nouveau_channel_alloc *init = data; struct drm_map_list *entry; struct nouveau_channel *chan; + struct mem_block *pushbuf; int res; NOUVEAU_CHECK_INITIALISED_WITH_RETURN; @@ -474,7 +476,11 @@ static int nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data, if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0) return -EINVAL; - res = nouveau_fifo_alloc(dev, &chan, file_priv, + pushbuf = nouveau_fifo_user_pushbuf_alloc(dev); + if (!pushbuf) + return -ENOMEM; + + res = nouveau_fifo_alloc(dev, &chan, file_priv, pushbuf, init->fb_ctxdma_handle, init->tt_ctxdma_handle); if (res) From e326acf5493a7193954d3dd794855e2a11dc1782 Mon Sep 17 00:00:00 2001 From: Matthieu Castet Date: Wed, 8 Aug 2007 22:55:32 +0200 Subject: [PATCH 232/437] nouveau : nv10, nv20, nv30 : don't save all channel in the same RAMFC entry This should improve multi fifo --- shared-core/nv10_fifo.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/shared-core/nv10_fifo.c b/shared-core/nv10_fifo.c index 47af0ff0..a056460d 100644 --- a/shared-core/nv10_fifo.c +++ b/shared-core/nv10_fifo.c @@ -33,7 +33,7 @@ NV10_RAMFC_##offset/4, (val)) #define RAMFC_RD(offset) INSTANCE_RD(chan->ramfc->gpuobj, \ NV10_RAMFC_##offset/4) -#define NV10_RAMFC(c) (dev_priv->ramfc_offset + NV10_RAMFC__SIZE) +#define NV10_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV10_RAMFC__SIZE)) #define NV10_RAMFC__SIZE ((dev_priv->chipset) >= 0x17 ? 64 : 32) int From 7281463f8d5d45a26f4cdff3fb67d896e0e74f74 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 9 Aug 2007 10:23:36 +1000 Subject: [PATCH 233/437] nouveau/nv40: add some missing pciids. 
--- shared-core/drm_pciids.txt | 28 +++++++++++++++++++++++++--- 1 file changed, 25 insertions(+), 3 deletions(-) diff --git a/shared-core/drm_pciids.txt b/shared-core/drm_pciids.txt index 126974d0..4bd690b2 100644 --- a/shared-core/drm_pciids.txt +++ b/shared-core/drm_pciids.txt @@ -510,8 +510,9 @@ 0x10de 0x0091 NV_40 "GeForce 7800 GTX" 0x10de 0x0092 NV_40 "GeForce 7800 GT" 0x10de 0x0093 NV_40 "GeForce 7800 GS" +0x10de 0x0095 NV_40 "GeForce 7800 SLI" 0x10de 0x0098 NV_40 "GeForce Go 7800" -0x10de 0x0099 NV_40 "GE Force Go 7800 GTX" +0x10de 0x0099 NV_40 "GeForce Go 7800 GTX" 0x10de 0x009d NV_40 "Quadro FX4500" 0x10de 0x00a0 NV_04 "Aladdin TNT2" 0x10de 0x00c0 NV_40 "GeForce 6800 GS" @@ -547,13 +548,16 @@ 0x10de 0x0113 NV_11 "Quadro2 MXR/EX/Go" 0x10de 0x0140 NV_40 "GeForce 6600 GT" 0x10de 0x0141 NV_40 "GeForce 6600" -0x10de 0x0142 NV_40 "GeForce 6600 PCIe" +0x10de 0x0142 NV_40 "GeForce 6600 LE" +0x10de 0x0143 NV_40 "GeForce 6600 VE" 0x10de 0x0144 NV_40 "GeForce Go 6600" 0x10de 0x0145 NV_40 "GeForce 6610 XL" 0x10de 0x0146 NV_40 "Geforce Go 6600TE/6200TE" +0x10de 0x0147 NV_40 "GeForce 6700 XL" 0x10de 0x0148 NV_40 "GeForce Go 6600" 0x10de 0x0149 NV_40 "GeForce Go 6600 GT" 0x10de 0x014a NV_40 "Quadro NVS 440" +0x10de 0x014c NV_40 "Quadro FX 550" 0x10de 0x014d NV_17 "Quadro FX 550" 0x10de 0x014e NV_40 "Quadro FX 540" 0x10de 0x014f NV_40 "GeForce 6200" @@ -561,6 +565,7 @@ 0x10de 0x0151 NV_15 "GeForce2 Ti" 0x10de 0x0152 NV_15 "GeForce2 Ultra, Bladerunner" 0x10de 0x0153 NV_15 "Quadro2 Pro" +0x10de 0x0160 NV_44 "GeForce 6500" 0x10de 0x0161 NV_44 "GeForce 6200 TurboCache(TM)" 0x10de 0x0162 NV_44 "GeForce 6200 SE TurboCache (TM)" 0x10de 0x0163 NV_44 "GeForce 6200 LE" @@ -569,6 +574,7 @@ 0x10de 0x0166 NV_44 "GeForce Go 6400" 0x10de 0x0167 NV_44 "GeForce Go 6200 TurboCache" 0x10de 0x0168 NV_44 "GeForce Go 6200 TurboCache" +0x10de 0x0169 NV_44 "GeForce 6250" 0x10de 0x0170 NV_17 "GeForce4 MX 460" 0x10de 0x0171 NV_17 "GeForce4 MX 440" 0x10de 0x0172 NV_17 "GeForce4 MX 420" @@ -601,11 +607,16 @@ 0x10de 0x019e NV_50 "Quadro FX 4600" 0x10de 0x01a0 NV_11|NV_NFORCE "GeForce2 MX Integrated Graphics" 0x10de 0x01d1 NV_44 "GeForce 7300 LE" +0x10de 0x01d3 NV_44 "Geforce 7300 SE" 0x10de 0x01d6 NV_44 "GeForce Go 7200" 0x10de 0x01d7 NV_44 "Quadro NVS 110M / GeForce Go 7300" 0x10de 0x01d8 NV_44 "GeForce Go 7400" +0x10de 0x01d9 NV_44 "GeForce Go 7400 GS" 0x10de 0x01da NV_44 "Quadro NVS 110M" +0x10de 0x01db NV_44 "Quadro NVS 120M" 0x10de 0x01dc NV_44 "Quadro FX 350M" +0x10de 0x01dd NV_44 "GeForce 7500 LE" +0x10de 0x01de NV_44 "Quadro FX 350" 0x10de 0x01df NV_44 "GeForce 7300 GS" 0x10de 0x01f0 NV_17|NV_NFORCE2 "GeForce4 MX - nForce GPU" 0x10de 0x0200 NV_20 "GeForce3" @@ -617,9 +628,12 @@ 0x10de 0x0215 NV_40 "GeForce 6800 GT" 0x10de 0x0218 NV_40 "GeForce 6800 XT" 0x10de 0x0221 NV_44 "GeForce 6200" +0x10de 0x0222 NV_44 "GeForce 6200 A-LE" 0x10de 0x0240 NV_44 "GeForce 6150" +0x10de 0x0241 NV_44 "GeForce 6150 LE" 0x10de 0x0242 NV_44 "GeForce 6100" -0x10de 0x0244 NV_44 "GeForce 6150 Go" +0x10de 0x0244 NV_44 "GeForce Go 6150" +0x10de 0x0247 NV_44 "GeForce Go 6100" 0x10de 0x0250 NV_25 "GeForce4 Ti 4600" 0x10de 0x0251 NV_25 "GeForce4 Ti 4400" 0x10de 0x0252 NV_25 "GeForce4 Ti" @@ -700,7 +714,15 @@ 0x10de 0x0391 NV_40 "GeForce 7600 GT" 0x10de 0x0392 NV_40 "GeForce 7600 GS" 0x10de 0x0393 NV_40 "GeForce 7300 GT" +0x10de 0x0394 NV_40 "GeForce 7600 LE" +0x10de 0x0395 NV_40 "GeForce 7300 GT" +0x10de 0x0397 NV_40 "GeForce Go 7700" 0x10de 0x0398 NV_40 "GeForce Go 7600" +0x10de 0x0399 NV_40 "GeForce Go 7600 GT" +0x10de 0x039a NV_40 "Quadro NVS 
300M" +0x10de 0x039b NV_40 "GeForce Go 7900 SE" +0x10de 0x039c NV_40 "Quadro FX 550M" +0x10de 0x039e NV_40 "Quadro FX 560" 0x10de 0x03d0 NV_44 "GeForce 6100 nForce 430" 0x10de 0x03d1 NV_44 "GeForce 6100 nForce 405" 0x10de 0x03d2 NV_44 "GeForce 6100 nForce 400" From 7784e8c6e74b93ffb39d82e3385bd3268a55507c Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 9 Aug 2007 11:12:13 +1000 Subject: [PATCH 234/437] nouveau: silence irq handler a bit --- shared-core/nouveau_irq.c | 87 +++++++++------------------------------ 1 file changed, 20 insertions(+), 67 deletions(-) diff --git a/shared-core/nouveau_irq.c b/shared-core/nouveau_irq.c index 03c466de..d8a2c1b8 100644 --- a/shared-core/nouveau_irq.c +++ b/shared-core/nouveau_irq.c @@ -72,12 +72,10 @@ static void nouveau_fifo_irq_handler(struct drm_device *dev) chstat = NV_READ(NV04_PFIFO_DMA); channel=NV_READ(NV03_PFIFO_CACHE1_PUSH1)&(nouveau_fifo_number(dev)-1); - DRM_DEBUG("NV: PFIFO interrupt! Channel=%d, INTSTAT=0x%08x/MODE=0x%08x/PEND=0x%08x\n", channel, status, chmode, chstat); - if (status & NV_PFIFO_INTR_CACHE_ERROR) { uint32_t c1get, c1method, c1data; - DRM_ERROR("NV: PFIFO error interrupt\n"); + DRM_ERROR("PFIFO error interrupt\n"); c1get = NV_READ(NV03_PFIFO_CACHE1_GET) >> 2; if (dev_priv->card_type < NV_40) { @@ -89,17 +87,17 @@ static void nouveau_fifo_irq_handler(struct drm_device *dev) c1data = NV_READ(NV40_PFIFO_CACHE1_DATA(c1get)); } - DRM_ERROR("NV: Channel %d/%d - Method 0x%04x, Data 0x%08x\n", - channel, (c1method >> 13) & 7, - c1method & 0x1ffc, c1data - ); + DRM_ERROR("Channel %d/%d - Method 0x%04x, Data 0x%08x\n", + channel, (c1method >> 13) & 7, c1method & 0x1ffc, + c1data); status &= ~NV_PFIFO_INTR_CACHE_ERROR; NV_WRITE(NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR); } if (status & NV_PFIFO_INTR_DMA_PUSHER) { - DRM_INFO("NV: PFIFO DMA pusher interrupt\n"); + DRM_ERROR("PFIFO DMA pusher interrupt: ch%d, 0x%08x\n", + channel, NV_READ(NV04_PFIFO_CACHE1_DMA_GET)); status &= ~NV_PFIFO_INTR_DMA_PUSHER; NV_WRITE(NV03_PFIFO_INTR_0, NV_PFIFO_INTR_DMA_PUSHER); @@ -113,7 +111,7 @@ static void nouveau_fifo_irq_handler(struct drm_device *dev) } if (status) { - DRM_INFO("NV: unknown PFIFO interrupt. 
status=0x%08x\n", status); + DRM_ERROR("Unhandled PFIFO interrupt: status=0x%08x\n", status); NV_WRITE(NV03_PFIFO_INTR_0, status); } @@ -311,77 +309,31 @@ nouveau_graph_dump_trap_info(struct drm_device *dev) ARRAY_SIZE(nouveau_nstatus_names)); printk("\n"); - DRM_ERROR("NV: Channel %d/%d (class 0x%04x) - " - "Method 0x%04x, Data 0x%08x\n", - channel, subc, class, method, data - ); + DRM_ERROR("Channel %d/%d (class 0x%04x) - Method 0x%04x, Data 0x%08x\n", + channel, subc, class, method, data); } static void nouveau_pgraph_irq_handler(struct drm_device *dev) { - uint32_t status; struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t status, nsource; status = NV_READ(NV03_PGRAPH_INTR); if (!status) return; + nsource = NV_READ(NV03_PGRAPH_NSOURCE); if (status & NV_PGRAPH_INTR_NOTIFY) { - uint32_t nsource, nstatus, instance, notify; - DRM_DEBUG("NV: PGRAPH notify interrupt\n"); + DRM_DEBUG("PGRAPH notify interrupt\n"); - nstatus = NV_READ(NV03_PGRAPH_NSTATUS); - nsource = NV_READ(NV03_PGRAPH_NSOURCE); - DRM_DEBUG("nsource:0x%08x\tnstatus:0x%08x\n", nsource, nstatus); - - /* if this wasn't NOTIFICATION_PENDING, dump extra trap info */ - if (nsource & ~(1<<0)) { - nouveau_graph_dump_trap_info(dev); - } else { - instance = NV_READ(0x00400158); - notify = NV_READ(0x00400150) >> 16; - DRM_DEBUG("instance:0x%08x\tnotify:0x%08x\n", - instance, notify); - } + nouveau_graph_dump_trap_info(dev); status &= ~NV_PGRAPH_INTR_NOTIFY; NV_WRITE(NV03_PGRAPH_INTR, NV_PGRAPH_INTR_NOTIFY); } - if (status & NV_PGRAPH_INTR_BUFFER_NOTIFY) { - uint32_t nsource, nstatus, instance, notify; - DRM_DEBUG("NV: PGRAPH buffer notify interrupt\n"); - - nstatus = NV_READ(NV03_PGRAPH_NSTATUS); - nsource = NV_READ(NV03_PGRAPH_NSOURCE); - DRM_DEBUG("nsource:0x%08x\tnstatus:0x%08x\n", nsource, nstatus); - - instance = NV_READ(0x00400158); - notify = NV_READ(0x00400150) >> 16; - DRM_DEBUG("instance:0x%08x\tnotify:0x%08x\n", instance, notify); - - status &= ~NV_PGRAPH_INTR_BUFFER_NOTIFY; - NV_WRITE(NV03_PGRAPH_INTR, NV_PGRAPH_INTR_BUFFER_NOTIFY); - } - - if (status & NV_PGRAPH_INTR_MISSING_HW) { - DRM_ERROR("NV: PGRAPH missing hw interrupt\n"); - - status &= ~NV_PGRAPH_INTR_MISSING_HW; - NV_WRITE(NV03_PGRAPH_INTR, NV_PGRAPH_INTR_MISSING_HW); - } - if (status & NV_PGRAPH_INTR_ERROR) { - uint32_t nsource, nstatus, instance; - - DRM_ERROR("NV: PGRAPH error interrupt\n"); - - nstatus = NV_READ(NV03_PGRAPH_NSTATUS); - nsource = NV_READ(NV03_PGRAPH_NSOURCE); - DRM_ERROR("nsource:0x%08x\tnstatus:0x%08x\n", nsource, nstatus); - - instance = NV_READ(0x00400158); - DRM_ERROR("instance:0x%08x\n", instance); + DRM_ERROR("PGRAPH error interrupt\n"); nouveau_graph_dump_trap_info(dev); @@ -391,7 +343,7 @@ static void nouveau_pgraph_irq_handler(struct drm_device *dev) if (status & NV_PGRAPH_INTR_CONTEXT_SWITCH) { uint32_t channel=NV_READ(NV03_PFIFO_CACHE1_PUSH1)&(nouveau_fifo_number(dev)-1); - DRM_INFO("NV: PGRAPH context switch interrupt channel %x\n",channel); + DRM_DEBUG("PGRAPH context switch interrupt channel %x\n",channel); switch(dev_priv->card_type) { case NV_04: @@ -408,7 +360,7 @@ static void nouveau_pgraph_irq_handler(struct drm_device *dev) nouveau_nv20_context_switch(dev); break; default: - DRM_INFO("NV: Context switch not implemented\n"); + DRM_ERROR("Context switch not implemented\n"); break; } @@ -417,7 +369,7 @@ static void nouveau_pgraph_irq_handler(struct drm_device *dev) } if (status) { - DRM_INFO("NV: Unknown PGRAPH interrupt! 
STAT=0x%08x\n", status); + DRM_ERROR("Unhandled PGRAPH interrupt: STAT=0x%08x\n", status); NV_WRITE(NV03_PGRAPH_INTR, status); } @@ -427,6 +379,7 @@ static void nouveau_pgraph_irq_handler(struct drm_device *dev) static void nouveau_crtc_irq_handler(struct drm_device *dev, int crtc) { struct drm_nouveau_private *dev_priv = dev->dev_private; + if (crtc&1) { NV_WRITE(NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK); } @@ -446,16 +399,16 @@ irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS) if (!status) return IRQ_NONE; - DRM_DEBUG("PMC INTSTAT: 0x%08x\n", status); - if (status & NV_PMC_INTR_0_PFIFO_PENDING) { nouveau_fifo_irq_handler(dev); status &= ~NV_PMC_INTR_0_PFIFO_PENDING; } + if (status & NV_PMC_INTR_0_PGRAPH_PENDING) { nouveau_pgraph_irq_handler(dev); status &= ~NV_PMC_INTR_0_PGRAPH_PENDING; } + if (status & NV_PMC_INTR_0_CRTCn_PENDING) { nouveau_crtc_irq_handler(dev, (status>>24)&3); status &= ~NV_PMC_INTR_0_CRTCn_PENDING; From 25cb876f8513d02d4d189371eaa8b7b9a88e860d Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Thu, 9 Aug 2007 15:23:13 -0700 Subject: [PATCH 235/437] Associate master file pointer with command list buffer. Pass the master's file pointer, as supplied to xgi_bootstrap, to xgi_cmdlist_initialize. Associate that pointer with the memory allocated for the command list buffer. By doing this the memory will be automatically cleaned up when the master closes the device. This allows the removal of some clean up code. --- linux-core/xgi_cmdlist.c | 13 ++++--------- linux-core/xgi_cmdlist.h | 9 ++------- linux-core/xgi_drv.c | 2 +- linux-core/xgi_drv.h | 4 ++-- 4 files changed, 9 insertions(+), 19 deletions(-) diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index e1653021..a1ec5720 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -34,7 +34,8 @@ static unsigned int get_batch_command(enum xgi_batch_type type); static void triggerHWCommandList(struct xgi_info * info); static void xgi_cmdlist_reset(struct xgi_info * info); -int xgi_cmdlist_initialize(struct xgi_info * info, size_t size) +int xgi_cmdlist_initialize(struct xgi_info * info, size_t size, + struct drm_file * filp) { struct xgi_mem_alloc mem_alloc = { .location = XGI_MEMLOC_NON_LOCAL, @@ -42,7 +43,7 @@ int xgi_cmdlist_initialize(struct xgi_info * info, size_t size) }; int err; - err = xgi_alloc(info, &mem_alloc, 0); + err = xgi_alloc(info, &mem_alloc, filp); if (err) { return err; } @@ -50,7 +51,6 @@ int xgi_cmdlist_initialize(struct xgi_info * info, size_t size) info->cmdring.ptr = xgi_find_pcie_virt(info, mem_alloc.hw_addr); info->cmdring.size = mem_alloc.size; info->cmdring.ring_hw_base = mem_alloc.hw_addr; - info->cmdring.ring_gart_base = mem_alloc.offset; info->cmdring.last_ptr = NULL; info->cmdring.ring_offset = 0; @@ -202,12 +202,7 @@ void xgi_cmdlist_cleanup(struct xgi_info * info) xgi_waitfor_pci_idle(info); } - xgi_free(info, (XGI_MEMLOC_NON_LOCAL - | info->cmdring.ring_gart_base), - NULL); - info->cmdring.ring_hw_base = 0; - info->cmdring.ring_offset = 0; - info->cmdring.size = 0; + (void) memset(&info->cmdring, 0, sizeof(info->cmdring)); } } diff --git a/linux-core/xgi_cmdlist.h b/linux-core/xgi_cmdlist.h index 604c9aac..07a2eb98 100644 --- a/linux-core/xgi_cmdlist.h +++ b/linux-core/xgi_cmdlist.h @@ -72,12 +72,6 @@ struct xgi_cmdring_info { */ unsigned int ring_hw_base; - /** - * Offset, in bytes, from the base of PCI-e GART space to the start - * of the ring. 
- */ - unsigned long ring_gart_base; - u32 * last_ptr; /** @@ -88,7 +82,8 @@ struct xgi_cmdring_info { }; struct xgi_info; -extern int xgi_cmdlist_initialize(struct xgi_info * info, size_t size); +extern int xgi_cmdlist_initialize(struct xgi_info * info, size_t size, + struct drm_file * filp); extern int xgi_state_change(struct xgi_info * info, unsigned int to, unsigned int from); diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index b6fea437..9aaeb467 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -248,7 +248,7 @@ int xgi_bootstrap(struct drm_device * dev, void * data, } /* Alloc 1M bytes for cmdbuffer which is flush2D batch array */ - err = xgi_cmdlist_initialize(info, 0x100000); + err = xgi_cmdlist_initialize(info, 0x100000, filp); if (err) { DRM_ERROR("xgi_cmdlist_initialize() failed\n"); return err; diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index a7740ceb..9c0f5a28 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -34,11 +34,11 @@ #define DRIVER_NAME "xgi" #define DRIVER_DESC "XGI XP5 / XP10 / XG47" -#define DRIVER_DATE "20070806" +#define DRIVER_DATE "20070809" #define DRIVER_MAJOR 0 #define DRIVER_MINOR 10 -#define DRIVER_PATCHLEVEL 5 +#define DRIVER_PATCHLEVEL 6 #include "xgi_cmdlist.h" #include "xgi_drm.h" From aea6b4dea9708f66f5fc2068fe84407682570aca Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Thu, 9 Aug 2007 15:30:36 -0700 Subject: [PATCH 236/437] Unify alloc and free ioctls. The DRM_XGI_PCIE_ALLOC and DRM_XGI_FB_ALLOC ioctls (and the matching free ioctls) are unified to DRM_XGI_ALLOC. The desired memory region is selected by xgi_mem_alloc::location. The region is magically encoded in xgi_mem_alloc::index, which is used to release the memory. Bump to version 0.11.0. This update requires a new DDX. 
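A minimal userspace sketch of the unified interface after this change, assuming an already-open DRM file descriptor fd; drmCommandWriteRead()/drmCommandWrite() are the generic libdrm helpers, and the struct fields come from the xgi_drm.h hunk below. The pool is chosen with xgi_mem_alloc::location, and the index returned by the kernel is what gets handed back to DRM_XGI_FREE.

    /* Illustrative only; error handling omitted. */
    struct xgi_mem_alloc alloc = {
        .location = XGI_MEMLOC_NON_LOCAL,      /* allocate from the GART pool */
        .size     = 64 * 1024,
    };

    drmCommandWriteRead(fd, DRM_XGI_ALLOC, &alloc, sizeof(alloc));
    /* ... use alloc.hw_addr / alloc.offset ... */
    drmCommandWrite(fd, DRM_XGI_FREE, &alloc.index, sizeof(alloc.index));
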
--- linux-core/xgi_drv.c | 7 ++--- linux-core/xgi_drv.h | 14 ++++------ linux-core/xgi_fb.c | 65 +++++++++++++++++++++++++++---------------- linux-core/xgi_pcie.c | 21 -------------- shared-core/xgi_drm.h | 46 +++++++++++++++++++----------- 5 files changed, 78 insertions(+), 75 deletions(-) diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index 9aaeb467..997051fb 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -42,11 +42,8 @@ static int xgi_bootstrap(struct drm_device *, void *, struct drm_file *); static struct drm_ioctl_desc xgi_ioctls[] = { DRM_IOCTL_DEF(DRM_XGI_BOOTSTRAP, xgi_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_XGI_FB_ALLOC, xgi_fb_alloc_ioctl, DRM_AUTH), - DRM_IOCTL_DEF(DRM_XGI_FB_FREE, xgi_fb_free_ioctl, DRM_AUTH), - - DRM_IOCTL_DEF(DRM_XGI_PCIE_ALLOC, xgi_pcie_alloc_ioctl, DRM_AUTH), - DRM_IOCTL_DEF(DRM_XGI_PCIE_FREE, xgi_pcie_free_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_XGI_ALLOC, xgi_alloc_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_XGI_FREE, xgi_free_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_XGI_GE_RESET, xgi_ge_reset_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_XGI_DUMP_REGISTER, xgi_dump_register_ioctl, DRM_AUTH), diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 9c0f5a28..e56d00bb 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -37,8 +37,8 @@ #define DRIVER_DATE "20070809" #define DRIVER_MAJOR 0 -#define DRIVER_MINOR 10 -#define DRIVER_PATCHLEVEL 6 +#define DRIVER_MINOR 11 +#define DRIVER_PATCHLEVEL 0 #include "xgi_cmdlist.h" #include "xgi_drm.h" @@ -89,6 +89,8 @@ struct xgi_info { }; extern struct kmem_cache *xgi_mem_block_cache; +extern int xgi_mem_free(struct xgi_mem_heap * heap, unsigned long offset, + struct drm_file * filp); extern int xgi_mem_heap_init(struct xgi_mem_heap * heap, unsigned int start, unsigned int end); extern void xgi_mem_heap_cleanup(struct xgi_mem_heap * heap); @@ -109,13 +111,9 @@ extern void *xgi_find_pcie_virt(struct xgi_info * info, u32 address); extern void xgi_free_all(struct xgi_info *, struct xgi_mem_heap *, struct drm_file *); -extern int xgi_fb_alloc_ioctl(struct drm_device * dev, void * data, +extern int xgi_alloc_ioctl(struct drm_device * dev, void * data, struct drm_file * filp); -extern int xgi_fb_free_ioctl(struct drm_device * dev, void * data, - struct drm_file * filp); -extern int xgi_pcie_alloc_ioctl(struct drm_device * dev, void * data, - struct drm_file * filp); -extern int xgi_pcie_free_ioctl(struct drm_device * dev, void * data, +extern int xgi_free_ioctl(struct drm_device * dev, void * data, struct drm_file * filp); extern int xgi_ge_reset_ioctl(struct drm_device * dev, void * data, struct drm_file * filp); diff --git a/linux-core/xgi_fb.c b/linux-core/xgi_fb.c index 26e6fc4e..d0182831 100644 --- a/linux-core/xgi_fb.c +++ b/linux-core/xgi_fb.c @@ -169,8 +169,8 @@ static struct xgi_mem_block *xgi_mem_alloc(struct xgi_mem_heap * heap, return (used_block); } -static int xgi_mem_free(struct xgi_mem_heap * heap, unsigned long offset, - struct drm_file * filp) +int xgi_mem_free(struct xgi_mem_heap * heap, unsigned long offset, + struct drm_file * filp) { struct xgi_mem_block *used_block = NULL, *block; struct xgi_mem_block *prev, *next; @@ -243,26 +243,45 @@ static int xgi_mem_free(struct xgi_mem_heap * heap, unsigned long offset, int xgi_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, - struct drm_file * filp) + struct drm_file * filp) { struct xgi_mem_block *block; + struct xgi_mem_heap *heap; + const char *const mem_name = (alloc->location == XGI_MEMLOC_LOCAL) 
+ ? "on-card" : "GART"; + + + if ((alloc->location != XGI_MEMLOC_LOCAL) + && (alloc->location != XGI_MEMLOC_NON_LOCAL)) { + DRM_ERROR("Invalid memory pool (0x%08x) specified.\n", + alloc->location); + return -EINVAL; + } + + heap = (alloc->location == XGI_MEMLOC_LOCAL) + ? &info->fb_heap : &info->pcie_heap; + + if (!heap->initialized) { + DRM_ERROR("Attempt to allocate from uninitialized memory " + "pool (0x%08x).\n", alloc->location); + return -EINVAL; + } mutex_lock(&info->dev->struct_mutex); - block = xgi_mem_alloc((alloc->location == XGI_MEMLOC_LOCAL) - ? &info->fb_heap : &info->pcie_heap, - alloc->size); + block = xgi_mem_alloc(heap, alloc->size); mutex_unlock(&info->dev->struct_mutex); if (block == NULL) { alloc->size = 0; - DRM_ERROR("Video RAM allocation failed\n"); + DRM_ERROR("%s memory allocation failed\n", mem_name); return -ENOMEM; } else { - DRM_INFO("Video RAM allocation succeeded: 0x%p\n", - (char *)block->offset); + DRM_DEBUG("%s memory allocation succeeded: 0x%p\n", + mem_name, (char *)block->offset); alloc->size = block->size; alloc->offset = block->offset; alloc->hw_addr = block->offset; + alloc->index = alloc->offset | alloc->location; if (alloc->location == XGI_MEMLOC_NON_LOCAL) { alloc->hw_addr += info->pcie.base; @@ -275,47 +294,45 @@ int xgi_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, } -int xgi_fb_alloc_ioctl(struct drm_device * dev, void * data, - struct drm_file * filp) +int xgi_alloc_ioctl(struct drm_device * dev, void * data, + struct drm_file * filp) { - struct xgi_mem_alloc *alloc = - (struct xgi_mem_alloc *) data; struct xgi_info *info = dev->dev_private; - alloc->location = XGI_MEMLOC_LOCAL; - return xgi_alloc(info, alloc, filp); + return xgi_alloc(info, (struct xgi_mem_alloc *) data, filp); } int xgi_free(struct xgi_info * info, unsigned long index, struct drm_file * filp) { - int err = 0; - const unsigned heap = index & 0x03; + int err; + struct xgi_mem_heap *const heap = + ((index & 0x03) == XGI_MEMLOC_NON_LOCAL) + ? &info->pcie_heap : &info->fb_heap; + const u32 offset = (index & ~0x03); mutex_lock(&info->dev->struct_mutex); - err = xgi_mem_free((heap == XGI_MEMLOC_NON_LOCAL) - ? 
&info->pcie_heap : &info->fb_heap, - (index & ~0x03), filp); + err = xgi_mem_free(heap, offset, filp); mutex_unlock(&info->dev->struct_mutex); return err; } -int xgi_fb_free_ioctl(struct drm_device * dev, void * data, - struct drm_file * filp) +int xgi_free_ioctl(struct drm_device * dev, void * data, + struct drm_file * filp) { struct xgi_info *info = dev->dev_private; - return xgi_free(info, XGI_MEMLOC_LOCAL | *(u32 *) data, filp); + return xgi_free(info, *(unsigned long *) data, filp); } int xgi_fb_heap_init(struct xgi_info * info) { return xgi_mem_heap_init(&info->fb_heap, XGI_FB_HEAP_START, - info->fb.size); + info->fb.size - XGI_FB_HEAP_START); } /** diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index 281223b3..4ec9b6ff 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -140,27 +140,6 @@ int xgi_pcie_heap_init(struct xgi_info * info) } -int xgi_pcie_alloc_ioctl(struct drm_device * dev, void * data, - struct drm_file * filp) -{ - struct xgi_mem_alloc *const alloc = - (struct xgi_mem_alloc *) data; - struct xgi_info *info = dev->dev_private; - - alloc->location = XGI_MEMLOC_NON_LOCAL; - return xgi_alloc(info, alloc, filp); -} - - -int xgi_pcie_free_ioctl(struct drm_device * dev, void * data, - struct drm_file * filp) -{ - struct xgi_info *info = dev->dev_private; - - return xgi_free(info, XGI_MEMLOC_NON_LOCAL | *(u32 *) data, filp); -} - - /** * xgi_find_pcie_virt * @address: GE HW address diff --git a/shared-core/xgi_drm.h b/shared-core/xgi_drm.h index adce7066..c980a35a 100644 --- a/shared-core/xgi_drm.h +++ b/shared-core/xgi_drm.h @@ -60,10 +60,20 @@ enum xgi_mem_location { }; struct xgi_mem_alloc { + /** + * Memory region to be used for allocation. + * + * Must be one of XGI_MEMLOC_NON_LOCAL or XGI_MEMLOC_LOCAL. + */ unsigned int location; + + /** + * Number of bytes request. + * + * On successful allocation, set to the actual number of bytes + * allocated. + */ unsigned int size; - unsigned int is_front; - unsigned int owner; /** * Address of the memory from the graphics hardware's point of view. @@ -74,6 +84,13 @@ struct xgi_mem_alloc { * Offset of the allocation in the mapping. */ __u32 offset; + + /** + * Magic handle used to release memory. + * + * See also DRM_XGI_FREE ioctl. 
+ */ + unsigned long index; }; enum xgi_batch_type { @@ -102,24 +119,19 @@ struct xgi_state_info { */ #define DRM_XGI_BOOTSTRAP 0 -#define DRM_XGI_FB_ALLOC 1 -#define DRM_XGI_FB_FREE 2 -#define DRM_XGI_PCIE_ALLOC 3 -#define DRM_XGI_PCIE_FREE 4 -#define DRM_XGI_SUBMIT_CMDLIST 5 -#define DRM_XGI_GE_RESET 6 -#define DRM_XGI_DUMP_REGISTER 7 -#define DRM_XGI_DEBUG_INFO 8 -#define DRM_XGI_TEST_RWINKERNEL 9 -#define DRM_XGI_STATE_CHANGE 10 +#define DRM_XGI_ALLOC 1 +#define DRM_XGI_FREE 2 +#define DRM_XGI_SUBMIT_CMDLIST 3 +#define DRM_XGI_GE_RESET 4 +#define DRM_XGI_DUMP_REGISTER 5 +#define DRM_XGI_DEBUG_INFO 6 +#define DRM_XGI_TEST_RWINKERNEL 7 +#define DRM_XGI_STATE_CHANGE 8 #define XGI_IOCTL_BOOTSTRAP DRM_IOWR(DRM_COMMAND_BASE + DRM_XGI_BOOTSTRAP, struct xgi_bootstrap) -#define XGI_IOCTL_FB_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_XGI_FB_ALLOC, struct xgi_mem_alloc) -#define XGI_IOCTL_FB_FREE DRM_IOW(DRM_COMMAND_BASE + DRM_XGI_FB_FREE, __u32) - -#define XGI_IOCTL_PCIE_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_XGI_PCIE_ALLOC, struct xgi_mem_alloc) -#define XGI_IOCTL_PCIE_FREE DRM_IOW(DRM_COMMAND_BASE + DRM_XGI_PCIE_FREE, __u32) +#define XGI_IOCTL_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_XGI_ALLOC, struct xgi_mem_alloc) +#define XGI_IOCTL_FREE DRM_IOW(DRM_COMMAND_BASE + DRM_XGI_FREE, __u32) #define XGI_IOCTL_GE_RESET DRM_IO(DRM_COMMAND_BASE + DRM_XGI_GE_RESET) #define XGI_IOCTL_DUMP_REGISTER DRM_IO(DRM_COMMAND_BASE + DRM_XGI_DUMP_REGISTER) From dbd4d0597ff32458bbe4347bdea0e4b9e55a14da Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Thu, 9 Aug 2007 16:01:14 -0700 Subject: [PATCH 237/437] Use sman memory manager instead of internal version. --- linux-core/xgi_drv.c | 46 +++---- linux-core/xgi_drv.h | 34 +---- linux-core/xgi_fb.c | 283 ++++-------------------------------------- linux-core/xgi_pcie.c | 6 +- 4 files changed, 55 insertions(+), 314 deletions(-) diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index 997051fb..b0f84c81 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -225,7 +225,7 @@ int xgi_bootstrap(struct drm_device * dev, void * data, /* Init the resource manager */ - if (!info->fb_heap.initialized) { + if (!info->fb_heap_initialized) { err = xgi_fb_heap_init(info); if (err) { DRM_ERROR("Unable to initialize FB heap.\n"); @@ -237,7 +237,7 @@ int xgi_bootstrap(struct drm_device * dev, void * data, info->pcie.size = bs->gart.size; /* Init the resource manager */ - if (!info->pcie_heap.initialized) { + if (!info->pcie_heap_initialized) { err = xgi_pcie_heap_init(info); if (err) { DRM_ERROR("Unable to initialize GART heap.\n"); @@ -296,13 +296,13 @@ void xgi_driver_lastclose(struct drm_device * dev) info->mmio_map = NULL; info->fb_map = NULL; - if (info->fb_heap.initialized) { - xgi_mem_heap_cleanup(&info->fb_heap); + if (info->pcie_heap_initialized) { + xgi_pcie_lut_cleanup(info); } - if (info->pcie_heap.initialized) { - xgi_mem_heap_cleanup(&info->pcie_heap); - xgi_pcie_lut_cleanup(info); + if (info->fb_heap_initialized + || info->pcie_heap_initialized) { + drm_sman_cleanup(&info->sman); } } } @@ -314,12 +314,16 @@ void xgi_reclaim_buffers_locked(struct drm_device * dev, struct xgi_info * info = dev->dev_private; mutex_lock(&info->dev->struct_mutex); + if (drm_sman_owner_clean(&info->sman, (unsigned long) filp)) { + mutex_unlock(&info->dev->struct_mutex); + return; + } + if (dev->driver->dma_quiescent) { dev->driver->dma_quiescent(dev); } - xgi_free_all(info, &info->pcie_heap, filp); - xgi_free_all(info, &info->fb_heap, filp); + drm_sman_owner_cleanup(&info->sman, 
(unsigned long) filp); mutex_unlock(&info->dev->struct_mutex); return; } @@ -357,6 +361,7 @@ void xgi_kern_isr_bh(struct drm_device *dev) int xgi_driver_load(struct drm_device *dev, unsigned long flags) { struct xgi_info *info = drm_alloc(sizeof(*info), DRM_MEM_DRIVER); + int err; if (!info) return -ENOMEM; @@ -375,7 +380,8 @@ int xgi_driver_load(struct drm_device *dev, unsigned long flags) if ((info->mmio.base == 0) || (info->mmio.size == 0)) { DRM_ERROR("mmio appears to be wrong: 0x%lx 0x%x\n", (unsigned long) info->mmio.base, info->mmio.size); - return -EINVAL; + err = -EINVAL; + goto fail; } @@ -386,28 +392,24 @@ int xgi_driver_load(struct drm_device *dev, unsigned long flags) (unsigned long) info->fb.base, info->fb.size); - xgi_mem_block_cache = kmem_cache_create("xgi_mem_block", - sizeof(struct xgi_mem_block), - 0, - SLAB_HWCACHE_ALIGN, - NULL, NULL); - if (xgi_mem_block_cache == NULL) { - return -ENOMEM; + err = drm_sman_init(&info->sman, 2, 12, 8); + if (err) { + goto fail; } return 0; + +fail: + drm_free(info, sizeof(*info), DRM_MEM_DRIVER); + return err; } int xgi_driver_unload(struct drm_device *dev) { struct xgi_info * info = dev->dev_private; - if (xgi_mem_block_cache) { - kmem_cache_destroy(xgi_mem_block_cache); - xgi_mem_block_cache = NULL; - } - + drm_sman_takedown(&info->sman); drm_free(info, sizeof(*info), DRM_MEM_DRIVER); dev->dev_private = NULL; diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index e56d00bb..6b209aa4 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -29,6 +29,7 @@ #include "drmP.h" #include "drm.h" +#include "drm_sman.h" #define DRIVER_AUTHOR "Andrea Zhang " @@ -38,7 +39,7 @@ #define DRIVER_MAJOR 0 #define DRIVER_MINOR 11 -#define DRIVER_PATCHLEVEL 0 +#define DRIVER_PATCHLEVEL 1 #include "xgi_cmdlist.h" #include "xgi_drm.h" @@ -48,22 +49,6 @@ struct xgi_aperture { unsigned int size; }; -struct xgi_mem_block { - struct list_head list; - unsigned long offset; - unsigned long size; - struct drm_file * filp; -}; - -struct xgi_mem_heap { - struct list_head free_list; - struct list_head used_list; - struct list_head sort_list; - unsigned long max_freesize; - - bool initialized; -}; - struct xgi_info { struct drm_device *dev; @@ -82,19 +67,13 @@ struct xgi_info { struct drm_dma_handle *lut_handle; unsigned int lutPageSize; - struct xgi_mem_heap fb_heap; - struct xgi_mem_heap pcie_heap; + struct drm_sman sman; + bool fb_heap_initialized; + bool pcie_heap_initialized; struct xgi_cmdring_info cmdring; }; -extern struct kmem_cache *xgi_mem_block_cache; -extern int xgi_mem_free(struct xgi_mem_heap * heap, unsigned long offset, - struct drm_file * filp); -extern int xgi_mem_heap_init(struct xgi_mem_heap * heap, unsigned int start, - unsigned int end); -extern void xgi_mem_heap_cleanup(struct xgi_mem_heap * heap); - extern int xgi_fb_heap_init(struct xgi_info * info); extern int xgi_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, @@ -108,9 +87,6 @@ extern void xgi_pcie_lut_cleanup(struct xgi_info * info); extern void *xgi_find_pcie_virt(struct xgi_info * info, u32 address); -extern void xgi_free_all(struct xgi_info *, struct xgi_mem_heap *, - struct drm_file *); - extern int xgi_alloc_ioctl(struct drm_device * dev, void * data, struct drm_file * filp); extern int xgi_free_ioctl(struct drm_device * dev, void * data, diff --git a/linux-core/xgi_fb.c b/linux-core/xgi_fb.c index d0182831..40f39fbc 100644 --- a/linux-core/xgi_fb.c +++ b/linux-core/xgi_fb.c @@ -28,225 +28,10 @@ #define XGI_FB_HEAP_START 0x1000000 -struct kmem_cache 
*xgi_mem_block_cache = NULL; - -static struct xgi_mem_block *xgi_mem_new_node(void); - - -int xgi_mem_heap_init(struct xgi_mem_heap *heap, unsigned int start, - unsigned int end) -{ - struct xgi_mem_block *block; - - INIT_LIST_HEAD(&heap->free_list); - INIT_LIST_HEAD(&heap->used_list); - INIT_LIST_HEAD(&heap->sort_list); - heap->initialized = TRUE; - - block = kmem_cache_alloc(xgi_mem_block_cache, GFP_KERNEL); - if (!block) { - return -ENOMEM; - } - - block->offset = start; - block->size = end - start; - - list_add(&block->list, &heap->free_list); - - heap->max_freesize = end - start; - - return 0; -} - - -void xgi_mem_heap_cleanup(struct xgi_mem_heap * heap) -{ - struct list_head *free_list; - struct xgi_mem_block *block; - struct xgi_mem_block *next; - int i; - - free_list = &heap->free_list; - for (i = 0; i < 3; i++, free_list++) { - list_for_each_entry_safe(block, next, free_list, list) { - DRM_INFO - ("No. %d block->offset: 0x%lx block->size: 0x%lx \n", - i, block->offset, block->size); - kmem_cache_free(xgi_mem_block_cache, block); - block = NULL; - } - } - - heap->initialized = 0; -} - - -struct xgi_mem_block *xgi_mem_new_node(void) -{ - struct xgi_mem_block *block = - kmem_cache_alloc(xgi_mem_block_cache, GFP_KERNEL); - - if (!block) { - DRM_ERROR("kmem_cache_alloc failed\n"); - return NULL; - } - - block->offset = 0; - block->size = 0; - block->filp = (struct drm_file *) -1; - - return block; -} - - -static struct xgi_mem_block *xgi_mem_alloc(struct xgi_mem_heap * heap, - unsigned long originalSize) -{ - struct xgi_mem_block *block, *free_block, *used_block; - unsigned long size = (originalSize + PAGE_SIZE - 1) & PAGE_MASK; - - - DRM_INFO("Original 0x%lx bytes requested, really 0x%lx allocated\n", - originalSize, size); - - if (size == 0) { - DRM_ERROR("size == 0\n"); - return (NULL); - } - DRM_INFO("max_freesize: 0x%lx \n", heap->max_freesize); - if (size > heap->max_freesize) { - DRM_ERROR - ("size: 0x%lx is bigger than frame buffer total free size: 0x%lx !\n", - size, heap->max_freesize); - return (NULL); - } - - list_for_each_entry(block, &heap->free_list, list) { - DRM_INFO("block: 0x%px \n", block); - if (size <= block->size) { - break; - } - } - - if (&block->list == &heap->free_list) { - DRM_ERROR - ("Can't allocate %ldk size from frame buffer memory !\n", - size / 1024); - return (NULL); - } - - free_block = block; - DRM_INFO("alloc size: 0x%lx from offset: 0x%lx size: 0x%lx \n", - size, free_block->offset, free_block->size); - - if (size == free_block->size) { - used_block = free_block; - DRM_INFO("size == free_block->size: free_block = 0x%p\n", - free_block); - list_del(&free_block->list); - } else { - used_block = xgi_mem_new_node(); - - if (used_block == NULL) - return (NULL); - - if (used_block == free_block) { - DRM_ERROR("used_block == free_block = 0x%p\n", - used_block); - } - - used_block->offset = free_block->offset; - used_block->size = size; - - free_block->offset += size; - free_block->size -= size; - } - - heap->max_freesize -= size; - - list_add(&used_block->list, &heap->used_list); - - return (used_block); -} - -int xgi_mem_free(struct xgi_mem_heap * heap, unsigned long offset, - struct drm_file * filp) -{ - struct xgi_mem_block *used_block = NULL, *block; - struct xgi_mem_block *prev, *next; - - unsigned long upper; - unsigned long lower; - - list_for_each_entry(block, &heap->used_list, list) { - if (block->offset == offset) { - break; - } - } - - if (&block->list == &heap->used_list) { - DRM_ERROR("can't find block: 0x%lx to free!\n", offset); - return 
-ENOENT; - } - - if (block->filp != filp) { - return -EPERM; - } - - used_block = block; - DRM_INFO("used_block: 0x%p, offset = 0x%lx, size = 0x%lx\n", - used_block, used_block->offset, used_block->size); - - heap->max_freesize += used_block->size; - - prev = next = NULL; - upper = used_block->offset + used_block->size; - lower = used_block->offset; - - list_for_each_entry(block, &heap->free_list, list) { - if (block->offset == upper) { - next = block; - } else if ((block->offset + block->size) == lower) { - prev = block; - } - } - - DRM_INFO("next = 0x%p, prev = 0x%p\n", next, prev); - list_del(&used_block->list); - - if (prev && next) { - prev->size += (used_block->size + next->size); - list_del(&next->list); - DRM_INFO("free node 0x%p\n", next); - kmem_cache_free(xgi_mem_block_cache, next); - kmem_cache_free(xgi_mem_block_cache, used_block); - } - else if (prev) { - prev->size += used_block->size; - DRM_INFO("free node 0x%p\n", used_block); - kmem_cache_free(xgi_mem_block_cache, used_block); - } - else if (next) { - next->size += used_block->size; - next->offset = used_block->offset; - DRM_INFO("free node 0x%p\n", used_block); - kmem_cache_free(xgi_mem_block_cache, used_block); - } - else { - list_add(&used_block->list, &heap->free_list); - DRM_INFO("Recycled free node %p, offset = 0x%lx, size = 0x%lx\n", - used_block, used_block->offset, used_block->size); - } - - return 0; -} - - int xgi_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, struct drm_file * filp) { - struct xgi_mem_block *block; - struct xgi_mem_heap *heap; + struct drm_memblock_item *block; const char *const mem_name = (alloc->location == XGI_MEMLOC_LOCAL) ? "on-card" : "GART"; @@ -258,17 +43,16 @@ int xgi_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, return -EINVAL; } - heap = (alloc->location == XGI_MEMLOC_LOCAL) - ? &info->fb_heap : &info->pcie_heap; - - if (!heap->initialized) { + if ((alloc->location == XGI_MEMLOC_LOCAL) + ? !info->fb_heap_initialized : !info->pcie_heap_initialized) { DRM_ERROR("Attempt to allocate from uninitialized memory " "pool (0x%08x).\n", alloc->location); return -EINVAL; } mutex_lock(&info->dev->struct_mutex); - block = xgi_mem_alloc(heap, alloc->size); + block = drm_sman_alloc(&info->sman, alloc->location, alloc->size, + 0, (unsigned long) filp); mutex_unlock(&info->dev->struct_mutex); if (block == NULL) { @@ -276,18 +60,17 @@ int xgi_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, DRM_ERROR("%s memory allocation failed\n", mem_name); return -ENOMEM; } else { - DRM_DEBUG("%s memory allocation succeeded: 0x%p\n", - mem_name, (char *)block->offset); - alloc->size = block->size; - alloc->offset = block->offset; - alloc->hw_addr = block->offset; - alloc->index = alloc->offset | alloc->location; + alloc->offset = (*block->mm->offset)(block->mm, + block->mm_info); + alloc->hw_addr = alloc->offset; + alloc->index = block->user_hash.key; if (alloc->location == XGI_MEMLOC_NON_LOCAL) { alloc->hw_addr += info->pcie.base; } - block->filp = filp; + DRM_DEBUG("%s memory allocation succeeded: 0x%x\n", + mem_name, alloc->offset); } return 0; @@ -307,13 +90,9 @@ int xgi_free(struct xgi_info * info, unsigned long index, struct drm_file * filp) { int err; - struct xgi_mem_heap *const heap = - ((index & 0x03) == XGI_MEMLOC_NON_LOCAL) - ? 
&info->pcie_heap : &info->fb_heap; - const u32 offset = (index & ~0x03); mutex_lock(&info->dev->struct_mutex); - err = xgi_mem_free(heap, offset, filp); + err = drm_sman_free_key(&info->sman, index); mutex_unlock(&info->dev->struct_mutex); return err; @@ -331,34 +110,14 @@ int xgi_free_ioctl(struct drm_device * dev, void * data, int xgi_fb_heap_init(struct xgi_info * info) { - return xgi_mem_heap_init(&info->fb_heap, XGI_FB_HEAP_START, + int err; + + mutex_lock(&info->dev->struct_mutex); + err = drm_sman_set_range(&info->sman, XGI_MEMLOC_LOCAL, + XGI_FB_HEAP_START, info->fb.size - XGI_FB_HEAP_START); -} - -/** - * Free all blocks associated with a particular file handle. - */ -void xgi_free_all(struct xgi_info * info, struct xgi_mem_heap * heap, - struct drm_file * filp) -{ - if (!heap->initialized) { - return; - } - - - do { - struct xgi_mem_block *block; - - list_for_each_entry(block, &heap->used_list, list) { - if (block->filp == filp) { - break; - } - } - - if (&block->list == &heap->used_list) { - break; - } - - (void) xgi_mem_free(heap, block->offset, filp); - } while(1); + mutex_unlock(&info->dev->struct_mutex); + + info->fb_heap_initialized = (err == 0); + return err; } diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index 4ec9b6ff..932615a4 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -131,11 +131,15 @@ int xgi_pcie_heap_init(struct xgi_info * info) } - err = xgi_mem_heap_init(&info->pcie_heap, 0, info->pcie.size); + mutex_lock(&info->dev->struct_mutex); + err = drm_sman_set_range(&info->sman, XGI_MEMLOC_NON_LOCAL, + 0, info->pcie.size); + mutex_unlock(&info->dev->struct_mutex); if (err) { xgi_pcie_lut_cleanup(info); } + info->pcie_heap_initialized = (err == 0); return err; } From 6dd97099ea5c6dc7931c6b482eb5935f7dd9ed2d Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Thu, 9 Aug 2007 16:20:44 -0700 Subject: [PATCH 238/437] Minor clean up of IRQ code. Much, much more to come. 
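For reference, the shape the handler is being moved toward is the usual shared-IRQ contract: read the
status register, return IRQ_NONE if none of our bits are set, otherwise acknowledge what was seen and
claim the interrupt. The sketch below is illustrative only; the 0x2810 offset is taken from the hunk,
but the acknowledge write and the final bit masks are placeholders until the follow-up patch that adds
the M2REG_* definitions.

	irqreturn_t example_isr(DRM_IRQ_ARGS)
	{
		struct drm_device *dev = (struct drm_device *) arg;
		struct xgi_info *info = dev->dev_private;
		u32 status = DRM_READ32(info->mmio_map, 0x2810);

		if (status == 0)
			return IRQ_NONE;	/* not our interrupt; let other handlers run */

		/* Acknowledge only the bits we observed, then claim the interrupt. */
		DRM_WRITE32(info->mmio_map, 0x2810, status);
		return IRQ_HANDLED;
	}
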
--- linux-core/xgi_drv.c | 29 +++++++++-------------------- linux-core/xgi_drv.h | 2 +- 2 files changed, 10 insertions(+), 21 deletions(-) diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index b0f84c81..d0878184 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -186,8 +186,6 @@ void xgi_engine_init(struct xgi_info * info) } -void xgi_kern_isr_bh(struct drm_device *dev); - int xgi_bootstrap(struct drm_device * dev, void * data, struct drm_file * filp) { @@ -335,29 +333,20 @@ void xgi_reclaim_buffers_locked(struct drm_device * dev, irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS) { struct drm_device *dev = (struct drm_device *) arg; -// struct xgi_info *info = dev->dev_private; - u32 need_to_run_bottom_half = 0; - - //DRM_INFO("xgi_kern_isr \n"); - - //xgi_dvi_irq_handler(info); - - if (need_to_run_bottom_half) { - drm_locked_tasklet(dev, xgi_kern_isr_bh); - } - - return IRQ_HANDLED; -} - -void xgi_kern_isr_bh(struct drm_device *dev) -{ struct xgi_info *info = dev->dev_private; + const u32 irq_bits = DRM_READ32(info->mmio_map, 0x2810); - DRM_INFO("xgi_kern_isr_bh \n"); - //xgi_dvi_irq_handler(info); + if ((irq_bits & 0x00000000) != 0) { + DRM_WRITE32(info->mmio_map, 0x2810, + 0x04000000 | irq_bits); + return IRQ_HANDLED; + } else { + return IRQ_NONE; + } } + int xgi_driver_load(struct drm_device *dev, unsigned long flags) { struct xgi_info *info = drm_alloc(sizeof(*info), DRM_MEM_DRIVER); diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 6b209aa4..6afc4c6c 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -39,7 +39,7 @@ #define DRIVER_MAJOR 0 #define DRIVER_MINOR 11 -#define DRIVER_PATCHLEVEL 1 +#define DRIVER_PATCHLEVEL 2 #include "xgi_cmdlist.h" #include "xgi_drm.h" From 371f0a4d410f02d8db050b51fd2e714f888a71e0 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Thu, 9 Aug 2007 18:15:42 -0700 Subject: [PATCH 239/437] Mask off correct bits in M2REG_AUTO_LINK_STATUS for interrupt handling. --- linux-core/xgi_cmdlist.h | 29 ----------------------- linux-core/xgi_drv.c | 15 ++++++++---- linux-core/xgi_regs.h | 51 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 62 insertions(+), 33 deletions(-) diff --git a/linux-core/xgi_cmdlist.h b/linux-core/xgi_cmdlist.h index 07a2eb98..dc3fbe5a 100644 --- a/linux-core/xgi_cmdlist.h +++ b/linux-core/xgi_cmdlist.h @@ -27,35 +27,6 @@ #ifndef _XGI_CMDLIST_H_ #define _XGI_CMDLIST_H_ -#define ONE_BIT_MASK 0x1 -#define TWENTY_BIT_MASK 0xfffff -#define M2REG_FLUSH_2D_ENGINE_MASK (ONE_BIT_MASK<<20) -#define M2REG_FLUSH_3D_ENGINE_MASK TWENTY_BIT_MASK -#define M2REG_FLUSH_FLIP_ENGINE_MASK (ONE_BIT_MASK<<21) -#define BASE_3D_ENG 0x2800 -#define M2REG_AUTO_LINK_SETTING_ADDRESS 0x10 -#define M2REG_CLEAR_COUNTERS_MASK (ONE_BIT_MASK<<4) -#define M2REG_PCI_TRIGGER_MODE_MASK (ONE_BIT_MASK<<1) -#define BEGIN_VALID_MASK (ONE_BIT_MASK<<20) -#define BEGIN_LINK_ENABLE_MASK (ONE_BIT_MASK<<31) -#define BEGIN_BEGIN_IDENTIFICATION_MASK (TWENTY_BIT_MASK<<0) -#define M2REG_PCI_TRIGGER_REGISTER_ADDRESS 0x14 - -typedef enum { - FLUSH_2D = M2REG_FLUSH_2D_ENGINE_MASK, - FLUSH_3D = M2REG_FLUSH_3D_ENGINE_MASK, - FLUSH_FLIP = M2REG_FLUSH_FLIP_ENGINE_MASK -} FLUSH_CODE; - -typedef enum { - AGPCMDLIST_SCRATCH_SIZE = 0x100, - AGPCMDLIST_BEGIN_SIZE = 0x004, - AGPCMDLIST_3D_SCRATCH_CMD_SIZE = 0x004, - AGPCMDLIST_2D_SCRATCH_CMD_SIZE = 0x00c, - AGPCMDLIST_FLUSH_CMD_LEN = 0x004, - AGPCMDLIST_DUMY_END_BATCH_LEN = AGPCMDLIST_BEGIN_SIZE -} CMD_SIZE; - struct xgi_cmdring_info { /** * Kernel space pointer to the base of the command ring. 
diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index d0878184..f20df585 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -334,12 +334,19 @@ irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS) { struct drm_device *dev = (struct drm_device *) arg; struct xgi_info *info = dev->dev_private; - const u32 irq_bits = DRM_READ32(info->mmio_map, 0x2810); + const u32 irq_bits = DRM_READ32(info->mmio_map, + (0x2800 + + M2REG_AUTO_LINK_STATUS_ADDRESS)) + & (M2REG_ACTIVE_TIMER_INTERRUPT_MASK + | M2REG_ACTIVE_INTERRUPT_0_MASK + | M2REG_ACTIVE_INTERRUPT_2_MASK + | M2REG_ACTIVE_INTERRUPT_3_MASK); - if ((irq_bits & 0x00000000) != 0) { - DRM_WRITE32(info->mmio_map, 0x2810, - 0x04000000 | irq_bits); + if (irq_bits != 0) { + DRM_WRITE32(info->mmio_map, + 0x2800 + M2REG_AUTO_LINK_SETTING_ADDRESS, + M2REG_AUTO_LINK_SETTING_COMMAND | irq_bits); return IRQ_HANDLED; } else { return IRQ_NONE; diff --git a/linux-core/xgi_regs.h b/linux-core/xgi_regs.h index b3a47f8e..57e93405 100644 --- a/linux-core/xgi_regs.h +++ b/linux-core/xgi_regs.h @@ -30,6 +30,57 @@ #include "drmP.h" #include "drm.h" +#define BASE_3D_ENG 0x2800 + +#define MAKE_MASK(bits) ((1U << (bits)) - 1) + +#define ONE_BIT_MASK MAKE_MASK(1) +#define TWENTY_BIT_MASK MAKE_MASK(20) +#define TWENTYONE_BIT_MASK MAKE_MASK(21) +#define TWENTYTWO_BIT_MASK MAKE_MASK(22) + +#define M2REG_FLUSH_ENGINE_ADDRESS 0x000 +#define M2REG_FLUSH_ENGINE_COMMAND 0x00 +#define M2REG_FLUSH_FLIP_ENGINE_MASK (ONE_BIT_MASK<<21) +#define M2REG_FLUSH_2D_ENGINE_MASK (ONE_BIT_MASK<<20) +#define M2REG_FLUSH_3D_ENGINE_MASK TWENTY_BIT_MASK + +/* Write register */ +#define M2REG_AUTO_LINK_SETTING_ADDRESS 0x010 +#define M2REG_AUTO_LINK_SETTING_COMMAND 0x04 +#define M2REG_CLEAR_TIMER_INTERRUPT_MASK (ONE_BIT_MASK<<11) +#define M2REG_CLEAR_INTERRUPT_3_MASK (ONE_BIT_MASK<<10) +#define M2REG_CLEAR_INTERRUPT_2_MASK (ONE_BIT_MASK<<9) +#define M2REG_CLEAR_INTERRUPT_0_MASK (ONE_BIT_MASK<<8) +#define M2REG_CLEAR_COUNTERS_MASK (ONE_BIT_MASK<<4) +#define M2REG_PCI_TRIGGER_MODE_MASK (ONE_BIT_MASK<<1) +#define M2REG_INVALID_LIST_AUTO_INTERRUPT_MASK (ONE_BIT_MASK<<0) + +/* Read register */ +#define M2REG_AUTO_LINK_STATUS_ADDRESS 0x010 +#define M2REG_AUTO_LINK_STATUS_COMMAND 0x04 +#define M2REG_ACTIVE_TIMER_INTERRUPT_MASK (ONE_BIT_MASK<<11) +#define M2REG_ACTIVE_INTERRUPT_3_MASK (ONE_BIT_MASK<<10) +#define M2REG_ACTIVE_INTERRUPT_2_MASK (ONE_BIT_MASK<<9) +#define M2REG_ACTIVE_INTERRUPT_0_MASK (ONE_BIT_MASK<<8) +#define M2REG_INVALID_LIST_AUTO_INTERRUPTED_MODE_MASK (ONE_BIT_MASK<<0) + +#define M2REG_PCI_TRIGGER_REGISTER_ADDRESS 0x014 +#define M2REG_PCI_TRIGGER_REGISTER_COMMAND 0x05 + + +/** + * Begin instruction, double-word 0 + */ +#define BEGIN_VALID_MASK (ONE_BIT_MASK<<20) +#define BEGIN_BEGIN_IDENTIFICATION_MASK TWENTY_BIT_MASK + +/** + * Begin instruction, double-word 1 + */ +#define BEGIN_LINK_ENABLE_MASK (ONE_BIT_MASK<<31) +#define BEGIN_COMMAND_LIST_LENGTH_MASK TWENTYTWO_BIT_MASK + /* Hardware access functions */ static inline void OUT3C5B(struct drm_map * map, u8 index, u8 data) From 06e09842dfbdaa9502d3b3e6b657de4e3630644c Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Thu, 9 Aug 2007 18:28:16 -0700 Subject: [PATCH 240/437] Use DRM_MEMORYBARRIER() macro instead of mb(). 
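The barrier orders the CPU's stores to the GART lookup table ahead of the point where the hardware is
told to use it; the patch simply replaces the open-coded wbinvd/mb() with the portable macro. A minimal
sketch of the ordering requirement, with placeholder names (not the exact xgi_pcie_lut_init() sequence):

	static void fill_lut_then_fence(u32 *lut, struct drm_sg_mem *sg)
	{
		int i;

		/* CPU fills the GART lookup table in system memory. */
		for (i = 0; i < sg->pages; i++)
			lut[i] = sg->busaddr[i];

		/* Make the table globally visible before the hardware is pointed at it. */
		DRM_MEMORYBARRIER();
	}
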
--- linux-core/xgi_pcie.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index 932615a4..d15ea32a 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -96,11 +96,7 @@ static int xgi_pcie_lut_init(struct xgi_info * info) lut[i] = info->dev->sg->busaddr[i]; } -#if defined(__i386__) || defined(__x86_64__) - asm volatile ("wbinvd":::"memory"); -#else - mb(); -#endif + DRM_MEMORYBARRIER(); /* Set GART in SFB */ temp = DRM_READ8(info->mmio_map, 0xB00C); From 20a0e5e4298761ae6005399e45d66b93109d2121 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Thu, 9 Aug 2007 18:57:15 -0700 Subject: [PATCH 241/437] After calling drm_sman_cleanup, mark both heaps as uninitialized. Since the heaps weren't marked as uninitialized, SG memory was never re-allocated. This prevented the X-server from being able to restart without re-loading the kernel module. --- linux-core/xgi_drv.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index f20df585..fa418c0d 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -301,6 +301,9 @@ void xgi_driver_lastclose(struct drm_device * dev) if (info->fb_heap_initialized || info->pcie_heap_initialized) { drm_sman_cleanup(&info->sman); + + info->fb_heap_initialized = FALSE; + info->pcie_heap_initialized = FALSE; } } } From 39907f613b6c84499c34c9a6ece5f5dde64788c0 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 10 Aug 2007 13:53:10 +1000 Subject: [PATCH 242/437] nouveau: Allow creation of gpuobjs before any other init has taken place. --- shared-core/nouveau_drv.h | 2 ++ shared-core/nouveau_object.c | 26 ++++++++++++++++++++++++-- shared-core/nouveau_state.c | 5 ++++- 3 files changed, 30 insertions(+), 3 deletions(-) diff --git a/shared-core/nouveau_drv.h b/shared-core/nouveau_drv.h index 572df46e..4d5c7f7e 100644 --- a/shared-core/nouveau_drv.h +++ b/shared-core/nouveau_drv.h @@ -370,8 +370,10 @@ extern int nouveau_fifo_alloc(struct drm_device *dev, extern void nouveau_fifo_free(struct nouveau_channel *); /* nouveau_object.c */ +extern int nouveau_gpuobj_early_init(struct drm_device *); extern int nouveau_gpuobj_init(struct drm_device *); extern void nouveau_gpuobj_takedown(struct drm_device *); +extern void nouveau_gpuobj_late_takedown(struct drm_device *); extern int nouveau_gpuobj_channel_init(struct nouveau_channel *, uint32_t vram_h, uint32_t tt_h); extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *); diff --git a/shared-core/nouveau_object.c b/shared-core/nouveau_object.c index bb096531..d4142e44 100644 --- a/shared-core/nouveau_object.c +++ b/shared-core/nouveau_object.c @@ -264,12 +264,26 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, return 0; } +int +nouveau_gpuobj_early_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + DRM_DEBUG("\n"); + + INIT_LIST_HEAD(&dev_priv->gpuobj_list); + + return 0; +} + int nouveau_gpuobj_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; int ret; + DRM_DEBUG("\n"); + if (dev_priv->card_type < NV_50) { if ((ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramht_offset, dev_priv->ramht_size, @@ -286,12 +300,20 @@ void nouveau_gpuobj_takedown(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_gpuobj *gpuobj = NULL; - struct list_head *entry, *tmp; DRM_DEBUG("\n"); nouveau_gpuobj_del(dev, &dev_priv->ramht); +} + +void 
+nouveau_gpuobj_late_takedown(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *gpuobj = NULL; + struct list_head *entry, *tmp; + + DRM_DEBUG("\n"); list_for_each_safe(entry, tmp, &dev_priv->gpuobj_list) { gpuobj = list_entry(entry, struct nouveau_gpuobj, list); diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c index e80e77a5..eac38060 100644 --- a/shared-core/nouveau_state.c +++ b/shared-core/nouveau_state.c @@ -296,7 +296,8 @@ nouveau_card_init(struct drm_device *dev) engine = &dev_priv->Engine; dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED; - INIT_LIST_HEAD(&dev_priv->gpuobj_list); + ret = nouveau_gpuobj_early_init(dev); + if (ret) return ret; /* Initialise instance memory, must happen before mem_init so we * know exactly how much VRAM we're able to use for "normal" @@ -375,6 +376,8 @@ static void nouveau_card_takedown(struct drm_device *dev) drm_irq_uninstall(dev); + nouveau_gpuobj_late_takedown(dev); + dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN; } } From a46104674f129e873b8dfa29cf8aac9c67bd77be Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 10 Aug 2007 13:54:26 +1000 Subject: [PATCH 243/437] nouveau/nv50: demagic instmem setup. --- shared-core/nouveau_drv.h | 3 +- shared-core/nouveau_fifo.c | 2 - shared-core/nouveau_object.c | 50 +++++-- shared-core/nouveau_reg.h | 10 ++ shared-core/nv04_fifo.c | 2 +- shared-core/nv10_fifo.c | 2 +- shared-core/nv40_fifo.c | 2 +- shared-core/nv50_fifo.c | 37 ++---- shared-core/nv50_instmem.c | 246 ++++++++++++++++++++++------------- 9 files changed, 217 insertions(+), 137 deletions(-) diff --git a/shared-core/nouveau_drv.h b/shared-core/nouveau_drv.h index 4d5c7f7e..e3d0ff4c 100644 --- a/shared-core/nouveau_drv.h +++ b/shared-core/nouveau_drv.h @@ -393,7 +393,8 @@ extern int nouveau_gpuobj_new_ref(struct drm_device *, struct nouveau_channel *ref_chan, uint32_t handle, int size, int align, uint32_t flags, struct nouveau_gpuobj_ref **); -extern int nouveau_gpuobj_new_fake(struct drm_device *, uint32_t offset, +extern int nouveau_gpuobj_new_fake(struct drm_device *, + uint32_t p_offset, uint32_t b_offset, uint32_t size, uint32_t flags, struct nouveau_gpuobj **, struct nouveau_gpuobj_ref**); diff --git a/shared-core/nouveau_fifo.c b/shared-core/nouveau_fifo.c index 22bced14..f0c2a556 100644 --- a/shared-core/nouveau_fifo.c +++ b/shared-core/nouveau_fifo.c @@ -285,8 +285,6 @@ nouveau_fifo_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, * When there are no more contexts, you lost */ for(channel=0; channelcard_type == NV_50) && (channel == 0)) - continue; if (dev_priv->fifos[channel] == NULL) break; } diff --git a/shared-core/nouveau_object.c b/shared-core/nouveau_object.c index d4142e44..e0cb334f 100644 --- a/shared-core/nouveau_object.c +++ b/shared-core/nouveau_object.c @@ -72,6 +72,8 @@ nouveau_ramht_hash_handle(struct drm_device *dev, int channel, uint32_t handle) uint32_t hash = 0; int i; + DRM_DEBUG("ch%d handle=0x%08x\n", channel, handle); + for (i=32;i>0;i-=dev_priv->ramht_bits) { hash ^= (handle & ((1 << dev_priv->ramht_bits) - 1)); handle >>= dev_priv->ramht_bits; @@ -80,7 +82,7 @@ nouveau_ramht_hash_handle(struct drm_device *dev, int channel, uint32_t handle) hash ^= channel << (dev_priv->ramht_bits - 4); hash <<= 3; - DRM_DEBUG("ch%d handle=0x%08x hash=0x%08x\n", channel, handle, hash); + DRM_DEBUG("hash=0x%08x\n", hash); return hash; } @@ -286,7 +288,7 @@ nouveau_gpuobj_init(struct drm_device *dev) if (dev_priv->card_type < NV_50) { if 
((ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramht_offset, - dev_priv->ramht_size, + ~0, dev_priv->ramht_size, NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ALLOW_NO_REFS, &dev_priv->ramht, NULL))) @@ -346,7 +348,13 @@ nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj) if (gpuobj->dtor) gpuobj->dtor(dev, gpuobj); - engine->instmem.clear(dev, gpuobj); + if (gpuobj->im_backing) { + if (gpuobj->flags & NVOBJ_FLAG_FAKE) + drm_free(gpuobj->im_backing, + sizeof(*gpuobj->im_backing), DRM_MEM_DRIVER); + else + engine->instmem.clear(dev, gpuobj); + } if (gpuobj->im_pramin) { if (gpuobj->flags & NVOBJ_FLAG_FAKE) @@ -525,7 +533,8 @@ nouveau_gpuobj_ref_find(struct nouveau_channel *chan, uint32_t handle, } int -nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t offset, uint32_t size, +nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset, + uint32_t b_offset, uint32_t size, uint32_t flags, struct nouveau_gpuobj **pgpuobj, struct nouveau_gpuobj_ref **pref) { @@ -533,8 +542,8 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t offset, uint32_t size, struct nouveau_gpuobj *gpuobj = NULL; int i; - DRM_DEBUG("offset=0x%08x size=0x%08x flags=0x%08x\n", - offset, size, flags); + DRM_DEBUG("p_offset=0x%08x b_offset=0x%08x size=0x%08x flags=0x%08x\n", + p_offset, b_offset, size, flags); gpuobj = drm_calloc(1, sizeof(*gpuobj), DRM_MEM_DRIVER); if (!gpuobj) @@ -545,14 +554,27 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t offset, uint32_t size, list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); - gpuobj->im_pramin = drm_calloc(1, sizeof(struct mem_block), - DRM_MEM_DRIVER); - if (!gpuobj->im_pramin) { - nouveau_gpuobj_del(dev, &gpuobj); - return -ENOMEM; + if (p_offset != ~0) { + gpuobj->im_pramin = drm_calloc(1, sizeof(struct mem_block), + DRM_MEM_DRIVER); + if (!gpuobj->im_pramin) { + nouveau_gpuobj_del(dev, &gpuobj); + return -ENOMEM; + } + gpuobj->im_pramin->start = p_offset; + gpuobj->im_pramin->size = size; + } + + if (b_offset != ~0) { + gpuobj->im_backing = drm_calloc(1, sizeof(struct mem_block), + DRM_MEM_DRIVER); + if (!gpuobj->im_backing) { + nouveau_gpuobj_del(dev, &gpuobj); + return -ENOMEM; + } + gpuobj->im_backing->start = b_offset; + gpuobj->im_backing->size = size; } - gpuobj->im_pramin->start = offset; - gpuobj->im_pramin->size = size; if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { for (i = 0; i < gpuobj->im_pramin->size; i += 4) @@ -962,7 +984,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 0x1400 : 0x200; vm_offset += chan->ramin->gpuobj->im_pramin->start; - if ((ret = nouveau_gpuobj_new_fake(dev, vm_offset, 0x4000, + if ((ret = nouveau_gpuobj_new_fake(dev, vm_offset, ~0, 0x4000, 0, &chan->vm_pd, NULL))) return ret; for (i=0; i<0x4000; i+=8) { diff --git a/shared-core/nouveau_reg.h b/shared-core/nouveau_reg.h index 47d54b2a..65614627 100644 --- a/shared-core/nouveau_reg.h +++ b/shared-core/nouveau_reg.h @@ -80,6 +80,16 @@ #define NV40_PMC_1708 0x00001708 #define NV40_PMC_170C 0x0000170C +/* probably PMC ? 
*/ +#define NV50_PUNK_BAR0_PRAMIN 0x00001700 +#define NV50_PUNK_BAR_CFG_BASE 0x00001704 +#define NV50_PUNK_BAR_CFG_BASE_VALID (1<<30) +#define NV50_PUNK_BAR1_CTXDMA 0x00001708 +#define NV50_PUNK_BAR1_CTXDMA_VALID (1<<31) +#define NV50_PUNK_BAR3_CTXDMA 0x0000170C +#define NV50_PUNK_BAR3_CTXDMA_VALID (1<<31) +#define NV50_PUNK_UNK1710 0x00001710 + #define NV04_PTIMER_INTR_0 0x00009100 #define NV04_PTIMER_INTR_EN_0 0x00009140 #define NV04_PTIMER_NUMERATOR 0x00009200 diff --git a/shared-core/nv04_fifo.c b/shared-core/nv04_fifo.c index 4d61f4fe..d750ced8 100644 --- a/shared-core/nv04_fifo.c +++ b/shared-core/nv04_fifo.c @@ -42,7 +42,7 @@ nv04_fifo_create_context(struct nouveau_channel *chan) struct drm_nouveau_private *dev_priv = dev->dev_private; int ret; - if ((ret = nouveau_gpuobj_new_fake(dev, NV04_RAMFC(chan->id), + if ((ret = nouveau_gpuobj_new_fake(dev, NV04_RAMFC(chan->id), ~0, NV04_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, diff --git a/shared-core/nv10_fifo.c b/shared-core/nv10_fifo.c index a056460d..c86725d2 100644 --- a/shared-core/nv10_fifo.c +++ b/shared-core/nv10_fifo.c @@ -43,7 +43,7 @@ nv10_fifo_create_context(struct nouveau_channel *chan) struct drm_nouveau_private *dev_priv = dev->dev_private; int ret; - if ((ret = nouveau_gpuobj_new_fake(dev, NV10_RAMFC(chan->id), + if ((ret = nouveau_gpuobj_new_fake(dev, NV10_RAMFC(chan->id), ~0, NV10_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, diff --git a/shared-core/nv40_fifo.c b/shared-core/nv40_fifo.c index f04c2882..eb160ee2 100644 --- a/shared-core/nv40_fifo.c +++ b/shared-core/nv40_fifo.c @@ -43,7 +43,7 @@ nv40_fifo_create_context(struct nouveau_channel *chan) struct drm_nouveau_private *dev_priv = dev->dev_private; int ret; - if ((ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(chan->id), + if ((ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(chan->id), ~0, NV40_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, diff --git a/shared-core/nv50_fifo.c b/shared-core/nv50_fifo.c index f915d332..71b89d6d 100644 --- a/shared-core/nv50_fifo.c +++ b/shared-core/nv50_fifo.c @@ -30,7 +30,6 @@ typedef struct { struct nouveau_gpuobj_ref *thingo; - struct nouveau_gpuobj_ref *dummyctx; } nv50_fifo_priv; #define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50) @@ -47,7 +46,7 @@ nv50_fifo_init_thingo(struct drm_device *dev) INSTANCE_WR(thingo->gpuobj, 0, 0x7e); INSTANCE_WR(thingo->gpuobj, 1, 0x7e); - for (i = 0; i fifos[i]) { INSTANCE_WR(thingo->gpuobj, fi, i); fi++; @@ -60,7 +59,7 @@ nv50_fifo_init_thingo(struct drm_device *dev) } static int -nv50_fifo_channel_enable(struct drm_device *dev, int channel) +nv50_fifo_channel_enable(struct drm_device *dev, int channel, int nt) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_channel *chan = dev_priv->fifos[channel]; @@ -83,7 +82,7 @@ nv50_fifo_channel_enable(struct drm_device *dev, int channel) NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED); } - nv50_fifo_init_thingo(dev); + if (!nt) nv50_fifo_init_thingo(dev); return 0; } @@ -156,18 +155,9 @@ static int nv50_fifo_init_regs(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - nv50_fifo_priv *priv = dev_priv->Engine.fifo.priv; - int ret; DRM_DEBUG("\n"); - if ((ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 0x1000, - 0x1000, - NVOBJ_FLAG_ZERO_ALLOC | - NVOBJ_FLAG_ZERO_FREE, - &priv->dummyctx))) - return ret; - NV_WRITE(0x2500, 0); NV_WRITE(0x3250, 0); NV_WRITE(0x3220, 0); @@ -175,13 +165,9 @@ nv50_fifo_init_regs(struct drm_device *dev) NV_WRITE(0x3210, 0); NV_WRITE(0x3270, 
0); - if (IS_G80) { - NV_WRITE(0x2600, (priv->dummyctx->instance>>8) | (1<<31)); - NV_WRITE(0x27fc, (priv->dummyctx->instance>>8) | (1<<31)); - } else { - NV_WRITE(0x2600, (priv->dummyctx->instance>>12) | (1<<31)); - NV_WRITE(0x27fc, (priv->dummyctx->instance>>12) | (1<<31)); - } + /* Enable dummy channels setup by nv50_instmem.c */ + nv50_fifo_channel_enable(dev, 0, 1); + nv50_fifo_channel_enable(dev, 127, 1); return 0; } @@ -209,6 +195,7 @@ nv50_fifo_init(struct drm_device *dev) DRM_ERROR("error creating thingo: %d\n", ret); return ret; } + nv50_fifo_init_context_table(dev); nv50_fifo_init_regs__nv(dev); @@ -230,7 +217,6 @@ nv50_fifo_takedown(struct drm_device *dev) return; nouveau_gpuobj_ref_del(dev, &priv->thingo); - nouveau_gpuobj_ref_del(dev, &priv->dummyctx); dev_priv->Engine.fifo.priv = NULL; drm_free(priv, sizeof(*priv), DRM_MEM_DRIVER); @@ -248,7 +234,7 @@ nv50_fifo_create_context(struct nouveau_channel *chan) if (IS_G80) { uint32_t ramfc_offset = chan->ramin->gpuobj->im_pramin->start; - if ((ret = nouveau_gpuobj_new_fake(dev, ramfc_offset, 0x100, + if ((ret = nouveau_gpuobj_new_fake(dev, ramfc_offset, ~0, 0x100, NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, &ramfc, &chan->ramfc))) @@ -285,7 +271,7 @@ nv50_fifo_create_context(struct nouveau_channel *chan) INSTANCE_WR(ramfc, 0x98/4, chan->ramin->instance >> 12); } - if ((ret = nv50_fifo_channel_enable(dev, chan->id))) { + if ((ret = nv50_fifo_channel_enable(dev, chan->id, 0))) { DRM_ERROR("error enabling ch%d: %d\n", chan->id, ret); nouveau_gpuobj_ref_del(dev, &chan->ramfc); return ret; @@ -302,6 +288,11 @@ nv50_fifo_destroy_context(struct nouveau_channel *chan) DRM_DEBUG("ch%d\n", chan->id); nv50_fifo_channel_disable(dev, chan->id, 0); + + /* Dummy channel, also used on ch 127 */ + if (chan->id == 0) + nv50_fifo_channel_disable(dev, 127, 0); + nouveau_gpuobj_ref_del(dev, &chan->ramfc); } diff --git a/shared-core/nv50_instmem.c b/shared-core/nv50_instmem.c index c26b1db5..1eeb54df 100644 --- a/shared-core/nv50_instmem.c +++ b/shared-core/nv50_instmem.c @@ -31,118 +31,162 @@ typedef struct { uint32_t save1700[5]; /* 0x1700->0x1710 */ + + struct nouveau_gpuobj_ref *pramin_pt; + struct nouveau_gpuobj_ref *pramin_bar; } nv50_instmem_priv; #define NV50_INSTMEM_PAGE_SHIFT 12 #define NV50_INSTMEM_PAGE_SIZE (1 << NV50_INSTMEM_PAGE_SHIFT) -#define NV50_INSTMEM_RSVD_SIZE (64 * 1024) #define NV50_INSTMEM_PT_SIZE(a) (((a) >> 12) << 3) +/*NOTE: - Assumes 0x1700 already covers the correct MiB of PRAMIN + */ +#define BAR0_WI32(g,o,v) do { \ + uint32_t offset; \ + if ((g)->im_backing) { \ + offset = (g)->im_backing->start; \ + } else { \ + offset = chan->ramin->gpuobj->im_backing->start; \ + offset += (g)->im_pramin->start; \ + } \ + offset += (o); \ + NV_WRITE(NV_RAMIN + (offset & 0xfffff), (v)); \ +} while(0) + int nv50_instmem_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *chan; + uint32_t c_offset, c_size, c_ramfc, c_vmpd, c_base, pt_size; nv50_instmem_priv *priv; - uint32_t rv, pt, pts, cb, cb0, cb1, unk, as; - uint32_t i, v; - int ret; + int ret, i; + uint32_t v; priv = drm_calloc(1, sizeof(*priv), DRM_MEM_DRIVER); if (!priv) return -ENOMEM; dev_priv->Engine.instmem.priv = priv; - /* Save current state */ - for (i = 0x1700; i <= 0x1710; i+=4) - priv->save1700[(i-0x1700)/4] = NV_READ(i); - - as = dev_priv->ramin->size; - rv = nouveau_mem_fb_amount(dev) - (1*1024*1024); - pt = rv + 0xd0000; - pts = NV50_INSTMEM_PT_SIZE(as); - cb = rv + 0xc8000; - if ((dev_priv->chipset & 0xf0) 
!= 0x50) { - unk = cb + 0x4200; - cb0 = cb + 0x4240; - cb1 = cb + 0x278; - } else { - unk = cb + 0x5400; - cb0 = cb + 0x5440; - cb1 = cb + 0x1478; - } - - DRM_DEBUG("PRAMIN config:\n"); - DRM_DEBUG(" Rsvd VRAM base: 0x%08x\n", rv); - DRM_DEBUG(" Aperture size: %i MiB\n", as >> 20); - DRM_DEBUG(" PT base: 0x%08x\n", pt); - DRM_DEBUG(" PT size: %d KiB\n", pts >> 10); - DRM_DEBUG(" BIOS image: 0x%08x\n", (NV_READ(0x619f04)&~0xff)<<8); - DRM_DEBUG(" Config base: 0x%08x\n", cb); - DRM_DEBUG(" ctxdma Config0: 0x%08x\n", cb0); - DRM_DEBUG(" Config1: 0x%08x\n", cb1); - - /* Map first MiB of reserved vram into BAR0 PRAMIN aperture */ - NV_WRITE(0x1700, (rv>>16)); - /* Poke some regs.. */ - NV_WRITE(0x1704, (cb>>12)); - NV_WRITE(0x1710, (((unk-cb)>>4))|(1<<31)); - NV_WRITE(0x1704, (cb>>12)|(1<<30)); - - /* CB0, some DMA object, NFI what it points at... Needed however, - * or the PRAMIN aperture doesn't operate as expected. + /* Reserve the last MiB of VRAM, we should probably try to avoid + * setting up the below tables over the top of the VBIOS image at + * some point. */ - NV_WRITE(NV_RAMIN + (cb0 - rv) + 0x00, 0x7fc00000); - NV_WRITE(NV_RAMIN + (cb0 - rv) + 0x04, 0xe1ffffff); - NV_WRITE(NV_RAMIN + (cb0 - rv) + 0x08, 0xe0000000); - NV_WRITE(NV_RAMIN + (cb0 - rv) + 0x0c, 0x01000001); - NV_WRITE(NV_RAMIN + (cb0 - rv) + 0x10, 0x00000000); - NV_WRITE(NV_RAMIN + (cb0 - rv) + 0x14, 0x00000000); + dev_priv->ramin_rsvd_vram = 1 << 20; + c_offset = nouveau_mem_fb_amount(dev) - dev_priv->ramin_rsvd_vram; + c_size = 128 << 10; + c_vmpd = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x1400 : 0x200; + c_ramfc = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x0 : 0x20; + c_base = c_vmpd + 0x4000; + pt_size = NV50_INSTMEM_PT_SIZE(dev_priv->ramin->size); - /* CB1, points at PRAMIN PT */ - NV_WRITE(NV_RAMIN + (cb1 - rv) + 0, pt | 0x63); - NV_WRITE(NV_RAMIN + (cb1 - rv) + 4, 0x00000000); + DRM_DEBUG(" Rsvd VRAM base: 0x%08x\n", c_offset); + DRM_DEBUG(" VBIOS image: 0x%08x\n", (NV_READ(0x619f04)&~0xff)<<8); + DRM_DEBUG(" Aperture size: %d MiB\n", + (uint32_t)dev_priv->ramin->size >> 20); + DRM_DEBUG(" PT size: %d KiB\n", pt_size >> 10); - /* Zero PRAMIN page table */ - v = NV_RAMIN + (pt - rv); - for (i = v; i < v + pts; i += 8) { - NV_WRITE(i + 0x00, 0x00000009); - NV_WRITE(i + 0x04, 0x00000000); - } + NV_WRITE(NV50_PUNK_BAR0_PRAMIN, (c_offset >> 16)); - /* Map page table into PRAMIN aperture */ - for (i = pt; i < pt + pts; i += 0x1000) { - uint32_t pte = NV_RAMIN + (pt-rv) + (((i-pt) >> 12) << 3); - DRM_DEBUG("PRAMIN PTE = 0x%08x @ 0x%08x\n", i, pte); - NV_WRITE(pte + 0x00, i | 1); - NV_WRITE(pte + 0x04, 0x00000000); - } - - /* Points at CB0 */ - NV_WRITE(0x170c, (((cb0 - cb)>>4)|(1<<31))); - - /* Confirm it all worked, should be able to read back the page table's - * PTEs from the PRAMIN BAR + /* Create a fake channel, and use it as our "dummy" channels 0/127. + * The main reason for creating a channel is so we can use the gpuobj + * code. However, it's probably worth noting that NVIDIA also setup + * their channels 0/127 with the same values they configure here. + * So, there may be some other reason for doing this. + * + * Have to create the entire channel manually, as the real channel + * creation code assumes we have PRAMIN access, and we don't until + * we're done here. 
*/ - NV_WRITE(0x1700, pt >> 16); - if (NV_READ(0x700000) != NV_RI32(0)) { - DRM_ERROR("Failed to init PRAMIN page table\n"); - return -EINVAL; - } - - /* Create a heap to manage PRAMIN aperture allocations */ - ret = nouveau_mem_init_heap(&dev_priv->ramin_heap, pts, as-pts); - if (ret) { - DRM_ERROR("Failed to init PRAMIN heap\n"); + chan = drm_calloc(1, sizeof(*chan), DRM_MEM_DRIVER); + if (!chan) return -ENOMEM; + chan->id = 0; + chan->dev = dev; + chan->file_priv = (struct drm_file *)-2; + dev_priv->fifos[0] = dev_priv->fifos[127] = chan; + + /* Channel's PRAMIN object + heap */ + if ((ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, 128<<10, 0, + NULL, &chan->ramin))) + return ret; + + if (nouveau_mem_init_heap(&chan->ramin_heap, c_base, c_size - c_base)) + return -ENOMEM; + + /* RAMFC + zero channel's PRAMIN up to start of VM pagedir */ + if ((ret = nouveau_gpuobj_new_fake(dev, c_ramfc, c_offset + c_ramfc, + 0x4000, 0, NULL, &chan->ramfc))) + return ret; + + for (i = 0; i < c_vmpd; i += 4) + BAR0_WI32(chan->ramin->gpuobj, i, 0); + + /* VM page directory */ + if ((ret = nouveau_gpuobj_new_fake(dev, c_vmpd, c_offset + c_vmpd, + 0x4000, 0, &chan->vm_pd, NULL))) + return ret; + for (i = 0; i < 0x4000; i += 8) { + BAR0_WI32(chan->vm_pd, i + 0x00, 0x00000000); + BAR0_WI32(chan->vm_pd, i + 0x04, 0x00000000); } - DRM_DEBUG("NV50: PRAMIN setup ok\n"); - /* Don't alloc the last MiB of VRAM, probably too much, but be safe - * at least for now. + /* PRAMIN page table, cheat and map into VM at 0x0000000000. + * We map the entire fake channel into the start of the PRAMIN BAR */ - dev_priv->ramin_rsvd_vram = 1*1024*1024; + if ((ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pt_size, 0x1000, + 0, &priv->pramin_pt))) + return ret; - /*XXX: probably incorrect, but needed to make hash func "work" */ + for (i = 0, v = c_offset; i < pt_size; i+=8, v+=0x1000) { + if (v < (c_offset + c_size)) + BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v | 1); + else + BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000009); + BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000); + } + + BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63); + BAR0_WI32(chan->vm_pd, 0x04, 0x00000000); + + /* DMA object for PRAMIN BAR */ + if ((ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 6*4, 16, 0, + &priv->pramin_bar))) + return ret; + BAR0_WI32(priv->pramin_bar->gpuobj, 0x00, 0x7fc00000); + BAR0_WI32(priv->pramin_bar->gpuobj, 0x04, dev_priv->ramin->size - 1); + BAR0_WI32(priv->pramin_bar->gpuobj, 0x08, 0x00000000); + BAR0_WI32(priv->pramin_bar->gpuobj, 0x0c, 0x00000000); + BAR0_WI32(priv->pramin_bar->gpuobj, 0x10, 0x00000000); + BAR0_WI32(priv->pramin_bar->gpuobj, 0x14, 0x00000000); + + /* Poke the relevant regs, and pray it works :) */ + NV_WRITE(NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12)); + NV_WRITE(NV50_PUNK_UNK1710, 0); + NV_WRITE(NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12) | + NV50_PUNK_BAR_CFG_BASE_VALID); + NV_WRITE(NV50_PUNK_BAR1_CTXDMA, 0); + NV_WRITE(NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->instance >> 4) | + NV50_PUNK_BAR3_CTXDMA_VALID); + + /* Assume that praying isn't enough, check that we can re-read the + * entire fake channel back from the PRAMIN BAR */ + for (i = 0; i < c_size; i+=4) { + if (NV_READ(NV_RAMIN + i) != NV_RI32(i)) { + DRM_ERROR("Error reading back PRAMIN at 0x%08x\n", i); + return -EINVAL; + } + } + + /* Global PRAMIN heap */ + if (nouveau_mem_init_heap(&dev_priv->ramin_heap, + c_size, dev_priv->ramin->size - c_size)) { + dev_priv->ramin_heap = NULL; + DRM_ERROR("Failed 
to init RAMIN heap\n"); + } + + /*XXX: incorrect, but needed to make hash func "work" */ dev_priv->ramht_offset = 0x10000; dev_priv->ramht_bits = 9; dev_priv->ramht_size = (1 << dev_priv->ramht_bits); @@ -154,8 +198,11 @@ nv50_instmem_takedown(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; nv50_instmem_priv *priv = dev_priv->Engine.instmem.priv; + struct nouveau_channel *chan = dev_priv->fifos[0]; int i; + DRM_DEBUG("\n"); + if (!priv) return; @@ -163,6 +210,20 @@ nv50_instmem_takedown(struct drm_device *dev) for (i = 0x1700; i <= 0x1710; i+=4) NV_WRITE(i, priv->save1700[(i-0x1700)/4]); + nouveau_gpuobj_ref_del(dev, &priv->pramin_bar); + nouveau_gpuobj_ref_del(dev, &priv->pramin_pt); + + /* Destroy dummy channel */ + if (chan) { + nouveau_gpuobj_del(dev, &chan->vm_pd); + nouveau_gpuobj_ref_del(dev, &chan->ramfc); + nouveau_gpuobj_ref_del(dev, &chan->ramin); + nouveau_mem_takedown(&chan->ramin_heap); + + dev_priv->fifos[0] = dev_priv->fifos[127] = NULL; + drm_free(chan, sizeof(*chan), DRM_MEM_DRIVER); + } + dev_priv->Engine.instmem.priv = NULL; drm_free(priv, sizeof(*priv), DRM_MEM_DRIVER); } @@ -205,6 +266,7 @@ int nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) { struct drm_nouveau_private *dev_priv = dev->dev_private; + nv50_instmem_priv *priv = dev_priv->Engine.instmem.priv; uint32_t pte, pte_end, vram; if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound) @@ -217,19 +279,14 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte; vram = gpuobj->im_backing->start; - if (pte == pte_end) { - DRM_ERROR("WARNING: badness in bind() pte calc\n"); - pte_end++; - } - DRM_DEBUG("pramin=0x%llx, pte=%d, pte_end=%d\n", gpuobj->im_pramin->start, pte, pte_end); DRM_DEBUG("first vram page: 0x%llx\n", gpuobj->im_backing->start); while (pte < pte_end) { - NV_WI32(pte + 0, vram | 1); - NV_WI32(pte + 4, 0x00000000); + INSTANCE_WR(priv->pramin_pt->gpuobj, (pte + 0)/4, vram | 1); + INSTANCE_WR(priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000); pte += 8; vram += NV50_INSTMEM_PAGE_SIZE; @@ -243,6 +300,7 @@ int nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) { struct drm_nouveau_private *dev_priv = dev->dev_private; + nv50_instmem_priv *priv = dev_priv->Engine.instmem.priv; uint32_t pte, pte_end; if (gpuobj->im_bound == 0) @@ -251,8 +309,8 @@ nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) pte = (gpuobj->im_pramin->start >> 12) << 3; pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte; while (pte < pte_end) { - NV_WI32(pte + 0, 0x00000000); - NV_WI32(pte + 4, 0x00000000); + INSTANCE_WR(priv->pramin_pt->gpuobj, (pte + 0)/4, 0x00000009); + INSTANCE_WR(priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000); pte += 8; } From da279868706cc799bdf25cdd5523d11fda64d4cc Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Mon, 6 Aug 2007 18:33:29 +1000 Subject: [PATCH 244/437] i915: i965 non-secure batchbuffer bit has moved. 
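In other words, on 965-class hardware the non-secure flag moves from bit 0 of the address dword into
bit 8 of the MI_BATCH_BUFFER_START command dword. A small illustrative helper (not part of the patch)
that builds the two dwords either way, using the definitions the hunks below add:

	static void build_batch_start(int is_i965, u32 start, u32 cmd[2])
	{
		if (is_i965) {
			/* 965: non-secure bit lives in the command dword. */
			cmd[0] = MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965;
			cmd[1] = start;
		} else {
			/* Earlier parts: non-secure bit rides in the address dword. */
			cmd[0] = MI_BATCH_BUFFER_START | (2 << 6);
			cmd[1] = start | MI_BATCH_NON_SECURE;
		}
	}
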
--- shared-core/i915_dma.c | 10 ++++++++-- shared-core/i915_drv.h | 2 ++ 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c index 5d227d8b..9f18feee 100644 --- a/shared-core/i915_dma.c +++ b/shared-core/i915_dma.c @@ -546,9 +546,15 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev, if (dev_priv->use_mi_batchbuffer_start) { BEGIN_LP_RING(2); - OUT_RING(MI_BATCH_BUFFER_START | (2 << 6)); - OUT_RING(batch->start | MI_BATCH_NON_SECURE); + if (IS_I965G(dev)) { + OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965); + OUT_RING(batch->start); + } else { + OUT_RING(MI_BATCH_BUFFER_START | (2 << 6)); + OUT_RING(batch->start | MI_BATCH_NON_SECURE); + } ADVANCE_LP_RING(); + } else { BEGIN_LP_RING(4); OUT_RING(MI_BATCH_BUFFER); diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h index e641fdc6..528f7b3a 100644 --- a/shared-core/i915_drv.h +++ b/shared-core/i915_drv.h @@ -364,6 +364,8 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); #define MI_BATCH_BUFFER_END (0xA<<23) #define MI_BATCH_NON_SECURE (1) +#define MI_BATCH_NON_SECURE_I965 (1<<8) + #define MI_WAIT_FOR_EVENT ((0x3<<23)) #define MI_WAIT_FOR_PLANE_B_FLIP (1<<6) #define MI_WAIT_FOR_PLANE_A_FLIP (1<<2) From 263775c454f381fffc8f5d4f309b4e1b131c3734 Mon Sep 17 00:00:00 2001 From: vehemens Date: Mon, 13 Aug 2007 10:24:39 -0700 Subject: [PATCH 245/437] Fix drm_auth.c locking to not recurse on dev_lock. --- bsd-core/drm_auth.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/bsd-core/drm_auth.c b/bsd-core/drm_auth.c index 964f9a42..14cfc225 100644 --- a/bsd-core/drm_auth.c +++ b/bsd-core/drm_auth.c @@ -43,6 +43,8 @@ static drm_file_t *drm_find_file(drm_device_t *dev, drm_magic_t magic) drm_magic_entry_t *pt; int hash = drm_hash_magic(magic); + DRM_SPINLOCK_ASSERT(&dev->dev_lock); + for (pt = dev->magiclist[hash].head; pt; pt = pt->next) { if (pt->magic == magic) { return pt->priv; @@ -59,6 +61,8 @@ static int drm_add_magic(drm_device_t *dev, drm_file_t *priv, drm_magic_t magic) DRM_DEBUG("%d\n", magic); + DRM_SPINLOCK_ASSERT(&dev->dev_lock); + hash = drm_hash_magic(magic); entry = malloc(sizeof(*entry), M_DRM, M_ZERO | M_NOWAIT); if (!entry) return ENOMEM; @@ -85,10 +89,11 @@ static int drm_remove_magic(drm_device_t *dev, drm_magic_t magic) drm_magic_entry_t *pt; int hash; + DRM_SPINLOCK_ASSERT(&dev->dev_lock); + DRM_DEBUG("%d\n", magic); hash = drm_hash_magic(magic); - DRM_LOCK(); for (pt = dev->magiclist[hash].head; pt; prev = pt, pt = pt->next) { if (pt->magic == magic) { if (dev->magiclist[hash].head == pt) { @@ -100,11 +105,9 @@ static int drm_remove_magic(drm_device_t *dev, drm_magic_t magic) if (prev) { prev->next = pt->next; } - DRM_UNLOCK(); return 0; } } - DRM_UNLOCK(); free(pt, M_DRM); return EINVAL; @@ -129,8 +132,8 @@ int drm_getmagic(drm_device_t *dev, void *data, struct drm_file *file_priv) continue; } while (drm_find_file(dev, auth->magic)); file_priv->magic = auth->magic; - DRM_UNLOCK(); drm_add_magic(dev, file_priv, auth->magic); + DRM_UNLOCK(); } DRM_DEBUG("%u\n", auth->magic); From 3b07a37a48ca6dc22d538221b59b430dd72c6203 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Mon, 13 Aug 2007 10:50:25 -0700 Subject: [PATCH 246/437] Add doxygen and fix whitespace for drm_auth.c --- bsd-core/drm_auth.c | 28 +++++++++++++++++++++++++--- 1 file changed, 25 insertions(+), 3 deletions(-) diff --git a/bsd-core/drm_auth.c b/bsd-core/drm_auth.c index 14cfc225..9b5f4f74 100644 --- 
a/bsd-core/drm_auth.c +++ b/bsd-core/drm_auth.c @@ -1,4 +1,4 @@ -/* drm_auth.h -- IOCTLs for authentication -*- linux-c -*- +/* drm_auth.c -- IOCTLs for authentication -*- linux-c -*- * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com */ /*- @@ -38,6 +38,9 @@ static int drm_hash_magic(drm_magic_t magic) return magic & (DRM_HASH_SIZE-1); } +/** + * Returns the file private associated with the given magic number. + */ static drm_file_t *drm_find_file(drm_device_t *dev, drm_magic_t magic) { drm_magic_entry_t *pt; @@ -54,6 +57,10 @@ static drm_file_t *drm_find_file(drm_device_t *dev, drm_magic_t magic) return NULL; } +/** + * Inserts the given magic number into the hash table of used magic number + * lists. + */ static int drm_add_magic(drm_device_t *dev, drm_file_t *priv, drm_magic_t magic) { int hash; @@ -83,6 +90,10 @@ static int drm_add_magic(drm_device_t *dev, drm_file_t *priv, drm_magic_t magic) return 0; } +/** + * Removes the given magic number from the hash table of used magic number + * lists. + */ static int drm_remove_magic(drm_device_t *dev, drm_magic_t magic) { drm_magic_entry_t *prev = NULL; @@ -113,6 +124,14 @@ static int drm_remove_magic(drm_device_t *dev, drm_magic_t magic) return EINVAL; } +/** + * Called by the client, this returns a unique magic number to be authorized + * by the master. + * + * The master may use its own knowledge of the client (such as the X + * connection that the magic is passed over) to determine if the magic number + * should be authenticated. + */ int drm_getmagic(drm_device_t *dev, void *data, struct drm_file *file_priv) { static drm_magic_t sequence = 0; @@ -125,9 +144,9 @@ int drm_getmagic(drm_device_t *dev, void *data, struct drm_file *file_priv) DRM_LOCK(); do { int old = sequence; - + auth->magic = old+1; - + if (!atomic_cmpset_int(&sequence, old, auth->magic)) continue; } while (drm_find_file(dev, auth->magic)); @@ -141,6 +160,9 @@ int drm_getmagic(drm_device_t *dev, void *data, struct drm_file *file_priv) return 0; } +/** + * Marks the client associated with the given magic number as authenticated. + */ int drm_authmagic(drm_device_t *dev, void *data, struct drm_file *file_priv) { drm_auth_t *auth = data; From d6a45ebf0ee47c31f560f3072a4b70c4039e454a Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Mon, 13 Aug 2007 11:27:46 -0700 Subject: [PATCH 247/437] Add a regression test for authentication. 
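The test drives the handshake through the raw DRM_IOCTL_GET_MAGIC / DRM_IOCTL_AUTH_MAGIC ioctls; normal
clients typically perform the same two steps through the libdrm wrappers. A minimal sketch, assuming
both file descriptors are open on the same device node and leaving out how the magic travels between
the two processes (the test below uses a pipe):

	#include <xf86drm.h>

	/* Client side: ask the kernel for a magic token. */
	static int client_get_magic(int client_fd, drm_magic_t *magic)
	{
		return drmGetMagic(client_fd, magic);	/* wraps DRM_IOCTL_GET_MAGIC */
	}

	/* Master side: mark the client that presented this magic as authenticated. */
	static int master_auth_client(int master_fd, drm_magic_t magic)
	{
		return drmAuthMagic(master_fd, magic);	/* wraps DRM_IOCTL_AUTH_MAGIC */
	}
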
--- .gitignore | 1 + tests/Makefile.am | 3 +- tests/auth.c | 137 ++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 140 insertions(+), 1 deletion(-) create mode 100644 tests/auth.c diff --git a/.gitignore b/.gitignore index 8447db2b..f63c1532 100644 --- a/.gitignore +++ b/.gitignore @@ -51,6 +51,7 @@ sis.kld stamp-h1 tdfx.kld via.kld +tests/auth tests/dristat tests/drmstat tests/getclient diff --git a/tests/Makefile.am b/tests/Makefile.am index 38a07a35..e846efbe 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -15,7 +15,8 @@ libdrmtest_la_LIBADD = \ LDADD = libdrmtest.la -TESTS = openclose \ +TESTS = auth \ + openclose \ getversion \ getclient \ updatedraw diff --git a/tests/auth.c b/tests/auth.c new file mode 100644 index 00000000..45265d6c --- /dev/null +++ b/tests/auth.c @@ -0,0 +1,137 @@ +/* + * Copyright © 2007 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Eric Anholt + * + */ + +#include +#include "drmtest.h" + +enum auth_event { + SERVER_READY, + CLIENT_MAGIC, + CLIENT_DONE, +}; + +int commfd[2]; + +static void wait_event(int pipe, enum auth_event expected_event) +{ + int ret; + enum auth_event event; + unsigned char in; + + ret = read(commfd[pipe], &in, 1); + if (ret == -1) + err(1, "read error"); + event = in; + + if (event != expected_event) + errx(1, "unexpected event: %d\n", event); +} + +static void +send_event(int pipe, enum auth_event send_event) +{ + int ret; + unsigned char event; + + event = send_event; + ret = write(commfd[pipe], &event, 1); + if (ret == -1) + err(1, "failed to send event %d", event); +} + +static void client() +{ + struct drm_auth auth; + int drmfd, ret; + + /* XXX: Should make sure we open the same DRM as the master */ + drmfd = drm_open_any(); + + wait_event(0, SERVER_READY); + + /* Get a client magic number and pass it to the master for auth. */ + auth.magic = 0; /* Quiet valgrind */ + ret = ioctl(drmfd, DRM_IOCTL_GET_MAGIC, &auth); + if (ret == -1) + err(1, "Couldn't get client magic"); + send_event(0, CLIENT_MAGIC); + ret = write(commfd[0], &auth.magic, sizeof(auth.magic)); + if (ret == -1) + err(1, "Couldn't write auth data"); + + /* Signal that the client is completely done. 
*/ + send_event(0, CLIENT_DONE); +} + +static void server() +{ + int drmfd, ret; + struct drm_auth auth; + + drmfd = drm_open_any_master(); + + auth.magic = 0xd0d0d0d0; + ret = ioctl(drmfd, DRM_IOCTL_AUTH_MAGIC, &auth); + if (ret != -1 || errno != EINVAL) + errx(1, "Authenticating bad magic succeeded\n"); + + send_event(1, SERVER_READY); + + wait_event(1, CLIENT_MAGIC); + ret = read(commfd[1], &auth.magic, sizeof(auth.magic)); + if (ret == -1) + err(1, "Failure to read client magic"); + + ret = ioctl(drmfd, DRM_IOCTL_AUTH_MAGIC, &auth); + if (ret == -1) + err(1, "Authenticating bad magic succeeded\n"); + + wait_event(1, CLIENT_DONE); +} + +/** + * Checks DRM authentication mechanisms. + */ +int main(int argc, char **argv) +{ + int ret; + + ret = pipe(commfd); + if (ret == -1) + err(1, "Couldn't create pipe"); + + ret = fork(); + if (ret == -1) + err(1, "failure to fork client"); + if (ret == 0) + client(); + else + server(); + + return 0; +} + From 4340f49bf79a5421886363e08501ad347973b083 Mon Sep 17 00:00:00 2001 From: vehemens Date: Mon, 13 Aug 2007 10:17:47 -0700 Subject: [PATCH 248/437] Bug #11951: Fix an errno sign inversion on pre-FreeBSD 5. Also, annotate where signs change, to hopefully remind the reader of these issues in the future. --- bsd-core/drmP.h | 4 +++- bsd-core/drm_dma.c | 1 + bsd-core/drm_fops.c | 1 + bsd-core/drm_irq.c | 1 + 4 files changed, 6 insertions(+), 1 deletion(-) diff --git a/bsd-core/drmP.h b/bsd-core/drmP.h index 73342d09..8a768f0c 100644 --- a/bsd-core/drmP.h +++ b/bsd-core/drmP.h @@ -377,6 +377,7 @@ do { \ } while (0) #if defined(__FreeBSD__) && __FreeBSD_version > 500000 +/* Returns -errno to shared code */ #define DRM_WAIT_ON( ret, queue, timeout, condition ) \ for ( ret = 0 ; !ret && !(condition) ; ) { \ DRM_UNLOCK(); \ @@ -388,11 +389,12 @@ for ( ret = 0 ; !ret && !(condition) ; ) { \ DRM_LOCK(); \ } #else +/* Returns -errno to shared code */ #define DRM_WAIT_ON( ret, queue, timeout, condition ) \ for ( ret = 0 ; !ret && !(condition) ; ) { \ int s = spldrm(); \ if (!(condition)) \ - ret = tsleep( &(queue), PZERO | PCATCH, \ + ret = -tsleep( &(queue), PZERO | PCATCH, \ "drmwtq", (timeout) ); \ splx(s); \ } diff --git a/bsd-core/drm_dma.c b/bsd-core/drm_dma.c index fc1e1250..4896cf22 100644 --- a/bsd-core/drm_dma.c +++ b/bsd-core/drm_dma.c @@ -121,6 +121,7 @@ int drm_dma(drm_device_t *dev, void *data, struct drm_file *file_priv) { if (dev->driver.dma_ioctl) { + /* shared code returns -errno */ return -dev->driver.dma_ioctl(dev, data, file_priv); } else { DRM_DEBUG("DMA ioctl on driver with no dma handler\n"); diff --git a/bsd-core/drm_fops.c b/bsd-core/drm_fops.c index 870e4d29..2d037ea5 100644 --- a/bsd-core/drm_fops.c +++ b/bsd-core/drm_fops.c @@ -93,6 +93,7 @@ int drm_open_helper(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p, priv->authenticated = DRM_SUSER(p); if (dev->driver.open) { + /* shared code returns -errno */ retcode = -dev->driver.open(dev, priv); if (retcode != 0) { free(priv, M_DRM); diff --git a/bsd-core/drm_irq.c b/bsd-core/drm_irq.c index 1ab532fe..9c437e9d 100644 --- a/bsd-core/drm_irq.c +++ b/bsd-core/drm_irq.c @@ -241,6 +241,7 @@ int drm_wait_vblank(drm_device_t *dev, void *data, struct drm_file *file_priv) ret = EINVAL; } else { DRM_LOCK(); + /* shared code returns -errno */ ret = -dev->driver.vblank_wait(dev, &vblwait->request.sequence); DRM_UNLOCK(); From 15f841bd529b50901272ca35a4c57de42a51901a Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 13 Aug 2007 16:21:20 -0700 Subject: [PATCH 249/437] Strobe magic 0xB03F 
register to flush PCI-e GART table. The original XGI kernel driver strobed 0xB03F each time a page was allocated to back a GART page. When the driver was converted to use the DRM SG interface, this code was lost. Returning it fixes a long standing issue where the X-server would work fine the first time, but acceleration commands would be ignored on the second X-server invocation. --- linux-core/xgi_pcie.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index d15ea32a..a0f52740 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -103,7 +103,11 @@ static int xgi_pcie_lut_init(struct xgi_info * info) DRM_WRITE8(info->mmio_map, 0xB00C, temp & ~0x02); /* Set GART base address to HW */ - dwWriteReg(info->mmio_map, 0xB034, info->lut_handle->busaddr); + DRM_WRITE32(info->mmio_map, 0xB034, info->lut_handle->busaddr); + + /* Flush GART table. */ + DRM_WRITE8(info->mmio_map, 0xB03F, 0x40); + DRM_WRITE8(info->mmio_map, 0xB03F, 0x00); return 0; } From 3ee211f4f7435792752c1dbcd3a60e2e7abfba09 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Mon, 13 Aug 2007 16:29:24 -0700 Subject: [PATCH 250/437] Bug #11895: Only add the AGP base to map offset if the caller didn't. The i830 and newer intel 2D code adds the AGP base to map offsets already, because it wasn't doing the AGP enable which used to set dev->agp->base. Credit goes to Zhenyu for finding the issue. --- linux-core/drm_bufs.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/linux-core/drm_bufs.c b/linux-core/drm_bufs.c index f9987ca6..f0b28fa1 100644 --- a/linux-core/drm_bufs.c +++ b/linux-core/drm_bufs.c @@ -223,11 +223,17 @@ static int drm_addmap_core(struct drm_device *dev, unsigned int offset, #ifdef __alpha__ map->offset += dev->hose->mem_space->start; #endif - /* Note: dev->agp->base may actually be 0 when the DRM - * is not in control of AGP space. But if user space is - * it should already have added the AGP base itself. + /* In some cases (i810 driver), user space may have already + * added the AGP base itself, because dev->agp->base previously + * only got set during AGP enable. So, only add the base + * address if the map's offset isn't already within the + * aperture. */ - map->offset += dev->agp->base; + if (map->offset < dev->agp->base || + map->offset > dev->agp->base + + dev->agp->agp_info.aper_size * 1024 * 1024) { + map->offset += dev->agp->base; + } map->mtrr = dev->agp->agp_mtrr; /* for getmap */ /* This assumes the DRM is in total control of AGP space. From 02c4e0e757b69cd6ae38b8ab2c078b3f06fea661 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 15 Aug 2007 00:56:24 +1000 Subject: [PATCH 251/437] nouveau/nv40: Fix channel scheduling. Ensure NV_PFIFO_DMA_TIMESLICE_TIMEOUT_ENABLE gets set, otherwise channels will appear to "freeze" in some circumstances. 
--- shared-core/nouveau_drv.h | 1 + shared-core/nouveau_state.c | 2 +- shared-core/nv40_fifo.c | 13 +++++++++++++ 3 files changed, 15 insertions(+), 1 deletion(-) diff --git a/shared-core/nouveau_drv.h b/shared-core/nouveau_drv.h index e3d0ff4c..b7459b1b 100644 --- a/shared-core/nouveau_drv.h +++ b/shared-core/nouveau_drv.h @@ -455,6 +455,7 @@ extern int nv10_fifo_load_context(struct nouveau_channel *); extern int nv10_fifo_save_context(struct nouveau_channel *); /* nv40_fifo.c */ +extern int nv40_fifo_init(struct drm_device *); extern int nv40_fifo_create_context(struct nouveau_channel *); extern void nv40_fifo_destroy_context(struct nouveau_channel *); extern int nv40_fifo_load_context(struct nouveau_channel *); diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c index eac38060..d885f7c6 100644 --- a/shared-core/nouveau_state.c +++ b/shared-core/nouveau_state.c @@ -224,7 +224,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->graph.destroy_context = nv40_graph_destroy_context; engine->graph.load_context = nv40_graph_load_context; engine->graph.save_context = nv40_graph_save_context; - engine->fifo.init = nouveau_fifo_init; + engine->fifo.init = nv40_fifo_init; engine->fifo.takedown = nouveau_stub_takedown; engine->fifo.create_context = nv40_fifo_create_context; engine->fifo.destroy_context = nv40_fifo_destroy_context; diff --git a/shared-core/nv40_fifo.c b/shared-core/nv40_fifo.c index eb160ee2..ce3f8fdd 100644 --- a/shared-core/nv40_fifo.c +++ b/shared-core/nv40_fifo.c @@ -193,3 +193,16 @@ nv40_fifo_save_context(struct nouveau_channel *chan) return 0; } +int +nv40_fifo_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + int ret; + + if ((ret = nouveau_fifo_init(dev))) + return ret; + + NV_WRITE(NV04_PFIFO_DMA_TIMESLICE, 0x2101ffff); + return 0; +} + From a6ea60c77e8d4a266d696e0d99c11b1f39578dcc Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 15 Aug 2007 01:40:46 +1000 Subject: [PATCH 252/437] nouveau: Catch all NV4x chips instead of just NV_40. --- shared-core/nv04_instmem.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/shared-core/nv04_instmem.c b/shared-core/nv04_instmem.c index 5e0f6f4e..b5569b55 100644 --- a/shared-core/nv04_instmem.c +++ b/shared-core/nv04_instmem.c @@ -9,21 +9,18 @@ nv04_instmem_determine_amount(struct drm_device *dev) int i; /* Figure out how much instance memory we need */ - switch (dev_priv->card_type) { - case NV_40: + if (dev_priv->card_type >= NV_40) { /* We'll want more instance memory than this on some NV4x cards. * There's a 16MB aperture to play with that maps onto the end * of vram. For now, only reserve a small piece until we know * more about what each chipset requires. */ dev_priv->ramin_rsvd_vram = (1*1024* 1024); - break; - default: + } else { /*XXX: what *are* the limits on ramin_rsvd_vram = (512*1024); - break; } DRM_DEBUG("RAMIN size: %dKiB\n", dev_priv->ramin_rsvd_vram>>10); From 891714d8d732480af97fbc45562145a560b7999b Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Tue, 14 Aug 2007 13:18:44 -0700 Subject: [PATCH 253/437] Clean up xgi_(en|dis)able_(mmio|ge) and move to xgi_misc.c. 
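The xgi_enable_mmio()/xgi_enable_ge() bodies moved below lean on a family of helpers (OUT3X5B, OUT3C5B, OUT3CFB, IN3CFB) that all follow the classic VGA indexed-register pattern: write a register index to the index port, then access the value through the data port one byte above it. A sketch of the idea, inferred from the open-coded sequences in xgi_enable_mmio() and the IN3CFB definition visible later in the series (the real helpers live in xgi_regs.h):

/* Sketch only: an OUT3X5B-style write through the CRTC index/data pair.
 * 0x3D4/0x3D5 is the pair OUT3X5B uses; OUT3CFB (0x3CE/0x3CF) and
 * OUT3C5B (0x3C4/0x3C5) work the same way. */
static inline void crtc_indexed_write(struct drm_map *map, u8 index, u8 value)
{
	DRM_WRITE8(map, 0x3D4, index);	/* select the register */
	DRM_WRITE8(map, 0x3D5, value);	/* write its new value */
}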
--- linux-core/xgi_drv.h | 5 ++ linux-core/xgi_misc.c | 126 +++++++++++++++++++++++++++++++++++ linux-core/xgi_regs.h | 150 +++++++++++------------------------------- 3 files changed, 171 insertions(+), 110 deletions(-) diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 6afc4c6c..9f746de2 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -87,6 +87,11 @@ extern void xgi_pcie_lut_cleanup(struct xgi_info * info); extern void *xgi_find_pcie_virt(struct xgi_info * info, u32 address); +extern void xgi_enable_mmio(struct xgi_info * info); +extern void xgi_disable_mmio(struct xgi_info * info); +extern void xgi_enable_ge(struct xgi_info * info); +extern void xgi_disable_ge(struct xgi_info * info); + extern int xgi_alloc_ioctl(struct drm_device * dev, void * data, struct drm_file * filp); extern int xgi_free_ioctl(struct drm_device * dev, void * data, diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index 84d1d4f2..be75e970 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -374,3 +374,129 @@ void xgi_waitfor_pci_idle(struct xgi_info * info) } } } + + +void xgi_enable_mmio(struct xgi_info * info) +{ + u8 protect = 0; + u8 temp; + + /* Unprotect registers */ + DRM_WRITE8(info->mmio_map, 0x3C4, 0x11); + protect = DRM_READ8(info->mmio_map, 0x3C5); + DRM_WRITE8(info->mmio_map, 0x3C5, 0x92); + + DRM_WRITE8(info->mmio_map, 0x3D4, 0x3A); + temp = DRM_READ8(info->mmio_map, 0x3D5); + DRM_WRITE8(info->mmio_map, 0x3D5, temp | 0x20); + + /* Enable MMIO */ + DRM_WRITE8(info->mmio_map, 0x3D4, 0x39); + temp = DRM_READ8(info->mmio_map, 0x3D5); + DRM_WRITE8(info->mmio_map, 0x3D5, temp | 0x01); + + /* Protect registers */ + OUT3C5B(info->mmio_map, 0x11, protect); +} + + +void xgi_disable_mmio(struct xgi_info * info) +{ + u8 protect = 0; + u8 temp; + + /* Unprotect registers */ + DRM_WRITE8(info->mmio_map, 0x3C4, 0x11); + protect = DRM_READ8(info->mmio_map, 0x3C5); + DRM_WRITE8(info->mmio_map, 0x3C5, 0x92); + + /* Disable MMIO access */ + DRM_WRITE8(info->mmio_map, 0x3D4, 0x39); + temp = DRM_READ8(info->mmio_map, 0x3D5); + DRM_WRITE8(info->mmio_map, 0x3D5, temp & 0xFE); + + /* Protect registers */ + OUT3C5B(info->mmio_map, 0x11, protect); +} + + +void xgi_enable_ge(struct xgi_info * info) +{ + u8 bOld3cf2a; + int wait = 0; + + OUT3C5B(info->mmio_map, 0x11, 0x92); + + /* Save and close dynamic gating + */ + bOld3cf2a = IN3CFB(info->mmio_map, XGI_MISC_CTRL); + OUT3CFB(info->mmio_map, XGI_MISC_CTRL, bOld3cf2a & ~EN_GEPWM); + + /* Enable 2D and 3D GE + */ + OUT3X5B(info->mmio_map, XGI_GE_CNTL, (GE_ENABLE | GE_ENABLE_3D)); + wait = 10; + while (wait--) { + DRM_READ8(info->mmio_map, 0x36); + } + + /* Reset both 3D and 2D engine + */ + OUT3X5B(info->mmio_map, XGI_GE_CNTL, + (GE_ENABLE | GE_RESET | GE_ENABLE_3D)); + wait = 10; + while (wait--) { + DRM_READ8(info->mmio_map, 0x36); + } + + OUT3X5B(info->mmio_map, XGI_GE_CNTL, (GE_ENABLE | GE_ENABLE_3D)); + wait = 10; + while (wait--) { + DRM_READ8(info->mmio_map, 0x36); + } + + /* Enable 2D engine only + */ + OUT3X5B(info->mmio_map, XGI_GE_CNTL, GE_ENABLE); + + /* Enable 2D+3D engine + */ + OUT3X5B(info->mmio_map, XGI_GE_CNTL, (GE_ENABLE | GE_ENABLE_3D)); + + /* Restore dynamic gating + */ + OUT3CFB(info->mmio_map, XGI_MISC_CTRL, bOld3cf2a); +} + + +void xgi_disable_ge(struct xgi_info * info) +{ + int wait = 0; + + OUT3X5B(info->mmio_map, XGI_GE_CNTL, (GE_ENABLE | GE_ENABLE_3D)); + + wait = 10; + while (wait--) { + DRM_READ8(info->mmio_map, 0x36); + } + + /* Reset both 3D and 2D engine + */ + OUT3X5B(info->mmio_map, 
XGI_GE_CNTL, + (GE_ENABLE | GE_RESET | GE_ENABLE_3D)); + + wait = 10; + while (wait--) { + DRM_READ8(info->mmio_map, 0x36); + } + OUT3X5B(info->mmio_map, XGI_GE_CNTL, (GE_ENABLE | GE_ENABLE_3D)); + + wait = 10; + while (wait--) { + DRM_READ8(info->mmio_map, 0x36); + } + + /* Disable 2D engine and 3D engine. + */ + OUT3X5B(info->mmio_map, XGI_GE_CNTL, 0); +} diff --git a/linux-core/xgi_regs.h b/linux-core/xgi_regs.h index 57e93405..7f643f92 100644 --- a/linux-core/xgi_regs.h +++ b/linux-core/xgi_regs.h @@ -30,8 +30,6 @@ #include "drmP.h" #include "drm.h" -#define BASE_3D_ENG 0x2800 - #define MAKE_MASK(bits) ((1U << (bits)) - 1) #define ONE_BIT_MASK MAKE_MASK(1) @@ -39,6 +37,46 @@ #define TWENTYONE_BIT_MASK MAKE_MASK(21) #define TWENTYTWO_BIT_MASK MAKE_MASK(22) + +/* Port 0x3d4/0x3d5, index 0x2a */ +#define XGI_INTERFACE_SEL 0x2a +#define DUAL_64BIT (1U<<7) +#define INTERNAL_32BIT (1U<<6) +#define EN_SEP_WR (1U<<5) +#define POWER_DOWN_SEL (1U<<4) +/*#define RESERVED_3 (1U<<3) */ +#define SUBS_MCLK_PCICLK (1U<<2) +#define MEM_SIZE_MASK (3<<0) +#define MEM_SIZE_32MB (0<<0) +#define MEM_SIZE_64MB (1<<0) +#define MEM_SIZE_128MB (2<<0) +#define MEM_SIZE_256MB (3<<0) + +/* Port 0x3d4/0x3d5, index 0x36 */ +#define XGI_GE_CNTL 0x36 +#define GE_ENABLE (1U<<7) +/*#define RESERVED_6 (1U<<6) */ +/*#define RESERVED_5 (1U<<5) */ +#define GE_RESET (1U<<4) +/*#define RESERVED_3 (1U<<3) */ +#define GE_ENABLE_3D (1U<<2) +/*#define RESERVED_1 (1U<<1) */ +/*#define RESERVED_0 (1U<<0) */ + +/* Port 0x3ce/0x3cf, index 0x2a */ +#define XGI_MISC_CTRL 0x2a +#define MOTION_VID_SUSPEND (1U<<7) +#define DVI_CRTC_TIMING_SEL (1U<<6) +#define LCD_SEL_CTL_NEW (1U<<5) +#define LCD_SEL_EXT_DELYCTRL (1U<<4) +#define REG_LCDDPARST (1U<<3) +#define LCD2DPAOFF (1U<<2) +/*#define RESERVED_1 (1U<<1) */ +#define EN_GEPWM (1U<<0) /* Enable GE power management */ + + +#define BASE_3D_ENG 0x2800 + #define M2REG_FLUSH_ENGINE_ADDRESS 0x000 #define M2REG_FLUSH_ENGINE_COMMAND 0x00 #define M2REG_FLUSH_FLIP_ENGINE_MASK (ONE_BIT_MASK<<21) @@ -133,114 +171,6 @@ static inline void dwWriteReg(struct drm_map * map, u32 addr, u32 data) } -static inline void xgi_enable_mmio(struct xgi_info * info) -{ - u8 protect = 0; - u8 temp; - - /* Unprotect registers */ - DRM_WRITE8(info->mmio_map, 0x3C4, 0x11); - protect = DRM_READ8(info->mmio_map, 0x3C5); - DRM_WRITE8(info->mmio_map, 0x3C5, 0x92); - - DRM_WRITE8(info->mmio_map, 0x3D4, 0x3A); - temp = DRM_READ8(info->mmio_map, 0x3D5); - DRM_WRITE8(info->mmio_map, 0x3D5, temp | 0x20); - - /* Enable MMIO */ - DRM_WRITE8(info->mmio_map, 0x3D4, 0x39); - temp = DRM_READ8(info->mmio_map, 0x3D5); - DRM_WRITE8(info->mmio_map, 0x3D5, temp | 0x01); - - /* Protect registers */ - OUT3C5B(info->mmio_map, 0x11, protect); -} - -static inline void xgi_disable_mmio(struct xgi_info * info) -{ - u8 protect = 0; - u8 temp; - - /* Unprotect registers */ - DRM_WRITE8(info->mmio_map, 0x3C4, 0x11); - protect = DRM_READ8(info->mmio_map, 0x3C5); - DRM_WRITE8(info->mmio_map, 0x3C5, 0x92); - - /* Disable MMIO access */ - DRM_WRITE8(info->mmio_map, 0x3D4, 0x39); - temp = DRM_READ8(info->mmio_map, 0x3D5); - DRM_WRITE8(info->mmio_map, 0x3D5, temp & 0xFE); - - /* Protect registers */ - OUT3C5B(info->mmio_map, 0x11, protect); -} - -static inline void xgi_enable_ge(struct xgi_info * info) -{ - unsigned char bOld3cf2a = 0; - int wait = 0; - - // Enable GE - OUT3C5B(info->mmio_map, 0x11, 0x92); - - // Save and close dynamic gating - bOld3cf2a = IN3CFB(info->mmio_map, 0x2a); - OUT3CFB(info->mmio_map, 0x2a, bOld3cf2a & 0xfe); - - // Reset both 
3D and 2D engine - OUT3X5B(info->mmio_map, 0x36, 0x84); - wait = 10; - while (wait--) { - DRM_READ8(info->mmio_map, 0x36); - } - OUT3X5B(info->mmio_map, 0x36, 0x94); - wait = 10; - while (wait--) { - DRM_READ8(info->mmio_map, 0x36); - } - OUT3X5B(info->mmio_map, 0x36, 0x84); - wait = 10; - while (wait--) { - DRM_READ8(info->mmio_map, 0x36); - } - // Enable 2D engine only - OUT3X5B(info->mmio_map, 0x36, 0x80); - - // Enable 2D+3D engine - OUT3X5B(info->mmio_map, 0x36, 0x84); - - // Restore dynamic gating - OUT3CFB(info->mmio_map, 0x2a, bOld3cf2a); -} - -static inline void xgi_disable_ge(struct xgi_info * info) -{ - int wait = 0; - - // Reset both 3D and 2D engine - OUT3X5B(info->mmio_map, 0x36, 0x84); - - wait = 10; - while (wait--) { - DRM_READ8(info->mmio_map, 0x36); - } - OUT3X5B(info->mmio_map, 0x36, 0x94); - - wait = 10; - while (wait--) { - DRM_READ8(info->mmio_map, 0x36); - } - OUT3X5B(info->mmio_map, 0x36, 0x84); - - wait = 10; - while (wait--) { - DRM_READ8(info->mmio_map, 0x36); - } - - // Disable 2D engine only - OUT3X5B(info->mmio_map, 0x36, 0); -} - static inline void xgi_enable_dvi_interrupt(struct xgi_info * info) { OUT3CFB(info->mmio_map, 0x39, IN3CFB(info->mmio_map, 0x39) & ~0x01); //Set 3cf.39 bit 0 to 0 From be76f0eea9b455fde77e15ff35f4f00c70661e51 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Tue, 14 Aug 2007 13:19:48 -0700 Subject: [PATCH 254/437] Remove unused interrupt related functions. --- linux-core/xgi_regs.h | 24 ------------------------ 1 file changed, 24 deletions(-) diff --git a/linux-core/xgi_regs.h b/linux-core/xgi_regs.h index 7f643f92..098d824d 100644 --- a/linux-core/xgi_regs.h +++ b/linux-core/xgi_regs.h @@ -170,28 +170,4 @@ static inline void dwWriteReg(struct drm_map * map, u32 addr, u32 data) DRM_WRITE32(map, addr, data); } - -static inline void xgi_enable_dvi_interrupt(struct xgi_info * info) -{ - OUT3CFB(info->mmio_map, 0x39, IN3CFB(info->mmio_map, 0x39) & ~0x01); //Set 3cf.39 bit 0 to 0 - OUT3CFB(info->mmio_map, 0x39, IN3CFB(info->mmio_map, 0x39) | 0x01); //Set 3cf.39 bit 0 to 1 - OUT3CFB(info->mmio_map, 0x39, IN3CFB(info->mmio_map, 0x39) | 0x02); -} -static inline void xgi_disable_dvi_interrupt(struct xgi_info * info) -{ - OUT3CFB(info->mmio_map, 0x39, IN3CFB(info->mmio_map, 0x39) & ~0x02); -} - -static inline void xgi_enable_crt1_interrupt(struct xgi_info * info) -{ - OUT3CFB(info->mmio_map, 0x3d, IN3CFB(info->mmio_map, 0x3d) | 0x04); - OUT3CFB(info->mmio_map, 0x3d, IN3CFB(info->mmio_map, 0x3d) & ~0x04); - OUT3CFB(info->mmio_map, 0x3d, IN3CFB(info->mmio_map, 0x3d) | 0x08); -} - -static inline void xgi_disable_crt1_interrupt(struct xgi_info * info) -{ - OUT3CFB(info->mmio_map, 0x3d, IN3CFB(info->mmio_map, 0x3d) & ~0x08); -} - #endif From d3c8e98dd9ccc366513c117d032fbf80be4eb06a Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Tue, 14 Aug 2007 13:20:37 -0700 Subject: [PATCH 255/437] Move dwWriteReg to xgi_cmdlist.c, the only file where it is used. 
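An aside on the magic CRTC-0x36 values in the xgi_regs.h code removed just above: with the XGI_GE_CNTL bit names this series introduces (GE_ENABLE = 1<<7, GE_RESET = 1<<4, GE_ENABLE_3D = 1<<2, plus EN_GEPWM = 1<<0 in 3CF index 0x2a), they decode as:

	0x80 = GE_ENABLE                             /* 2D engine only        */
	0x84 = GE_ENABLE | GE_ENABLE_3D              /* 2D and 3D enabled     */
	0x94 = GE_ENABLE | GE_RESET | GE_ENABLE_3D   /* engines held in reset */
	0x00 = all bits clear                        /* engines disabled      */

and the old "bOld3cf2a & 0xfe" write clears EN_GEPWM, i.e. it disables GE power management while the engine is being reset; the rewritten xgi_enable_ge() now spells this out with named constants.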
--- linux-core/xgi_cmdlist.c | 14 ++++++++++++++ linux-core/xgi_regs.h | 13 ------------- 2 files changed, 14 insertions(+), 13 deletions(-) diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index a1ec5720..a728c0ef 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -34,6 +34,20 @@ static unsigned int get_batch_command(enum xgi_batch_type type); static void triggerHWCommandList(struct xgi_info * info); static void xgi_cmdlist_reset(struct xgi_info * info); + +/** + * Graphic engine register (2d/3d) acessing interface + */ +static inline void dwWriteReg(struct drm_map * map, u32 addr, u32 data) +{ +#ifdef XGI_MMIO_DEBUG + DRM_INFO("mmio_map->handle = 0x%p, addr = 0x%x, data = 0x%x\n", + map->handle, addr, data); +#endif + DRM_WRITE32(map, addr, data); +} + + int xgi_cmdlist_initialize(struct xgi_info * info, size_t size, struct drm_file * filp) { diff --git a/linux-core/xgi_regs.h b/linux-core/xgi_regs.h index 098d824d..2f9fbe64 100644 --- a/linux-core/xgi_regs.h +++ b/linux-core/xgi_regs.h @@ -157,17 +157,4 @@ static inline u8 IN3CFB(struct drm_map * map, u8 index) return DRM_READ8(map, 0x3CF); } - -/* - * Graphic engine register (2d/3d) acessing interface - */ -static inline void dwWriteReg(struct drm_map * map, u32 addr, u32 data) -{ -#ifdef XGI_MMIO_DEBUG - DRM_INFO("mmio_map->handle = 0x%p, addr = 0x%x, data = 0x%x\n", - map->handle, addr, data); -#endif - DRM_WRITE32(map, addr, data); -} - #endif From 7b12174aacd09a991be3e74a3db47534961a6887 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Tue, 14 Aug 2007 13:24:02 -0700 Subject: [PATCH 256/437] Clean up remaining C++ style comments. --- linux-core/xgi_misc.c | 28 ++++++++++++++++++---------- 1 file changed, 18 insertions(+), 10 deletions(-) diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index be75e970..8232de9a 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -202,15 +202,18 @@ bool xgi_crt_irq_handler(struct xgi_info * info) bool ret = FALSE; u8 save_3ce = DRM_READ8(info->mmio_map, 0x3ce); - if (IN3CFB(info->mmio_map, 0x37) & 0x01) // CRT1 interrupt just happened - { + /* CRT1 interrupt just happened + */ + if (IN3CFB(info->mmio_map, 0x37) & 0x01) { u8 op3cf_3d; u8 op3cf_37; - // What happened? + /* What happened? + */ op3cf_37 = IN3CFB(info->mmio_map, 0x37); - // Clear CRT interrupt + /* Clear CRT interrupt + */ op3cf_3d = IN3CFB(info->mmio_map, 0x3d); OUT3CFB(info->mmio_map, 0x3d, (op3cf_3d | 0x04)); OUT3CFB(info->mmio_map, 0x3d, (op3cf_3d & ~0x04)); @@ -226,25 +229,30 @@ bool xgi_dvi_irq_handler(struct xgi_info * info) bool ret = FALSE; const u8 save_3ce = DRM_READ8(info->mmio_map, 0x3ce); - if (IN3CFB(info->mmio_map, 0x38) & 0x20) { // DVI interrupt just happened + /* DVI interrupt just happened + */ + if (IN3CFB(info->mmio_map, 0x38) & 0x20) { const u8 save_3x4 = DRM_READ8(info->mmio_map, 0x3d4); u8 op3cf_39; u8 op3cf_37; u8 op3x5_5a; - // What happened? + /* What happened? 
+ */ op3cf_37 = IN3CFB(info->mmio_map, 0x37); - //Notify BIOS that DVI plug/unplug happened + /* Notify BIOS that DVI plug/unplug happened + */ op3x5_5a = IN3X5B(info->mmio_map, 0x5a); OUT3X5B(info->mmio_map, 0x5a, op3x5_5a & 0xf7); DRM_WRITE8(info->mmio_map, 0x3d4, save_3x4); - // Clear DVI interrupt + /* Clear DVI interrupt + */ op3cf_39 = IN3CFB(info->mmio_map, 0x39); - OUT3C5B(info->mmio_map, 0x39, (op3cf_39 & ~0x01)); //Set 3cf.39 bit 0 to 0 - OUT3C5B(info->mmio_map, 0x39, (op3cf_39 | 0x01)); //Set 3cf.39 bit 0 to 1 + OUT3C5B(info->mmio_map, 0x39, (op3cf_39 & ~0x01)); + OUT3C5B(info->mmio_map, 0x39, (op3cf_39 | 0x01)); ret = TRUE; } From f563a50d145848ed296b63c63422caff80232ddf Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Tue, 14 Aug 2007 13:44:51 -0700 Subject: [PATCH 257/437] Eliminate unused / useless ioctls. --- linux-core/xgi_drv.c | 6 ------ linux-core/xgi_drv.h | 14 +++----------- linux-core/xgi_misc.c | 33 --------------------------------- linux-core/xgi_pcie.c | 32 -------------------------------- shared-core/xgi_drm.h | 12 +----------- 5 files changed, 4 insertions(+), 93 deletions(-) diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index fa418c0d..0e77d4cd 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -41,15 +41,9 @@ static int xgi_bootstrap(struct drm_device *, void *, struct drm_file *); static struct drm_ioctl_desc xgi_ioctls[] = { DRM_IOCTL_DEF(DRM_XGI_BOOTSTRAP, xgi_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_XGI_ALLOC, xgi_alloc_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_XGI_FREE, xgi_free_ioctl, DRM_AUTH), - - DRM_IOCTL_DEF(DRM_XGI_GE_RESET, xgi_ge_reset_ioctl, DRM_AUTH), - DRM_IOCTL_DEF(DRM_XGI_DUMP_REGISTER, xgi_dump_register_ioctl, DRM_AUTH), - DRM_IOCTL_DEF(DRM_XGI_DEBUG_INFO, xgi_restore_registers_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_XGI_SUBMIT_CMDLIST, xgi_submit_cmdlist, DRM_AUTH), - DRM_IOCTL_DEF(DRM_XGI_TEST_RWINKERNEL, xgi_test_rwinkernel_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_XGI_STATE_CHANGE, xgi_state_change_ioctl, DRM_AUTH|DRM_MASTER), }; diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 9f746de2..194313cd 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -35,11 +35,11 @@ #define DRIVER_NAME "xgi" #define DRIVER_DESC "XGI XP5 / XP10 / XG47" -#define DRIVER_DATE "20070809" +#define DRIVER_DATE "20070814" #define DRIVER_MAJOR 0 -#define DRIVER_MINOR 11 -#define DRIVER_PATCHLEVEL 2 +#define DRIVER_MINOR 12 +#define DRIVER_PATCHLEVEL 0 #include "xgi_cmdlist.h" #include "xgi_drm.h" @@ -96,16 +96,8 @@ extern int xgi_alloc_ioctl(struct drm_device * dev, void * data, struct drm_file * filp); extern int xgi_free_ioctl(struct drm_device * dev, void * data, struct drm_file * filp); -extern int xgi_ge_reset_ioctl(struct drm_device * dev, void * data, - struct drm_file * filp); -extern int xgi_dump_register_ioctl(struct drm_device * dev, void * data, - struct drm_file * filp); -extern int xgi_restore_registers_ioctl(struct drm_device * dev, void * data, - struct drm_file * filp); extern int xgi_submit_cmdlist(struct drm_device * dev, void * data, struct drm_file * filp); -extern int xgi_test_rwinkernel_ioctl(struct drm_device * dev, void * data, - struct drm_file * filp); extern int xgi_state_change_ioctl(struct drm_device * dev, void * data, struct drm_file * filp); diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index 8232de9a..50a721c0 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -29,18 +29,6 @@ #include -int xgi_ge_reset_ioctl(struct drm_device * dev, void * 
data, - struct drm_file * filp) -{ - struct xgi_info *info = dev->dev_private; - - xgi_disable_ge(info); - xgi_enable_ge(info); - - return 0; -} - - /* * irq functions */ @@ -324,27 +312,6 @@ void xgi_dump_register(struct xgi_info * info) } -int xgi_dump_register_ioctl(struct drm_device * dev, void * data, - struct drm_file * filp) -{ - struct xgi_info *info = dev->dev_private; - - xgi_dump_register(info); - return 0; -} - - -int xgi_restore_registers_ioctl(struct drm_device * dev, void * data, - struct drm_file * filp) -{ - struct xgi_info *info = dev->dev_private; - - OUT3X5B(info->mmio_map, 0x13, 0); - OUT3X5B(info->mmio_map, 0x8b, 2); - return 0; -} - - #define WHOLD_GE_STATUS 0x2800 /* Test everything except the "whole GE busy" bit, the "master engine busy" diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index a0f52740..b4d204c1 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -157,35 +157,3 @@ void *xgi_find_pcie_virt(struct xgi_info * info, u32 address) return ((u8 *) info->dev->sg->virtual) + offset; } - -/* - address -- GE hw address -*/ -int xgi_test_rwinkernel_ioctl(struct drm_device * dev, void * data, - struct drm_file * filp) -{ - struct xgi_info *info = dev->dev_private; - u32 address = *(u32 *) data; - u32 *virtaddr = 0; - - - DRM_INFO("input GE HW addr is 0x%x\n", address); - - if (address == 0) { - return -EFAULT; - } - - virtaddr = (u32 *)xgi_find_pcie_virt(info, address); - - DRM_INFO("convert to CPU virt addr 0x%p\n", virtaddr); - - if (virtaddr != NULL) { - DRM_INFO("original [virtaddr] = 0x%x\n", *virtaddr); - *virtaddr = 0x00f00fff; - DRM_INFO("modified [virtaddr] = 0x%x\n", *virtaddr); - } else { - return -EFAULT; - } - - return 0; -} diff --git a/shared-core/xgi_drm.h b/shared-core/xgi_drm.h index c980a35a..d8715df5 100644 --- a/shared-core/xgi_drm.h +++ b/shared-core/xgi_drm.h @@ -122,22 +122,12 @@ struct xgi_state_info { #define DRM_XGI_ALLOC 1 #define DRM_XGI_FREE 2 #define DRM_XGI_SUBMIT_CMDLIST 3 -#define DRM_XGI_GE_RESET 4 -#define DRM_XGI_DUMP_REGISTER 5 -#define DRM_XGI_DEBUG_INFO 6 -#define DRM_XGI_TEST_RWINKERNEL 7 -#define DRM_XGI_STATE_CHANGE 8 +#define DRM_XGI_STATE_CHANGE 4 #define XGI_IOCTL_BOOTSTRAP DRM_IOWR(DRM_COMMAND_BASE + DRM_XGI_BOOTSTRAP, struct xgi_bootstrap) - #define XGI_IOCTL_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_XGI_ALLOC, struct xgi_mem_alloc) #define XGI_IOCTL_FREE DRM_IOW(DRM_COMMAND_BASE + DRM_XGI_FREE, __u32) - -#define XGI_IOCTL_GE_RESET DRM_IO(DRM_COMMAND_BASE + DRM_XGI_GE_RESET) -#define XGI_IOCTL_DUMP_REGISTER DRM_IO(DRM_COMMAND_BASE + DRM_XGI_DUMP_REGISTER) -#define XGI_IOCTL_DEBUG_INFO DRM_IO(DRM_COMMAND_BASE + DRM_XGI_DEBUG_INFO) #define XGI_IOCTL_SUBMIT_CMDLIST DRM_IOW(DRM_COMMAND_BASE + DRM_XGI_SUBMIT_CMDLIST, struct xgi_cmd_info) -#define XGI_IOCTL_TEST_RWINKERNEL DRM_IOW(DRM_COMMAND_BASE + DRM_XGI_TEST_RWINKERNEL, __u32) #define XGI_IOCTL_STATE_CHANGE DRM_IOW(DRM_COMMAND_BASE + DRM_XGI_STATE_CHANGE, struct xgi_state_info) #endif /* _XGI_DRM_H_ */ From ee01d3755ac03f2c47e3b4d9bf084d68e6ee95bc Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 15 Aug 2007 13:34:57 +1000 Subject: [PATCH 258/437] nouveau: Workaround mysterious PRAMIN clobbering by the card. 
--- shared-core/nv04_instmem.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/shared-core/nv04_instmem.c b/shared-core/nv04_instmem.c index b5569b55..fed6ff7e 100644 --- a/shared-core/nv04_instmem.c +++ b/shared-core/nv04_instmem.c @@ -94,6 +94,14 @@ int nv04_instmem_init(struct drm_device *dev) * the space that was reserved for RAMHT/FC/RO. */ offset = dev_priv->ramfc_offset + dev_priv->ramfc_size; + + /* On my NV4E, there's *something* clobbering the 16KiB just after + * where we setup these fixed tables. No idea what it is just yet, + * so reserve this space on all NV4X cards for now. + */ + if (dev_priv->card_type >= NV_40) + offset += 16*1024; + ret = nouveau_mem_init_heap(&dev_priv->ramin_heap, offset, dev_priv->ramin_rsvd_vram - offset); if (ret) { From c3faa589b09616acdfd827be1719f6c2706c49ab Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 15 Aug 2007 13:36:54 +1000 Subject: [PATCH 259/437] nouveau: Allow GART notifiers when using sgdma code. --- linux-core/nouveau_sgdma.c | 17 +++++++++++++++++ shared-core/nouveau_drv.h | 2 ++ shared-core/nouveau_notifier.c | 29 ++++++++++++++++++++--------- 3 files changed, 39 insertions(+), 9 deletions(-) diff --git a/linux-core/nouveau_sgdma.c b/linux-core/nouveau_sgdma.c index df970d11..97d5330b 100644 --- a/linux-core/nouveau_sgdma.c +++ b/linux-core/nouveau_sgdma.c @@ -316,3 +316,20 @@ nouveau_sgdma_nottm_hack_takedown(struct drm_device *dev) { } +int +nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma; + int pte; + + pte = (offset >> NV_CTXDMA_PAGE_SHIFT); + if (dev_priv->card_type < NV_50) { + *page = INSTANCE_RD(gpuobj, (pte + 2)) & ~NV_CTXDMA_PAGE_MASK; + return 0; + } + + DRM_ERROR("Unimplemented on NV50\n"); + return -EINVAL; +} + diff --git a/shared-core/nouveau_drv.h b/shared-core/nouveau_drv.h index b7459b1b..e96c8fad 100644 --- a/shared-core/nouveau_drv.h +++ b/shared-core/nouveau_drv.h @@ -421,6 +421,8 @@ extern void nouveau_irq_uninstall(struct drm_device *); /* nouveau_sgdma.c */ extern int nouveau_sgdma_init(struct drm_device *); extern void nouveau_sgdma_takedown(struct drm_device *); +extern int nouveau_sgdma_get_page(struct drm_device *, uint32_t offset, + uint32_t *page); extern struct drm_ttm_backend *nouveau_sgdma_init_ttm(struct drm_device *); extern int nouveau_sgdma_nottm_hack_init(struct drm_device *); extern void nouveau_sgdma_nottm_hack_takedown(struct drm_device *); diff --git a/shared-core/nouveau_notifier.c b/shared-core/nouveau_notifier.c index 31547aae..91f605ec 100644 --- a/shared-core/nouveau_notifier.c +++ b/shared-core/nouveau_notifier.c @@ -37,14 +37,13 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan) int flags, ret; /*TODO: PCI notifier blocks */ - if (dev_priv->agp_heap && - dev_priv->gart_info.type != NOUVEAU_GART_SGDMA) - flags = NOUVEAU_MEM_AGP | NOUVEAU_MEM_FB_ACCEPTABLE; - else if ( dev_priv->pci_heap ) + if (dev_priv->agp_heap) + flags = NOUVEAU_MEM_AGP; + else if (dev_priv->pci_heap) flags = NOUVEAU_MEM_PCI; else flags = NOUVEAU_MEM_FB; - flags |= NOUVEAU_MEM_MAPPED; + flags |= (NOUVEAU_MEM_MAPPED | NOUVEAU_MEM_FB_ACCEPTABLE); DRM_DEBUG("Allocating notifier block in %d\n", flags); chan->notifier_block = nouveau_mem_alloc(dev, 0, PAGE_SIZE, flags, @@ -88,6 +87,7 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, int count, uint32_t *b_offset) { struct drm_device *dev = chan->dev; + struct 
drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_gpuobj *nobj = NULL; struct mem_block *mem; uint32_t offset; @@ -107,18 +107,29 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, } mem->flags = NOUVEAU_MEM_NOTIFIER; - offset = chan->notifier_block->start + mem->start; + offset = chan->notifier_block->start; if (chan->notifier_block->flags & NOUVEAU_MEM_FB) { target = NV_DMA_TARGET_VIDMEM; - } else if (chan->notifier_block->flags & NOUVEAU_MEM_AGP) { - target = NV_DMA_TARGET_AGP; - } else if (chan->notifier_block->flags & NOUVEAU_MEM_PCI) { + } else + if (chan->notifier_block->flags & NOUVEAU_MEM_AGP) { + if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA && + dev_priv->card_type < NV_50) { + ret = nouveau_sgdma_get_page(dev, offset, &offset); + if (ret) + return ret; + target = NV_DMA_TARGET_PCI; + } else { + target = NV_DMA_TARGET_AGP; + } + } else + if (chan->notifier_block->flags & NOUVEAU_MEM_PCI) { target = NV_DMA_TARGET_PCI_NONLINEAR; } else { DRM_ERROR("Bad DMA target, flags 0x%08x!\n", chan->notifier_block->flags); return -EINVAL; } + offset += mem->start; if ((ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, offset, mem->size, From a615d2fde77092062f7e2bbfa39705b5f34547e8 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 15 Aug 2007 13:53:58 +1000 Subject: [PATCH 260/437] nouveau: Turn some messages into DRM_DEBUGs.. --- shared-core/nouveau_mem.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/shared-core/nouveau_mem.c b/shared-core/nouveau_mem.c index 419522f4..92fa6b05 100644 --- a/shared-core/nouveau_mem.c +++ b/shared-core/nouveau_mem.c @@ -531,13 +531,13 @@ alloc_ok: block->map_handle = entry->user_token; } - DRM_INFO("allocated 0x%llx\n", block->start); + DRM_DEBUG("allocated 0x%llx type=0x%08x\n", block->start, block->flags); return block; } void nouveau_mem_free(struct drm_device* dev, struct mem_block* block) { - DRM_INFO("freeing 0x%llx\n", block->start); + DRM_DEBUG("freeing 0x%llx type=0x%08x\n", block->start, block->flags); if (block->flags&NOUVEAU_MEM_MAPPED) drm_rmmap(dev, block->map); nouveau_mem_free_block(block); From 10f9b7bd0b471487371813083bd3481629b2a56f Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 15 Aug 2007 14:14:23 +1000 Subject: [PATCH 261/437] nouveau: Use count parameter in nouveau_notifier_alloc(). 
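Two details of the notifier changes are easy to miss in the hunks. First, the GART-notifier patch above translates the notifier block's base address through the SG ctxdma (nouveau_sgdma_get_page()) before adding the per-notifier sub-allocation offset (mem->start), so the DMA object points at the right page even when the "AGP" heap is actually sgdma-backed. Second, the one-line change below sizes the sub-allocation per request: each notifier object occupies 32 bytes of the channel's notifier block, so the heap allocation becomes count * 32 rather than a single 32-byte slot. As a trivial sketch (helper name is mine, not part of the driver):

/* Bytes reserved from the channel's notifier heap for `count` notifiers. */
static uint32_t notifier_alloc_size(int count)
{
	return (uint32_t)count * 32;
}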
--- shared-core/nouveau_notifier.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/shared-core/nouveau_notifier.c b/shared-core/nouveau_notifier.c index 91f605ec..71b8cbe1 100644 --- a/shared-core/nouveau_notifier.c +++ b/shared-core/nouveau_notifier.c @@ -45,11 +45,12 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan) flags = NOUVEAU_MEM_FB; flags |= (NOUVEAU_MEM_MAPPED | NOUVEAU_MEM_FB_ACCEPTABLE); - DRM_DEBUG("Allocating notifier block in %d\n", flags); chan->notifier_block = nouveau_mem_alloc(dev, 0, PAGE_SIZE, flags, (struct drm_file *)-2); if (!chan->notifier_block) return -ENOMEM; + DRM_DEBUG("Allocated notifier block in 0x%08x\n", + chan->notifier_block->flags); ret = nouveau_mem_init_heap(&chan->notifier_heap, 0, chan->notifier_block->size); @@ -99,7 +100,7 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, return -EINVAL; } - mem = nouveau_mem_alloc_block(chan->notifier_heap, 32, 0, + mem = nouveau_mem_alloc_block(chan->notifier_heap, count*32, 0, (struct drm_file *)-2); if (!mem) { DRM_ERROR("Channel %d notifier block full\n", chan->id); From 5346fc5f36b5e7c55fc7b5cd46f1e4d7563a86a4 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Tue, 14 Aug 2007 14:41:24 -0700 Subject: [PATCH 262/437] BSD: Replace brief description in each file's first line with doxygen later on. The brief descriptions usually had the wrong filename in them. --- bsd-core/ati_pcigart.c | 8 +++++--- bsd-core/drm_agpsupport.c | 8 +++++--- bsd-core/drm_auth.c | 8 +++++--- bsd-core/drm_bufs.c | 7 ++++--- bsd-core/drm_context.c | 7 ++++--- bsd-core/drm_dma.c | 11 ++++++++--- bsd-core/drm_drawable.c | 8 +++++--- bsd-core/drm_drv.c | 9 ++++++--- bsd-core/drm_fops.c | 8 +++++--- bsd-core/drm_ioctl.c | 8 +++++--- bsd-core/drm_irq.c | 8 +++++--- bsd-core/drm_lock.c | 22 +++++++++++++++++++--- bsd-core/drm_memory.c | 11 ++++++++--- bsd-core/drm_pci.c | 14 +++++++------- bsd-core/drm_scatter.c | 9 +++++++-- bsd-core/drm_sysctl.c | 5 +++++ bsd-core/drm_vm.c | 4 ++++ 17 files changed, 107 insertions(+), 48 deletions(-) diff --git a/bsd-core/ati_pcigart.c b/bsd-core/ati_pcigart.c index 682eace6..db19a75d 100644 --- a/bsd-core/ati_pcigart.c +++ b/bsd-core/ati_pcigart.c @@ -1,6 +1,3 @@ -/* ati_pcigart.h -- ATI PCI GART support -*- linux-c -*- - * Created: Wed Dec 13 21:52:19 2000 by gareth@valinux.com - */ /*- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. @@ -29,6 +26,11 @@ * */ +/** @file ati_pcigart.c + * Implementation of ATI's PCIGART, which provides an aperture in card virtual + * address space with addresses remapped to system memory. + */ + #include "drmP.h" #define ATI_PCIGART_PAGE_SIZE 4096 /* PCI GART page size */ diff --git a/bsd-core/drm_agpsupport.c b/bsd-core/drm_agpsupport.c index e8e162de..9aed5572 100644 --- a/bsd-core/drm_agpsupport.c +++ b/bsd-core/drm_agpsupport.c @@ -1,6 +1,3 @@ -/* drm_agpsupport.h -- DRM support for AGP/GART backend -*- linux-c -*- - * Created: Mon Dec 13 09:56:45 1999 by faith@precisioninsight.com - */ /*- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. @@ -31,6 +28,11 @@ * */ +/** @file drm_agpsupport.c + * Support code for tying the kernel AGP support to DRM drivers and + * the DRM's AGP ioctls. 
+ */ + #include "drmP.h" #ifdef __FreeBSD__ diff --git a/bsd-core/drm_auth.c b/bsd-core/drm_auth.c index 9b5f4f74..aa8238c4 100644 --- a/bsd-core/drm_auth.c +++ b/bsd-core/drm_auth.c @@ -1,6 +1,3 @@ -/* drm_auth.c -- IOCTLs for authentication -*- linux-c -*- - * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com - */ /*- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. @@ -31,6 +28,11 @@ * */ +/** @file drm_auth.c + * Implementation of the get/authmagic ioctls implementing the authentication + * scheme between the master and clients. + */ + #include "drmP.h" static int drm_hash_magic(drm_magic_t magic) diff --git a/bsd-core/drm_bufs.c b/bsd-core/drm_bufs.c index a0a3fc73..65d8c82b 100644 --- a/bsd-core/drm_bufs.c +++ b/bsd-core/drm_bufs.c @@ -1,6 +1,3 @@ -/* drm_bufs.h -- Generic buffer template -*- linux-c -*- - * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com - */ /*- * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. @@ -31,6 +28,10 @@ * */ +/** @file drm_bufs.c + * Implementation of the ioctls for setup of DRM mappings and DMA buffers. + */ + #include "dev/pci/pcireg.h" #include "drmP.h" diff --git a/bsd-core/drm_context.c b/bsd-core/drm_context.c index e34e8759..4155ee92 100644 --- a/bsd-core/drm_context.c +++ b/bsd-core/drm_context.c @@ -1,6 +1,3 @@ -/* drm_context.h -- IOCTLs for generic contexts -*- linux-c -*- - * Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com - */ /*- * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. @@ -31,6 +28,10 @@ * */ +/** @file drm_context.c + * Implementation of the context management ioctls. + */ + #include "drmP.h" /* ================================================================ diff --git a/bsd-core/drm_dma.c b/bsd-core/drm_dma.c index 4896cf22..71ef845b 100644 --- a/bsd-core/drm_dma.c +++ b/bsd-core/drm_dma.c @@ -1,6 +1,3 @@ -/* drm_dma.c -- DMA IOCTL and function support -*- linux-c -*- - * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com - */ /*- * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. @@ -31,6 +28,14 @@ * */ +/** @file drm_dma.c + * Support code for DMA buffer management. + * + * The implementation used to be significantly more complicated, but the + * complexity has been moved into the drivers as different buffer management + * schemes evolved. + */ + #include "drmP.h" int drm_dma_setup(drm_device_t *dev) diff --git a/bsd-core/drm_drawable.c b/bsd-core/drm_drawable.c index 7e038ab9..fb318d47 100644 --- a/bsd-core/drm_drawable.c +++ b/bsd-core/drm_drawable.c @@ -1,6 +1,3 @@ -/* drm_drawable.h -- IOCTLs for drawables -*- linux-c -*- - * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com - */ /*- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. @@ -31,6 +28,11 @@ * */ +/** @file drm_drawable.c + * This file implements ioctls to store information along with DRM drawables, + * such as the current set of cliprects for vblank-synced buffer swaps. 
+ */ + #include "drmP.h" struct bsd_drm_drawable_info { diff --git a/bsd-core/drm_drv.c b/bsd-core/drm_drv.c index a978f50f..208f4382 100644 --- a/bsd-core/drm_drv.c +++ b/bsd-core/drm_drv.c @@ -1,6 +1,3 @@ -/* drm_drv.h -- Generic driver template -*- linux-c -*- - * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com - */ /*- * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. @@ -31,6 +28,12 @@ * */ +/** @file drm_drv.c + * The catch-all file for DRM device support, including module setup/teardown, + * open/close, and ioctl dispatch. + */ + + #include #include "drmP.h" #include "drm.h" diff --git a/bsd-core/drm_fops.c b/bsd-core/drm_fops.c index 2d037ea5..20bae8d7 100644 --- a/bsd-core/drm_fops.c +++ b/bsd-core/drm_fops.c @@ -1,6 +1,3 @@ -/* drm_fops.h -- File operations for DRM -*- linux-c -*- - * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com - */ /*- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. @@ -32,6 +29,11 @@ * */ +/** @file drm_fops.c + * Support code for dealing with the file privates associated with each + * open of the DRM device. + */ + #include "drmP.h" drm_file_t *drm_find_file_by_proc(drm_device_t *dev, DRM_STRUCTPROC *p) diff --git a/bsd-core/drm_ioctl.c b/bsd-core/drm_ioctl.c index ebdb2140..328f1d1e 100644 --- a/bsd-core/drm_ioctl.c +++ b/bsd-core/drm_ioctl.c @@ -1,6 +1,3 @@ -/* drm_ioctl.h -- IOCTL processing for DRM -*- linux-c -*- - * Created: Fri Jan 8 09:01:26 1999 by faith@valinux.com - */ /*- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. @@ -31,6 +28,11 @@ * */ +/** @file drm_ioctl.c + * Varios minor DRM ioctls not applicable to other files, such as versioning + * information and reporting DRM information to userland. + */ + #include "drmP.h" /* diff --git a/bsd-core/drm_irq.c b/bsd-core/drm_irq.c index 9c437e9d..0772445a 100644 --- a/bsd-core/drm_irq.c +++ b/bsd-core/drm_irq.c @@ -1,6 +1,3 @@ -/* drm_irq.c -- IRQ IOCTL and function support - * Created: Fri Oct 18 2003 by anholt@FreeBSD.org - */ /*- * Copyright 2003 Eric Anholt * All Rights Reserved. @@ -28,6 +25,11 @@ * */ +/** @file drm_irq.c + * Support code for handling setup/teardown of interrupt handlers and + * handing interrupt handlers off to the drivers. + */ + #include "drmP.h" #include "drm.h" diff --git a/bsd-core/drm_lock.c b/bsd-core/drm_lock.c index 5acb13d3..326c083b 100644 --- a/bsd-core/drm_lock.c +++ b/bsd-core/drm_lock.c @@ -1,6 +1,3 @@ -/* lock.c -- IOCTLs for locking -*- linux-c -*- - * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com - */ /*- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. @@ -31,6 +28,25 @@ * */ +/** @file drm_lock.c + * Implementation of the ioctls and other support code for dealing with the + * hardware lock. + * + * The DRM hardware lock is a shared structure between the kernel and userland. + * + * On uncontended access where the new context was the last context, the + * client may take the lock without dropping down into the kernel, using atomic + * compare-and-set. + * + * If the client finds during compare-and-set that it was not the last owner + * of the lock, it calls the DRM lock ioctl, which may sleep waiting for the + * lock, and may have side-effects of kernel-managed context switching. 
+ * + * When the client releases the lock, if the lock is marked as being contended + * by another client, then the DRM unlock ioctl is called so that the + * contending client may be woken up. + */ + #include "drmP.h" int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context) diff --git a/bsd-core/drm_memory.c b/bsd-core/drm_memory.c index 6d467e98..1f1f7f4b 100644 --- a/bsd-core/drm_memory.c +++ b/bsd-core/drm_memory.c @@ -1,6 +1,3 @@ -/* drm_memory.h -- Memory management wrappers for DRM -*- linux-c -*- - * Created: Thu Feb 4 14:00:34 1999 by faith@valinux.com - */ /*- *Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. @@ -31,6 +28,14 @@ * */ +/** @file drm_memory.c + * Wrappers for kernel memory allocation routines, and MTRR management support. + * + * This file previously implemented a memory consumption tracking system using + * the "area" argument for various different types of allocations, but that + * has been stripped out for now. + */ + #include "drmP.h" MALLOC_DEFINE(M_DRM, "drm", "DRM Data Structures"); diff --git a/bsd-core/drm_pci.c b/bsd-core/drm_pci.c index a33f5f9c..6ec6b983 100644 --- a/bsd-core/drm_pci.c +++ b/bsd-core/drm_pci.c @@ -1,10 +1,3 @@ -/** - * \file drm_pci.h - * \brief PCI consistent, DMA-accessible memory functions. - * - * \author Eric Anholt - */ - /*- * Copyright 2003 Eric Anholt. * All Rights Reserved. @@ -28,6 +21,13 @@ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/** + * \file drm_pci.h + * \brief PCI consistent, DMA-accessible memory allocation. + * + * \author Eric Anholt + */ + #include "drmP.h" /**********************************************************************/ diff --git a/bsd-core/drm_scatter.c b/bsd-core/drm_scatter.c index 91c3c6c5..92e715e0 100644 --- a/bsd-core/drm_scatter.c +++ b/bsd-core/drm_scatter.c @@ -1,5 +1,3 @@ -/* drm_scatter.h -- IOCTLs to manage scatter/gather memory -*- linux-c -*- - * Created: Mon Dec 18 23:20:54 2000 by gareth@valinux.com */ /*- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. @@ -29,6 +27,13 @@ * */ +/** @file drm_scatter.c + * Allocation of memory for scatter-gather mappings by the graphics chip. + * + * The memory allocated here is then made into an aperture in the card + * by drm_ati_pcigart_init(). + */ + #include "drmP.h" #define DEBUG_SCATTER 0 diff --git a/bsd-core/drm_sysctl.c b/bsd-core/drm_sysctl.c index b2d0cc0c..3de5b8ae 100644 --- a/bsd-core/drm_sysctl.c +++ b/bsd-core/drm_sysctl.c @@ -21,6 +21,11 @@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/** @file drm_sysctl.c + * Implementation of various sysctls for controlling DRM behavior and reporting + * debug information. + */ + #include "drmP.h" #include "drm.h" diff --git a/bsd-core/drm_vm.c b/bsd-core/drm_vm.c index af1dbaa8..fea31f52 100644 --- a/bsd-core/drm_vm.c +++ b/bsd-core/drm_vm.c @@ -21,6 +21,10 @@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/** @file drm_vm.c + * Support code for mmaping of DRM maps. + */ + #include "drmP.h" #include "drm.h" From 8a881b47f7c21be2cdeff4b0d1b00d39d503f358 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Wed, 15 Aug 2007 10:52:01 -0700 Subject: [PATCH 263/437] Add simple regression test for getstats (does it not crash the kernel?). 
--- .gitignore | 1 + tests/Makefile.am | 1 + tests/getstats.c | 51 +++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 53 insertions(+) create mode 100644 tests/getstats.c diff --git a/.gitignore b/.gitignore index f63c1532..47c7d4a9 100644 --- a/.gitignore +++ b/.gitignore @@ -55,6 +55,7 @@ tests/auth tests/dristat tests/drmstat tests/getclient +tests/getstats tests/getversion tests/openclose tests/updatedraw diff --git a/tests/Makefile.am b/tests/Makefile.am index e846efbe..b5f8c04d 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -19,6 +19,7 @@ TESTS = auth \ openclose \ getversion \ getclient \ + getstats \ updatedraw EXTRA_PROGRAMS = $(TESTS) diff --git a/tests/getstats.c b/tests/getstats.c new file mode 100644 index 00000000..bd55b12e --- /dev/null +++ b/tests/getstats.c @@ -0,0 +1,51 @@ +/* + * Copyright © 2007 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Eric Anholt + * + */ + +#include +#include "drmtest.h" + +/** + * Checks DRM_IOCTL_GET_STATS. + * + * I don't care too much about the actual contents, just that the kernel + * doesn't crash. + */ +int main(int argc, char **argv) +{ + int fd, ret; + drm_stats_t stats; + + fd = drm_open_any(); + + ret = ioctl(fd, DRM_IOCTL_GET_STATS, &stats); + assert(ret == 0); + + assert(stats.count >= 0); + + close(fd); + return 0; +} From a23a47b16cf813f0e7e9616ef6eb66f6ae0bc2ac Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Wed, 15 Aug 2007 11:03:10 -0700 Subject: [PATCH 264/437] Add a regression test for the setversion interface. 
--- .gitignore | 1 + tests/Makefile.am | 1 + tests/setversion.c | 84 ++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 86 insertions(+) create mode 100644 tests/setversion.c diff --git a/.gitignore b/.gitignore index 47c7d4a9..8ae98e81 100644 --- a/.gitignore +++ b/.gitignore @@ -58,4 +58,5 @@ tests/getclient tests/getstats tests/getversion tests/openclose +tests/setversion tests/updatedraw diff --git a/tests/Makefile.am b/tests/Makefile.am index b5f8c04d..acd12b34 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -20,6 +20,7 @@ TESTS = auth \ getversion \ getclient \ getstats \ + setversion \ updatedraw EXTRA_PROGRAMS = $(TESTS) diff --git a/tests/setversion.c b/tests/setversion.c new file mode 100644 index 00000000..dc6eccf7 --- /dev/null +++ b/tests/setversion.c @@ -0,0 +1,84 @@ +/* + * Copyright © 2007 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Eric Anholt + * + */ + +#include +#include "drmtest.h" + +/** + * Checks DRM_IOCTL_SET_VERSION. + * + * This tests that we can get the actual version out, and that setting invalid + * major/minor numbers fails appropriately. It does not check the actual + * behavior differenses resulting from an increased DI version. + */ +int main(int argc, char **argv) +{ + int fd, ret; + drm_set_version_t sv, version; + + fd = drm_open_any(); + + /* First, check that we can get the DD/DI versions. 
*/ + memset(&version, 0, sizeof(version)); + version.drm_di_major = -1; + version.drm_di_minor = -1; + version.drm_dd_major = -1; + version.drm_dd_minor = -1; + ret = ioctl(fd, DRM_IOCTL_SET_VERSION, &version); + assert(ret == 0); + assert(version.drm_di_major != -1); + assert(version.drm_di_minor != -1); + assert(version.drm_dd_major != -1); + assert(version.drm_dd_minor != -1); + + /* Check that an invalid DI major fails */ + sv = version; + sv.drm_di_major++; + ret = ioctl(fd, DRM_IOCTL_SET_VERSION, &sv); + assert(ret == -1 && errno == EINVAL); + + /* Check that an invalid DI minor fails */ + sv = version; + sv.drm_di_major++; + ret = ioctl(fd, DRM_IOCTL_SET_VERSION, &sv); + assert(ret == -1 && errno == EINVAL); + + /* Check that an invalid DD major fails */ + sv = version; + sv.drm_dd_major++; + ret = ioctl(fd, DRM_IOCTL_SET_VERSION, &sv); + assert(ret == -1 && errno == EINVAL); + + /* Check that an invalid DD minor fails */ + sv = version; + sv.drm_dd_minor++; + ret = ioctl(fd, DRM_IOCTL_SET_VERSION, &sv); + assert(ret == -1 && errno == EINVAL); + + close(fd); + return 0; +} From 56133e04de40e3004018d069cb229e62ee10e0f4 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Wed, 15 Aug 2007 11:04:56 -0700 Subject: [PATCH 265/437] BSD: Fix regression in setversion ioctl (current version not returned). --- bsd-core/drm_ioctl.c | 34 +++++++++++++++++++--------------- 1 file changed, 19 insertions(+), 15 deletions(-) diff --git a/bsd-core/drm_ioctl.c b/bsd-core/drm_ioctl.c index 328f1d1e..d6af4284 100644 --- a/bsd-core/drm_ioctl.c +++ b/bsd-core/drm_ioctl.c @@ -232,23 +232,27 @@ int drm_getstats(drm_device_t *dev, void *data, struct drm_file *file_priv) int drm_setversion(drm_device_t *dev, void *data, struct drm_file *file_priv) { drm_set_version_t *sv = data; - drm_set_version_t retv; + drm_set_version_t ver; int if_version; - retv.drm_di_major = DRM_IF_MAJOR; - retv.drm_di_minor = DRM_IF_MINOR; - retv.drm_dd_major = dev->driver.major; - retv.drm_dd_minor = dev->driver.minor; + /* Save the incoming data, and set the response before continuing + * any further. + */ + ver = *sv; + sv->drm_di_major = DRM_IF_MAJOR; + sv->drm_di_minor = DRM_IF_MINOR; + sv->drm_dd_major = dev->driver.major; + sv->drm_dd_minor = dev->driver.minor; - if (sv->drm_di_major != -1) { - if (sv->drm_di_major != DRM_IF_MAJOR || - sv->drm_di_minor < 0 || sv->drm_di_minor > DRM_IF_MINOR) { + if (ver.drm_di_major != -1) { + if (ver.drm_di_major != DRM_IF_MAJOR || + ver.drm_di_minor < 0 || ver.drm_di_minor > DRM_IF_MINOR) { return EINVAL; } - if_version = DRM_IF_VERSION(sv->drm_di_major, - sv->drm_dd_minor); + if_version = DRM_IF_VERSION(ver.drm_di_major, + ver.drm_dd_minor); dev->if_version = DRM_MAX(if_version, dev->if_version); - if (sv->drm_di_minor >= 1) { + if (ver.drm_di_minor >= 1) { /* * Version 1.1 includes tying of DRM to specific device */ @@ -256,10 +260,10 @@ int drm_setversion(drm_device_t *dev, void *data, struct drm_file *file_priv) } } - if (sv->drm_dd_major != -1) { - if (sv->drm_dd_major != dev->driver.major || - sv->drm_dd_minor < 0 || - sv->drm_dd_minor > dev->driver.minor) + if (ver.drm_dd_major != -1) { + if (ver.drm_dd_major != dev->driver.major || + ver.drm_dd_minor < 0 || + ver.drm_dd_minor > dev->driver.minor) { return EINVAL; } From 4cdd871e90cd5fe440d0a4af5d69f1d84e49e742 Mon Sep 17 00:00:00 2001 From: vehemens Date: Wed, 15 Aug 2007 11:05:44 -0700 Subject: [PATCH 266/437] Bug #11989: Fix regression in getstats ioctl (kernel panic). 
--- bsd-core/drm_ioctl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bsd-core/drm_ioctl.c b/bsd-core/drm_ioctl.c index d6af4284..ce78bb8f 100644 --- a/bsd-core/drm_ioctl.c +++ b/bsd-core/drm_ioctl.c @@ -205,7 +205,7 @@ int drm_getstats(drm_device_t *dev, void *data, struct drm_file *file_priv) drm_stats_t *stats = data; int i; - memset(&stats, 0, sizeof(stats)); + memset(stats, 0, sizeof(drm_stats_t)); DRM_LOCK(); From a9ee144eab5bbd5f90747c38cdc016da46c124fe Mon Sep 17 00:00:00 2001 From: vehemens Date: Wed, 15 Aug 2007 11:12:46 -0700 Subject: [PATCH 267/437] BSD: simplify drm_ioctl() after other refactoring. --- bsd-core/drm_drv.c | 21 ++++++--------------- 1 file changed, 6 insertions(+), 15 deletions(-) diff --git a/bsd-core/drm_drv.c b/bsd-core/drm_drv.c index 208f4382..afd90351 100644 --- a/bsd-core/drm_drv.c +++ b/bsd-core/drm_drv.c @@ -821,14 +821,7 @@ int drm_close(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p) int drm_ioctl(struct cdev *kdev, u_long cmd, caddr_t data, int flags, DRM_STRUCTPROC *p) { -#ifdef __FreeBSD__ - drm_device_t *dev = kdev->si_drv1; -#elif defined(__NetBSD__) - drm_device_t *dev = device_lookup(&drm_cd, minor(kdev)); -#else - drm_device_t *dev = device_lookup(&drm_cd, - minor(kdev)))->dv_cfdata->cf_driver->cd_devs[minor(kdev)]; -#endif + drm_device_t *dev = drm_get_device_from_kdev(kdev); int retcode = 0; drm_ioctl_desc_t *ioctl; int (*func)(drm_device_t *dev, void *data, struct drm_file *file_priv); @@ -915,15 +908,13 @@ int drm_ioctl(struct cdev *kdev, u_long cmd, caddr_t data, int flags, ((ioctl->flags & DRM_MASTER) && !file_priv->master)) return EACCES; - if (is_driver_ioctl) - DRM_LOCK(); - retcode = func(dev, data, file_priv); if (is_driver_ioctl) { + DRM_LOCK(); + /* shared code returns -errno */ + retcode = -func(dev, data, file_priv); DRM_UNLOCK(); - /* Driver ioctls in shared code follow the linux convention of - * returning -errno instead of errno. - */ - retcode = -retcode; + } else { + retcode = func(dev, data, file_priv); } if (retcode != 0) From d1a2b26a99205b802919aa0901b4e19cb2d251fe Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Wed, 15 Aug 2007 13:08:19 -0700 Subject: [PATCH 268/437] Require master in setversion test, since it requires auth. --- tests/setversion.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/setversion.c b/tests/setversion.c index dc6eccf7..f4bfbfba 100644 --- a/tests/setversion.c +++ b/tests/setversion.c @@ -40,7 +40,7 @@ int main(int argc, char **argv) int fd, ret; drm_set_version_t sv, version; - fd = drm_open_any(); + fd = drm_open_any_master(); /* First, check that we can get the DD/DI versions. */ memset(&version, 0, sizeof(version)); From 8a1ca401b403848d894f664977f9e939eaf07291 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Wed, 15 Aug 2007 13:13:24 -0700 Subject: [PATCH 269/437] Fix a bad error message in auth.c regression test. --- tests/auth.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/auth.c b/tests/auth.c index 45265d6c..4160d1de 100644 --- a/tests/auth.c +++ b/tests/auth.c @@ -108,7 +108,7 @@ static void server() ret = ioctl(drmfd, DRM_IOCTL_AUTH_MAGIC, &auth); if (ret == -1) - err(1, "Authenticating bad magic succeeded\n"); + err(1, "Failure to authenticate client magic\n"); wait_event(1, CLIENT_DONE); } From 9254e00e4bbbc02282415cd0ca7bd6b5cb52be82 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Wed, 15 Aug 2007 13:41:24 -0700 Subject: [PATCH 270/437] Add a set of tests for DRM locking, exposing issues on BSD. 
--- .gitignore | 1 + tests/Makefile.am | 1 + tests/lock.c | 262 ++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 264 insertions(+) create mode 100644 tests/lock.c diff --git a/.gitignore b/.gitignore index 8ae98e81..0991da8c 100644 --- a/.gitignore +++ b/.gitignore @@ -57,6 +57,7 @@ tests/drmstat tests/getclient tests/getstats tests/getversion +tests/lock tests/openclose tests/setversion tests/updatedraw diff --git a/tests/Makefile.am b/tests/Makefile.am index acd12b34..dce1754e 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -20,6 +20,7 @@ TESTS = auth \ getversion \ getclient \ getstats \ + lock \ setversion \ updatedraw diff --git a/tests/lock.c b/tests/lock.c new file mode 100644 index 00000000..3f627558 --- /dev/null +++ b/tests/lock.c @@ -0,0 +1,262 @@ +/* + * Copyright © 2007 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Eric Anholt + * + */ + +/** @file lock.c + * Tests various potential failures of the DRM locking mechanisms + */ + +#include +#include "drmtest.h" + +enum auth_event { + SERVER_READY, + CLIENT_MAGIC, + SERVER_LOCKED, + CLIENT_LOCKED, +}; + +int commfd[2]; +unsigned int lock1 = 0x00001111; +unsigned int lock2 = 0x00002222; + +/* return time in milliseconds */ +static unsigned int +get_millis() +{ + struct timeval tv; + + gettimeofday(&tv, NULL); + return tv.tv_sec * 1000 + tv.tv_usec / 1000; +} + +static void +wait_event(int pipe, enum auth_event expected_event) +{ + int ret; + enum auth_event event; + unsigned char in; + + ret = read(commfd[pipe], &in, 1); + if (ret == -1) + err(1, "read error"); + event = in; + + if (event != expected_event) + errx(1, "unexpected event: %d\n", event); +} + +static void +send_event(int pipe, enum auth_event send_event) +{ + int ret; + unsigned char event; + + event = send_event; + ret = write(commfd[pipe], &event, 1); + if (ret == -1) + err(1, "failed to send event %d", event); +} + +static void +client_auth(int drmfd) +{ + struct drm_auth auth; + int ret; + + wait_event(0, SERVER_READY); + + /* Get a client magic number and pass it to the master for auth. 
*/ + ret = ioctl(drmfd, DRM_IOCTL_GET_MAGIC, &auth); + if (ret == -1) + err(1, "Couldn't get client magic"); + send_event(0, CLIENT_MAGIC); + ret = write(commfd[0], &auth.magic, sizeof(auth.magic)); + if (ret == -1) + err(1, "Couldn't write auth data"); +} + +static void +server_auth(int drmfd) +{ + struct drm_auth auth; + int ret; + + send_event(1, SERVER_READY); + wait_event(1, CLIENT_MAGIC); + ret = read(commfd[1], &auth.magic, sizeof(auth.magic)); + if (ret == -1) + err(1, "Failure to read client magic"); + + ret = ioctl(drmfd, DRM_IOCTL_AUTH_MAGIC, &auth); + if (ret == -1) + err(1, "Failure to authenticate client magic\n"); +} + +/** Tests that locking is successful in normal conditions */ +static void +test_lock_unlock(int drmfd) +{ + int ret; + + ret = drmGetLock(drmfd, lock1, 0); + if (ret != 0) + err(1, "Locking failed"); + ret = drmUnlock(drmfd, lock1); + if (ret != 0) + err(1, "Unlocking failed"); +} + +/** Tests that unlocking the lock while it's not held works correctly */ +static void +test_unlock_unlocked(int drmfd) +{ + int ret; + + ret = drmUnlock(drmfd, lock1); + if (ret == 0) + err(1, "Unlocking unlocked lock succeeded"); +} + +/** Tests that unlocking a lock held by another context fails appropriately */ +static void +test_unlock_unowned(int drmfd) +{ + int ret; + + ret = drmGetLock(drmfd, lock1, 0); + assert(ret == 0); + ret = drmUnlock(drmfd, lock2); + if (ret == 0) + errx(1, "Unlocking other context's lock succeeded"); + ret = drmUnlock(drmfd, lock1); + assert(ret == 0); +} + +/** + * Tests that an open/close by the same process doesn't result in the lock + * being dropped. + */ +static void test_open_close_locked(drmfd) +{ + int ret, tempfd; + + ret = drmGetLock(drmfd, lock1, 0); + assert(ret == 0); + /* XXX: Need to make sure that this is the same device as drmfd */ + tempfd = drm_open_any(); + close(tempfd); + ret = drmUnlock(drmfd, lock1); + if (ret != 0) + errx(1, "lock lost during open/close by same pid"); + + close(drmfd); +} + +static void client() +{ + int drmfd, ret; + unsigned int time; + + /* XXX: Should make sure we open the same DRM as the master */ + drmfd = drm_open_any(); + + client_auth(drmfd); + + /* Wait for the server to grab the lock, then grab it ourselves (to + * contest it). Hopefully we hit it within the window of when the + * server locks. + */ + wait_event(0, SERVER_LOCKED); + ret = drmGetLock(drmfd, lock2, 0); + time = get_millis(); + if (ret != 0) + err(1, "Failed to get lock on client\n"); + drmUnlock(drmfd, lock2); + + /* Tell the server that our locking completed, and when it did */ + send_event(0, CLIENT_LOCKED); + ret = write(commfd[0], &time, sizeof(time)); + + exit(0); +} + +static void server() +{ + int drmfd, tempfd, ret; + unsigned int client_time, unlock_time; + + drmfd = drm_open_any_master(); + + test_lock_unlock(drmfd); + test_unlock_unlocked(drmfd); + test_unlock_unowned(drmfd); + test_open_close_locked(drmfd); + + /* Perform the authentication sequence with the client. */ + server_auth(drmfd); + + /* Now, test that the client attempting to lock while the server + * holds the lock works correctly. 
+ */ + ret = drmGetLock(drmfd, lock1, 0); + assert(ret == 0); + send_event(1, SERVER_LOCKED); + /* Wait a while for the client to do its thing */ + sleep(1); + ret = drmUnlock(drmfd, lock1); + assert(ret == 0); + unlock_time = get_millis(); + + wait_event(1, CLIENT_LOCKED); + ret = read(commfd[1], &client_time, sizeof(client_time)); + if (ret == -1) + err(1, "Failure to read client magic"); + + if (client_time < unlock_time) + errx(1, "Client took lock before server released it"); +} + +int main(int argc, char **argv) +{ + int ret; + + + ret = pipe(commfd); + if (ret == -1) + err(1, "Couldn't create pipe"); + + ret = fork(); + if (ret == -1) + err(1, "failure to fork client"); + if (ret == 0) + client(); + else + server(); + + return 0; +} + From 6e93c35ba7c5001e756d0c9d1a4f534384652a5a Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Wed, 15 Aug 2007 13:42:04 -0700 Subject: [PATCH 271/437] BSD: Return EINVAL if drm_unlock is called on an unheld or other-owner lock. --- bsd-core/drm_lock.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/bsd-core/drm_lock.c b/bsd-core/drm_lock.c index 326c083b..fb86fc68 100644 --- a/bsd-core/drm_lock.c +++ b/bsd-core/drm_lock.c @@ -173,6 +173,12 @@ int drm_unlock(drm_device_t *dev, void *data, struct drm_file *file_priv) DRM_CURRENTPID, lock->context); return EINVAL; } + /* Check that the context unlock being requested actually matches + * who currently holds the lock. + */ + if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) || + _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) != lock->context) + return EINVAL; atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]); From b668d6d9050106bebfb704e4ed32d2924bb26371 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Wed, 15 Aug 2007 14:29:31 -0700 Subject: [PATCH 272/437] Fix dev->agp->base initialization on BSD, and fix addmap range check on Linux. With the previous linux commit, an AGP aperture at the end of the address space would have wrapped to 0 and the test would have failed. --- bsd-core/drm_agpsupport.c | 2 +- bsd-core/drm_bufs.c | 12 +++++++++++- linux-core/drm_bufs.c | 2 +- 3 files changed, 13 insertions(+), 3 deletions(-) diff --git a/bsd-core/drm_agpsupport.c b/bsd-core/drm_agpsupport.c index 9aed5572..6f963b9c 100644 --- a/bsd-core/drm_agpsupport.c +++ b/bsd-core/drm_agpsupport.c @@ -184,7 +184,6 @@ int drm_agp_enable(drm_device_t *dev, drm_agp_mode_t mode) dev->agp->mode = mode.mode; agp_enable(dev->agp->agpdev, mode.mode); - dev->agp->base = dev->agp->info.ai_aperture_base; dev->agp->enabled = 1; return 0; } @@ -405,6 +404,7 @@ drm_agp_head_t *drm_agp_init(void) return NULL; head->agpdev = agpdev; agp_get_info(agpdev, &head->info); + head->base = head->info.ai_aperture_base; head->memory = NULL; DRM_INFO("AGP at 0x%08lx %dMB\n", (long)head->info.ai_aperture_base, diff --git a/bsd-core/drm_bufs.c b/bsd-core/drm_bufs.c index 65d8c82b..9b58c593 100644 --- a/bsd-core/drm_bufs.c +++ b/bsd-core/drm_bufs.c @@ -191,7 +191,17 @@ int drm_addmap(drm_device_t * dev, unsigned long offset, unsigned long size, break; case _DRM_AGP: /*valid = 0;*/ - map->offset += dev->agp->base; + /* In some cases (i810 driver), user space may have already + * added the AGP base itself, because dev->agp->base previously + * only got set during AGP enable. So, only add the base + * address if the map's offset isn't already within the + * aperture. 
+ */ + if (map->offset < dev->agp->base || + map->offset > dev->agp->base + + dev->agp->info.ai_aperture_size - 1) { + map->offset += dev->agp->base; + } map->mtrr = dev->agp->mtrr; /* for getmap */ /*for (entry = dev->agp->memory; entry; entry = entry->next) { if ((map->offset >= entry->bound) && diff --git a/linux-core/drm_bufs.c b/linux-core/drm_bufs.c index f0b28fa1..60eca60c 100644 --- a/linux-core/drm_bufs.c +++ b/linux-core/drm_bufs.c @@ -231,7 +231,7 @@ static int drm_addmap_core(struct drm_device *dev, unsigned int offset, */ if (map->offset < dev->agp->base || map->offset > dev->agp->base + - dev->agp->agp_info.aper_size * 1024 * 1024) { + dev->agp->agp_info.aper_size * 1024 * 1024 - 1) { map->offset += dev->agp->base; } map->mtrr = dev->agp->agp_mtrr; /* for getmap */ From d8a800b63de09f41d482d2b3367e4da67ed0f92b Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Wed, 15 Aug 2007 21:05:26 -0700 Subject: [PATCH 273/437] Implement fence support. --- linux-core/Makefile.kernel | 3 +- linux-core/xgi_cmdlist.c | 76 ++++++++++++++++++++++++++++++-------- linux-core/xgi_cmdlist.h | 2 + linux-core/xgi_drv.c | 18 +++++++++ linux-core/xgi_drv.h | 13 ++++++- linux-core/xgi_regs.h | 9 +++++ 6 files changed, 103 insertions(+), 18 deletions(-) diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel index c898206d..c651b0b2 100644 --- a/linux-core/Makefile.kernel +++ b/linux-core/Makefile.kernel @@ -38,7 +38,8 @@ via-objs := via_irq.o via_drv.o via_map.o via_mm.o via_dma.o via_verifier.o \ via_video.o via_dmablit.o via_fence.o via_buffer.o mach64-objs := mach64_drv.o mach64_dma.o mach64_irq.o mach64_state.o nv-objs := nv_drv.o -xgi-objs := xgi_cmdlist.o xgi_drv.o xgi_fb.o xgi_misc.o xgi_pcie.o +xgi-objs := xgi_cmdlist.o xgi_drv.o xgi_fb.o xgi_misc.o xgi_pcie.o \ + xgi_fence.o ifeq ($(CONFIG_COMPAT),y) drm-objs += drm_ioc32.o diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index a728c0ef..5409892a 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -28,8 +28,10 @@ #include "xgi_regs.h" #include "xgi_misc.h" #include "xgi_cmdlist.h" +#include -static void xgi_emit_flush(struct xgi_info * info, bool link); +static void xgi_emit_flush(struct xgi_info * info, bool stop); +static void xgi_emit_nop(struct xgi_info * info); static unsigned int get_batch_command(enum xgi_batch_type type); static void triggerHWCommandList(struct xgi_info * info); static void xgi_cmdlist_reset(struct xgi_info * info); @@ -101,7 +103,7 @@ int xgi_submit_cmdlist(struct drm_device * dev, void * data, begin[0] = (cmd << 24) | BEGIN_VALID_MASK - | (BEGIN_BEGIN_IDENTIFICATION_MASK & pCmdInfo->id); + | (BEGIN_BEGIN_IDENTIFICATION_MASK & info->next_sequence); begin[1] = BEGIN_LINK_ENABLE_MASK | pCmdInfo->size; begin[2] = pCmdInfo->hw_addr >> 4; begin[3] = 0; @@ -134,19 +136,20 @@ int xgi_submit_cmdlist(struct drm_device * dev, void * data, DRM_DEBUG("info->cmdring.last_ptr != NULL\n"); if (pCmdInfo->type == BTYPE_3D) { - xgi_emit_flush(info, TRUE); + xgi_emit_flush(info, FALSE); } info->cmdring.last_ptr[1] = begin[1]; info->cmdring.last_ptr[2] = begin[2]; info->cmdring.last_ptr[3] = begin[3]; - wmb(); + DRM_WRITEMEMORYBARRIER(); info->cmdring.last_ptr[0] = begin[0]; triggerHWCommandList(info); } info->cmdring.last_ptr = xgi_find_pcie_virt(info, pCmdInfo->hw_addr); + drm_fence_flush_old(info->dev, 0, info->next_sequence); return 0; } @@ -213,9 +216,11 @@ void xgi_cmdlist_cleanup(struct xgi_info * info) */ if (info->cmdring.last_ptr != NULL) { xgi_emit_flush(info, FALSE); - 
xgi_waitfor_pci_idle(info); + xgi_emit_nop(info); } + xgi_waitfor_pci_idle(info); + (void) memset(&info->cmdring, 0, sizeof(info->cmdring)); } } @@ -233,23 +238,25 @@ static void triggerHWCommandList(struct xgi_info * info) /** * Emit a flush to the CRTL command stream. * @info XGI info structure - * @link Emit (or don't emit) link information at start of flush command. * * This function assumes info->cmdring.ptr is non-NULL. */ -static void xgi_emit_flush(struct xgi_info * info, bool link) +void xgi_emit_flush(struct xgi_info * info, bool stop) { - static const u32 flush_command[8] = { - (0x10 << 24), + const u32 flush_command[8] = { + ((0x10 << 24) + | (BEGIN_BEGIN_IDENTIFICATION_MASK & info->next_sequence)), BEGIN_LINK_ENABLE_MASK | (0x00004), 0x00000000, 0x00000000, - /* Flush everything with the default 32 clock delay. + /* Flush the 2D engine with the default 32 clock delay. */ - 0x003fffff, 0x003fffff, 0x003fffff, 0x003fffff + M2REG_FLUSH_ENGINE_COMMAND | M2REG_FLUSH_2D_ENGINE_MASK, + M2REG_FLUSH_ENGINE_COMMAND | M2REG_FLUSH_2D_ENGINE_MASK, + M2REG_FLUSH_ENGINE_COMMAND | M2REG_FLUSH_2D_ENGINE_MASK, + M2REG_FLUSH_ENGINE_COMMAND | M2REG_FLUSH_2D_ENGINE_MASK, }; - const unsigned int base = (link) ? 0 : 4; - const unsigned int flush_size = (8 - base) * sizeof(u32); + const unsigned int flush_size = sizeof(flush_command); u32 *batch_addr; u32 hw_addr; @@ -263,17 +270,54 @@ static void xgi_emit_flush(struct xgi_info * info, bool link) batch_addr = info->cmdring.ptr + (info->cmdring.ring_offset / 4); - (void) memcpy(batch_addr, & flush_command[base], flush_size); + (void) memcpy(batch_addr, flush_command, flush_size); + + if (stop) { + *batch_addr |= BEGIN_STOP_STORE_CURRENT_POINTER_MASK; + } info->cmdring.last_ptr[1] = BEGIN_LINK_ENABLE_MASK | (flush_size / 4); info->cmdring.last_ptr[2] = hw_addr >> 4; info->cmdring.last_ptr[3] = 0; - wmb(); + DRM_WRITEMEMORYBARRIER(); info->cmdring.last_ptr[0] = (get_batch_command(BTYPE_CTRL) << 24) | (BEGIN_VALID_MASK); triggerHWCommandList(info); info->cmdring.ring_offset += flush_size; - info->cmdring.last_ptr = (link) ? batch_addr : NULL; + info->cmdring.last_ptr = batch_addr; +} + + +/** + * Emit an empty command to the CRTL command stream. + * @info XGI info structure + * + * This function assumes info->cmdring.ptr is non-NULL. In addition, since + * this function emits a command that does not have linkage information, + * it sets info->cmdring.ptr to NULL. 
+ */ +void xgi_emit_nop(struct xgi_info * info) +{ + info->cmdring.last_ptr[1] = BEGIN_LINK_ENABLE_MASK + | (BEGIN_BEGIN_IDENTIFICATION_MASK & info->next_sequence); + info->cmdring.last_ptr[2] = 0; + info->cmdring.last_ptr[3] = 0; + DRM_WRITEMEMORYBARRIER(); + info->cmdring.last_ptr[0] = (get_batch_command(BTYPE_CTRL) << 24) + | (BEGIN_VALID_MASK); + + triggerHWCommandList(info); + + info->cmdring.last_ptr = NULL; +} + + +void xgi_emit_irq(struct xgi_info * info) +{ + if (info->cmdring.last_ptr == NULL) + return; + + xgi_emit_flush(info, TRUE); } diff --git a/linux-core/xgi_cmdlist.h b/linux-core/xgi_cmdlist.h index dc3fbe5a..f6f1c1ef 100644 --- a/linux-core/xgi_cmdlist.h +++ b/linux-core/xgi_cmdlist.h @@ -61,4 +61,6 @@ extern int xgi_state_change(struct xgi_info * info, unsigned int to, extern void xgi_cmdlist_cleanup(struct xgi_info * info); +extern void xgi_emit_irq(struct xgi_info * info); + #endif /* _XGI_CMDLIST_H_ */ diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index 0e77d4cd..241cd39f 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -37,6 +37,17 @@ static struct pci_device_id pciidlist[] = { xgi_PCI_IDS }; +static struct drm_fence_driver xgi_fence_driver = { + .num_classes = 1, + .wrap_diff = BEGIN_BEGIN_IDENTIFICATION_MASK, + .flush_diff = BEGIN_BEGIN_IDENTIFICATION_MASK - 1, + .sequence_mask = BEGIN_BEGIN_IDENTIFICATION_MASK, + .lazy_capable = 1, + .emit = xgi_fence_emit_sequence, + .poke_flush = xgi_poke_flush, + .has_irq = xgi_fence_has_irq +}; + static int xgi_bootstrap(struct drm_device *, void *, struct drm_file *); static struct drm_ioctl_desc xgi_ioctls[] = { @@ -95,6 +106,8 @@ static struct drm_driver driver = { .remove = __devexit_p(drm_cleanup_pci), }, + .fence_driver = &xgi_fence_driver, + .name = DRIVER_NAME, .desc = DRIVER_DESC, .date = DRIVER_DATE, @@ -189,6 +202,10 @@ int xgi_bootstrap(struct drm_device * dev, void * data, int err; + spin_lock_init(&info->fence_lock); + info->next_sequence = 0; + info->complete_sequence = 0; + if (info->mmio_map == NULL) { err = drm_addmap(dev, info->mmio.base, info->mmio.size, _DRM_REGISTERS, _DRM_KERNEL, @@ -344,6 +361,7 @@ irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS) DRM_WRITE32(info->mmio_map, 0x2800 + M2REG_AUTO_LINK_SETTING_ADDRESS, M2REG_AUTO_LINK_SETTING_COMMAND | irq_bits); + xgi_fence_handler(dev); return IRQ_HANDLED; } else { return IRQ_NONE; diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 194313cd..c815f63e 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -38,7 +38,7 @@ #define DRIVER_DATE "20070814" #define DRIVER_MAJOR 0 -#define DRIVER_MINOR 12 +#define DRIVER_MINOR 13 #define DRIVER_PATCHLEVEL 0 #include "xgi_cmdlist.h" @@ -72,6 +72,10 @@ struct xgi_info { bool pcie_heap_initialized; struct xgi_cmdring_info cmdring; + + spinlock_t fence_lock; + unsigned complete_sequence; + unsigned next_sequence; }; extern int xgi_fb_heap_init(struct xgi_info * info); @@ -92,6 +96,13 @@ extern void xgi_disable_mmio(struct xgi_info * info); extern void xgi_enable_ge(struct xgi_info * info); extern void xgi_disable_ge(struct xgi_info * info); +extern void xgi_poke_flush(struct drm_device * dev, uint32_t class); +extern int xgi_fence_emit_sequence(struct drm_device * dev, uint32_t class, + uint32_t flags, uint32_t * sequence, uint32_t * native_type); +extern void xgi_fence_handler(struct drm_device * dev); +extern int xgi_fence_has_irq(struct drm_device *dev, uint32_t class, + uint32_t flags); + extern int xgi_alloc_ioctl(struct drm_device * dev, void * data, struct drm_file * filp); 
extern int xgi_free_ioctl(struct drm_device * dev, void * data, diff --git a/linux-core/xgi_regs.h b/linux-core/xgi_regs.h index 2f9fbe64..5c0100a0 100644 --- a/linux-core/xgi_regs.h +++ b/linux-core/xgi_regs.h @@ -83,6 +83,14 @@ #define M2REG_FLUSH_2D_ENGINE_MASK (ONE_BIT_MASK<<20) #define M2REG_FLUSH_3D_ENGINE_MASK TWENTY_BIT_MASK +#define M2REG_RESET_ADDRESS 0x004 +#define M2REG_RESET_COMMAND 0x01 +#define M2REG_RESET_STATUS2_MASK (ONE_BIT_MASK<<10) +#define M2REG_RESET_STATUS1_MASK (ONE_BIT_MASK<<9) +#define M2REG_RESET_STATUS0_MASK (ONE_BIT_MASK<<8) +#define M2REG_RESET_3DENG_MASK (ONE_BIT_MASK<<4) +#define M2REG_RESET_2DENG_MASK (ONE_BIT_MASK<<2) + /* Write register */ #define M2REG_AUTO_LINK_SETTING_ADDRESS 0x010 #define M2REG_AUTO_LINK_SETTING_COMMAND 0x04 @@ -110,6 +118,7 @@ /** * Begin instruction, double-word 0 */ +#define BEGIN_STOP_STORE_CURRENT_POINTER_MASK (ONE_BIT_MASK<<22) #define BEGIN_VALID_MASK (ONE_BIT_MASK<<20) #define BEGIN_BEGIN_IDENTIFICATION_MASK TWENTY_BIT_MASK From 8a4d7f34d9c0182c466518c6f413d9a039db402d Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 17 Aug 2007 01:12:46 +1000 Subject: [PATCH 274/437] nouveau: Detect memory on NFORCE/NFORCE2 correctly. --- shared-core/nouveau_mem.c | 43 ++++++++++++++++++++++++++++++--------- 1 file changed, 33 insertions(+), 10 deletions(-) diff --git a/shared-core/nouveau_mem.c b/shared-core/nouveau_mem.c index 92fa6b05..30345797 100644 --- a/shared-core/nouveau_mem.c +++ b/shared-core/nouveau_mem.c @@ -219,6 +219,33 @@ void nouveau_mem_close(struct drm_device *dev) nouveau_mem_takedown(&dev_priv->pci_heap); } +/*XXX won't work on BSD because of pci_read_config_dword */ +static uint32_t +nouveau_mem_fb_amount_igp(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct pci_dev *bridge; + uint32_t mem; + + bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0,1)); + if (!bridge) { + DRM_ERROR("no bridge device\n"); + return 0; + } + + if (dev_priv->flags&NV_NFORCE) { + pci_read_config_dword(bridge, 0x7C, &mem); + return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024; + } else + if(dev_priv->flags&NV_NFORCE2) { + pci_read_config_dword(bridge, 0x84, &mem); + return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024; + } + + DRM_ERROR("impossible!\n"); + return 0; +} + /* returns the amount of FB ram in bytes */ uint64_t nouveau_mem_fb_amount(struct drm_device *dev) { @@ -263,18 +290,14 @@ uint64_t nouveau_mem_fb_amount(struct drm_device *dev) case NV_44: case NV_50: default: - // XXX won't work on BSD because of pci_read_config_dword - if (dev_priv->flags&NV_NFORCE) { - uint32_t mem; - pci_read_config_dword(dev->pdev, 0x7C, &mem); - return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024; - } else if(dev_priv->flags&NV_NFORCE2) { - uint32_t mem; - pci_read_config_dword(dev->pdev, 0x84, &mem); - return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024; + if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) { + return nouveau_mem_fb_amount_igp(dev); } else { uint64_t mem; - mem=(NV_READ(NV04_FIFO_DATA)&NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK) >> NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT; + + mem = (NV_READ(NV04_FIFO_DATA) & + NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK) >> + NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT; return mem*1024*1024; } break; From 0d3c741df19c35307723422c1f2f28a23995823d Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Thu, 16 Aug 2007 13:43:04 -0700 Subject: [PATCH 275/437] Forgot to add this file on the last commit. 
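This is the xgi_fence.c referenced by the fence_driver hooks wired up in
the previous commit. Retirement is driven by the sequence number each batch
carries in its BEGIN identification field: the driver reads the most
recently processed BEGIN id back from the hardware and reports it to the
fence core. Simplified from xgi_do_flush() below (0x2820 is the MMIO offset
the driver polls; locking elided -- see xgi_do_flush() for the
fence_lock/fm->lock handling):

    u32 begin_id = DRM_READ32(info->mmio_map, 0x2820)
        & BEGIN_BEGIN_IDENTIFICATION_MASK;

    if (begin_id != info->complete_sequence) {
        info->complete_sequence = begin_id;
        drm_fence_handler(dev, 0, begin_id, DRM_FENCE_TYPE_EXE);
    }

Sequence numbers themselves are handed out in xgi_fence_emit_sequence(),
wrapping back to 1 once they exceed BEGIN_BEGIN_IDENTIFICATION_MASK.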
--- linux-core/xgi_fence.c | 125 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 125 insertions(+) create mode 100644 linux-core/xgi_fence.c diff --git a/linux-core/xgi_fence.c b/linux-core/xgi_fence.c new file mode 100644 index 00000000..e5b545de --- /dev/null +++ b/linux-core/xgi_fence.c @@ -0,0 +1,125 @@ +/* + * (C) Copyright IBM Corporation 2007 + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * on the rights to use, copy, modify, merge, publish, distribute, sub + * license, and/or sell copies of the Software, and to permit persons to whom + * the Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: + * Ian Romanick + */ + +#include "xgi_drv.h" +#include "xgi_regs.h" +#include "xgi_misc.h" +#include "xgi_cmdlist.h" + +static uint32_t xgi_do_flush(struct drm_device * dev, uint32_t class) +{ + struct xgi_info * info = dev->dev_private; + struct drm_fence_class_manager * fc = &dev->fm.class[class]; + uint32_t pending_flush_types = 0; + uint32_t signaled_flush_types = 0; + + + if ((info == NULL) || (class != 0)) + return 0; + + spin_lock(&info->fence_lock); + + pending_flush_types = fc->pending_flush | + ((fc->pending_exe_flush) ? DRM_FENCE_TYPE_EXE : 0); + + if (pending_flush_types) { + if (pending_flush_types & DRM_FENCE_TYPE_EXE) { + const u32 begin_id = DRM_READ32(info->mmio_map, + 0x2820) + & BEGIN_BEGIN_IDENTIFICATION_MASK; + + if (begin_id != info->complete_sequence) { + info->complete_sequence = begin_id; + signaled_flush_types |= DRM_FENCE_TYPE_EXE; + } + } + + if (signaled_flush_types) { + drm_fence_handler(dev, 0, info->complete_sequence, + signaled_flush_types); + } + } + + spin_unlock(&info->fence_lock); + + return fc->pending_flush | + ((fc->pending_exe_flush) ? 
DRM_FENCE_TYPE_EXE : 0); +} + + +int xgi_fence_emit_sequence(struct drm_device * dev, uint32_t class, + uint32_t flags, uint32_t * sequence, + uint32_t * native_type) +{ + struct xgi_info * info = dev->dev_private; + + if ((info == NULL) || (class != 0)) + return -EINVAL; + + + spin_lock(&info->fence_lock); + info->next_sequence++; + if (info->next_sequence > BEGIN_BEGIN_IDENTIFICATION_MASK) { + info->next_sequence = 1; + } + spin_unlock(&info->fence_lock); + + + *sequence = (uint32_t) info->next_sequence; + *native_type = DRM_FENCE_TYPE_EXE; + + return 0; +} + + +void xgi_poke_flush(struct drm_device * dev, uint32_t class) +{ + struct drm_fence_manager * fm = &dev->fm; + unsigned long flags; + + + write_lock_irqsave(&fm->lock, flags); + xgi_do_flush(dev, class); + write_unlock_irqrestore(&fm->lock, flags); +} + + +void xgi_fence_handler(struct drm_device * dev) +{ + struct drm_fence_manager * fm = &dev->fm; + + + write_lock(&fm->lock); + xgi_do_flush(dev, 0); + write_unlock(&fm->lock); +} + + +int xgi_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t flags) +{ + return ((class == 0) && (flags == DRM_FENCE_TYPE_EXE)) ? 1 : 0; +} From 3383e8bd6bcd2323c81252e617c8522593baf818 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Fri, 17 Aug 2007 10:53:18 -0700 Subject: [PATCH 276/437] Remove unnecessary include. --- linux-core/xgi_cmdlist.c | 1 - 1 file changed, 1 deletion(-) diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index 5409892a..261f4e13 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -28,7 +28,6 @@ #include "xgi_regs.h" #include "xgi_misc.h" #include "xgi_cmdlist.h" -#include static void xgi_emit_flush(struct xgi_info * info, bool stop); static void xgi_emit_nop(struct xgi_info * info); From a122e7dabfaade751e8f6bb6d1488902fd36a40e Mon Sep 17 00:00:00 2001 From: Patrice Mandin Date: Sun, 19 Aug 2007 18:41:18 +0200 Subject: [PATCH 277/437] Function pci_get_bus_and_slot needs 2.6.19 or later --- shared-core/nouveau_mem.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/shared-core/nouveau_mem.c b/shared-core/nouveau_mem.c index 30345797..3c294e4b 100644 --- a/shared-core/nouveau_mem.c +++ b/shared-core/nouveau_mem.c @@ -223,6 +223,7 @@ void nouveau_mem_close(struct drm_device *dev) static uint32_t nouveau_mem_fb_amount_igp(struct drm_device *dev) { +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19) struct drm_nouveau_private *dev_priv = dev->dev_private; struct pci_dev *bridge; uint32_t mem; @@ -243,6 +244,10 @@ nouveau_mem_fb_amount_igp(struct drm_device *dev) } DRM_ERROR("impossible!\n"); +#else + DRM_ERROR("Linux kernel >= 2.6.19 required to check for igp memory amount\n"); +#endif + return 0; } From c8760c7999b8aeb6d51b09c062331f518953a920 Mon Sep 17 00:00:00 2001 From: Patrice Mandin Date: Sun, 19 Aug 2007 18:45:01 +0200 Subject: [PATCH 278/437] Check also for Linux, as it's not supported on different OS --- shared-core/nouveau_mem.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/shared-core/nouveau_mem.c b/shared-core/nouveau_mem.c index 3c294e4b..12d1ba75 100644 --- a/shared-core/nouveau_mem.c +++ b/shared-core/nouveau_mem.c @@ -223,7 +223,7 @@ void nouveau_mem_close(struct drm_device *dev) static uint32_t nouveau_mem_fb_amount_igp(struct drm_device *dev) { -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19) +#if defined(LINUX) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) struct drm_nouveau_private *dev_priv = dev->dev_private; struct pci_dev *bridge; uint32_t mem; From 
216f1b0573b2c0e39ac82c7f56235c1003e9bd4d Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 21 Aug 2007 02:18:27 +1000 Subject: [PATCH 279/437] nouveau: Poke 0x2230 on NV47 also. Makes 0x2220 work the same way as on NV40. --- shared-core/nouveau_fifo.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/shared-core/nouveau_fifo.c b/shared-core/nouveau_fifo.c index f0c2a556..1aa724f1 100644 --- a/shared-core/nouveau_fifo.c +++ b/shared-core/nouveau_fifo.c @@ -84,9 +84,16 @@ static int nouveau_fifo_instmem_configure(struct drm_device *dev) { case NV_50: case NV_40: + switch (dev_priv->chipset) { + case 0x47: + case 0x49: + case 0x4b: + NV_WRITE(0x2230, 1); + break; + default: + break; + } NV_WRITE(NV40_PFIFO_RAMFC, 0x30002); - if((dev_priv->chipset == 0x49) || (dev_priv->chipset == 0x4b)) - NV_WRITE(0x2230,0x00000001); break; case NV_44: NV_WRITE(NV40_PFIFO_RAMFC, ((nouveau_mem_fb_amount(dev)-512*1024+dev_priv->ramfc_offset)>>16) | From 03c0490129816b5f5b40855438e948fdae572d06 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 21 Aug 2007 02:23:21 +1000 Subject: [PATCH 280/437] nouveau: Add NV44 ctx ucode. Patch from stillunknown. Microcode is similar enough to the NV4A one that it should be able to use the same initial PGRAPH context. One day this mess will go away, honest.. --- shared-core/nv40_graph.c | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/shared-core/nv40_graph.c b/shared-core/nv40_graph.c index c1464bc2..0e6028af 100644 --- a/shared-core/nv40_graph.c +++ b/shared-core/nv40_graph.c @@ -1249,6 +1249,7 @@ nv40_graph_create_context(struct nouveau_channel *chan) ctx_size = NV49_GRCTX_SIZE; ctx_init = nv49_graph_context_init; break; + case 0x44: case 0x4a: ctx_size = NV4A_GRCTX_SIZE; ctx_init = nv4a_graph_context_init; @@ -1453,6 +1454,39 @@ static uint32_t nv43_ctx_voodoo[] = { ~0 }; +static uint32_t nv44_ctx_voodoo[] = { + 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, + 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409a65, 0x00409f06, + 0x0040ac68, 0x0040248f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042, + 0x001041c6, 0x00104040, 0x00200001, 0x0060000a, 0x00700000, 0x001040c5, + 0x00402320, 0x00402321, 0x00402322, 0x00402324, 0x00402326, 0x0040232b, + 0x001040c5, 0x00402328, 0x001040c5, 0x00402320, 0x00402468, 0x0060000d, + 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080, 0x00402be6, + 0x007000a0, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d, 0x00110158, + 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4, 0x001041c9, + 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e, 0x001242c0, + 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143, 0x0011415f, + 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10, 0x001046ec, + 0x00500060, 0x00404b87, 0x0060000d, 0x004084e6, 0x002000f1, 0x0060000a, + 0x00148653, 0x00104668, 0x0010c66d, 0x00120682, 0x0011068b, 0x00168691, + 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6, 0x001646cc, + 0x001186e6, 0x001046ed, 0x001246f0, 0x002000c0, 0x00100700, 0x0010c3d7, + 0x001043e1, 0x00500060, 0x00200232, 0x0060000a, 0x00104800, 0x00108901, + 0x00104910, 0x00124920, 0x0020001f, 0x00100940, 0x00140965, 0x00148a00, + 0x00108a14, 0x00160b00, 0x00134b2c, 0x0010cd00, 0x0010cd04, 0x0010cd08, + 0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00, 0x00104f06, 0x002002c8, + 0x0060000a, 0x00300000, 0x00200080, 0x00407d00, 0x00200084, 0x00800001, + 0x00200510, 0x0060000a, 0x002037e0, 0x0040838a, 0x00201320, 
0x00800029, + 0x00409400, 0x00600006, 0x004090e6, 0x00700080, 0x0020007a, 0x0060000a, + 0x00104280, 0x002002c8, 0x0060000a, 0x00200004, 0x00800001, 0x00700000, + 0x00200000, 0x0060000a, 0x00106002, 0x0040ac68, 0x00700000, 0x00200000, + 0x0060000a, 0x00106002, 0x00700080, 0x00400a68, 0x00500060, 0x00600007, + 0x00409e88, 0x0060000f, 0x00000000, 0x00500060, 0x00200000, 0x0060000a, + 0x00700000, 0x00106001, 0x00910880, 0x00901ffe, 0x01940000, 0x00200020, + 0x0060000b, 0x00500069, 0x0060000c, 0x00402c68, 0x0040ae06, 0x0040af05, + 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0 +}; + static uint32_t nv46_ctx_voodoo[] = { 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00408f65, 0x00409306, @@ -1609,6 +1643,7 @@ nv40_graph_init(struct drm_device *dev) switch (dev_priv->chipset) { case 0x40: ctx_voodoo = nv40_ctx_voodoo; break; case 0x43: ctx_voodoo = nv43_ctx_voodoo; break; + case 0x44: ctx_voodoo = nv44_ctx_voodoo; break; case 0x46: ctx_voodoo = nv46_ctx_voodoo; break; case 0x49: ctx_voodoo = nv49_4b_ctx_voodoo; break; case 0x4a: ctx_voodoo = nv4a_ctx_voodoo; break; From 76337bdb19fb6a098fc6d6ceaafb58a4ed15f9b0 Mon Sep 17 00:00:00 2001 From: Stephane Marchesin Date: Mon, 6 Aug 2007 17:42:31 +0200 Subject: [PATCH 281/437] nouveau: fix the comment and debug message for PCIGART size --- shared-core/nouveau_mem.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/shared-core/nouveau_mem.c b/shared-core/nouveau_mem.c index 12d1ba75..2cc0ed77 100644 --- a/shared-core/nouveau_mem.c +++ b/shared-core/nouveau_mem.c @@ -439,11 +439,11 @@ int nouveau_mem_init(struct drm_device *dev) struct drm_scatter_gather sgreq; DRM_DEBUG("Allocating sg memory for PCI DMA\n"); - sgreq.size = 16 << 20; //4MB of PCI scatter-gather zone + sgreq.size = 16 << 20; //16MB of PCI scatter-gather zone if (drm_sg_alloc(dev, &sgreq)) { - DRM_ERROR("Unable to allocate 4MB of scatter-gather" - " pages for PCI DMA!"); + DRM_ERROR("Unable to allocate %dMB of scatter-gather" + " pages for PCI DMA!",sgreq.size>>20); } else { if (nouveau_mem_init_heap(&dev_priv->pci_heap, 0, dev->sg->pages * PAGE_SIZE)) { From c8ee6a6cabbd44c06e382f99c2691d3efe46b984 Mon Sep 17 00:00:00 2001 From: Stephane Marchesin Date: Wed, 22 Aug 2007 04:20:09 +0200 Subject: [PATCH 282/437] nouveau: redo nv30_graph.c. Should work better, but we still lack a couple of cards. --- shared-core/nv30_graph.c | 2818 ++++++++++++++++++++++++++++++++++++-- 1 file changed, 2722 insertions(+), 96 deletions(-) diff --git a/shared-core/nv30_graph.c b/shared-core/nv30_graph.c index 161f3154..ca43bb95 100644 --- a/shared-core/nv30_graph.c +++ b/shared-core/nv30_graph.c @@ -8,97 +8,2703 @@ #include "nouveau_drm.h" /* - * This is obviously not the correct size. + * There are 4 families : + * NV30 is 0x10de:0x030* (not working, no dump for that one) + * + * NV31 is 0x10de:0x031* + * + * NV34 is 0x10de:0x032* + * + * NV35 is 0x10de:0x033* (NV35 and NV36 are the same) + * NV36 is 0x10de:0x034* + * + * Not seen in the wild, no dumps (probably NV35) : + * NV37 is 0x10de:0x00fc, 0x10de:0x00fd + * NV38 is 0x10de:0x0333, 0x10de:0x00fe + * */ -#define NV30_GRCTX_SIZE (23840) -/*TODO: deciper what each offset in the context represents. The below - * contexts are taken from dumps just after the 3D object is - * created. 
- */ -static void nv30_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) + +#define NV31_GRCTX_SIZE (22392) +#define NV34_GRCTX_SIZE (18140) +#define NV35_GRCTX_SIZE (22396) + +static void nv31_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) { struct drm_nouveau_private *dev_priv = dev->dev_private; int i; - - INSTANCE_WR(ctx, 0x28/4, 0x10000000); - INSTANCE_WR(ctx, 0x40c/4, 0x00000101); - INSTANCE_WR(ctx, 0x420/4, 0x00000111); - INSTANCE_WR(ctx, 0x424/4, 0x00000060); - INSTANCE_WR(ctx, 0x440/4, 0x00000080); - INSTANCE_WR(ctx, 0x444/4, 0xffff0000); - INSTANCE_WR(ctx, 0x448/4, 0x00000001); - INSTANCE_WR(ctx, 0x45c/4, 0x44400000); - INSTANCE_WR(ctx, 0x448/4, 0xffff0000); - INSTANCE_WR(ctx, 0x4dc/4, 0xfff00000); - INSTANCE_WR(ctx, 0x4e0/4, 0xfff00000); - INSTANCE_WR(ctx, 0x4e8/4, 0x00011100); - for (i = 0x504; i <= 0x540; i += 4) - INSTANCE_WR(ctx, i/4, 0x7ff00000); + INSTANCE_WR(ctx, 0x410/4, 0x00000101); + INSTANCE_WR(ctx, 0x424/4, 0x00000111); + INSTANCE_WR(ctx, 0x428/4, 0x00000060); + INSTANCE_WR(ctx, 0x444/4, 0x00000080); + INSTANCE_WR(ctx, 0x448/4, 0xffff0000); + INSTANCE_WR(ctx, 0x44c/4, 0x00000001); + INSTANCE_WR(ctx, 0x460/4, 0x44400000); + INSTANCE_WR(ctx, 0x48c/4, 0xffff0000); + for(i = 0x4e0; i< 0x4e8; i += 4) + INSTANCE_WR(ctx, i/4, 0x0fff0000); + INSTANCE_WR(ctx, 0x4ec/4, 0x00011100); + for(i = 0x508; i< 0x548; i += 4) + INSTANCE_WR(ctx, i/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x550/4, 0x4b7fffff); + INSTANCE_WR(ctx, 0x58c/4, 0x00000080); + INSTANCE_WR(ctx, 0x590/4, 0x30201000); + INSTANCE_WR(ctx, 0x594/4, 0x70605040); + INSTANCE_WR(ctx, 0x598/4, 0xb8a89888); + INSTANCE_WR(ctx, 0x59c/4, 0xf8e8d8c8); + INSTANCE_WR(ctx, 0x5b0/4, 0xb0000000); + for(i = 0x600; i< 0x640; i += 4) + INSTANCE_WR(ctx, i/4, 0x00010588); + for(i = 0x640; i< 0x680; i += 4) + INSTANCE_WR(ctx, i/4, 0x00030303); + for(i = 0x6c0; i< 0x700; i += 4) + INSTANCE_WR(ctx, i/4, 0x0008aae4); + for(i = 0x700; i< 0x740; i += 4) + INSTANCE_WR(ctx, i/4, 0x01012000); + for(i = 0x740; i< 0x780; i += 4) + INSTANCE_WR(ctx, i/4, 0x00080008); + INSTANCE_WR(ctx, 0x85c/4, 0x00040000); + INSTANCE_WR(ctx, 0x860/4, 0x00010000); + for(i = 0x864; i< 0x874; i += 4) + INSTANCE_WR(ctx, i/4, 0x00040004); + INSTANCE_WR(ctx, 0x1f18/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1f1c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1f20/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1f28/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1f2c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1f30/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1f38/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1f3c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1f40/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1f48/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1f4c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1f50/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1f58/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1f5c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1f60/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1f68/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1f6c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1f70/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1f78/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1f7c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1f80/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1f88/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1f8c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1f90/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1f98/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1f9c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1fa0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1fa8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1fac/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1fb0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1fb8/4, 0x10700ff9); + INSTANCE_WR(ctx, 
0x1fbc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1fc0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1fc8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1fcc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1fd0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1fd8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1fdc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1fe0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1fe8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1fec/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1ff0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1ff8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1ffc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2000/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2008/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x200c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2010/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2018/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x201c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2020/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2028/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x202c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2030/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2038/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x203c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2040/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2048/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x204c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2050/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2058/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x205c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2060/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2068/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x206c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2070/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2078/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x207c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2080/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2088/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x208c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2090/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2098/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x209c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x20a0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x20a8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x20ac/4, 0x0436086c); + INSTANCE_WR(ctx, 0x20b0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x20b8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x20bc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x20c0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x20c8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x20cc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x20d0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x20d8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x20dc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x20e0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x20e8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x20ec/4, 0x0436086c); + INSTANCE_WR(ctx, 0x20f0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x20f8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x20fc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2100/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2108/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x210c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2110/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2118/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x211c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2120/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2128/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x212c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2130/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2138/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x213c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2140/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2148/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x214c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2150/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2158/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x215c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2160/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2168/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x216c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2170/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2178/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x217c/4, 0x0436086c); + 
INSTANCE_WR(ctx, 0x2180/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2188/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x218c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2190/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2198/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x219c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x21a0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x21a8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x21ac/4, 0x0436086c); + INSTANCE_WR(ctx, 0x21b0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x21b8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x21bc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x21c0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x21c8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x21cc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x21d0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x21d8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x21dc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x21e0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x21e8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x21ec/4, 0x0436086c); + INSTANCE_WR(ctx, 0x21f0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x21f8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x21fc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2200/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2208/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x220c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2210/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2218/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x221c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2220/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2228/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x222c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2230/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2238/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x223c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2240/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2248/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x224c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2250/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2258/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x225c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2260/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2268/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x226c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2270/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2278/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x227c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2280/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2288/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x228c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2290/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2298/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x229c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x22a0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x22a8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x22ac/4, 0x0436086c); + INSTANCE_WR(ctx, 0x22b0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x22b8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x22bc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x22c0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x22c8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x22cc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x22d0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x22d8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x22dc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x22e0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x22e8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x22ec/4, 0x0436086c); + INSTANCE_WR(ctx, 0x22f0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x22f8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x22fc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2300/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2308/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x230c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2310/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2318/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x231c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2320/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2328/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x232c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2330/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2338/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x233c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2340/4, 
0x000c001b); + INSTANCE_WR(ctx, 0x2348/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x234c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2350/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2358/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x235c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2360/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2368/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x236c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2370/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2378/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x237c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2380/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2388/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x238c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2390/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2398/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x239c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x23a0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x23a8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x23ac/4, 0x0436086c); + INSTANCE_WR(ctx, 0x23b0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x23b8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x23bc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x23c0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x23c8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x23cc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x23d0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x23d8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x23dc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x23e0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x23e8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x23ec/4, 0x0436086c); + INSTANCE_WR(ctx, 0x23f0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x23f8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x23fc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2400/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2408/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x240c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2410/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2418/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x241c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2420/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2428/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x242c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2430/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2438/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x243c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2440/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2448/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x244c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2450/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2458/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x245c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2460/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2468/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x246c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2470/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2478/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x247c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2480/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2488/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x248c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2490/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2498/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x249c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x24a0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x24a8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x24ac/4, 0x0436086c); + INSTANCE_WR(ctx, 0x24b0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x24b8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x24bc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x24c0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x24c8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x24cc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x24d0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x24d8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x24dc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x24e0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x24e8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x24ec/4, 0x0436086c); + INSTANCE_WR(ctx, 0x24f0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x24f8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x24fc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2500/4, 0x000c001b); + 
INSTANCE_WR(ctx, 0x2508/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x250c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2510/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2518/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x251c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2520/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2528/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x252c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2530/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2538/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x253c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2540/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2548/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x254c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2550/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2558/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x255c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2560/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2568/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x256c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2570/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2578/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x257c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2580/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2588/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x258c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2590/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2598/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x259c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x25a0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x25a8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x25ac/4, 0x0436086c); + INSTANCE_WR(ctx, 0x25b0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x25b8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x25bc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x25c0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x25c8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x25cc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x25d0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x25d8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x25dc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x25e0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x25e8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x25ec/4, 0x0436086c); + INSTANCE_WR(ctx, 0x25f0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x25f8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x25fc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2600/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2608/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x260c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2610/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2618/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x261c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2620/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2628/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x262c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2630/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2638/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x263c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2640/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2648/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x264c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2650/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2658/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x265c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2660/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2668/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x266c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2670/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2678/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x267c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2680/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2688/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x268c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2690/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2698/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x269c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x26a0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x26a8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x26ac/4, 0x0436086c); + INSTANCE_WR(ctx, 0x26b0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x26b8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x26bc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x26c0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x26c8/4, 
0x10700ff9); + INSTANCE_WR(ctx, 0x26cc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x26d0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x26d8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x26dc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x26e0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x26e8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x26ec/4, 0x0436086c); + INSTANCE_WR(ctx, 0x26f0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x26f8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x26fc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2700/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2708/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x270c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2710/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2718/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x271c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2720/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2728/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x272c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2730/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2738/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x273c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2740/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2748/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x274c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2750/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2758/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x275c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2760/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2768/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x276c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2770/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2778/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x277c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2780/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2788/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x278c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2790/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2798/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x279c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x27a0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x27a8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x27ac/4, 0x0436086c); + INSTANCE_WR(ctx, 0x27b0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x27b8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x27bc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x27c0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x27c8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x27cc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x27d0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x27d8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x27dc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x27e0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x27e8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x27ec/4, 0x0436086c); + INSTANCE_WR(ctx, 0x27f0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x27f8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x27fc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2800/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2808/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x280c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2810/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2818/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x281c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2820/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2828/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x282c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2830/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2838/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x283c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2840/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2848/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x284c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2850/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2858/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x285c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2860/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2868/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x286c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2870/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2878/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x287c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2880/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2888/4, 0x10700ff9); + 
INSTANCE_WR(ctx, 0x288c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2890/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2898/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x289c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x28a0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x28a8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x28ac/4, 0x0436086c); + INSTANCE_WR(ctx, 0x28b0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x28b8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x28bc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x28c0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x28c8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x28cc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x28d0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x28d8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x28dc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x28e0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x28e8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x28ec/4, 0x0436086c); + INSTANCE_WR(ctx, 0x28f0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x28f8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x28fc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2900/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2908/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x290c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2910/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2918/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x291c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2920/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2928/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x292c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2930/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2938/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x293c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2940/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2948/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x294c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2950/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2958/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x295c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2960/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2968/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x296c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2970/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2978/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x297c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2980/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2988/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x298c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2990/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2998/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x299c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x29a0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x29a8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x29ac/4, 0x0436086c); + INSTANCE_WR(ctx, 0x29b0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x29b8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x29bc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x29c0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x29c8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x29cc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x29d0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x29d8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x29dc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x29e0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x29e8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x29ec/4, 0x0436086c); + INSTANCE_WR(ctx, 0x29f0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x29f8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x29fc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2a00/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2a08/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2a0c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2a10/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2a18/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2a1c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2a20/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2a28/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2a2c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2a30/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2a38/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2a3c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2a40/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2a48/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2a4c/4, 
0x0436086c); + INSTANCE_WR(ctx, 0x2a50/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2a58/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2a5c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2a60/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2a68/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2a6c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2a70/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2a78/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2a7c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2a80/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2a88/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2a8c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2a90/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2a98/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2a9c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2aa0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2aa8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2aac/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2ab0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2ab8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2abc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2ac0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2ac8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2acc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2ad0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2ad8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2adc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2ae0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2ae8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2aec/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2af0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2af8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2afc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2b00/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2b08/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2b0c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2b10/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2b18/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2b1c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2b20/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2b28/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2b2c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2b30/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2b38/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2b3c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2b40/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2b48/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2b4c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2b50/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2b58/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2b5c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2b60/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2b68/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2b6c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2b70/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2b78/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2b7c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2b80/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2b88/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2b8c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2b90/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2b98/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2b9c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2ba0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2ba8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2bac/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2bb0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2bb8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2bbc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2bc0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2bc8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2bcc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2bd0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2bd8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2bdc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2be0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2be8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2bec/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2bf0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2bf8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2bfc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2c00/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2c08/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2c0c/4, 0x0436086c); + 
INSTANCE_WR(ctx, 0x2c10/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2c18/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2c1c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2c20/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2c28/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2c2c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2c30/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2c38/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2c3c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2c40/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2c48/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2c4c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2c50/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2c58/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2c5c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2c60/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2c68/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2c6c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2c70/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2c78/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2c7c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2c80/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2c88/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2c8c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2c90/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2c98/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2c9c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2ca0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2ca8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2cac/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2cb0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2cb8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2cbc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2cc0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2cc8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2ccc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2cd0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2cd8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2cdc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2ce0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2ce8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2cec/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2cf0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2cf8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2cfc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2d00/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2d08/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2d0c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2d10/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2d18/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2d1c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2d20/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2d28/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2d2c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2d30/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2d38/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2d3c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2d40/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2d48/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2d4c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2d50/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2d58/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2d5c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2d60/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2d68/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2d6c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2d70/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2d78/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2d7c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2d80/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2d88/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2d8c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2d90/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2d98/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2d9c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2da0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2da8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2dac/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2db0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2db8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2dbc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2dc0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2dc8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2dcc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2dd0/4, 
0x000c001b); + INSTANCE_WR(ctx, 0x2dd8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2ddc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2de0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2de8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2dec/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2df0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2df8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2dfc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2e00/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2e08/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2e0c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2e10/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2e18/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2e1c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2e20/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2e28/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2e2c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2e30/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2e38/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2e3c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2e40/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2e48/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2e4c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2e50/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2e58/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2e5c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2e60/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2e68/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2e6c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2e70/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2e78/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2e7c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2e80/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2e88/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2e8c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2e90/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2e98/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2e9c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2ea0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2ea8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2eac/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2eb0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2eb8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2ebc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2ec0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2ec8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2ecc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2ed0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2ed8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2edc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2ee0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2ee8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2eec/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2ef0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2ef8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2efc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2f00/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2f08/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2f0c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2f10/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2f18/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2f1c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2f20/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2f28/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2f2c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2f30/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2f38/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2f3c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2f40/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2f48/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2f4c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2f50/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2f58/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2f5c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2f60/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2f68/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2f6c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2f70/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2f78/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2f7c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2f80/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2f88/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2f8c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2f90/4, 0x000c001b); + 
INSTANCE_WR(ctx, 0x2f98/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2f9c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2fa0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2fa8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2fac/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2fb0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2fb8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2fbc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2fc0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2fc8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2fcc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2fd0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2fd8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2fdc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2fe0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2fe8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2fec/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2ff0/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2ff8/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2ffc/4, 0x0436086c); + INSTANCE_WR(ctx, 0x3000/4, 0x000c001b); + INSTANCE_WR(ctx, 0x3008/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x300c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x3010/4, 0x000c001b); + INSTANCE_WR(ctx, 0x3018/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x301c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x3020/4, 0x000c001b); + INSTANCE_WR(ctx, 0x3028/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x302c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x3030/4, 0x000c001b); + INSTANCE_WR(ctx, 0x3038/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x303c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x3040/4, 0x000c001b); + INSTANCE_WR(ctx, 0x3048/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x304c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x3050/4, 0x000c001b); + INSTANCE_WR(ctx, 0x3058/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x305c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x3060/4, 0x000c001b); + INSTANCE_WR(ctx, 0x3068/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x306c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x3070/4, 0x000c001b); + INSTANCE_WR(ctx, 0x3078/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x307c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x3080/4, 0x000c001b); + INSTANCE_WR(ctx, 0x3088/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x308c/4, 0x0436086c); + INSTANCE_WR(ctx, 0x3090/4, 0x000c001b); + for(i = 0x30b8; i< 0x30c8; i += 4) + INSTANCE_WR(ctx, i/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x344c/4, 0x3f800000); + INSTANCE_WR(ctx, 0x3808/4, 0x3f800000); + INSTANCE_WR(ctx, 0x381c/4, 0x3f800000); + INSTANCE_WR(ctx, 0x3848/4, 0x40000000); + INSTANCE_WR(ctx, 0x384c/4, 0x3f800000); + INSTANCE_WR(ctx, 0x3850/4, 0x3f000000); + INSTANCE_WR(ctx, 0x3858/4, 0x40000000); + INSTANCE_WR(ctx, 0x385c/4, 0x3f800000); + INSTANCE_WR(ctx, 0x3864/4, 0xbf800000); + INSTANCE_WR(ctx, 0x386c/4, 0xbf800000);} - INSTANCE_WR(ctx, 0x54c/4, 0x4b7fffff); - INSTANCE_WR(ctx, 0x588/4, 0x00000080); - INSTANCE_WR(ctx, 0x58c/4, 0x30201000); - INSTANCE_WR(ctx, 0x590/4, 0x70605040); - INSTANCE_WR(ctx, 0x594/4, 0xb8a89888); - INSTANCE_WR(ctx, 0x598/4, 0xf8e8d8c8); - INSTANCE_WR(ctx, 0x5ac/4, 0xb0000000); +static void nv34_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + int i; - for (i = 0x604; i <= 0x640; i += 4) - INSTANCE_WR(ctx, i/4, 0x00010588); - - for (i = 0x644; i <= 0x680; i += 4) - INSTANCE_WR(ctx, i/4, 0x00030303); - - for (i = 0x6c4; i <= 0x700; i += 4) - INSTANCE_WR(ctx, i/4, 0x0008aae4); - - for (i = 0x704; i <= 0x740; i += 4) - INSTANCE_WR(ctx, i/4, 0x1012000); - - for (i = 0x744; i <= 0x780; i += 4) - INSTANCE_WR(ctx, i/4, 0x0080008); - - INSTANCE_WR(ctx, 0x860/4, 0x00040000); - INSTANCE_WR(ctx, 0x864/4, 0x00010000); - INSTANCE_WR(ctx, 0x868/4, 0x00040000); - INSTANCE_WR(ctx, 0x86c/4, 0x00040000); - INSTANCE_WR(ctx, 0x870/4, 0x00040000); - 
INSTANCE_WR(ctx, 0x874/4, 0x00040000); - - for (i = 0x00; i <= 0x1170; i += 0x10) - { - INSTANCE_WR(ctx, (0x1f24 + i)/4, 0x000c001b); - INSTANCE_WR(ctx, (0x1f20 + i)/4, 0x0436086c); - INSTANCE_WR(ctx, (0x1f1c + i)/4, 0x10700ff9); - } - - INSTANCE_WR(ctx, 0x30bc/4, 0x0000ffff); - INSTANCE_WR(ctx, 0x30c0/4, 0x0000ffff); - INSTANCE_WR(ctx, 0x30c4/4, 0x0000ffff); - INSTANCE_WR(ctx, 0x30c8/4, 0x0000ffff); - - INSTANCE_WR(ctx, 0x380c/4, 0x3f800000); - INSTANCE_WR(ctx, 0x3450/4, 0x3f800000); - INSTANCE_WR(ctx, 0x3820/4, 0x3f800000); - INSTANCE_WR(ctx, 0x3854/4, 0x3f800000); - INSTANCE_WR(ctx, 0x3850/4, 0x3f000000); - INSTANCE_WR(ctx, 0x384c/4, 0x40000000); - INSTANCE_WR(ctx, 0x3868/4, 0xbf800000); - INSTANCE_WR(ctx, 0x3860/4, 0x3f800000); - INSTANCE_WR(ctx, 0x386c/4, 0x40000000); - INSTANCE_WR(ctx, 0x3870/4, 0xbf800000); - - for (i = 0x4e0; i <= 0x4e1c; i += 4) - INSTANCE_WR(ctx, i/4, 0x001c527d); - INSTANCE_WR(ctx, 0x4e40, 0x001c527c); - - INSTANCE_WR(ctx, 0x5680/4, 0x000a0000); - INSTANCE_WR(ctx, 0x87c/4, 0x10000000); - INSTANCE_WR(ctx, 0x28/4, 0x10000011); + INSTANCE_WR(ctx, 0x40c/4, 0x01000101); + INSTANCE_WR(ctx, 0x420/4, 0x00000111); + INSTANCE_WR(ctx, 0x424/4, 0x00000060); + INSTANCE_WR(ctx, 0x440/4, 0x00000080); + INSTANCE_WR(ctx, 0x444/4, 0xffff0000); + INSTANCE_WR(ctx, 0x448/4, 0x00000001); + INSTANCE_WR(ctx, 0x45c/4, 0x44400000); + INSTANCE_WR(ctx, 0x480/4, 0xffff0000); + for(i = 0x4d4; i< 0x4dc; i += 4) + INSTANCE_WR(ctx, i/4, 0x0fff0000); + INSTANCE_WR(ctx, 0x4e0/4, 0x00011100); + for(i = 0x4fc; i< 0x53c; i += 4) + INSTANCE_WR(ctx, i/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x544/4, 0x4b7fffff); + INSTANCE_WR(ctx, 0x57c/4, 0x00000080); + INSTANCE_WR(ctx, 0x580/4, 0x30201000); + INSTANCE_WR(ctx, 0x584/4, 0x70605040); + INSTANCE_WR(ctx, 0x588/4, 0xb8a89888); + INSTANCE_WR(ctx, 0x58c/4, 0xf8e8d8c8); + INSTANCE_WR(ctx, 0x5a0/4, 0xb0000000); + for(i = 0x5f0; i< 0x630; i += 4) + INSTANCE_WR(ctx, i/4, 0x00010588); + for(i = 0x630; i< 0x670; i += 4) + INSTANCE_WR(ctx, i/4, 0x00030303); + for(i = 0x6b0; i< 0x6f0; i += 4) + INSTANCE_WR(ctx, i/4, 0x0008aae4); + for(i = 0x6f0; i< 0x730; i += 4) + INSTANCE_WR(ctx, i/4, 0x01012000); + for(i = 0x730; i< 0x770; i += 4) + INSTANCE_WR(ctx, i/4, 0x00080008); + INSTANCE_WR(ctx, 0x850/4, 0x00040000); + INSTANCE_WR(ctx, 0x854/4, 0x00010000); + for(i = 0x858; i< 0x868; i += 4) + INSTANCE_WR(ctx, i/4, 0x00040004); + INSTANCE_WR(ctx, 0x15ac/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x15b0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x15b4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x15bc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x15c0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x15c4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x15cc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x15d0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x15d4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x15dc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x15e0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x15e4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x15ec/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x15f0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x15f4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x15fc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1600/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1604/4, 0x000c001b); + INSTANCE_WR(ctx, 0x160c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1610/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1614/4, 0x000c001b); + INSTANCE_WR(ctx, 0x161c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1620/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1624/4, 0x000c001b); + INSTANCE_WR(ctx, 0x162c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1630/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1634/4, 0x000c001b); + INSTANCE_WR(ctx, 0x163c/4, 
0x10700ff9); + INSTANCE_WR(ctx, 0x1640/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1644/4, 0x000c001b); + INSTANCE_WR(ctx, 0x164c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1650/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1654/4, 0x000c001b); + INSTANCE_WR(ctx, 0x165c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1660/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1664/4, 0x000c001b); + INSTANCE_WR(ctx, 0x166c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1670/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1674/4, 0x000c001b); + INSTANCE_WR(ctx, 0x167c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1680/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1684/4, 0x000c001b); + INSTANCE_WR(ctx, 0x168c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1690/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1694/4, 0x000c001b); + INSTANCE_WR(ctx, 0x169c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x16a0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x16a4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x16ac/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x16b0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x16b4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x16bc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x16c0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x16c4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x16cc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x16d0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x16d4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x16dc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x16e0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x16e4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x16ec/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x16f0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x16f4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x16fc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1700/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1704/4, 0x000c001b); + INSTANCE_WR(ctx, 0x170c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1710/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1714/4, 0x000c001b); + INSTANCE_WR(ctx, 0x171c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1720/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1724/4, 0x000c001b); + INSTANCE_WR(ctx, 0x172c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1730/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1734/4, 0x000c001b); + INSTANCE_WR(ctx, 0x173c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1740/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1744/4, 0x000c001b); + INSTANCE_WR(ctx, 0x174c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1750/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1754/4, 0x000c001b); + INSTANCE_WR(ctx, 0x175c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1760/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1764/4, 0x000c001b); + INSTANCE_WR(ctx, 0x176c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1770/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1774/4, 0x000c001b); + INSTANCE_WR(ctx, 0x177c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1780/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1784/4, 0x000c001b); + INSTANCE_WR(ctx, 0x178c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1790/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1794/4, 0x000c001b); + INSTANCE_WR(ctx, 0x179c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x17a0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x17a4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x17ac/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x17b0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x17b4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x17bc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x17c0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x17c4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x17cc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x17d0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x17d4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x17dc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x17e0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x17e4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x17ec/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x17f0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x17f4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x17fc/4, 0x10700ff9); + 
INSTANCE_WR(ctx, 0x1800/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1804/4, 0x000c001b); + INSTANCE_WR(ctx, 0x180c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1810/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1814/4, 0x000c001b); + INSTANCE_WR(ctx, 0x181c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1820/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1824/4, 0x000c001b); + INSTANCE_WR(ctx, 0x182c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1830/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1834/4, 0x000c001b); + INSTANCE_WR(ctx, 0x183c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1840/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1844/4, 0x000c001b); + INSTANCE_WR(ctx, 0x184c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1850/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1854/4, 0x000c001b); + INSTANCE_WR(ctx, 0x185c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1860/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1864/4, 0x000c001b); + INSTANCE_WR(ctx, 0x186c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1870/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1874/4, 0x000c001b); + INSTANCE_WR(ctx, 0x187c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1880/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1884/4, 0x000c001b); + INSTANCE_WR(ctx, 0x188c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1890/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1894/4, 0x000c001b); + INSTANCE_WR(ctx, 0x189c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x18a0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x18a4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x18ac/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x18b0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x18b4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x18bc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x18c0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x18c4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x18cc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x18d0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x18d4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x18dc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x18e0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x18e4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x18ec/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x18f0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x18f4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x18fc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1900/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1904/4, 0x000c001b); + INSTANCE_WR(ctx, 0x190c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1910/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1914/4, 0x000c001b); + INSTANCE_WR(ctx, 0x191c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1920/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1924/4, 0x000c001b); + INSTANCE_WR(ctx, 0x192c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1930/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1934/4, 0x000c001b); + INSTANCE_WR(ctx, 0x193c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1940/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1944/4, 0x000c001b); + INSTANCE_WR(ctx, 0x194c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1950/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1954/4, 0x000c001b); + INSTANCE_WR(ctx, 0x195c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1960/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1964/4, 0x000c001b); + INSTANCE_WR(ctx, 0x196c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1970/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1974/4, 0x000c001b); + INSTANCE_WR(ctx, 0x197c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1980/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1984/4, 0x000c001b); + INSTANCE_WR(ctx, 0x198c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1990/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1994/4, 0x000c001b); + INSTANCE_WR(ctx, 0x199c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x19a0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x19a4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x19ac/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x19b0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x19b4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x19bc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x19c0/4, 
0x0436086c); + INSTANCE_WR(ctx, 0x19c4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x19cc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x19d0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x19d4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x19dc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x19e0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x19e4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x19ec/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x19f0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x19f4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x19fc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1a00/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1a04/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1a0c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1a10/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1a14/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1a1c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1a20/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1a24/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1a2c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1a30/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1a34/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1a3c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1a40/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1a44/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1a4c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1a50/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1a54/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1a5c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1a60/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1a64/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1a6c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1a70/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1a74/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1a7c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1a80/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1a84/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1a8c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1a90/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1a94/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1a9c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1aa0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1aa4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1aac/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1ab0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1ab4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1abc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1ac0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1ac4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1acc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1ad0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1ad4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1adc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1ae0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1ae4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1aec/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1af0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1af4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1afc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1b00/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1b04/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1b0c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1b10/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1b14/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1b1c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1b20/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1b24/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1b2c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1b30/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1b34/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1b3c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1b40/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1b44/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1b4c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1b50/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1b54/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1b5c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1b60/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1b64/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1b6c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1b70/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1b74/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1b7c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1b80/4, 0x0436086c); + 
INSTANCE_WR(ctx, 0x1b84/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1b8c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1b90/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1b94/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1b9c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1ba0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1ba4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1bac/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1bb0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1bb4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1bbc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1bc0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1bc4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1bcc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1bd0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1bd4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1bdc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1be0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1be4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1bec/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1bf0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1bf4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1bfc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1c00/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1c04/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1c0c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1c10/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1c14/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1c1c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1c20/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1c24/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1c2c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1c30/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1c34/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1c3c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1c40/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1c44/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1c4c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1c50/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1c54/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1c5c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1c60/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1c64/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1c6c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1c70/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1c74/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1c7c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1c80/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1c84/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1c8c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1c90/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1c94/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1c9c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1ca0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1ca4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1cac/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1cb0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1cb4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1cbc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1cc0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1cc4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1ccc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1cd0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1cd4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1cdc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1ce0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1ce4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1cec/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1cf0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1cf4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1cfc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1d00/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1d04/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1d0c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1d10/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1d14/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1d1c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1d20/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1d24/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1d2c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1d30/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1d34/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1d3c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1d40/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1d44/4, 
0x000c001b); + INSTANCE_WR(ctx, 0x1d4c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1d50/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1d54/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1d5c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1d60/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1d64/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1d6c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1d70/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1d74/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1d7c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1d80/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1d84/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1d8c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1d90/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1d94/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1d9c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1da0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1da4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1dac/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1db0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1db4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1dbc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1dc0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1dc4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1dcc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1dd0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1dd4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1ddc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1de0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1de4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1dec/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1df0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1df4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1dfc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1e00/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1e04/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1e0c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1e10/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1e14/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1e1c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1e20/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1e24/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1e2c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1e30/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1e34/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1e3c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1e40/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1e44/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1e4c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1e50/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1e54/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1e5c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1e60/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1e64/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1e6c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1e70/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1e74/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1e7c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1e80/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1e84/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1e8c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1e90/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1e94/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1e9c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1ea0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1ea4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1eac/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1eb0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1eb4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1ebc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1ec0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1ec4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1ecc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1ed0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1ed4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1edc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1ee0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1ee4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1eec/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1ef0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1ef4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1efc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1f00/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1f04/4, 0x000c001b); + 
INSTANCE_WR(ctx, 0x1f0c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1f10/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1f14/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1f1c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1f20/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1f24/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1f2c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1f30/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1f34/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1f3c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1f40/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1f44/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1f4c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1f50/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1f54/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1f5c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1f60/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1f64/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1f6c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1f70/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1f74/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1f7c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1f80/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1f84/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1f8c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1f90/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1f94/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1f9c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1fa0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1fa4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1fac/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1fb0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1fb4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1fbc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1fc0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1fc4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1fcc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1fd0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1fd4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1fdc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1fe0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1fe4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1fec/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1ff0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1ff4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1ffc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2000/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2004/4, 0x000c001b); + INSTANCE_WR(ctx, 0x200c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2010/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2014/4, 0x000c001b); + INSTANCE_WR(ctx, 0x201c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2020/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2024/4, 0x000c001b); + INSTANCE_WR(ctx, 0x202c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2030/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2034/4, 0x000c001b); + INSTANCE_WR(ctx, 0x203c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2040/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2044/4, 0x000c001b); + INSTANCE_WR(ctx, 0x204c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2050/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2054/4, 0x000c001b); + INSTANCE_WR(ctx, 0x205c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2060/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2064/4, 0x000c001b); + INSTANCE_WR(ctx, 0x206c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2070/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2074/4, 0x000c001b); + INSTANCE_WR(ctx, 0x207c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2080/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2084/4, 0x000c001b); + INSTANCE_WR(ctx, 0x208c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2090/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2094/4, 0x000c001b); + INSTANCE_WR(ctx, 0x209c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x20a0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x20a4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x20ac/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x20b0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x20b4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x20bc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x20c0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x20c4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x20cc/4, 
0x10700ff9); + INSTANCE_WR(ctx, 0x20d0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x20d4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x20dc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x20e0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x20e4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x20ec/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x20f0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x20f4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x20fc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2100/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2104/4, 0x000c001b); + INSTANCE_WR(ctx, 0x210c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2110/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2114/4, 0x000c001b); + INSTANCE_WR(ctx, 0x211c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2120/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2124/4, 0x000c001b); + INSTANCE_WR(ctx, 0x212c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2130/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2134/4, 0x000c001b); + INSTANCE_WR(ctx, 0x213c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2140/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2144/4, 0x000c001b); + INSTANCE_WR(ctx, 0x214c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2150/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2154/4, 0x000c001b); + INSTANCE_WR(ctx, 0x215c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2160/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2164/4, 0x000c001b); + INSTANCE_WR(ctx, 0x216c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2170/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2174/4, 0x000c001b); + INSTANCE_WR(ctx, 0x217c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2180/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2184/4, 0x000c001b); + INSTANCE_WR(ctx, 0x218c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2190/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2194/4, 0x000c001b); + INSTANCE_WR(ctx, 0x219c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x21a0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x21a4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x21ac/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x21b0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x21b4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x21bc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x21c0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x21c4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x21cc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x21d0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x21d4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x21dc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x21e0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x21e4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x21ec/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x21f0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x21f4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x21fc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2200/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2204/4, 0x000c001b); + INSTANCE_WR(ctx, 0x220c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2210/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2214/4, 0x000c001b); + INSTANCE_WR(ctx, 0x221c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2220/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2224/4, 0x000c001b); + INSTANCE_WR(ctx, 0x222c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2230/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2234/4, 0x000c001b); + INSTANCE_WR(ctx, 0x223c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2240/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2244/4, 0x000c001b); + INSTANCE_WR(ctx, 0x224c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2250/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2254/4, 0x000c001b); + INSTANCE_WR(ctx, 0x225c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2260/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2264/4, 0x000c001b); + INSTANCE_WR(ctx, 0x226c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2270/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2274/4, 0x000c001b); + INSTANCE_WR(ctx, 0x227c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2280/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2284/4, 0x000c001b); + INSTANCE_WR(ctx, 0x228c/4, 0x10700ff9); + 
INSTANCE_WR(ctx, 0x2290/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2294/4, 0x000c001b); + INSTANCE_WR(ctx, 0x229c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x22a0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x22a4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x22ac/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x22b0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x22b4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x22bc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x22c0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x22c4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x22cc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x22d0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x22d4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x22dc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x22e0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x22e4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x22ec/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x22f0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x22f4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x22fc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2300/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2304/4, 0x000c001b); + INSTANCE_WR(ctx, 0x230c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2310/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2314/4, 0x000c001b); + INSTANCE_WR(ctx, 0x231c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2320/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2324/4, 0x000c001b); + INSTANCE_WR(ctx, 0x232c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2330/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2334/4, 0x000c001b); + INSTANCE_WR(ctx, 0x233c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2340/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2344/4, 0x000c001b); + INSTANCE_WR(ctx, 0x234c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2350/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2354/4, 0x000c001b); + INSTANCE_WR(ctx, 0x235c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2360/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2364/4, 0x000c001b); + INSTANCE_WR(ctx, 0x236c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2370/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2374/4, 0x000c001b); + INSTANCE_WR(ctx, 0x237c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2380/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2384/4, 0x000c001b); + INSTANCE_WR(ctx, 0x238c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2390/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2394/4, 0x000c001b); + INSTANCE_WR(ctx, 0x239c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x23a0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x23a4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x23ac/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x23b0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x23b4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x23bc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x23c0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x23c4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x23cc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x23d0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x23d4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x23dc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x23e0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x23e4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x23ec/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x23f0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x23f4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x23fc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2400/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2404/4, 0x000c001b); + INSTANCE_WR(ctx, 0x240c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2410/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2414/4, 0x000c001b); + INSTANCE_WR(ctx, 0x241c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2420/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2424/4, 0x000c001b); + INSTANCE_WR(ctx, 0x242c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2430/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2434/4, 0x000c001b); + INSTANCE_WR(ctx, 0x243c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2440/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2444/4, 0x000c001b); + INSTANCE_WR(ctx, 0x244c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2450/4, 
0x0436086c); + INSTANCE_WR(ctx, 0x2454/4, 0x000c001b); + INSTANCE_WR(ctx, 0x245c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2460/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2464/4, 0x000c001b); + INSTANCE_WR(ctx, 0x246c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2470/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2474/4, 0x000c001b); + INSTANCE_WR(ctx, 0x247c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2480/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2484/4, 0x000c001b); + INSTANCE_WR(ctx, 0x248c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2490/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2494/4, 0x000c001b); + INSTANCE_WR(ctx, 0x249c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x24a0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x24a4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x24ac/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x24b0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x24b4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x24bc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x24c0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x24c4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x24cc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x24d0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x24d4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x24dc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x24e0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x24e4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x24ec/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x24f0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x24f4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x24fc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2500/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2504/4, 0x000c001b); + INSTANCE_WR(ctx, 0x250c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2510/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2514/4, 0x000c001b); + INSTANCE_WR(ctx, 0x251c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2520/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2524/4, 0x000c001b); + INSTANCE_WR(ctx, 0x252c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2530/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2534/4, 0x000c001b); + INSTANCE_WR(ctx, 0x253c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2540/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2544/4, 0x000c001b); + INSTANCE_WR(ctx, 0x254c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2550/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2554/4, 0x000c001b); + INSTANCE_WR(ctx, 0x255c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2560/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2564/4, 0x000c001b); + INSTANCE_WR(ctx, 0x256c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2570/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2574/4, 0x000c001b); + INSTANCE_WR(ctx, 0x257c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2580/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2584/4, 0x000c001b); + INSTANCE_WR(ctx, 0x258c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2590/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2594/4, 0x000c001b); + INSTANCE_WR(ctx, 0x259c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x25a0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x25a4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x25ac/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x25b0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x25b4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x25bc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x25c0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x25c4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x25cc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x25d0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x25d4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x25dc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x25e0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x25e4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x25ec/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x25f0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x25f4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x25fc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2600/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2604/4, 0x000c001b); + INSTANCE_WR(ctx, 0x260c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2610/4, 0x0436086c); + 
INSTANCE_WR(ctx, 0x2614/4, 0x000c001b); + INSTANCE_WR(ctx, 0x261c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2620/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2624/4, 0x000c001b); + INSTANCE_WR(ctx, 0x262c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2630/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2634/4, 0x000c001b); + INSTANCE_WR(ctx, 0x263c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2640/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2644/4, 0x000c001b); + INSTANCE_WR(ctx, 0x264c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2650/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2654/4, 0x000c001b); + INSTANCE_WR(ctx, 0x265c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2660/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2664/4, 0x000c001b); + INSTANCE_WR(ctx, 0x266c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2670/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2674/4, 0x000c001b); + INSTANCE_WR(ctx, 0x267c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2680/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2684/4, 0x000c001b); + INSTANCE_WR(ctx, 0x268c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2690/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2694/4, 0x000c001b); + INSTANCE_WR(ctx, 0x269c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x26a0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x26a4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x26ac/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x26b0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x26b4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x26bc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x26c0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x26c4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x26cc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x26d0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x26d4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x26dc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x26e0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x26e4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x26ec/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x26f0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x26f4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x26fc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2700/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2704/4, 0x000c001b); + INSTANCE_WR(ctx, 0x270c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2710/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2714/4, 0x000c001b); + INSTANCE_WR(ctx, 0x271c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2720/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2724/4, 0x000c001b); + for(i = 0x274c; i< 0x275c; i += 4) + INSTANCE_WR(ctx, i/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x2ae0/4, 0x3f800000); + INSTANCE_WR(ctx, 0x2e9c/4, 0x3f800000); + INSTANCE_WR(ctx, 0x2eb0/4, 0x3f800000); + INSTANCE_WR(ctx, 0x2edc/4, 0x40000000); + INSTANCE_WR(ctx, 0x2ee0/4, 0x3f800000); + INSTANCE_WR(ctx, 0x2ee4/4, 0x3f000000); + INSTANCE_WR(ctx, 0x2eec/4, 0x40000000); + INSTANCE_WR(ctx, 0x2ef0/4, 0x3f800000); + INSTANCE_WR(ctx, 0x2ef8/4, 0xbf800000); + INSTANCE_WR(ctx, 0x2f00/4, 0xbf800000); } +static void nv35_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + int i; + + INSTANCE_WR(ctx, 0x40c/4, 0x00000101); + INSTANCE_WR(ctx, 0x420/4, 0x00000111); + INSTANCE_WR(ctx, 0x424/4, 0x00000060); + INSTANCE_WR(ctx, 0x440/4, 0x00000080); + INSTANCE_WR(ctx, 0x444/4, 0xffff0000); + INSTANCE_WR(ctx, 0x448/4, 0x00000001); + INSTANCE_WR(ctx, 0x45c/4, 0x44400000); + INSTANCE_WR(ctx, 0x488/4, 0xffff0000); + for(i = 0x4dc; i< 0x4e4; i += 4) + INSTANCE_WR(ctx, i/4, 0x0fff0000); + INSTANCE_WR(ctx, 0x4e8/4, 0x00011100); + for(i = 0x504; i< 0x544; i += 4) + INSTANCE_WR(ctx, i/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x54c/4, 0x4b7fffff); + INSTANCE_WR(ctx, 0x588/4, 0x00000080); + INSTANCE_WR(ctx, 0x58c/4, 0x30201000); + INSTANCE_WR(ctx, 0x590/4, 0x70605040); + INSTANCE_WR(ctx, 0x594/4, 
0xb8a89888); + INSTANCE_WR(ctx, 0x598/4, 0xf8e8d8c8); + INSTANCE_WR(ctx, 0x5ac/4, 0xb0000000); + for(i = 0x604; i< 0x644; i += 4) + INSTANCE_WR(ctx, i/4, 0x00010588); + for(i = 0x644; i< 0x684; i += 4) + INSTANCE_WR(ctx, i/4, 0x00030303); + for(i = 0x6c4; i< 0x704; i += 4) + INSTANCE_WR(ctx, i/4, 0x0008aae4); + for(i = 0x704; i< 0x744; i += 4) + INSTANCE_WR(ctx, i/4, 0x01012000); + for(i = 0x744; i< 0x784; i += 4) + INSTANCE_WR(ctx, i/4, 0x00080008); + INSTANCE_WR(ctx, 0x860/4, 0x00040000); + INSTANCE_WR(ctx, 0x864/4, 0x00010000); + for(i = 0x868; i< 0x878; i += 4) + INSTANCE_WR(ctx, i/4, 0x00040004); + INSTANCE_WR(ctx, 0x1f1c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1f20/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1f24/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1f2c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1f30/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1f34/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1f3c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1f40/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1f44/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1f4c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1f50/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1f54/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1f5c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1f60/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1f64/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1f6c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1f70/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1f74/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1f7c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1f80/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1f84/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1f8c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1f90/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1f94/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1f9c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1fa0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1fa4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1fac/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1fb0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1fb4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1fbc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1fc0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1fc4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1fcc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1fd0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1fd4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1fdc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1fe0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1fe4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1fec/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x1ff0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x1ff4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x1ffc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2000/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2004/4, 0x000c001b); + INSTANCE_WR(ctx, 0x200c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2010/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2014/4, 0x000c001b); + INSTANCE_WR(ctx, 0x201c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2020/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2024/4, 0x000c001b); + INSTANCE_WR(ctx, 0x202c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2030/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2034/4, 0x000c001b); + INSTANCE_WR(ctx, 0x203c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2040/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2044/4, 0x000c001b); + INSTANCE_WR(ctx, 0x204c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2050/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2054/4, 0x000c001b); + INSTANCE_WR(ctx, 0x205c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2060/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2064/4, 0x000c001b); + INSTANCE_WR(ctx, 0x206c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2070/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2074/4, 0x000c001b); + INSTANCE_WR(ctx, 0x207c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2080/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2084/4, 0x000c001b); + INSTANCE_WR(ctx, 0x208c/4, 0x10700ff9); + 
INSTANCE_WR(ctx, 0x2090/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2094/4, 0x000c001b); + INSTANCE_WR(ctx, 0x209c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x20a0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x20a4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x20ac/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x20b0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x20b4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x20bc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x20c0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x20c4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x20cc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x20d0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x20d4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x20dc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x20e0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x20e4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x20ec/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x20f0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x20f4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x20fc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2100/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2104/4, 0x000c001b); + INSTANCE_WR(ctx, 0x210c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2110/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2114/4, 0x000c001b); + INSTANCE_WR(ctx, 0x211c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2120/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2124/4, 0x000c001b); + INSTANCE_WR(ctx, 0x212c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2130/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2134/4, 0x000c001b); + INSTANCE_WR(ctx, 0x213c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2140/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2144/4, 0x000c001b); + INSTANCE_WR(ctx, 0x214c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2150/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2154/4, 0x000c001b); + INSTANCE_WR(ctx, 0x215c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2160/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2164/4, 0x000c001b); + INSTANCE_WR(ctx, 0x216c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2170/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2174/4, 0x000c001b); + INSTANCE_WR(ctx, 0x217c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2180/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2184/4, 0x000c001b); + INSTANCE_WR(ctx, 0x218c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2190/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2194/4, 0x000c001b); + INSTANCE_WR(ctx, 0x219c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x21a0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x21a4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x21ac/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x21b0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x21b4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x21bc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x21c0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x21c4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x21cc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x21d0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x21d4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x21dc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x21e0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x21e4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x21ec/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x21f0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x21f4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x21fc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2200/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2204/4, 0x000c001b); + INSTANCE_WR(ctx, 0x220c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2210/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2214/4, 0x000c001b); + INSTANCE_WR(ctx, 0x221c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2220/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2224/4, 0x000c001b); + INSTANCE_WR(ctx, 0x222c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2230/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2234/4, 0x000c001b); + INSTANCE_WR(ctx, 0x223c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2240/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2244/4, 0x000c001b); + INSTANCE_WR(ctx, 0x224c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2250/4, 
0x0436086c); + INSTANCE_WR(ctx, 0x2254/4, 0x000c001b); + INSTANCE_WR(ctx, 0x225c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2260/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2264/4, 0x000c001b); + INSTANCE_WR(ctx, 0x226c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2270/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2274/4, 0x000c001b); + INSTANCE_WR(ctx, 0x227c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2280/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2284/4, 0x000c001b); + INSTANCE_WR(ctx, 0x228c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2290/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2294/4, 0x000c001b); + INSTANCE_WR(ctx, 0x229c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x22a0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x22a4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x22ac/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x22b0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x22b4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x22bc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x22c0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x22c4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x22cc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x22d0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x22d4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x22dc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x22e0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x22e4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x22ec/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x22f0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x22f4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x22fc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2300/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2304/4, 0x000c001b); + INSTANCE_WR(ctx, 0x230c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2310/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2314/4, 0x000c001b); + INSTANCE_WR(ctx, 0x231c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2320/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2324/4, 0x000c001b); + INSTANCE_WR(ctx, 0x232c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2330/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2334/4, 0x000c001b); + INSTANCE_WR(ctx, 0x233c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2340/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2344/4, 0x000c001b); + INSTANCE_WR(ctx, 0x234c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2350/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2354/4, 0x000c001b); + INSTANCE_WR(ctx, 0x235c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2360/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2364/4, 0x000c001b); + INSTANCE_WR(ctx, 0x236c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2370/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2374/4, 0x000c001b); + INSTANCE_WR(ctx, 0x237c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2380/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2384/4, 0x000c001b); + INSTANCE_WR(ctx, 0x238c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2390/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2394/4, 0x000c001b); + INSTANCE_WR(ctx, 0x239c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x23a0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x23a4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x23ac/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x23b0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x23b4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x23bc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x23c0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x23c4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x23cc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x23d0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x23d4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x23dc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x23e0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x23e4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x23ec/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x23f0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x23f4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x23fc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2400/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2404/4, 0x000c001b); + INSTANCE_WR(ctx, 0x240c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2410/4, 0x0436086c); + 
INSTANCE_WR(ctx, 0x2414/4, 0x000c001b); + INSTANCE_WR(ctx, 0x241c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2420/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2424/4, 0x000c001b); + INSTANCE_WR(ctx, 0x242c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2430/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2434/4, 0x000c001b); + INSTANCE_WR(ctx, 0x243c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2440/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2444/4, 0x000c001b); + INSTANCE_WR(ctx, 0x244c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2450/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2454/4, 0x000c001b); + INSTANCE_WR(ctx, 0x245c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2460/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2464/4, 0x000c001b); + INSTANCE_WR(ctx, 0x246c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2470/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2474/4, 0x000c001b); + INSTANCE_WR(ctx, 0x247c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2480/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2484/4, 0x000c001b); + INSTANCE_WR(ctx, 0x248c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2490/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2494/4, 0x000c001b); + INSTANCE_WR(ctx, 0x249c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x24a0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x24a4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x24ac/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x24b0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x24b4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x24bc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x24c0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x24c4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x24cc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x24d0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x24d4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x24dc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x24e0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x24e4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x24ec/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x24f0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x24f4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x24fc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2500/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2504/4, 0x000c001b); + INSTANCE_WR(ctx, 0x250c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2510/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2514/4, 0x000c001b); + INSTANCE_WR(ctx, 0x251c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2520/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2524/4, 0x000c001b); + INSTANCE_WR(ctx, 0x252c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2530/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2534/4, 0x000c001b); + INSTANCE_WR(ctx, 0x253c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2540/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2544/4, 0x000c001b); + INSTANCE_WR(ctx, 0x254c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2550/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2554/4, 0x000c001b); + INSTANCE_WR(ctx, 0x255c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2560/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2564/4, 0x000c001b); + INSTANCE_WR(ctx, 0x256c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2570/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2574/4, 0x000c001b); + INSTANCE_WR(ctx, 0x257c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2580/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2584/4, 0x000c001b); + INSTANCE_WR(ctx, 0x258c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2590/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2594/4, 0x000c001b); + INSTANCE_WR(ctx, 0x259c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x25a0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x25a4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x25ac/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x25b0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x25b4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x25bc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x25c0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x25c4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x25cc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x25d0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x25d4/4, 
0x000c001b); + INSTANCE_WR(ctx, 0x25dc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x25e0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x25e4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x25ec/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x25f0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x25f4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x25fc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2600/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2604/4, 0x000c001b); + INSTANCE_WR(ctx, 0x260c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2610/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2614/4, 0x000c001b); + INSTANCE_WR(ctx, 0x261c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2620/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2624/4, 0x000c001b); + INSTANCE_WR(ctx, 0x262c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2630/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2634/4, 0x000c001b); + INSTANCE_WR(ctx, 0x263c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2640/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2644/4, 0x000c001b); + INSTANCE_WR(ctx, 0x264c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2650/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2654/4, 0x000c001b); + INSTANCE_WR(ctx, 0x265c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2660/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2664/4, 0x000c001b); + INSTANCE_WR(ctx, 0x266c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2670/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2674/4, 0x000c001b); + INSTANCE_WR(ctx, 0x267c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2680/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2684/4, 0x000c001b); + INSTANCE_WR(ctx, 0x268c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2690/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2694/4, 0x000c001b); + INSTANCE_WR(ctx, 0x269c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x26a0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x26a4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x26ac/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x26b0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x26b4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x26bc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x26c0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x26c4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x26cc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x26d0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x26d4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x26dc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x26e0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x26e4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x26ec/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x26f0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x26f4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x26fc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2700/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2704/4, 0x000c001b); + INSTANCE_WR(ctx, 0x270c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2710/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2714/4, 0x000c001b); + INSTANCE_WR(ctx, 0x271c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2720/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2724/4, 0x000c001b); + INSTANCE_WR(ctx, 0x272c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2730/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2734/4, 0x000c001b); + INSTANCE_WR(ctx, 0x273c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2740/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2744/4, 0x000c001b); + INSTANCE_WR(ctx, 0x274c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2750/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2754/4, 0x000c001b); + INSTANCE_WR(ctx, 0x275c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2760/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2764/4, 0x000c001b); + INSTANCE_WR(ctx, 0x276c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2770/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2774/4, 0x000c001b); + INSTANCE_WR(ctx, 0x277c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2780/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2784/4, 0x000c001b); + INSTANCE_WR(ctx, 0x278c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2790/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2794/4, 0x000c001b); + 
INSTANCE_WR(ctx, 0x279c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x27a0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x27a4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x27ac/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x27b0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x27b4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x27bc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x27c0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x27c4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x27cc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x27d0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x27d4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x27dc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x27e0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x27e4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x27ec/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x27f0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x27f4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x27fc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2800/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2804/4, 0x000c001b); + INSTANCE_WR(ctx, 0x280c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2810/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2814/4, 0x000c001b); + INSTANCE_WR(ctx, 0x281c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2820/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2824/4, 0x000c001b); + INSTANCE_WR(ctx, 0x282c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2830/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2834/4, 0x000c001b); + INSTANCE_WR(ctx, 0x283c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2840/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2844/4, 0x000c001b); + INSTANCE_WR(ctx, 0x284c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2850/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2854/4, 0x000c001b); + INSTANCE_WR(ctx, 0x285c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2860/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2864/4, 0x000c001b); + INSTANCE_WR(ctx, 0x286c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2870/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2874/4, 0x000c001b); + INSTANCE_WR(ctx, 0x287c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2880/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2884/4, 0x000c001b); + INSTANCE_WR(ctx, 0x288c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2890/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2894/4, 0x000c001b); + INSTANCE_WR(ctx, 0x289c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x28a0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x28a4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x28ac/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x28b0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x28b4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x28bc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x28c0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x28c4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x28cc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x28d0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x28d4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x28dc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x28e0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x28e4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x28ec/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x28f0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x28f4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x28fc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2900/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2904/4, 0x000c001b); + INSTANCE_WR(ctx, 0x290c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2910/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2914/4, 0x000c001b); + INSTANCE_WR(ctx, 0x291c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2920/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2924/4, 0x000c001b); + INSTANCE_WR(ctx, 0x292c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2930/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2934/4, 0x000c001b); + INSTANCE_WR(ctx, 0x293c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2940/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2944/4, 0x000c001b); + INSTANCE_WR(ctx, 0x294c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2950/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2954/4, 0x000c001b); + INSTANCE_WR(ctx, 0x295c/4, 
0x10700ff9); + INSTANCE_WR(ctx, 0x2960/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2964/4, 0x000c001b); + INSTANCE_WR(ctx, 0x296c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2970/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2974/4, 0x000c001b); + INSTANCE_WR(ctx, 0x297c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2980/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2984/4, 0x000c001b); + INSTANCE_WR(ctx, 0x298c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2990/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2994/4, 0x000c001b); + INSTANCE_WR(ctx, 0x299c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x29a0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x29a4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x29ac/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x29b0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x29b4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x29bc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x29c0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x29c4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x29cc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x29d0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x29d4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x29dc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x29e0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x29e4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x29ec/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x29f0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x29f4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x29fc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2a00/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2a04/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2a0c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2a10/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2a14/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2a1c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2a20/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2a24/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2a2c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2a30/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2a34/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2a3c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2a40/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2a44/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2a4c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2a50/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2a54/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2a5c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2a60/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2a64/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2a6c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2a70/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2a74/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2a7c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2a80/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2a84/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2a8c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2a90/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2a94/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2a9c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2aa0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2aa4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2aac/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2ab0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2ab4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2abc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2ac0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2ac4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2acc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2ad0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2ad4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2adc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2ae0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2ae4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2aec/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2af0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2af4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2afc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2b00/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2b04/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2b0c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2b10/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2b14/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2b1c/4, 0x10700ff9); + 
INSTANCE_WR(ctx, 0x2b20/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2b24/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2b2c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2b30/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2b34/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2b3c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2b40/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2b44/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2b4c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2b50/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2b54/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2b5c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2b60/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2b64/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2b6c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2b70/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2b74/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2b7c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2b80/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2b84/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2b8c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2b90/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2b94/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2b9c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2ba0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2ba4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2bac/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2bb0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2bb4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2bbc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2bc0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2bc4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2bcc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2bd0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2bd4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2bdc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2be0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2be4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2bec/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2bf0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2bf4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2bfc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2c00/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2c04/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2c0c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2c10/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2c14/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2c1c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2c20/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2c24/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2c2c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2c30/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2c34/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2c3c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2c40/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2c44/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2c4c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2c50/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2c54/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2c5c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2c60/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2c64/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2c6c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2c70/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2c74/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2c7c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2c80/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2c84/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2c8c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2c90/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2c94/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2c9c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2ca0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2ca4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2cac/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2cb0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2cb4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2cbc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2cc0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2cc4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2ccc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2cd0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2cd4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2cdc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2ce0/4, 
0x0436086c); + INSTANCE_WR(ctx, 0x2ce4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2cec/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2cf0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2cf4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2cfc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2d00/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2d04/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2d0c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2d10/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2d14/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2d1c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2d20/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2d24/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2d2c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2d30/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2d34/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2d3c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2d40/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2d44/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2d4c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2d50/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2d54/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2d5c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2d60/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2d64/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2d6c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2d70/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2d74/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2d7c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2d80/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2d84/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2d8c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2d90/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2d94/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2d9c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2da0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2da4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2dac/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2db0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2db4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2dbc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2dc0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2dc4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2dcc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2dd0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2dd4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2ddc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2de0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2de4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2dec/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2df0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2df4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2dfc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2e00/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2e04/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2e0c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2e10/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2e14/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2e1c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2e20/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2e24/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2e2c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2e30/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2e34/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2e3c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2e40/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2e44/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2e4c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2e50/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2e54/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2e5c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2e60/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2e64/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2e6c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2e70/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2e74/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2e7c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2e80/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2e84/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2e8c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2e90/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2e94/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2e9c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2ea0/4, 0x0436086c); + 
INSTANCE_WR(ctx, 0x2ea4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2eac/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2eb0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2eb4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2ebc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2ec0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2ec4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2ecc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2ed0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2ed4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2edc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2ee0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2ee4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2eec/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2ef0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2ef4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2efc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2f00/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2f04/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2f0c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2f10/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2f14/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2f1c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2f20/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2f24/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2f2c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2f30/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2f34/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2f3c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2f40/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2f44/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2f4c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2f50/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2f54/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2f5c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2f60/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2f64/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2f6c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2f70/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2f74/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2f7c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2f80/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2f84/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2f8c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2f90/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2f94/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2f9c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2fa0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2fa4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2fac/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2fb0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2fb4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2fbc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2fc0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2fc4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2fcc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2fd0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2fd4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2fdc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2fe0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2fe4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2fec/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x2ff0/4, 0x0436086c); + INSTANCE_WR(ctx, 0x2ff4/4, 0x000c001b); + INSTANCE_WR(ctx, 0x2ffc/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x3000/4, 0x0436086c); + INSTANCE_WR(ctx, 0x3004/4, 0x000c001b); + INSTANCE_WR(ctx, 0x300c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x3010/4, 0x0436086c); + INSTANCE_WR(ctx, 0x3014/4, 0x000c001b); + INSTANCE_WR(ctx, 0x301c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x3020/4, 0x0436086c); + INSTANCE_WR(ctx, 0x3024/4, 0x000c001b); + INSTANCE_WR(ctx, 0x302c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x3030/4, 0x0436086c); + INSTANCE_WR(ctx, 0x3034/4, 0x000c001b); + INSTANCE_WR(ctx, 0x303c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x3040/4, 0x0436086c); + INSTANCE_WR(ctx, 0x3044/4, 0x000c001b); + INSTANCE_WR(ctx, 0x304c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x3050/4, 0x0436086c); + INSTANCE_WR(ctx, 0x3054/4, 0x000c001b); + INSTANCE_WR(ctx, 0x305c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x3060/4, 0x0436086c); + INSTANCE_WR(ctx, 0x3064/4, 
0x000c001b); + INSTANCE_WR(ctx, 0x306c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x3070/4, 0x0436086c); + INSTANCE_WR(ctx, 0x3074/4, 0x000c001b); + INSTANCE_WR(ctx, 0x307c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x3080/4, 0x0436086c); + INSTANCE_WR(ctx, 0x3084/4, 0x000c001b); + INSTANCE_WR(ctx, 0x308c/4, 0x10700ff9); + INSTANCE_WR(ctx, 0x3090/4, 0x0436086c); + INSTANCE_WR(ctx, 0x3094/4, 0x000c001b); + for(i = 0x30bc; i< 0x30cc; i += 4) + INSTANCE_WR(ctx, i/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x3450/4, 0x3f800000); + INSTANCE_WR(ctx, 0x380c/4, 0x3f800000); + INSTANCE_WR(ctx, 0x3820/4, 0x3f800000); + INSTANCE_WR(ctx, 0x384c/4, 0x40000000); + INSTANCE_WR(ctx, 0x3850/4, 0x3f800000); + INSTANCE_WR(ctx, 0x3854/4, 0x3f000000); + INSTANCE_WR(ctx, 0x385c/4, 0x40000000); + INSTANCE_WR(ctx, 0x3860/4, 0x3f800000); + INSTANCE_WR(ctx, 0x3868/4, 0xbf800000); + INSTANCE_WR(ctx, 0x3870/4, 0xbf800000);} int nv30_graph_create_context(struct nouveau_channel *chan) { @@ -109,9 +2715,23 @@ int nv30_graph_create_context(struct nouveau_channel *chan) int ret; switch (dev_priv->chipset) { + case 0x31: + ctx_size = NV31_GRCTX_SIZE; + ctx_init = nv31_graph_context_init; + break; + case 0x34: + ctx_size = NV34_GRCTX_SIZE; + ctx_init = nv34_graph_context_init; + break; + case 0x35: + case 0x36: + ctx_size = NV35_GRCTX_SIZE; + ctx_init = nv35_graph_context_init; + break; default: - ctx_size = NV30_GRCTX_SIZE; - ctx_init = nv30_graph_context_init; + ctx_size = 0; + ctx_init = nv35_graph_context_init; + DRM_ERROR("Please contact the devs if you want your NV%x card to work\n",dev_priv->chipset); break; } @@ -122,10 +2742,10 @@ int nv30_graph_create_context(struct nouveau_channel *chan) /* Initialise default context values */ ctx_init(dev, chan->ramin_grctx->gpuobj); - - INSTANCE_WR(chan->ramin_grctx->gpuobj, 10, chan->id<<24); /* CTX_USER */ - INSTANCE_WR(dev_priv->ctx_table->gpuobj, chan->id, - chan->ramin_grctx->instance >> 4); + + INSTANCE_WR(chan->ramin_grctx->gpuobj, 0x28/4, (chan->id<<24)|0x1); /* CTX_USER */ + INSTANCE_WR(dev_priv->ctx_table->gpuobj, chan->id, + chan->ramin_grctx->instance >> 4); return 0; } @@ -221,18 +2841,24 @@ int nv30_graph_init(struct drm_device *dev) NV_WRITE(NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF); NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x00000000); NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x401287c0); - NV_WRITE(0x400890, 0x00140000); - NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xf0de0475); - NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x10008000); - NV_WRITE(NV04_PGRAPH_LIMIT_VIOL_PIX, 0xf04b1f36); + NV_WRITE(0x400890, 0x01b463ff); + NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xf3de0471); + NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x00008000); + NV_WRITE(NV04_PGRAPH_LIMIT_VIOL_PIX, 0xf04bdff6); NV_WRITE(0x400B80, 0x1003d888); - NV_WRITE(0x400B84, 0x0c000000); - NV_WRITE(0x400B88, 0x62ff0f7f); - NV_WRITE(0x400098, 0x000000c0); - NV_WRITE(0x40009C, 0x0005dc00); - NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x62ff0f7f); + NV_WRITE(0x400098, 0x00000000); + NV_WRITE(0x40009C, 0x0005ad00); + NV_WRITE(0x400B88, 0x62ff00ff); // suspiciously like PGRAPH_DEBUG_2 NV_WRITE(0x4000a0, 0x00000000); NV_WRITE(0x4000a4, 0x00000008); + NV_WRITE(0x4008a8, 0xb784a400); + NV_WRITE(0x400ba0, 0x002f8685); + NV_WRITE(0x400ba4, 0x00231f3f); + NV_WRITE(0x4008a4, 0x40000020); + NV_WRITE(0x400B84, 0x0c000000); + NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x62ff0f7f); + NV_WRITE(0x4000c0, 0x00000016); + NV_WRITE(0x400780, 0x000014e4); /* copy tile info from PFB */ for (i=0; i Date: Wed, 22 Aug 2007 12:54:26 +1000 Subject: [PATCH 283/437] nouveau/nv50: Correct thinko for 8800 chips + cleanup a bit. 
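The G80 path pointed the PFIFO context-table entry at the channel's RAMIN instance instead of its RAMFC; the diff below makes both G80 and G84+ take the instance from chan->ramfc, differing only in the shift, so the two branches collapse into a single write. A minimal sketch of the resulting logic (names as in the diff):

    if (!chan->ramfc)
        return -EINVAL;

    /* G80 stores the RAMFC instance >> 12, later chips >> 8 */
    inst = IS_G80 ? (chan->ramfc->instance >> 12)
                  : (chan->ramfc->instance >> 8);
    NV_WRITE(NV50_PFIFO_CTX_TABLE(channel),
             inst | NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED);

On G80 the fake RAMFC gpuobj is also created with its real VRAM backing offset (chan->ramin->gpuobj->im_backing->start) rather than ~0.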
--- shared-core/nv50_fifo.c | 36 ++++++++++++++---------------------- 1 file changed, 14 insertions(+), 22 deletions(-) diff --git a/shared-core/nv50_fifo.c b/shared-core/nv50_fifo.c index 71b89d6d..7859544a 100644 --- a/shared-core/nv50_fifo.c +++ b/shared-core/nv50_fifo.c @@ -63,24 +63,17 @@ nv50_fifo_channel_enable(struct drm_device *dev, int channel, int nt) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_channel *chan = dev_priv->fifos[channel]; + uint32_t inst; DRM_DEBUG("ch%d\n", channel); - if (IS_G80) { - if (!chan->ramin) - return -EINVAL; + if (!chan->ramfc) + return -EINVAL; - NV_WRITE(NV50_PFIFO_CTX_TABLE(channel), - (chan->ramin->instance >> 12) | - NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED); - } else { - if (!chan->ramfc) - return -EINVAL; - - NV_WRITE(NV50_PFIFO_CTX_TABLE(channel), - (chan->ramfc->instance >> 8) | - NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED); - } + if (IS_G80) inst = chan->ramfc->instance >> 12; + else inst = chan->ramfc->instance >> 8; + NV_WRITE(NV50_PFIFO_CTX_TABLE(channel), + inst | NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED); if (!nt) nv50_fifo_init_thingo(dev); return 0; @@ -90,16 +83,13 @@ static void nv50_fifo_channel_disable(struct drm_device *dev, int channel, int nt) { struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t inst; DRM_DEBUG("ch%d, nt=%d\n", channel, nt); - if (IS_G80) { - NV_WRITE(NV50_PFIFO_CTX_TABLE(channel), - NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80); - } else { - NV_WRITE(NV50_PFIFO_CTX_TABLE(channel), - NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84); - } + if (IS_G80) inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80; + else inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84; + NV_WRITE(NV50_PFIFO_CTX_TABLE(channel), inst); if (!nt) nv50_fifo_init_thingo(dev); } @@ -234,7 +224,9 @@ nv50_fifo_create_context(struct nouveau_channel *chan) if (IS_G80) { uint32_t ramfc_offset = chan->ramin->gpuobj->im_pramin->start; - if ((ret = nouveau_gpuobj_new_fake(dev, ramfc_offset, ~0, 0x100, + uint32_t vram_offset = chan->ramin->gpuobj->im_backing->start; + if ((ret = nouveau_gpuobj_new_fake(dev, ramfc_offset, + vram_offset, 0x100, NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, &ramfc, &chan->ramfc))) From 81eaff44c47cfb23e96b1cb848df5fd7ea24f913 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 22 Aug 2007 13:09:27 +1000 Subject: [PATCH 284/437] nouveau: NV4c ctx ucode. Seems we already have a nv4c_ctx_init() somehow, a quick check shows the ucode matches it still. 
--- shared-core/nv40_graph.c | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/shared-core/nv40_graph.c b/shared-core/nv40_graph.c index 0e6028af..8882e62b 100644 --- a/shared-core/nv40_graph.c +++ b/shared-core/nv40_graph.c @@ -1584,6 +1584,37 @@ static uint32_t nv4a_ctx_voodoo[] = { 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0 }; +static uint32_t nv4c_ctx_voodoo[] = { + 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, + 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409065, 0x00409406, + 0x0040a168, 0x0040198f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042, + 0x00200001, 0x0060000a, 0x00700000, 0x001040c5, 0x00401826, 0x00401968, + 0x0060000d, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080, + 0x004020e6, 0x007000a0, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d, + 0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4, + 0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e, + 0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143, + 0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10, + 0x0010427e, 0x001046ec, 0x00500060, 0x00404187, 0x0060000d, 0x00407ae6, + 0x002000f2, 0x0060000a, 0x00148653, 0x00104668, 0x0010c66d, 0x00120682, + 0x0011068b, 0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, + 0x001146c6, 0x00200020, 0x001006cc, 0x001046ed, 0x001246f0, 0x002000c0, + 0x00100700, 0x0010c3d7, 0x001043e1, 0x00500060, 0x00200234, 0x0060000a, + 0x00104800, 0x00108901, 0x00104910, 0x00124920, 0x0020001f, 0x00100940, + 0x00140965, 0x00148a00, 0x00108a14, 0x00140b00, 0x00134b2c, 0x0010cd00, + 0x0010cd04, 0x00104d08, 0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00, + 0x00104f06, 0x002002c0, 0x0060000a, 0x00300000, 0x00200080, 0x00407300, + 0x00200084, 0x00800001, 0x00200508, 0x0060000a, 0x00201320, 0x0040798a, + 0xfffffaf8, 0x00800029, 0x00408a00, 0x00600006, 0x004086e6, 0x00700080, + 0x0020007a, 0x0060000a, 0x00104280, 0x002002c0, 0x0060000a, 0x00200004, + 0x00800001, 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x0040a168, + 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x00700080, 0x00400a68, + 0x00500060, 0x00600007, 0x00409488, 0x0060000f, 0x00500060, 0x00200000, + 0x0060000a, 0x00700000, 0x00106001, 0x00910880, 0x00901ffe, 0x01940000, + 0x00200020, 0x0060000b, 0x00500069, 0x0060000c, 0x00402168, 0x0040a306, + 0x0040a405, 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0 +}; + static uint32_t nv4e_ctx_voodoo[] = { 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409565, 0x00409a06, @@ -1648,6 +1679,7 @@ nv40_graph_init(struct drm_device *dev) case 0x49: ctx_voodoo = nv49_4b_ctx_voodoo; break; case 0x4a: ctx_voodoo = nv4a_ctx_voodoo; break; case 0x4b: ctx_voodoo = nv49_4b_ctx_voodoo; break; + case 0x4c: ctx_voodoo = nv4c_ctx_voodoo; break; case 0x4e: ctx_voodoo = nv4e_ctx_voodoo; break; default: DRM_ERROR("Unknown ctx_voodoo for chipset 0x%02x\n", From a654c0341a7892307522ed6e7f4518cc7e28a99e Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 22 Aug 2007 13:17:19 +1000 Subject: [PATCH 285/437] nouveau/nv40: Dump extra info on ucode state if ctx switch fails. 
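When the context transfer times out, the microcode status register is now decoded into the error message so it is visible where the ucode stopped and on which opcode. Going by the new register defines, bits 31:24 hold the instruction pointer and bits 23:0 the current opcode. A minimal sketch of the decode, matching the hunk below:

    uint32_t ucstat = NV_READ(NV40_PGRAPH_CTXCTL_UCODE_STAT);
    uint32_t ip = ucstat >> NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT; /* bits 31:24 */
    uint32_t op = ucstat & NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK;   /* bits 23:0  */
    DRM_ERROR("IP: 0x%02x, Opcode: 0x%08x\n", ip, op);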
--- shared-core/nouveau_reg.h | 4 ++++ shared-core/nv40_graph.c | 6 +++++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/shared-core/nouveau_reg.h b/shared-core/nouveau_reg.h index 65614627..1023e75e 100644 --- a/shared-core/nouveau_reg.h +++ b/shared-core/nouveau_reg.h @@ -178,6 +178,10 @@ #define NV10_PGRAPH_CTX_CACHE5 0x004001E0 #define NV40_PGRAPH_CTXCTL_0304 0x00400304 #define NV40_PGRAPH_CTXCTL_0304_XFER_CTX 0x00000001 +#define NV40_PGRAPH_CTXCTL_UCODE_STAT 0x00400308 +#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_MASK 0xff000000 +#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT 24 +#define NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK 0x00ffffff #define NV40_PGRAPH_CTXCTL_0310 0x00400310 #define NV40_PGRAPH_CTXCTL_0310_XFER_SAVE 0x00000020 #define NV40_PGRAPH_CTXCTL_0310_XFER_LOAD 0x00000040 diff --git a/shared-core/nv40_graph.c b/shared-core/nv40_graph.c index 8882e62b..25ee5c77 100644 --- a/shared-core/nv40_graph.c +++ b/shared-core/nv40_graph.c @@ -1310,7 +1310,11 @@ nv40_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save) NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, old_cp); if (i == tv) { - DRM_ERROR("failed: inst=0x%08x save=%d\n", inst, save); + uint32_t ucstat = NV_READ(NV40_PGRAPH_CTXCTL_UCODE_STAT); + DRM_ERROR("Failed: Instance=0x%08x Save=%d\n", inst, save); + DRM_ERROR("IP: 0x%02x, Opcode: 0x%08x\n", + ucstat >> NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT, + ucstat & NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK); DRM_ERROR("0x40030C = 0x%08x\n", NV_READ(NV40_PGRAPH_CTXCTL_030C)); return -EBUSY; From 11c46afe7599cf3cefd30a7e55325a1a1aa8e5ba Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 22 Aug 2007 13:23:49 +1000 Subject: [PATCH 286/437] nouveau/nv40: Preserve other bits in 0x400304/0x400310 like NVIDIA do. --- shared-core/nv40_graph.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/shared-core/nv40_graph.c b/shared-core/nv40_graph.c index 25ee5c77..26237c7d 100644 --- a/shared-core/nv40_graph.c +++ b/shared-core/nv40_graph.c @@ -1293,20 +1293,26 @@ static int nv40_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save) { struct drm_nouveau_private *dev_priv = dev->dev_private; - uint32_t old_cp, tv = 1000; + uint32_t old_cp, tv = 1000, tmp; int i; old_cp = NV_READ(NV20_PGRAPH_CHANNEL_CTX_POINTER); NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst); - NV_WRITE(NV40_PGRAPH_CTXCTL_0310, - save ? NV40_PGRAPH_CTXCTL_0310_XFER_SAVE : - NV40_PGRAPH_CTXCTL_0310_XFER_LOAD); - NV_WRITE(NV40_PGRAPH_CTXCTL_0304, NV40_PGRAPH_CTXCTL_0304_XFER_CTX); + + tmp = NV_READ(NV40_PGRAPH_CTXCTL_0310); + tmp |= save ? 
NV40_PGRAPH_CTXCTL_0310_XFER_SAVE : + NV40_PGRAPH_CTXCTL_0310_XFER_LOAD; + NV_WRITE(NV40_PGRAPH_CTXCTL_0310, tmp); + + tmp = NV_READ(NV40_PGRAPH_CTXCTL_0304); + tmp |= NV40_PGRAPH_CTXCTL_0304_XFER_CTX; + NV_WRITE(NV40_PGRAPH_CTXCTL_0304, tmp); for (i = 0; i < tv; i++) { if (NV_READ(NV40_PGRAPH_CTXCTL_030C) == 0) break; } + NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, old_cp); if (i == tv) { From 8645dac8952473dc3e09ba7a7a9db3fbdf75215f Mon Sep 17 00:00:00 2001 From: Matthieu Castet Date: Wed, 22 Aug 2007 23:17:56 +0200 Subject: [PATCH 287/437] nouveau : fix some potential crashes with objects causing hash collision --- shared-core/nouveau_object.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/shared-core/nouveau_object.c b/shared-core/nouveau_object.c index e0cb334f..fbce7702 100644 --- a/shared-core/nouveau_object.c +++ b/shared-core/nouveau_object.c @@ -141,8 +141,13 @@ nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) ref->channel, co, INSTANCE_RD(ramht, co/4)); co += 8; - if (co >= dev_priv->ramht_size) + if (co >= dev_priv->ramht_size) { + DRM_INFO("no space left after collision\n"); co = 0; + /* exit as it seems to cause crash with nouveau_demo and + * 0xdead0001 object */ + break; + } } while (co != ho); DRM_ERROR("RAMHT space exhausted. ch=%d\n", ref->channel); From 98750111961a5729eba9433b927f8c24548fbace Mon Sep 17 00:00:00 2001 From: Patrice Mandin Date: Thu, 23 Aug 2007 10:18:34 +0200 Subject: [PATCH 288/437] nouveau: nv10: check some NULL pointers inside context switch --- shared-core/nv10_graph.c | 28 ++++++++++++++++++++++++++-- 1 file changed, 26 insertions(+), 2 deletions(-) diff --git a/shared-core/nv10_graph.c b/shared-core/nv10_graph.c index 53b93758..567ce6a4 100644 --- a/shared-core/nv10_graph.c +++ b/shared-core/nv10_graph.c @@ -578,16 +578,40 @@ int nv10_graph_save_context(struct nouveau_channel *chan) void nouveau_nv10_context_switch(struct drm_device *dev) { - struct drm_nouveau_private *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv; struct nouveau_channel *next, *last; int chid; + if (!dev) { + DRM_DEBUG("Invalid drm_device\n"); + return; + } + dev_priv = dev->dev_private; + if (!dev_priv) { + DRM_DEBUG("Invalid drm_nouveau_private\n"); + return; + } + if (!dev_priv->fifos) { + DRM_DEBUG("Invalid drm_nouveau_private->fifos\n"); + return; + } + chid = NV_READ(NV03_PFIFO_CACHE1_PUSH1)&(nouveau_fifo_number(dev)-1); next = dev_priv->fifos[chid]; + if (!next) { + DRM_DEBUG("Invalid next channel\n"); + return; + } + chid = (NV_READ(NV10_PGRAPH_CTX_USER) >> 24) & (nouveau_fifo_number(dev)-1); last = dev_priv->fifos[chid]; + if (!last) { + DRM_DEBUG("Invalid last channel\n"); + return; + } + DRM_INFO("NV: PGRAPH context switch interrupt channel %x -> %x\n", last->id, next->id); @@ -607,7 +631,7 @@ void nouveau_nv10_context_switch(struct drm_device *dev) nouveau_wait_for_idle(dev); nv10_graph_load_context(next); - + NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10010100); NV_WRITE(NV10_PGRAPH_CTX_USER, next->id << 24); NV_WRITE(NV10_PGRAPH_FFINTFC_ST2, NV_READ(NV10_PGRAPH_FFINTFC_ST2)&0xCFFFFFFF); From 502bbdbe14fa458ed06c7fa4b1ccb63e4f126625 Mon Sep 17 00:00:00 2001 From: Patrice Mandin Date: Sat, 25 Aug 2007 00:12:58 +0200 Subject: [PATCH 289/437] nouveau: nv10: output a warning if last channel invalid, and switch to next --- shared-core/nv10_graph.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/shared-core/nv10_graph.c b/shared-core/nv10_graph.c index 567ce6a4..18bab7eb 
100644 --- a/shared-core/nv10_graph.c +++ b/shared-core/nv10_graph.c @@ -608,21 +608,23 @@ void nouveau_nv10_context_switch(struct drm_device *dev) last = dev_priv->fifos[chid]; if (!last) { - DRM_DEBUG("Invalid last channel\n"); - return; + DRM_DEBUG("WARNING: Invalid last channel, switch to %x\n", + next->id); + } else { + DRM_INFO("NV: PGRAPH context switch interrupt channel %x -> %x\n", + last->id, next->id); } - DRM_INFO("NV: PGRAPH context switch interrupt channel %x -> %x\n", - last->id, next->id); - NV_WRITE(NV04_PGRAPH_FIFO,0x0); #if 0 NV_WRITE(NV_PFIFO_CACH1_PUL0, 0x00000000); NV_WRITE(NV_PFIFO_CACH1_PUL1, 0x00000000); NV_WRITE(NV_PFIFO_CACHES, 0x00000000); #endif - nv10_graph_save_context(last); - + if (last) { + nv10_graph_save_context(last); + } + nouveau_wait_for_idle(dev); NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10000000); From 4182fce4084f4d884a7435b8ad2acb5c209f4544 Mon Sep 17 00:00:00 2001 From: Matthieu Castet Date: Sat, 25 Aug 2007 22:10:45 +0200 Subject: [PATCH 290/437] nouveau : nv1x graph reworks - add forgotten init value - use the same PGRAPH_DEBUG than the blob - remove init of ddx reg : it should be done with object - better handle of channel destruction hope I didn't break anything ;) --- shared-core/nv10_graph.c | 67 ++++++++++++++++++++++++++++------------ 1 file changed, 48 insertions(+), 19 deletions(-) diff --git a/shared-core/nv10_graph.c b/shared-core/nv10_graph.c index 18bab7eb..c604ff2e 100644 --- a/shared-core/nv10_graph.c +++ b/shared-core/nv10_graph.c @@ -181,11 +181,7 @@ static void nv10_praph_pipe(struct drm_device *dev) { nouveau_wait_for_idle(dev); } -/* TODO replace address with name - use loops */ static int nv10_graph_ctx_regs [] = { -NV03_PGRAPH_XY_LOGIC_MISC0, - NV10_PGRAPH_CTX_SWITCH1, NV10_PGRAPH_CTX_SWITCH2, NV10_PGRAPH_CTX_SWITCH3, @@ -455,6 +451,7 @@ NV03_PGRAPH_ABS_UCLIPA_YMIN, NV03_PGRAPH_ABS_UCLIPA_YMAX, NV03_PGRAPH_ABS_ICLIP_XMAX, NV03_PGRAPH_ABS_ICLIP_YMAX, +NV03_PGRAPH_XY_LOGIC_MISC0, NV03_PGRAPH_XY_LOGIC_MISC1, NV03_PGRAPH_XY_LOGIC_MISC2, NV03_PGRAPH_XY_LOGIC_MISC3, @@ -556,6 +553,7 @@ int nv10_graph_load_context(struct nouveau_channel *chan) for (j = 0; j < sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0]); i++,j++) NV_WRITE(nv17_graph_ctx_regs[j], chan->pgraph_ctx[i]); } + NV_WRITE(NV10_PGRAPH_CTX_USER, chan->id << 24); return 0; } @@ -616,11 +614,6 @@ void nouveau_nv10_context_switch(struct drm_device *dev) } NV_WRITE(NV04_PGRAPH_FIFO,0x0); -#if 0 - NV_WRITE(NV_PFIFO_CACH1_PUL0, 0x00000000); - NV_WRITE(NV_PFIFO_CACH1_PUL1, 0x00000000); - NV_WRITE(NV_PFIFO_CACHES, 0x00000000); -#endif if (last) { nv10_graph_save_context(last); } @@ -635,13 +628,8 @@ void nouveau_nv10_context_switch(struct drm_device *dev) nv10_graph_load_context(next); NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10010100); - NV_WRITE(NV10_PGRAPH_CTX_USER, next->id << 24); + //NV_WRITE(NV10_PGRAPH_CTX_USER, next->id << 24); NV_WRITE(NV10_PGRAPH_FFINTFC_ST2, NV_READ(NV10_PGRAPH_FFINTFC_ST2)&0xCFFFFFFF); -#if 0 - NV_WRITE(NV_PFIFO_CACH1_PUL0, 0x00000001); - NV_WRITE(NV_PFIFO_CACH1_PUL1, 0x00000001); - NV_WRITE(NV_PFIFO_CACHES, 0x00000001); -#endif NV_WRITE(NV04_PGRAPH_FIFO,0x1); } @@ -654,12 +642,14 @@ void nouveau_nv10_context_switch(struct drm_device *dev) int nv10_graph_create_context(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - uint32_t tmp, vramsz; DRM_DEBUG("nv10_graph_context_create %d\n", chan->id); memset(chan->pgraph_ctx, 0, sizeof(chan->pgraph_ctx)); + /* mmio trace suggest 
that should be done in ddx with methods/objects */ +#if 0 + uint32_t tmp, vramsz; /* per channel init from ddx */ tmp = NV_READ(NV10_PGRAPH_SURFACE) & 0x0007ff00; /*XXX the original ddx code, does this in 2 steps : @@ -684,12 +674,23 @@ int nv10_graph_create_context(struct nouveau_channel *chan) { NV_WRITE_CTX(NV03_PGRAPH_ABS_UCLIP_YMIN, 0); NV_WRITE_CTX(NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff); NV_WRITE_CTX(NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff); +#endif + NV_WRITE_CTX(0x00400e88, 0x08000000); + NV_WRITE_CTX(0x00400e9c, 0x4b7fffff); NV_WRITE_CTX(NV03_PGRAPH_XY_LOGIC_MISC0, 0x0001ffff); - /* is it really needed ??? */ + NV_WRITE_CTX(0x00400e10, 0x00001000); + NV_WRITE_CTX(0x00400e14, 0x00001000); + NV_WRITE_CTX(0x00400e30, 0x00080008); + NV_WRITE_CTX(0x00400e34, 0x00080008); if (dev_priv->chipset>=0x17) { + /* is it really needed ??? */ NV_WRITE_CTX(NV10_PGRAPH_DEBUG_4, NV_READ(NV10_PGRAPH_DEBUG_4)); NV_WRITE_CTX(0x004006b0, NV_READ(0x004006b0)); + NV_WRITE_CTX(0x00400eac, 0x0fff0000); + NV_WRITE_CTX(0x00400eb0, 0x0fff0000); + NV_WRITE_CTX(0x00400ec0, 0x00000080); + NV_WRITE_CTX(0x00400ed0, 0x00000080); } /* for the first channel init the regs */ @@ -705,6 +706,23 @@ int nv10_graph_create_context(struct nouveau_channel *chan) { void nv10_graph_destroy_context(struct nouveau_channel *chan) { + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + int chid; + chid = (NV_READ(NV10_PGRAPH_CTX_USER) >> 24) & (nouveau_fifo_number(dev)-1); + + /* does this avoid a potential context switch while we are written graph + * reg, or we should mask graph interrupt ??? + */ + NV_WRITE(NV04_PGRAPH_FIFO,0x0); + if (chid == chan->id) { + DRM_INFO("cleanning a channel with graph in current context\n"); + nouveau_wait_for_idle(dev); + DRM_INFO("reseting current graph context\n"); + nv10_graph_create_context(chan); + nv10_graph_load_context(chan); + } + NV_WRITE(NV04_PGRAPH_FIFO,0x1); } int nv10_graph_init(struct drm_device *dev) { @@ -722,10 +740,17 @@ int nv10_graph_init(struct drm_device *dev) { NV_WRITE(NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF); NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x00000000); NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x00118700); - NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x24E00810); - NV_WRITE(NV04_PGRAPH_DEBUG_3, 0x55DE0030 | + //NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x24E00810); /* 0x25f92ad9 */ + NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x25f92ad9); + NV_WRITE(NV04_PGRAPH_DEBUG_3, 0x55DE0830 | (1<<29) | (1<<31)); + if (dev_priv->chipset>=0x17) { + NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x1f000000); + NV_WRITE(0x004006b0, 0x40000020); + } + else + NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x00000000); /* copy tile info from PFB */ for (i=0; i Date: Sun, 26 Aug 2007 20:48:32 +0200 Subject: [PATCH 291/437] nouveau : add NV04_PGRAPH_TRAPPED_ADDR definition - fix offset for nv04 - use it in nv10 graph ctx switch for getting next channel - dump NV10_PGRAPH_TRAPPED_DATA_HIGH on nv10+ --- shared-core/nouveau_irq.c | 24 ++++++++++++++++-------- shared-core/nouveau_reg.h | 8 +++++--- shared-core/nv10_graph.c | 2 +- 3 files changed, 22 insertions(+), 12 deletions(-) diff --git a/shared-core/nouveau_irq.c b/shared-core/nouveau_irq.c index d8a2c1b8..e64677ed 100644 --- a/shared-core/nouveau_irq.c +++ b/shared-core/nouveau_irq.c @@ -227,8 +227,10 @@ nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret) struct drm_nouveau_private *dev_priv = dev->dev_private; int channel; - if (dev_priv->card_type < NV_40) { - channel = (NV_READ(0x400704) >> 20) & 0x1f; + if (dev_priv->card_type < NV_10) { + channel = 
(NV_READ(NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0xf; + } else if (dev_priv->card_type < NV_40) { + channel = (NV_READ(NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f; } else if (dev_priv->card_type < NV_50) { uint32_t cur_grctx = (NV_READ(0x40032C) & 0xfffff) << 4; @@ -283,16 +285,22 @@ nouveau_graph_dump_trap_info(struct drm_device *dev) struct drm_nouveau_private *dev_priv = dev->dev_private; uint32_t address; uint32_t channel, class; - uint32_t method, subc, data; + uint32_t method, subc, data, data2; uint32_t nsource, nstatus; if (nouveau_graph_trapped_channel(dev, &channel)) channel = -1; - address = NV_READ(0x400704); - subc = (address >> 16) & 0x7; + data = NV_READ(NV04_PGRAPH_TRAPPED_DATA); + address = NV_READ(NV04_PGRAPH_TRAPPED_ADDR); method = address & 0x1FFC; - data = NV_READ(0x400708); + if (dev_priv->card_type < NV_10) { + subc = (address >> 13) & 0x7; + data2= 0; + } else { + subc = (address >> 16) & 0x7; + data2= NV_READ(NV10_PGRAPH_TRAPPED_DATA_HIGH); + } nsource = NV_READ(NV03_PGRAPH_NSOURCE); nstatus = NV_READ(NV03_PGRAPH_NSTATUS); if (dev_priv->card_type < NV_50) { @@ -309,8 +317,8 @@ nouveau_graph_dump_trap_info(struct drm_device *dev) ARRAY_SIZE(nouveau_nstatus_names)); printk("\n"); - DRM_ERROR("Channel %d/%d (class 0x%04x) - Method 0x%04x, Data 0x%08x\n", - channel, subc, class, method, data); + DRM_ERROR("Channel %d/%d (class 0x%04x) - Method 0x%04x, Data 0x%08x:0x%08x\n", + channel, subc, class, method, data2, data); } static void nouveau_pgraph_irq_handler(struct drm_device *dev) diff --git a/shared-core/nouveau_reg.h b/shared-core/nouveau_reg.h index 1023e75e..a1895c34 100644 --- a/shared-core/nouveau_reg.h +++ b/shared-core/nouveau_reg.h @@ -15,9 +15,6 @@ # define NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK 0xfff00000 # define NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT 20 -#define NV03_PGRAPH_STATUS 0x004006b0 -#define NV04_PGRAPH_STATUS 0x00400700 - #define NV_RAMIN 0x00700000 #define NV_RAMHT_HANDLE_OFFSET 0 @@ -264,7 +261,12 @@ #define NV04_PGRAPH_BLIMIT5 0x00400698 #define NV04_PGRAPH_BSWIZZLE2 0x0040069C #define NV04_PGRAPH_BSWIZZLE5 0x004006A0 +#define NV03_PGRAPH_STATUS 0x004006B0 +#define NV04_PGRAPH_STATUS 0x00400700 +#define NV04_PGRAPH_TRAPPED_ADDR 0x00400704 +#define NV04_PGRAPH_TRAPPED_DATA 0x00400708 #define NV04_PGRAPH_SURFACE 0x0040070C +#define NV10_PGRAPH_TRAPPED_DATA_HIGH 0x0040070C #define NV04_PGRAPH_STATE 0x00400710 #define NV10_PGRAPH_SURFACE 0x00400710 #define NV04_PGRAPH_NOTIFY 0x00400714 diff --git a/shared-core/nv10_graph.c b/shared-core/nv10_graph.c index c604ff2e..e470ff06 100644 --- a/shared-core/nv10_graph.c +++ b/shared-core/nv10_graph.c @@ -594,7 +594,7 @@ void nouveau_nv10_context_switch(struct drm_device *dev) return; } - chid = NV_READ(NV03_PFIFO_CACHE1_PUSH1)&(nouveau_fifo_number(dev)-1); + chid = (NV_READ(NV04_PGRAPH_TRAPPED_ADDR) >> 20)&(nouveau_fifo_number(dev)-1); next = dev_priv->fifos[chid]; if (!next) { From 589707b765eee78cc278c10603e2c858bb819436 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 28 Aug 2007 15:17:11 +1000 Subject: [PATCH 292/437] drm: remove XFREE86_VERSION macros --- linux-core/i810_drm.h | 5 ----- shared-core/drm.h | 20 +------------------- shared-core/r128_drm.h | 18 ------------------ 3 files changed, 1 insertion(+), 42 deletions(-) diff --git a/linux-core/i810_drm.h b/linux-core/i810_drm.h index eff61b4d..d803aeca 100644 --- a/linux-core/i810_drm.h +++ b/linux-core/i810_drm.h @@ -102,13 +102,8 @@ typedef enum _drm_i810_init_func { /* This is the init structure after v1.2 */ typedef struct _drm_i810_init { 
drm_i810_init_func_t func; -#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0) - int ring_map_idx; - int buffer_map_idx; -#else unsigned int mmio_offset; unsigned int buffers_offset; -#endif int sarea_priv_offset; unsigned int ring_start; unsigned int ring_end; diff --git a/shared-core/drm.h b/shared-core/drm.h index db913b1f..a9882d49 100644 --- a/shared-core/drm.h +++ b/shared-core/drm.h @@ -89,24 +89,6 @@ #define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size) #endif -#define XFREE86_VERSION(major,minor,patch,snap) \ - ((major << 16) | (minor << 8) | patch) - -#ifndef CONFIG_XFREE86_VERSION -#define CONFIG_XFREE86_VERSION XFREE86_VERSION(4,1,0,0) -#endif - -#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0) -#define DRM_PROC_DEVICES "/proc/devices" -#define DRM_PROC_MISC "/proc/misc" -#define DRM_PROC_DRM "/proc/drm" -#define DRM_DEV_DRM "/dev/drm" -#define DRM_DEV_MODE (S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP) -#define DRM_DEV_UID 0 -#define DRM_DEV_GID 0 -#endif - -#if CONFIG_XFREE86_VERSION >= XFREE86_VERSION(4,1,0,0) #ifdef __OpenBSD__ #define DRM_MAJOR 81 #endif @@ -114,7 +96,7 @@ #define DRM_MAJOR 226 #endif #define DRM_MAX_MINOR 15 -#endif + #define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */ #define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */ #define DRM_MAX_ORDER 22 /**< Up to 2^22 bytes = 4MB */ diff --git a/shared-core/r128_drm.h b/shared-core/r128_drm.h index e94a39c6..8d8878b5 100644 --- a/shared-core/r128_drm.h +++ b/shared-core/r128_drm.h @@ -222,11 +222,7 @@ typedef struct drm_r128_init { R128_INIT_CCE = 0x01, R128_CLEANUP_CCE = 0x02 } func; -#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0) - int sarea_priv_offset; -#else unsigned long sarea_priv_offset; -#endif int is_pci; int cce_mode; int cce_secure; @@ -240,21 +236,12 @@ typedef struct drm_r128_init { unsigned int depth_offset, depth_pitch; unsigned int span_offset; -#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0) - unsigned int fb_offset; - unsigned int mmio_offset; - unsigned int ring_offset; - unsigned int ring_rptr_offset; - unsigned int buffers_offset; - unsigned int agp_textures_offset; -#else unsigned long fb_offset; unsigned long mmio_offset; unsigned long ring_offset; unsigned long ring_rptr_offset; unsigned long buffers_offset; unsigned long agp_textures_offset; -#endif } drm_r128_init_t; typedef struct drm_r128_cce_stop { @@ -264,15 +251,10 @@ typedef struct drm_r128_cce_stop { typedef struct drm_r128_clear { unsigned int flags; -#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0) - int x, y, w, h; -#endif unsigned int clear_color; unsigned int clear_depth; -#if CONFIG_XFREE86_VERSION >= XFREE86_VERSION(4,1,0,0) unsigned int color_mask; unsigned int depth_mask; -#endif } drm_r128_clear_t; typedef struct drm_r128_vertex { From c78e610fa42c8122ed6bc504222ef650f5693d22 Mon Sep 17 00:00:00 2001 From: Keith Packard Date: Tue, 28 Aug 2007 12:23:51 -0700 Subject: [PATCH 293/437] Add register defines for hw binning --- shared-core/i915_drv.h | 115 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 115 insertions(+) diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h index 528f7b3a..aff03bee 100644 --- a/shared-core/i915_drv.h +++ b/shared-core/i915_drv.h @@ -272,12 +272,25 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); #define MI_NO_WRITE_FLUSH (1 << 2) #define MI_READ_FLUSH (1 << 0) #define MI_EXE_FLUSH (1 << 1) +#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */ +#define MI_SCENE_COUNT (1 << 3) /* just 
increment scene count */ + +/* Packet to load a register value from the ring/batch command stream: + */ +#define CMD_MI_LOAD_REGISTER_IMM ((0x22 << 23)|0x1) #define BB1_START_ADDR_MASK (~0x7) #define BB1_PROTECTED (1<<0) #define BB1_UNPROTECTED (0<<0) #define BB2_END_ADDR_MASK (~0x7) +/* Interrupt bits: + */ +#define USER_INT_FLAG (1<<1) +#define VSYNC_PIPEB_FLAG (1<<5) +#define VSYNC_PIPEA_FLAG (1<<7) +#define HWB_OOM_FLAG (1<<13) /* binner out of memory */ + #define I915REG_HWSTAM 0x02098 #define I915REG_INT_IDENTITY_R 0x020a4 #define I915REG_INT_MASK_R 0x020a8 @@ -315,6 +328,10 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); #define NOPID 0x2094 #define LP_RING 0x2030 #define HP_RING 0x2040 +/* The binner has its own ring buffer: + */ +#define HWB_RING 0x2400 + #define RING_TAIL 0x00 #define TAIL_ADDR 0x001FFFF8 #define RING_HEAD 0x04 @@ -333,11 +350,105 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); #define RING_VALID 0x00000001 #define RING_INVALID 0x00000000 +/* Instruction parser error reg: + */ +#define IPEIR 0x2088 + +/* Scratch pad debug 0 reg: + */ +#define SCPD0 0x209c + +/* Error status reg: + */ +#define ESR 0x20b8 + +/* Secondary DMA fetch address debug reg: + */ +#define DMA_FADD_S 0x20d4 + +/* Cache mode 0 reg. + * - Manipulating render cache behaviour is central + * to the concept of zone rendering, tuning this reg can help avoid + * unnecessary render cache reads and even writes (for z/stencil) + * at beginning and end of scene. + * + * - To change a bit, write to this reg with a mask bit set and the + * bit of interest either set or cleared. EG: (BIT<<16) | BIT to set. + */ +#define Cache_Mode_0 0x2120 +#define CM0_MASK_SHIFT 16 +#define CM0_IZ_OPT_DISABLE (1<<6) +#define CM0_ZR_OPT_DISABLE (1<<5) +#define CM0_DEPTH_EVICT_DISABLE (1<<4) +#define CM0_COLOR_EVICT_DISABLE (1<<3) +#define CM0_DEPTH_WRITE_DISABLE (1<<1) +#define CM0_RC_OP_FLUSH_DISABLE (1<<0) + + +/* Graphics flush control. A CPU write flushes the GWB of all writes. + * The data is discarded. + */ +#define GFX_FLSH_CNTL 0x2170 + +/* Binner control. Defines the location of the bin pointer list: + */ +#define BINCTL 0x2420 +#define BC_MASK (1 << 9) + +/* Binned scene info. 
+ */ +#define BINSCENE 0x2428 +#define BS_OP_LOAD (1 << 8) +#define BS_MASK (1 << 22) + +/* Bin command parser debug reg: + */ +#define BCPD 0x2480 + +/* Bin memory control debug reg: + */ +#define BMCD 0x2484 + +/* Bin data cache debug reg: + */ +#define BDCD 0x2488 + +/* Binner pointer cache debug reg: + */ +#define BPCD 0x248c + +/* Binner scratch pad debug reg: + */ +#define BINSKPD 0x24f0 + +/* HWB scratch pad debug reg: + */ +#define HWBSKPD 0x24f4 + +/* Binner memory pool reg: + */ +#define BMP_BUFFER 0x2430 +#define BMP_PAGE_SIZE_4K (0 << 10) +#define BMP_BUFFER_SIZE_SHIFT 1 +#define BMP_ENABLE (1 << 0) + +/* Get/put memory from the binner memory pool: + */ +#define BMP_GET 0x2438 +#define BMP_PUT 0x2440 +#define BMP_OFFSET_SHIFT 5 + +/* 3D state packets: + */ +#define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24)) + #define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19)) #define SC_UPDATE_SCISSOR (0x1<<1) #define SC_ENABLE_MASK (0x1<<0) #define SC_ENABLE (0x1<<0) +#define GFX_OP_LOAD_INDIRECT ((0x3<<29)|(0x1d<<24)|(0x7<<16)) + #define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1)) #define SCI_YMIN_MASK (0xffff<<16) #define SCI_XMIN_MASK (0xffff<<0) @@ -378,6 +489,10 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); #define DISPLAY_PLANE_A (0<<20) #define DISPLAY_PLANE_B (1<<20) +/* Define the region of interest for the binner: + */ +#define CMD_OP_BIN_CONTROL ((0x3<<29)|(0x1d<<24)|(0x84<<16)|4) + #define CMD_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1) #define BREADCRUMB_BITS 31 From 2bcd5b5e330843e1e1a5f0a19105ecd33e76b00b Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Wed, 29 Aug 2007 00:04:18 -0700 Subject: [PATCH 294/437] Use DRM_SPINLOCK / DRM_UNSPINLOCK macros. --- linux-core/xgi_drv.c | 2 +- linux-core/xgi_drv.h | 2 +- linux-core/xgi_fence.c | 8 ++++---- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index 241cd39f..6b576558 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -202,7 +202,7 @@ int xgi_bootstrap(struct drm_device * dev, void * data, int err; - spin_lock_init(&info->fence_lock); + DRM_SPINLOCK_INIT(&info->fence_lock); info->next_sequence = 0; info->complete_sequence = 0; diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index c815f63e..d43a6b4e 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -73,7 +73,7 @@ struct xgi_info { struct xgi_cmdring_info cmdring; - spinlock_t fence_lock; + DRM_SPINTYPE fence_lock; unsigned complete_sequence; unsigned next_sequence; }; diff --git a/linux-core/xgi_fence.c b/linux-core/xgi_fence.c index e5b545de..42ed814d 100644 --- a/linux-core/xgi_fence.c +++ b/linux-core/xgi_fence.c @@ -41,7 +41,7 @@ static uint32_t xgi_do_flush(struct drm_device * dev, uint32_t class) if ((info == NULL) || (class != 0)) return 0; - spin_lock(&info->fence_lock); + DRM_SPINLOCK(&info->fence_lock); pending_flush_types = fc->pending_flush | ((fc->pending_exe_flush) ? DRM_FENCE_TYPE_EXE : 0); @@ -64,7 +64,7 @@ static uint32_t xgi_do_flush(struct drm_device * dev, uint32_t class) } } - spin_unlock(&info->fence_lock); + DRM_SPINUNLOCK(&info->fence_lock); return fc->pending_flush | ((fc->pending_exe_flush) ? 
DRM_FENCE_TYPE_EXE : 0); @@ -81,12 +81,12 @@ int xgi_fence_emit_sequence(struct drm_device * dev, uint32_t class, return -EINVAL; - spin_lock(&info->fence_lock); + DRM_SPINLOCK(&info->fence_lock); info->next_sequence++; if (info->next_sequence > BEGIN_BEGIN_IDENTIFICATION_MASK) { info->next_sequence = 1; } - spin_unlock(&info->fence_lock); + DRM_SPINUNLOCK(&info->fence_lock); *sequence = (uint32_t) info->next_sequence; From c46ffd6b2943332a88589fb525305ffd09d35b8d Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Wed, 29 Aug 2007 00:23:30 -0700 Subject: [PATCH 295/437] Fix late night dumb-dumb mistake. --- linux-core/xgi_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index 6b576558..4b90579e 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -202,7 +202,7 @@ int xgi_bootstrap(struct drm_device * dev, void * data, int err; - DRM_SPINLOCK_INIT(&info->fence_lock); + DRM_SPININIT(&info->fence_lock, "fence lock"); info->next_sequence = 0; info->complete_sequence = 0; From 9c5b9d458bc618fb9d7d8590c866655e92f9cb0b Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Wed, 29 Aug 2007 14:41:49 -0700 Subject: [PATCH 296/437] Use ati_pcigart for PCI-e GART table handling. --- linux-core/xgi_drv.c | 2 +- linux-core/xgi_drv.h | 3 +- linux-core/xgi_pcie.c | 89 ++++++++++++++----------------------------- 3 files changed, 30 insertions(+), 64 deletions(-) diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index 4b90579e..84547f62 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -306,7 +306,7 @@ void xgi_driver_lastclose(struct drm_device * dev) info->fb_map = NULL; if (info->pcie_heap_initialized) { - xgi_pcie_lut_cleanup(info); + drm_ati_pcigart_cleanup(dev, &info->gart_info); } if (info->fb_heap_initialized diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index d43a6b4e..f2768d1b 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -64,7 +64,7 @@ struct xgi_info { struct drm_map *fb_map; /* look up table parameters */ - struct drm_dma_handle *lut_handle; + struct ati_pcigart_info gart_info; unsigned int lutPageSize; struct drm_sman sman; @@ -87,7 +87,6 @@ extern int xgi_free(struct xgi_info * info, unsigned long index, struct drm_file * filp); extern int xgi_pcie_heap_init(struct xgi_info * info); -extern void xgi_pcie_lut_cleanup(struct xgi_info * info); extern void *xgi_find_pcie_virt(struct xgi_info * info, u32 address); diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index b4d204c1..a7d3ea24 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -28,15 +28,31 @@ #include "xgi_regs.h" #include "xgi_misc.h" -static int xgi_pcie_lut_init(struct xgi_info * info) +void xgi_gart_flush(struct drm_device *dev) +{ + struct xgi_info *const info = dev->dev_private; + u8 temp; + + DRM_MEMORYBARRIER(); + + /* Set GART in SFB */ + temp = DRM_READ8(info->mmio_map, 0xB00C); + DRM_WRITE8(info->mmio_map, 0xB00C, temp & ~0x02); + + /* Set GART base address to HW */ + DRM_WRITE32(info->mmio_map, 0xB034, info->gart_info.bus_addr); + + /* Flush GART table. 
*/ + DRM_WRITE8(info->mmio_map, 0xB03F, 0x40); + DRM_WRITE8(info->mmio_map, 0xB03F, 0x00); +} + + +int xgi_pcie_heap_init(struct xgi_info * info) { u8 temp = 0; int err; - unsigned i; struct drm_scatter_gather request; - struct drm_sg_mem *sg; - u32 *lut; - /* Get current FB aperture size */ temp = IN3X5B(info->mmio_map, 0x27); @@ -70,73 +86,24 @@ static int xgi_pcie_lut_init(struct xgi_info * info) return err; } - sg = info->dev->sg; + info->gart_info.gart_table_location = DRM_ATI_GART_MAIN; + info->gart_info.gart_reg_if = DRM_ATI_GART_PCI; + info->gart_info.table_size = info->dev->sg->pages * sizeof(u32); - info->lut_handle = drm_pci_alloc(info->dev, - sizeof(u32) * sg->pages, - PAGE_SIZE, - DMA_31BIT_MASK); - if (info->lut_handle == NULL) { - DRM_ERROR("cannot allocate PCIE lut page!\n"); + if (!drm_ati_pcigart_init(info->dev, &info->gart_info)) { + DRM_ERROR("failed to init PCI GART!\n"); return -ENOMEM; } - lut = info->lut_handle->vaddr; - for (i = 0; i < sg->pages; i++) { - info->dev->sg->busaddr[i] = pci_map_page(info->dev->pdev, - sg->pagelist[i], - 0, - PAGE_SIZE, - DMA_BIDIRECTIONAL); - if (dma_mapping_error(info->dev->sg->busaddr[i])) { - DRM_ERROR("cannot map GART backing store for DMA!\n"); - return info->dev->sg->busaddr[i]; - } - - lut[i] = info->dev->sg->busaddr[i]; - } - - DRM_MEMORYBARRIER(); - - /* Set GART in SFB */ - temp = DRM_READ8(info->mmio_map, 0xB00C); - DRM_WRITE8(info->mmio_map, 0xB00C, temp & ~0x02); - - /* Set GART base address to HW */ - DRM_WRITE32(info->mmio_map, 0xB034, info->lut_handle->busaddr); - - /* Flush GART table. */ - DRM_WRITE8(info->mmio_map, 0xB03F, 0x40); - DRM_WRITE8(info->mmio_map, 0xB03F, 0x00); - - return 0; -} - -void xgi_pcie_lut_cleanup(struct xgi_info * info) -{ - if (info->lut_handle) { - drm_pci_free(info->dev, info->lut_handle); - info->lut_handle = NULL; - } -} - -int xgi_pcie_heap_init(struct xgi_info * info) -{ - int err; - - err = xgi_pcie_lut_init(info); - if (err) { - DRM_ERROR("xgi_pcie_lut_init failed\n"); - return err; - } + xgi_gart_flush(info->dev); mutex_lock(&info->dev->struct_mutex); err = drm_sman_set_range(&info->sman, XGI_MEMLOC_NON_LOCAL, 0, info->pcie.size); mutex_unlock(&info->dev->struct_mutex); if (err) { - xgi_pcie_lut_cleanup(info); + drm_ati_pcigart_cleanup(info->dev, &info->gart_info); } info->pcie_heap_initialized = (err == 0); From 69b11f44f0a0cfe0806e18dae2f360bc1ed8e005 Mon Sep 17 00:00:00 2001 From: Stephane Marchesin Date: Thu, 30 Aug 2007 21:51:53 +0200 Subject: [PATCH 297/437] nouveau: give nv03 the last cut. 
--- shared-core/drm_pciids.txt | 7 ------- shared-core/nouveau_drm.h | 2 -- shared-core/nouveau_fifo.c | 3 --- shared-core/nouveau_mem.c | 12 ------------ shared-core/nouveau_state.c | 3 --- shared-core/nv04_instmem.c | 1 - 6 files changed, 28 deletions(-) diff --git a/shared-core/drm_pciids.txt b/shared-core/drm_pciids.txt index 4bd690b2..8d90f3a7 100644 --- a/shared-core/drm_pciids.txt +++ b/shared-core/drm_pciids.txt @@ -482,9 +482,6 @@ 0x10DE 0x009E NV40 "NVidia 0x009E" [nouveau] -0x10de 0x0008 NV_03 "EDGE 3D" -0x10de 0x0009 NV_03 "EDGE 3D" -0x10de 0x0010 NV_03 "Mutara V08" 0x10de 0x0020 NV_04 "RIVA TNT" 0x10de 0x0028 NV_04 "RIVA TNT2/TNT2 Pro" 0x10de 0x0029 NV_04 "RIVA TNT2 Ultra" @@ -732,10 +729,6 @@ 0x10de 0x0421 NV_50 "GeForce 8500 GT" 0x10de 0x0422 NV_50 "GeForce 8400 GS" 0x10de 0x0423 NV_50 "GeForce 8300 GS" -0x12d2 0x0008 NV_03 "NV1" -0x12d2 0x0009 NV_03 "DAC64" -0x12d2 0x0018 NV_03 "Riva128" -0x12d2 0x0019 NV_03 "Riva128ZX" 0x12d2 0x0020 NV_04 "TNT" 0x12d2 0x0028 NV_04 "TNT2" 0x12d2 0x0029 NV_04 "UTNT2" diff --git a/shared-core/nouveau_drm.h b/shared-core/nouveau_drm.h index bfc9bd4b..c4f1e9a4 100644 --- a/shared-core/nouveau_drm.h +++ b/shared-core/nouveau_drm.h @@ -119,8 +119,6 @@ struct drm_nouveau_setparam { enum nouveau_card_type { NV_UNKNOWN =0, - NV_01 =1, - NV_03 =3, NV_04 =4, NV_05 =5, NV_10 =10, diff --git a/shared-core/nouveau_fifo.c b/shared-core/nouveau_fifo.c index 1aa724f1..437c84f2 100644 --- a/shared-core/nouveau_fifo.c +++ b/shared-core/nouveau_fifo.c @@ -34,8 +34,6 @@ int nouveau_fifo_number(struct drm_device *dev) struct drm_nouveau_private *dev_priv=dev->dev_private; switch(dev_priv->card_type) { - case NV_03: - return 8; case NV_04: case NV_05: return 16; @@ -109,7 +107,6 @@ static int nouveau_fifo_instmem_configure(struct drm_device *dev) case NV_11: case NV_10: case NV_04: - case NV_03: NV_WRITE(NV03_PFIFO_RAMFC, dev_priv->ramfc_offset>>8); break; } diff --git a/shared-core/nouveau_mem.c b/shared-core/nouveau_mem.c index 2cc0ed77..dbfba351 100644 --- a/shared-core/nouveau_mem.c +++ b/shared-core/nouveau_mem.c @@ -257,18 +257,6 @@ uint64_t nouveau_mem_fb_amount(struct drm_device *dev) struct drm_nouveau_private *dev_priv=dev->dev_private; switch(dev_priv->card_type) { - case NV_03: - switch(NV_READ(NV03_BOOT_0)&NV03_BOOT_0_RAM_AMOUNT) - { - case NV03_BOOT_0_RAM_AMOUNT_8MB: - case NV03_BOOT_0_RAM_AMOUNT_8MB_SDRAM: - return 8*1024*1024; - case NV03_BOOT_0_RAM_AMOUNT_4MB: - return 4*1024*1024; - case NV03_BOOT_0_RAM_AMOUNT_2MB: - return 2*1024*1024; - } - break; case NV_04: case NV_05: if (NV_READ(NV03_BOOT_0) & 0x00000100) { diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c index d885f7c6..e73b4878 100644 --- a/shared-core/nouveau_state.c +++ b/shared-core/nouveau_state.c @@ -538,9 +538,6 @@ void nouveau_wait_for_idle(struct drm_device *dev) { struct drm_nouveau_private *dev_priv=dev->dev_private; switch(dev_priv->card_type) { - case NV_03: - while (NV_READ(NV03_PGRAPH_STATUS)); - break; case NV_50: break; default: { diff --git a/shared-core/nv04_instmem.c b/shared-core/nv04_instmem.c index fed6ff7e..5a446450 100644 --- a/shared-core/nv04_instmem.c +++ b/shared-core/nv04_instmem.c @@ -70,7 +70,6 @@ nv04_instmem_configure_fixed_tables(struct drm_device *dev) case NV_11: case NV_10: case NV_04: - case NV_03: default: dev_priv->ramfc_offset = 0x11400; dev_priv->ramfc_size = nouveau_fifo_number(dev) * From bac3f49daa54bf34ea21854be23061d10a0d0d1b Mon Sep 17 00:00:00 2001 From: Stephane Marchesin Date: Fri, 31 Aug 2007 01:39:40 +0200 
Subject: [PATCH 298/437] nouveau: nv04 context switching support. Works for starting X up at least. --- shared-core/nv04_graph.c | 638 +++++++++++++++++++++------------------ 1 file changed, 348 insertions(+), 290 deletions(-) diff --git a/shared-core/nv04_graph.c b/shared-core/nv04_graph.c index 050f6e81..213696ca 100644 --- a/shared-core/nv04_graph.c +++ b/shared-core/nv04_graph.c @@ -27,262 +27,321 @@ #include "nouveau_drm.h" #include "nouveau_drv.h" -struct reg_interval -{ - uint32_t reg; - int number; -} nv04_graph_ctx_regs [] = { - {NV04_PGRAPH_CTX_SWITCH1,1}, - {NV04_PGRAPH_CTX_SWITCH2,1}, - {NV04_PGRAPH_CTX_SWITCH3,1}, - {NV04_PGRAPH_CTX_SWITCH4,1}, - {NV04_PGRAPH_CTX_CACHE1,1}, - {NV04_PGRAPH_CTX_CACHE2,1}, - {NV04_PGRAPH_CTX_CACHE3,1}, - {NV04_PGRAPH_CTX_CACHE4,1}, - {0x00400184,1}, - {0x004001a4,1}, - {0x004001c4,1}, - {0x004001e4,1}, - {0x00400188,1}, - {0x004001a8,1}, - {0x004001c8,1}, - {0x004001e8,1}, - {0x0040018c,1}, - {0x004001ac,1}, - {0x004001cc,1}, - {0x004001ec,1}, - {0x00400190,1}, - {0x004001b0,1}, - {0x004001d0,1}, - {0x004001f0,1}, - {0x00400194,1}, - {0x004001b4,1}, - {0x004001d4,1}, - {0x004001f4,1}, - {0x00400198,1}, - {0x004001b8,1}, - {0x004001d8,1}, - {0x004001f8,1}, - {0x0040019c,1}, - {0x004001bc,1}, - {0x004001dc,1}, - {0x004001fc,1}, - {0x00400174,1}, - {NV04_PGRAPH_DMA_START_0,1}, - {NV04_PGRAPH_DMA_START_1,1}, - {NV04_PGRAPH_DMA_LENGTH,1}, - {NV04_PGRAPH_DMA_MISC,1}, - {NV04_PGRAPH_DMA_PITCH,1}, - {NV04_PGRAPH_BOFFSET0,1}, - {NV04_PGRAPH_BBASE0,1}, - {NV04_PGRAPH_BLIMIT0,1}, - {NV04_PGRAPH_BOFFSET1,1}, - {NV04_PGRAPH_BBASE1,1}, - {NV04_PGRAPH_BLIMIT1,1}, - {NV04_PGRAPH_BOFFSET2,1}, - {NV04_PGRAPH_BBASE2,1}, - {NV04_PGRAPH_BLIMIT2,1}, - {NV04_PGRAPH_BOFFSET3,1}, - {NV04_PGRAPH_BBASE3,1}, - {NV04_PGRAPH_BLIMIT3,1}, - {NV04_PGRAPH_BOFFSET4,1}, - {NV04_PGRAPH_BBASE4,1}, - {NV04_PGRAPH_BLIMIT4,1}, - {NV04_PGRAPH_BOFFSET5,1}, - {NV04_PGRAPH_BBASE5,1}, - {NV04_PGRAPH_BLIMIT5,1}, - {NV04_PGRAPH_BPITCH0,1}, - {NV04_PGRAPH_BPITCH1,1}, - {NV04_PGRAPH_BPITCH2,1}, - {NV04_PGRAPH_BPITCH3,1}, - {NV04_PGRAPH_BPITCH4,1}, - {NV04_PGRAPH_SURFACE,1}, - {NV04_PGRAPH_STATE,1}, - {NV04_PGRAPH_BSWIZZLE2,1}, - {NV04_PGRAPH_BSWIZZLE5,1}, - {NV04_PGRAPH_BPIXEL,1}, - {NV04_PGRAPH_NOTIFY,1}, - {NV04_PGRAPH_PATT_COLOR0,1}, - {NV04_PGRAPH_PATT_COLOR1,1}, - {NV04_PGRAPH_PATT_COLORRAM,64}, - {NV04_PGRAPH_PATTERN,1}, - {0x0040080c,1}, - {NV04_PGRAPH_PATTERN_SHAPE,1}, - {0x00400600,1}, - {NV04_PGRAPH_ROP3,1}, - {NV04_PGRAPH_CHROMA,1}, - {NV04_PGRAPH_BETA_AND,1}, - {NV04_PGRAPH_BETA_PREMULT,1}, - {NV04_PGRAPH_CONTROL0,1}, - {NV04_PGRAPH_CONTROL1,1}, - {NV04_PGRAPH_CONTROL2,1}, - {NV04_PGRAPH_BLEND,1}, - {NV04_PGRAPH_STORED_FMT,1}, - {NV04_PGRAPH_SOURCE_COLOR,1}, - {0x00400560,1}, - {0x00400568,1}, - {0x00400564,1}, - {0x0040056c,1}, - {0x00400400,1}, - {0x00400480,1}, - {0x00400404,1}, - {0x00400484,1}, - {0x00400408,1}, - {0x00400488,1}, - {0x0040040c,1}, - {0x0040048c,1}, - {0x00400410,1}, - {0x00400490,1}, - {0x00400414,1}, - {0x00400494,1}, - {0x00400418,1}, - {0x00400498,1}, - {0x0040041c,1}, - {0x0040049c,1}, - {0x00400420,1}, - {0x004004a0,1}, - {0x00400424,1}, - {0x004004a4,1}, - {0x00400428,1}, - {0x004004a8,1}, - {0x0040042c,1}, - {0x004004ac,1}, - {0x00400430,1}, - {0x004004b0,1}, - {0x00400434,1}, - {0x004004b4,1}, - {0x00400438,1}, - {0x004004b8,1}, - {0x0040043c,1}, - {0x004004bc,1}, - {0x00400440,1}, - {0x004004c0,1}, - {0x00400444,1}, - {0x004004c4,1}, - {0x00400448,1}, - {0x004004c8,1}, - {0x0040044c,1}, - {0x004004cc,1}, - {0x00400450,1}, - {0x004004d0,1}, - 
{0x00400454,1}, - {0x004004d4,1}, - {0x00400458,1}, - {0x004004d8,1}, - {0x0040045c,1}, - {0x004004dc,1}, - {0x00400460,1}, - {0x004004e0,1}, - {0x00400464,1}, - {0x004004e4,1}, - {0x00400468,1}, - {0x004004e8,1}, - {0x0040046c,1}, - {0x004004ec,1}, - {0x00400470,1}, - {0x004004f0,1}, - {0x00400474,1}, - {0x004004f4,1}, - {0x00400478,1}, - {0x004004f8,1}, - {0x0040047c,1}, - {0x004004fc,1}, - {0x0040053c,1}, - {0x00400544,1}, - {0x00400540,1}, - {0x00400548,1}, - {0x00400560,1}, - {0x00400568,1}, - {0x00400564,1}, - {0x0040056c,1}, - {0x00400534,1}, - {0x00400538,1}, - {0x00400514,1}, - {0x00400518,1}, - {0x0040051c,1}, - {0x00400520,1}, - {0x00400524,1}, - {0x00400528,1}, - {0x0040052c,1}, - {0x00400530,1}, - {0x00400d00,1}, - {0x00400d40,1}, - {0x00400d80,1}, - {0x00400d04,1}, - {0x00400d44,1}, - {0x00400d84,1}, - {0x00400d08,1}, - {0x00400d48,1}, - {0x00400d88,1}, - {0x00400d0c,1}, - {0x00400d4c,1}, - {0x00400d8c,1}, - {0x00400d10,1}, - {0x00400d50,1}, - {0x00400d90,1}, - {0x00400d14,1}, - {0x00400d54,1}, - {0x00400d94,1}, - {0x00400d18,1}, - {0x00400d58,1}, - {0x00400d98,1}, - {0x00400d1c,1}, - {0x00400d5c,1}, - {0x00400d9c,1}, - {0x00400d20,1}, - {0x00400d60,1}, - {0x00400da0,1}, - {0x00400d24,1}, - {0x00400d64,1}, - {0x00400da4,1}, - {0x00400d28,1}, - {0x00400d68,1}, - {0x00400da8,1}, - {0x00400d2c,1}, - {0x00400d6c,1}, - {0x00400dac,1}, - {0x00400d30,1}, - {0x00400d70,1}, - {0x00400db0,1}, - {0x00400d34,1}, - {0x00400d74,1}, - {0x00400db4,1}, - {0x00400d38,1}, - {0x00400d78,1}, - {0x00400db8,1}, - {0x00400d3c,1}, - {0x00400d7c,1}, - {0x00400dbc,1}, - {0x00400590,1}, - {0x00400594,1}, - {0x00400598,1}, - {0x0040059c,1}, - {0x004005a8,1}, - {0x004005ac,1}, - {0x004005b0,1}, - {0x004005b4,1}, - {0x004005c0,1}, - {0x004005c4,1}, - {0x004005c8,1}, - {0x004005cc,1}, - {0x004005d0,1}, - {0x004005d4,1}, - {0x004005d8,1}, - {0x004005dc,1}, - {0x004005e0,1}, - {NV04_PGRAPH_PASSTHRU_0,1}, - {NV04_PGRAPH_PASSTHRU_1,1}, - {NV04_PGRAPH_PASSTHRU_2,1}, - {NV04_PGRAPH_DVD_COLORFMT,1}, - {NV04_PGRAPH_SCALED_FORMAT,1}, - {NV04_PGRAPH_MISC24_0,1}, - {NV04_PGRAPH_MISC24_1,1}, - {NV04_PGRAPH_MISC24_2,1}, - {0x00400500,1}, - {0x00400504,1}, - {NV04_PGRAPH_VALID1,1}, - {NV04_PGRAPH_VALID2,1} +static uint32_t nv04_graph_ctx_regs [] = { + NV04_PGRAPH_CTX_SWITCH1, + NV04_PGRAPH_CTX_SWITCH2, + NV04_PGRAPH_CTX_SWITCH3, + NV04_PGRAPH_CTX_SWITCH4, + NV04_PGRAPH_CTX_CACHE1, + NV04_PGRAPH_CTX_CACHE2, + NV04_PGRAPH_CTX_CACHE3, + NV04_PGRAPH_CTX_CACHE4, + 0x00400184, + 0x004001a4, + 0x004001c4, + 0x004001e4, + 0x00400188, + 0x004001a8, + 0x004001c8, + 0x004001e8, + 0x0040018c, + 0x004001ac, + 0x004001cc, + 0x004001ec, + 0x00400190, + 0x004001b0, + 0x004001d0, + 0x004001f0, + 0x00400194, + 0x004001b4, + 0x004001d4, + 0x004001f4, + 0x00400198, + 0x004001b8, + 0x004001d8, + 0x004001f8, + 0x0040019c, + 0x004001bc, + 0x004001dc, + 0x004001fc, + 0x00400174, + NV04_PGRAPH_DMA_START_0, + NV04_PGRAPH_DMA_START_1, + NV04_PGRAPH_DMA_LENGTH, + NV04_PGRAPH_DMA_MISC, + NV04_PGRAPH_DMA_PITCH, + NV04_PGRAPH_BOFFSET0, + NV04_PGRAPH_BBASE0, + NV04_PGRAPH_BLIMIT0, + NV04_PGRAPH_BOFFSET1, + NV04_PGRAPH_BBASE1, + NV04_PGRAPH_BLIMIT1, + NV04_PGRAPH_BOFFSET2, + NV04_PGRAPH_BBASE2, + NV04_PGRAPH_BLIMIT2, + NV04_PGRAPH_BOFFSET3, + NV04_PGRAPH_BBASE3, + NV04_PGRAPH_BLIMIT3, + NV04_PGRAPH_BOFFSET4, + NV04_PGRAPH_BBASE4, + NV04_PGRAPH_BLIMIT4, + NV04_PGRAPH_BOFFSET5, + NV04_PGRAPH_BBASE5, + NV04_PGRAPH_BLIMIT5, + NV04_PGRAPH_BPITCH0, + NV04_PGRAPH_BPITCH1, + NV04_PGRAPH_BPITCH2, + NV04_PGRAPH_BPITCH3, + NV04_PGRAPH_BPITCH4, + 
NV04_PGRAPH_SURFACE, + NV04_PGRAPH_STATE, + NV04_PGRAPH_BSWIZZLE2, + NV04_PGRAPH_BSWIZZLE5, + NV04_PGRAPH_BPIXEL, + NV04_PGRAPH_NOTIFY, + NV04_PGRAPH_PATT_COLOR0, + NV04_PGRAPH_PATT_COLOR1, + NV04_PGRAPH_PATT_COLORRAM+0x00, + NV04_PGRAPH_PATT_COLORRAM+0x01, + NV04_PGRAPH_PATT_COLORRAM+0x02, + NV04_PGRAPH_PATT_COLORRAM+0x03, + NV04_PGRAPH_PATT_COLORRAM+0x04, + NV04_PGRAPH_PATT_COLORRAM+0x05, + NV04_PGRAPH_PATT_COLORRAM+0x06, + NV04_PGRAPH_PATT_COLORRAM+0x07, + NV04_PGRAPH_PATT_COLORRAM+0x08, + NV04_PGRAPH_PATT_COLORRAM+0x09, + NV04_PGRAPH_PATT_COLORRAM+0x0A, + NV04_PGRAPH_PATT_COLORRAM+0x0B, + NV04_PGRAPH_PATT_COLORRAM+0x0C, + NV04_PGRAPH_PATT_COLORRAM+0x0D, + NV04_PGRAPH_PATT_COLORRAM+0x0E, + NV04_PGRAPH_PATT_COLORRAM+0x0F, + NV04_PGRAPH_PATT_COLORRAM+0x10, + NV04_PGRAPH_PATT_COLORRAM+0x11, + NV04_PGRAPH_PATT_COLORRAM+0x12, + NV04_PGRAPH_PATT_COLORRAM+0x13, + NV04_PGRAPH_PATT_COLORRAM+0x14, + NV04_PGRAPH_PATT_COLORRAM+0x15, + NV04_PGRAPH_PATT_COLORRAM+0x16, + NV04_PGRAPH_PATT_COLORRAM+0x17, + NV04_PGRAPH_PATT_COLORRAM+0x18, + NV04_PGRAPH_PATT_COLORRAM+0x19, + NV04_PGRAPH_PATT_COLORRAM+0x1A, + NV04_PGRAPH_PATT_COLORRAM+0x1B, + NV04_PGRAPH_PATT_COLORRAM+0x1C, + NV04_PGRAPH_PATT_COLORRAM+0x1D, + NV04_PGRAPH_PATT_COLORRAM+0x1E, + NV04_PGRAPH_PATT_COLORRAM+0x1F, + NV04_PGRAPH_PATT_COLORRAM+0x20, + NV04_PGRAPH_PATT_COLORRAM+0x21, + NV04_PGRAPH_PATT_COLORRAM+0x22, + NV04_PGRAPH_PATT_COLORRAM+0x23, + NV04_PGRAPH_PATT_COLORRAM+0x24, + NV04_PGRAPH_PATT_COLORRAM+0x25, + NV04_PGRAPH_PATT_COLORRAM+0x26, + NV04_PGRAPH_PATT_COLORRAM+0x27, + NV04_PGRAPH_PATT_COLORRAM+0x28, + NV04_PGRAPH_PATT_COLORRAM+0x29, + NV04_PGRAPH_PATT_COLORRAM+0x2A, + NV04_PGRAPH_PATT_COLORRAM+0x2B, + NV04_PGRAPH_PATT_COLORRAM+0x2C, + NV04_PGRAPH_PATT_COLORRAM+0x2D, + NV04_PGRAPH_PATT_COLORRAM+0x2E, + NV04_PGRAPH_PATT_COLORRAM+0x2F, + NV04_PGRAPH_PATT_COLORRAM+0x30, + NV04_PGRAPH_PATT_COLORRAM+0x31, + NV04_PGRAPH_PATT_COLORRAM+0x32, + NV04_PGRAPH_PATT_COLORRAM+0x33, + NV04_PGRAPH_PATT_COLORRAM+0x34, + NV04_PGRAPH_PATT_COLORRAM+0x35, + NV04_PGRAPH_PATT_COLORRAM+0x36, + NV04_PGRAPH_PATT_COLORRAM+0x37, + NV04_PGRAPH_PATT_COLORRAM+0x38, + NV04_PGRAPH_PATT_COLORRAM+0x39, + NV04_PGRAPH_PATT_COLORRAM+0x3A, + NV04_PGRAPH_PATT_COLORRAM+0x3B, + NV04_PGRAPH_PATT_COLORRAM+0x3C, + NV04_PGRAPH_PATT_COLORRAM+0x3D, + NV04_PGRAPH_PATT_COLORRAM+0x3E, + NV04_PGRAPH_PATT_COLORRAM+0x3F, + NV04_PGRAPH_PATTERN, + 0x0040080c, + NV04_PGRAPH_PATTERN_SHAPE, + 0x00400600, + NV04_PGRAPH_ROP3, + NV04_PGRAPH_CHROMA, + NV04_PGRAPH_BETA_AND, + NV04_PGRAPH_BETA_PREMULT, + NV04_PGRAPH_CONTROL0, + NV04_PGRAPH_CONTROL1, + NV04_PGRAPH_CONTROL2, + NV04_PGRAPH_BLEND, + NV04_PGRAPH_STORED_FMT, + NV04_PGRAPH_SOURCE_COLOR, + 0x00400560, + 0x00400568, + 0x00400564, + 0x0040056c, + 0x00400400, + 0x00400480, + 0x00400404, + 0x00400484, + 0x00400408, + 0x00400488, + 0x0040040c, + 0x0040048c, + 0x00400410, + 0x00400490, + 0x00400414, + 0x00400494, + 0x00400418, + 0x00400498, + 0x0040041c, + 0x0040049c, + 0x00400420, + 0x004004a0, + 0x00400424, + 0x004004a4, + 0x00400428, + 0x004004a8, + 0x0040042c, + 0x004004ac, + 0x00400430, + 0x004004b0, + 0x00400434, + 0x004004b4, + 0x00400438, + 0x004004b8, + 0x0040043c, + 0x004004bc, + 0x00400440, + 0x004004c0, + 0x00400444, + 0x004004c4, + 0x00400448, + 0x004004c8, + 0x0040044c, + 0x004004cc, + 0x00400450, + 0x004004d0, + 0x00400454, + 0x004004d4, + 0x00400458, + 0x004004d8, + 0x0040045c, + 0x004004dc, + 0x00400460, + 0x004004e0, + 0x00400464, + 0x004004e4, + 0x00400468, + 0x004004e8, + 0x0040046c, + 0x004004ec, + 0x00400470, + 0x004004f0, 
+ 0x00400474, + 0x004004f4, + 0x00400478, + 0x004004f8, + 0x0040047c, + 0x004004fc, + 0x0040053c, + 0x00400544, + 0x00400540, + 0x00400548, + 0x00400560, + 0x00400568, + 0x00400564, + 0x0040056c, + 0x00400534, + 0x00400538, + 0x00400514, + 0x00400518, + 0x0040051c, + 0x00400520, + 0x00400524, + 0x00400528, + 0x0040052c, + 0x00400530, + 0x00400d00, + 0x00400d40, + 0x00400d80, + 0x00400d04, + 0x00400d44, + 0x00400d84, + 0x00400d08, + 0x00400d48, + 0x00400d88, + 0x00400d0c, + 0x00400d4c, + 0x00400d8c, + 0x00400d10, + 0x00400d50, + 0x00400d90, + 0x00400d14, + 0x00400d54, + 0x00400d94, + 0x00400d18, + 0x00400d58, + 0x00400d98, + 0x00400d1c, + 0x00400d5c, + 0x00400d9c, + 0x00400d20, + 0x00400d60, + 0x00400da0, + 0x00400d24, + 0x00400d64, + 0x00400da4, + 0x00400d28, + 0x00400d68, + 0x00400da8, + 0x00400d2c, + 0x00400d6c, + 0x00400dac, + 0x00400d30, + 0x00400d70, + 0x00400db0, + 0x00400d34, + 0x00400d74, + 0x00400db4, + 0x00400d38, + 0x00400d78, + 0x00400db8, + 0x00400d3c, + 0x00400d7c, + 0x00400dbc, + 0x00400590, + 0x00400594, + 0x00400598, + 0x0040059c, + 0x004005a8, + 0x004005ac, + 0x004005b0, + 0x004005b4, + 0x004005c0, + 0x004005c4, + 0x004005c8, + 0x004005cc, + 0x004005d0, + 0x004005d4, + 0x004005d8, + 0x004005dc, + 0x004005e0, + NV04_PGRAPH_PASSTHRU_0, + NV04_PGRAPH_PASSTHRU_1, + NV04_PGRAPH_PASSTHRU_2, + NV04_PGRAPH_DVD_COLORFMT, + NV04_PGRAPH_SCALED_FORMAT, + NV04_PGRAPH_MISC24_0, + NV04_PGRAPH_MISC24_1, + NV04_PGRAPH_MISC24_2, + 0x00400500, + 0x00400504, + NV04_PGRAPH_VALID1, + NV04_PGRAPH_VALID2 }; @@ -290,43 +349,35 @@ struct reg_interval void nouveau_nv04_context_switch(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - int channel, channel_old, i, j, index; + struct nouveau_channel *next, *last; + int chid; - channel=NV_READ(NV03_PFIFO_CACHE1_PUSH1)&(nouveau_fifo_number(dev)-1); - channel_old = (NV_READ(NV04_PGRAPH_CTX_USER) >> 24) & (nouveau_fifo_number(dev)-1); + chid = NV_READ(NV03_PFIFO_CACHE1_PUSH1)&(nouveau_fifo_number(dev)-1); + next = dev_priv->fifos[chid]; - DRM_DEBUG("NV: PGRAPH context switch interrupt channel %x -> %x\n",channel_old, channel); + chid = (NV_READ(NV04_PGRAPH_CTX_USER) >> 24) & (nouveau_fifo_number(dev)-1); + last = dev_priv->fifos[chid]; + + DRM_DEBUG("NV: PGRAPH context switch interrupt channel %x -> %x\n",last->id, next->id); NV_WRITE(NV03_PFIFO_CACHES, 0x0); NV_WRITE(NV04_PFIFO_CACHE0_PULL0, 0x0); NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x0); NV_WRITE(NV04_PGRAPH_FIFO,0x0); - nouveau_wait_for_idle(dev); + nv04_graph_save_context(last); - // save PGRAPH context - index=0; - for (i = 0; ififos[channel_old]->pgraph_ctx[index] = NV_READ(nv04_graph_ctx_regs[i].reg+j*4); - index++; - } + nouveau_wait_for_idle(dev); NV_WRITE(NV04_PGRAPH_CTX_CONTROL, 0x10000000); NV_WRITE(NV04_PGRAPH_CTX_USER, (NV_READ(NV04_PGRAPH_CTX_USER) & 0xffffff) | (0x0f << 24)); - // restore PGRAPH context - index=0; - for (i = 0; ififos[channel]->pgraph_ctx[index]); - index++; - } + nouveau_wait_for_idle(dev); + + nv04_graph_load_context(last); NV_WRITE(NV04_PGRAPH_CTX_CONTROL, 0x10010100); - NV_WRITE(NV04_PGRAPH_CTX_USER, channel << 24); + NV_WRITE(NV04_PGRAPH_CTX_USER, next->id << 24); NV_WRITE(NV04_PGRAPH_FFINTFC_ST2, NV_READ(NV04_PGRAPH_FFINTFC_ST2)&0x000FFFFF); NV_WRITE(NV04_PGRAPH_FIFO,0x0); @@ -356,19 +407,30 @@ void nv04_graph_destroy_context(struct nouveau_channel *chan) int nv04_graph_load_context(struct nouveau_channel *chan) { - DRM_ERROR("stub!\n"); + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + 
int i; + + for (i = 0; i < sizeof(nv04_graph_ctx_regs)/sizeof(nv04_graph_ctx_regs[0]); i++) + NV_WRITE(nv04_graph_ctx_regs[i], chan->pgraph_ctx[i]); + return 0; } int nv04_graph_save_context(struct nouveau_channel *chan) { - DRM_ERROR("stub!\n"); + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + int i; + + for (i = 0; i < sizeof(nv04_graph_ctx_regs)/sizeof(nv04_graph_ctx_regs[0]); i++) + chan->pgraph_ctx[i] = NV_READ(nv04_graph_ctx_regs[i]); + return 0; } int nv04_graph_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - int i,sum=0; NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH); @@ -380,23 +442,19 @@ int nv04_graph_init(struct drm_device *dev) { NV_WRITE(NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); // check the context is big enough - for ( i = 0 ; isizeof(dev_priv->fifos[0]->pgraph_ctx) ) + if ( sizeof(nv04_graph_ctx_regs)>sizeof(dev_priv->fifos[0]->pgraph_ctx) ) DRM_ERROR("pgraph_ctx too small\n"); - NV_WRITE(NV03_PGRAPH_INTR_EN, 0x00000000); - NV_WRITE(NV03_PGRAPH_INTR , 0xFFFFFFFF); - NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x000001FF); - NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x1230C000); - NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x72111101); - NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x11D5F071); + NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x1231c000); + NV_WRITE(NV04_PGRAPH_DEBUG_1, 0xf2d91100); + NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x11d5f870); NV_WRITE(NV04_PGRAPH_DEBUG_3, 0x0004FF31); NV_WRITE(NV04_PGRAPH_DEBUG_3, 0x4004FF31 | (0x00D00000) | (1<<29) | (1<<31)); + NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xfad4ff31); NV_WRITE(NV04_PGRAPH_STATE , 0xFFFFFFFF); NV_WRITE(NV04_PGRAPH_CTX_CONTROL , 0x10010100); From bb3da88601749cd647632eed86fb57dfd7cb81ee Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Fri, 31 Aug 2007 10:48:13 -0700 Subject: [PATCH 299/437] Acutally emit the IRQ (duh) when setting the fence post. --- linux-core/xgi_fence.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/linux-core/xgi_fence.c b/linux-core/xgi_fence.c index 42ed814d..adedf300 100644 --- a/linux-core/xgi_fence.c +++ b/linux-core/xgi_fence.c @@ -87,7 +87,9 @@ int xgi_fence_emit_sequence(struct drm_device * dev, uint32_t class, info->next_sequence = 1; } DRM_SPINUNLOCK(&info->fence_lock); - + + + xgi_emit_irq(info); *sequence = (uint32_t) info->next_sequence; *native_type = DRM_FENCE_TYPE_EXE; From ef4944de85b974e6b91087fdcb8f241f2619d28d Mon Sep 17 00:00:00 2001 From: Maarten Maathuis Date: Tue, 4 Sep 2007 18:51:57 +0200 Subject: [PATCH 300/437] Add context init voodoo and context switch code for NV41. 
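(The NV41 support follows the pattern already visible for NV40/NV43 in this file: a chipset-specific ctxprog "voodoo" array is loaded into PGRAPH at init time to drive context switches, and a graph_context_init function fills each channel's context object in instance memory with a static register image. The snippet below is only a minimal illustrative sketch of such an init function, not part of this patch: the function name, offsets and values are placeholders, and only INSTANCE_WR, struct nouveau_gpuobj and the im_pramin object already used in nv40_graph.c are assumed.)

	static void nvXX_graph_context_init(struct drm_device *dev,
					    struct nouveau_gpuobj *ctx)
	{
		struct drm_nouveau_private *dev_priv = dev->dev_private;
		int i;

		/* word 0 points the context at its own PRAMIN offset */
		INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start);

		/* the rest is a chipset-specific template; these offsets and
		 * values are placeholders, not real NV41 state */
		for (i = 0x00000100; i <= 0x00000110; i += 4)
			INSTANCE_WR(ctx, i/4, 0x00000000);
	}

(nv40_graph_create_context() then simply selects the matching context size and init function for dev_priv->chipset, and nv40_graph_init() picks the corresponding ctx_voodoo array.)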
--- shared-core/nv40_graph.c | 147 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 147 insertions(+) diff --git a/shared-core/nv40_graph.c b/shared-core/nv40_graph.c index 26237c7d..99c77cb9 100644 --- a/shared-core/nv40_graph.c +++ b/shared-core/nv40_graph.c @@ -34,6 +34,7 @@ * between the contexts */ #define NV40_GRCTX_SIZE (175*1024) +#define NV41_GRCTX_SIZE (92*1024) #define NV43_GRCTX_SIZE (70*1024) #define NV46_GRCTX_SIZE (70*1024) /* probably ~64KiB */ #define NV49_GRCTX_SIZE (164640) @@ -187,6 +188,116 @@ nv40_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) INSTANCE_WR(ctx, i/4, 0x3f800000); } +static void +nv41_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + int i; + + INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start); + INSTANCE_WR(ctx, 0x00000024/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x00000028/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x00000030/4, 0x00000001); + INSTANCE_WR(ctx, 0x0000011c/4, 0x20010001); + INSTANCE_WR(ctx, 0x00000120/4, 0x0f73ef00); + INSTANCE_WR(ctx, 0x00000128/4, 0x02008821); + for (i = 0x00000178; i <= 0x00000180; i += 4) + INSTANCE_WR(ctx, i/4, 0x00000040); + INSTANCE_WR(ctx, 0x00000188/4, 0x00000040); + for (i = 0x00000194; i <= 0x000001b0; i += 4) + INSTANCE_WR(ctx, i/4, 0x80000000); + INSTANCE_WR(ctx, 0x000001d0/4, 0x0b0b0b0c); + INSTANCE_WR(ctx, 0x00000340/4, 0x00040000); + for (i = 0x00000350; i <= 0x0000035c; i += 4) + INSTANCE_WR(ctx, i/4, 0x55555555); + INSTANCE_WR(ctx, 0x00000388/4, 0x00000008); + INSTANCE_WR(ctx, 0x0000039c/4, 0x00001010); + INSTANCE_WR(ctx, 0x000003cc/4, 0x00000111); + INSTANCE_WR(ctx, 0x000003d0/4, 0x00080060); + INSTANCE_WR(ctx, 0x000003ec/4, 0x00000080); + INSTANCE_WR(ctx, 0x000003f0/4, 0xffff0000); + INSTANCE_WR(ctx, 0x000003f4/4, 0x00000001); + INSTANCE_WR(ctx, 0x00000408/4, 0x46400000); + INSTANCE_WR(ctx, 0x00000418/4, 0xffff0000); + INSTANCE_WR(ctx, 0x00000424/4, 0x0fff0000); + INSTANCE_WR(ctx, 0x00000428/4, 0x0fff0000); + INSTANCE_WR(ctx, 0x00000430/4, 0x00011100); + for (i = 0x0000044c; i <= 0x00000488; i += 4) + INSTANCE_WR(ctx, i/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x00000494/4, 0x4b7fffff); + INSTANCE_WR(ctx, 0x000004bc/4, 0x30201000); + INSTANCE_WR(ctx, 0x000004c0/4, 0x70605040); + INSTANCE_WR(ctx, 0x000004c4/4, 0xb8a89888); + INSTANCE_WR(ctx, 0x000004c8/4, 0xf8e8d8c8); + INSTANCE_WR(ctx, 0x000004dc/4, 0x40100000); + INSTANCE_WR(ctx, 0x000004f8/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x0000052c/4, 0x435185d6); + INSTANCE_WR(ctx, 0x00000530/4, 0x2155b699); + INSTANCE_WR(ctx, 0x00000534/4, 0xfedcba98); + INSTANCE_WR(ctx, 0x00000538/4, 0x00000098); + INSTANCE_WR(ctx, 0x00000548/4, 0xffffffff); + INSTANCE_WR(ctx, 0x0000054c/4, 0x00ff7000); + INSTANCE_WR(ctx, 0x00000550/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x00000560/4, 0x00ff0000); + INSTANCE_WR(ctx, 0x00000598/4, 0x00ffff00); + for (i = 0x000005dc; i <= 0x00000618; i += 4) + INSTANCE_WR(ctx, i/4, 0x00018488); + for (i = 0x0000061c; i <= 0x00000658; i += 4) + INSTANCE_WR(ctx, i/4, 0x00028202); + for (i = 0x0000069c; i <= 0x000006d8; i += 4) + INSTANCE_WR(ctx, i/4, 0x0000aae4); + for (i = 0x000006dc; i <= 0x00000718; i += 4) + INSTANCE_WR(ctx, i/4, 0x01012000); + for (i = 0x0000071c; i <= 0x00000758; i += 4) + INSTANCE_WR(ctx, i/4, 0x00080008); + for (i = 0x0000079c; i <= 0x000007d8; i += 4) + INSTANCE_WR(ctx, i/4, 0x00100008); + for (i = 0x0000082c; i <= 0x00000838; i += 4) + INSTANCE_WR(ctx, i/4, 0x0001bc80); + for (i = 0x0000083c; i <= 0x00000848; i += 4) + 
INSTANCE_WR(ctx, i/4, 0x00000202); + for (i = 0x0000085c; i <= 0x00000868; i += 4) + INSTANCE_WR(ctx, i/4, 0x00000008); + for (i = 0x0000087c; i <= 0x00000888; i += 4) + INSTANCE_WR(ctx, i/4, 0x00080008); + INSTANCE_WR(ctx, 0x0000089c/4, 0x00000002); + INSTANCE_WR(ctx, 0x000008d0/4, 0x00000021); + INSTANCE_WR(ctx, 0x000008d4/4, 0x030c30c3); + INSTANCE_WR(ctx, 0x000008e0/4, 0x3e020200); + INSTANCE_WR(ctx, 0x000008e4/4, 0x00ffffff); + INSTANCE_WR(ctx, 0x000008e8/4, 0x20103f00); + INSTANCE_WR(ctx, 0x000008f4/4, 0x00020000); + INSTANCE_WR(ctx, 0x0000092c/4, 0x00008100); + INSTANCE_WR(ctx, 0x000009b8/4, 0x00000001); + INSTANCE_WR(ctx, 0x000009fc/4, 0x00001001); + INSTANCE_WR(ctx, 0x00000a04/4, 0x00000003); + INSTANCE_WR(ctx, 0x00000a08/4, 0x00888001); + INSTANCE_WR(ctx, 0x00000aac/4, 0x00000005); + INSTANCE_WR(ctx, 0x00000ab8/4, 0x0000ffff); + for (i = 0x00000ad4; i <= 0x00000ae4; i += 4) + INSTANCE_WR(ctx, i/4, 0x00005555); + INSTANCE_WR(ctx, 0x00000ae8/4, 0x00000001); + INSTANCE_WR(ctx, 0x00000b20/4, 0x00000001); + for (i = 0x00002ee8; i <= 0x00002f60; i += 8) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for (i = 0x00005168; i <= 0x00007358; i += 24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for (i = 0x00007368; i <= 0x00007758; i += 16) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for (i = 0x0000a068; i <= 0x0000c258; i += 24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for (i = 0x0000c268; i <= 0x0000c658; i += 16) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for (i = 0x0000ef68; i <= 0x00011158; i += 24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for (i = 0x00011168; i <= 0x00011558; i += 16) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for (i = 0x00013e68; i <= 0x00016058; i += 24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for (i = 0x00016068; i <= 0x00016458; i += 16) + INSTANCE_WR(ctx, i/4, 0x3f800000); +}; + static void nv43_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) { @@ -1237,6 +1348,10 @@ nv40_graph_create_context(struct nouveau_channel *chan) ctx_size = NV40_GRCTX_SIZE; ctx_init = nv40_graph_context_init; break; + case 0x41: + ctx_size = NV41_GRCTX_SIZE; + ctx_init = nv41_graph_context_init; + break; case 0x43: ctx_size = NV43_GRCTX_SIZE; ctx_init = nv43_graph_context_init; @@ -1431,6 +1546,37 @@ static uint32_t nv40_ctx_voodoo[] = { ~0 }; +static uint32_t nv41_ctx_voodoo[] = { + 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, + 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00408f65, 0x00409306, + 0x0040a068, 0x0040198f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042, + 0x00200001, 0x0060000a, 0x00700000, 0x001040c5, 0x00401826, 0x00401968, + 0x0060000d, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080, + 0x004020e6, 0x007000a0, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d, + 0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4, + 0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e, + 0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143, + 0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10, + 0x001046ec, 0x00500060, 0x00404087, 0x0060000d, 0x004079e6, 0x002000f1, + 0x0060000a, 0x00148653, 0x00104668, 0x0010c66d, 0x00120682, 0x0011068b, + 0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6, + 0x00200020, 0x001006cc, 0x001046ed, 0x001246f0, 0x002000c0, 0x00100700, + 0x0010c3d7, 0x001043e1, 0x00500060, 0x00200233, 0x0060000a, 0x00104800, + 0x00108901, 0x00124920, 0x0020001f, 0x00100940, 0x00140965, 0x00148a00, + 0x00108a14, 0x00200020, 0x00100b00, 0x00134b2c, 
0x0010cd00, 0x0010cd04, + 0x00114d08, 0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00, 0x00104f06, + 0x002002d2, 0x0060000a, 0x00300000, 0x00200680, 0x00407200, 0x00200684, + 0x00800001, 0x00200b1a, 0x0060000a, 0x00206380, 0x0040788a, 0x00201480, + 0x00800041, 0x00408900, 0x00600006, 0x004085e6, 0x00700080, 0x0020007a, + 0x0060000a, 0x00104280, 0x002002d2, 0x0060000a, 0x00200004, 0x00800001, + 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x0040a068, 0x00700000, + 0x00200000, 0x0060000a, 0x00106002, 0x00700080, 0x00400a68, 0x00500060, + 0x00600007, 0x00409388, 0x0060000f, 0x00500060, 0x00200000, 0x0060000a, + 0x00700000, 0x00106001, 0x00910880, 0x00901ffe, 0x00940400, 0x00200020, + 0x0060000b, 0x00500069, 0x0060000c, 0x00402168, 0x0040a206, 0x0040a305, + 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0 +}; + static uint32_t nv43_ctx_voodoo[] = { 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409565, 0x00409a06, @@ -1683,6 +1829,7 @@ nv40_graph_init(struct drm_device *dev) switch (dev_priv->chipset) { case 0x40: ctx_voodoo = nv40_ctx_voodoo; break; + case 0x41: ctx_voodoo = nv41_ctx_voodoo; break; case 0x43: ctx_voodoo = nv43_ctx_voodoo; break; case 0x44: ctx_voodoo = nv44_ctx_voodoo; break; case 0x46: ctx_voodoo = nv46_ctx_voodoo; break; From ff9a019cf06b7ebaf2fa8dee8e37c866ca4623af Mon Sep 17 00:00:00 2001 From: Stephane Marchesin Date: Thu, 6 Sep 2007 02:12:05 +0200 Subject: [PATCH 301/437] nouveau: add pure nv30 support. --- shared-core/nv30_graph.c | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/shared-core/nv30_graph.c b/shared-core/nv30_graph.c index ca43bb95..590a5c33 100644 --- a/shared-core/nv30_graph.c +++ b/shared-core/nv30_graph.c @@ -8,9 +8,8 @@ #include "nouveau_drm.h" /* - * There are 4 families : - * NV30 is 0x10de:0x030* (not working, no dump for that one) - * + * There are 3 families : + * NV30 is 0x10de:0x030* * NV31 is 0x10de:0x031* * * NV34 is 0x10de:0x032* @@ -25,11 +24,11 @@ */ -#define NV31_GRCTX_SIZE (22392) -#define NV34_GRCTX_SIZE (18140) -#define NV35_GRCTX_SIZE (22396) +#define NV30_31_GRCTX_SIZE (22392) +#define NV34_GRCTX_SIZE (18140) +#define NV35_36_GRCTX_SIZE (22396) -static void nv31_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) +static void nv30_31_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) { struct drm_nouveau_private *dev_priv = dev->dev_private; int i; @@ -919,7 +918,8 @@ static void nv31_graph_context_init(struct drm_device *dev, struct nouveau_gpuob INSTANCE_WR(ctx, 0x3858/4, 0x40000000); INSTANCE_WR(ctx, 0x385c/4, 0x3f800000); INSTANCE_WR(ctx, 0x3864/4, 0xbf800000); - INSTANCE_WR(ctx, 0x386c/4, 0xbf800000);} + INSTANCE_WR(ctx, 0x386c/4, 0xbf800000); +} static void nv34_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) { @@ -1814,7 +1814,7 @@ static void nv34_graph_context_init(struct drm_device *dev, struct nouveau_gpuob INSTANCE_WR(ctx, 0x2f00/4, 0xbf800000); } -static void nv35_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) +static void nv35_36_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) { struct drm_nouveau_private *dev_priv = dev->dev_private; int i; @@ -2715,9 +2715,10 @@ int nv30_graph_create_context(struct nouveau_channel *chan) int ret; switch (dev_priv->chipset) { + case 0x30: case 0x31: - ctx_size = NV31_GRCTX_SIZE; - ctx_init = nv31_graph_context_init; + ctx_size = NV30_31_GRCTX_SIZE; + 
ctx_init = nv30_31_graph_context_init; break; case 0x34: ctx_size = NV34_GRCTX_SIZE; @@ -2725,12 +2726,12 @@ int nv30_graph_create_context(struct nouveau_channel *chan) break; case 0x35: case 0x36: - ctx_size = NV35_GRCTX_SIZE; - ctx_init = nv35_graph_context_init; + ctx_size = NV35_36_GRCTX_SIZE; + ctx_init = nv35_36_graph_context_init; break; default: ctx_size = 0; - ctx_init = nv35_graph_context_init; + ctx_init = nv35_36_graph_context_init; DRM_ERROR("Please contact the devs if you want your NV%x card to work\n",dev_priv->chipset); break; } From edf5a86a269690b0e42a5cee7d4ac3828b42ca3e Mon Sep 17 00:00:00 2001 From: Stephane Marchesin Date: Thu, 6 Sep 2007 02:46:45 +0200 Subject: [PATCH 302/437] nouveau: fix some nv04 graph switching. --- shared-core/nv04_graph.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/shared-core/nv04_graph.c b/shared-core/nv04_graph.c index 213696ca..f1117cd6 100644 --- a/shared-core/nv04_graph.c +++ b/shared-core/nv04_graph.c @@ -358,14 +358,15 @@ void nouveau_nv04_context_switch(struct drm_device *dev) chid = (NV_READ(NV04_PGRAPH_CTX_USER) >> 24) & (nouveau_fifo_number(dev)-1); last = dev_priv->fifos[chid]; - DRM_DEBUG("NV: PGRAPH context switch interrupt channel %x -> %x\n",last->id, next->id); + DRM_INFO("NV: PGRAPH context switch interrupt channel %x -> %x\n",last->id, next->id); - NV_WRITE(NV03_PFIFO_CACHES, 0x0); +/* NV_WRITE(NV03_PFIFO_CACHES, 0x0); NV_WRITE(NV04_PFIFO_CACHE0_PULL0, 0x0); - NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x0); + NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x0);*/ NV_WRITE(NV04_PGRAPH_FIFO,0x0); - nv04_graph_save_context(last); + if (last) + nv04_graph_save_context(last); nouveau_wait_for_idle(dev); @@ -374,16 +375,16 @@ void nouveau_nv04_context_switch(struct drm_device *dev) nouveau_wait_for_idle(dev); - nv04_graph_load_context(last); + nv04_graph_load_context(next); NV_WRITE(NV04_PGRAPH_CTX_CONTROL, 0x10010100); NV_WRITE(NV04_PGRAPH_CTX_USER, next->id << 24); NV_WRITE(NV04_PGRAPH_FFINTFC_ST2, NV_READ(NV04_PGRAPH_FFINTFC_ST2)&0x000FFFFF); - NV_WRITE(NV04_PGRAPH_FIFO,0x0); +/* NV_WRITE(NV04_PGRAPH_FIFO,0x0); NV_WRITE(NV04_PFIFO_CACHE0_PULL0, 0x0); NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x1); - NV_WRITE(NV03_PFIFO_CACHES, 0x1); + NV_WRITE(NV03_PFIFO_CACHES, 0x1);*/ NV_WRITE(NV04_PGRAPH_FIFO,0x1); } From c597bd57eee3ea05a3b8c851615c7351d0b32fce Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Thu, 6 Sep 2007 15:20:52 -0700 Subject: [PATCH 303/437] Bump version to 1.0.0. --- linux-core/xgi_drv.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index f2768d1b..88ade64d 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -35,10 +35,10 @@ #define DRIVER_NAME "xgi" #define DRIVER_DESC "XGI XP5 / XP10 / XG47" -#define DRIVER_DATE "20070814" +#define DRIVER_DATE "20070906" -#define DRIVER_MAJOR 0 -#define DRIVER_MINOR 13 +#define DRIVER_MAJOR 1 +#define DRIVER_MINOR 0 #define DRIVER_PATCHLEVEL 0 #include "xgi_cmdlist.h" From 06bb07259531d10df2c1979919af899e3812057b Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 7 Sep 2007 20:07:13 +1000 Subject: [PATCH 304/437] nouveau: Use nv41 ctxprog/vals on nv42. 
--- shared-core/nv40_graph.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/shared-core/nv40_graph.c b/shared-core/nv40_graph.c index 99c77cb9..3f3df515 100644 --- a/shared-core/nv40_graph.c +++ b/shared-core/nv40_graph.c @@ -1349,6 +1349,7 @@ nv40_graph_create_context(struct nouveau_channel *chan) ctx_init = nv40_graph_context_init; break; case 0x41: + case 0x42: ctx_size = NV41_GRCTX_SIZE; ctx_init = nv41_graph_context_init; break; @@ -1829,7 +1830,8 @@ nv40_graph_init(struct drm_device *dev) switch (dev_priv->chipset) { case 0x40: ctx_voodoo = nv40_ctx_voodoo; break; - case 0x41: ctx_voodoo = nv41_ctx_voodoo; break; + case 0x41: + case 0x42: ctx_voodoo = nv41_ctx_voodoo; break; case 0x43: ctx_voodoo = nv43_ctx_voodoo; break; case 0x44: ctx_voodoo = nv44_ctx_voodoo; break; case 0x46: ctx_voodoo = nv46_ctx_voodoo; break; From f19d80b0465d9ba93005d8499654e3256494c831 Mon Sep 17 00:00:00 2001 From: Maarten Maathuis Date: Sat, 8 Sep 2007 22:19:00 +0200 Subject: [PATCH 305/437] nouveau: Add Quadro NVS 140 pciid --- shared-core/drm_pciids.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/shared-core/drm_pciids.txt b/shared-core/drm_pciids.txt index 39ddc1c5..74e7e75a 100644 --- a/shared-core/drm_pciids.txt +++ b/shared-core/drm_pciids.txt @@ -729,6 +729,7 @@ 0x10de 0x0421 NV_50 "GeForce 8500 GT" 0x10de 0x0422 NV_50 "GeForce 8400 GS" 0x10de 0x0423 NV_50 "GeForce 8300 GS" +0x10de 0x0429 NV_50 "Quadro NVS 140" 0x12d2 0x0020 NV_04 "TNT" 0x12d2 0x0028 NV_04 "TNT2" 0x12d2 0x0029 NV_04 "UTNT2" From b2ee72f4400999b2cf783256547fe8c7bfa698f5 Mon Sep 17 00:00:00 2001 From: Matthieu Castet Date: Sun, 9 Sep 2007 12:13:00 +0200 Subject: [PATCH 306/437] nouveau : nv10 pipe ctx switch load/save. This fix some issues with more than one 3D fifo, but there still some "corruption" sometimes --- shared-core/nv10_graph.c | 329 +++++++++++++++++++++++++-------------- 1 file changed, 213 insertions(+), 116 deletions(-) diff --git a/shared-core/nv10_graph.c b/shared-core/nv10_graph.c index e470ff06..f90ba05b 100644 --- a/shared-core/nv10_graph.c +++ b/shared-core/nv10_graph.c @@ -27,13 +27,68 @@ #include "nouveau_drm.h" #include "nouveau_drv.h" +#define NV10_FIFO_NUMBER 32 -static void nv10_praph_pipe(struct drm_device *dev) { +struct pipe_state { + uint32_t pipe_0x0000[0x040/4]; + uint32_t pipe_0x0040[0x010/4]; + uint32_t pipe_0x0200[0x0c0/4]; + uint32_t pipe_0x4400[0x080/4]; + uint32_t pipe_0x6400[0x3b0/4]; + uint32_t pipe_0x6800[0x2f0/4]; + uint32_t pipe_0x6c00[0x030/4]; + uint32_t pipe_0x7000[0x130/4]; + uint32_t pipe_0x7400[0x0c0/4]; + uint32_t pipe_0x7800[0x0c0/4]; +}; + +/* TODO dynamic allocation ??? 
*/ +static struct pipe_state pipe_state[NV10_FIFO_NUMBER]; + +static void nv10_graph_save_pipe(struct nouveau_channel *chan) { + struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; + struct pipe_state *fifo_pipe_state = pipe_state + chan->id; int i; +#define PIPE_SAVE(addr) \ + do { \ + NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, addr); \ + for (i=0; i < sizeof(fifo_pipe_state->pipe_##addr)/sizeof(fifo_pipe_state->pipe_##addr[0]); i++) \ + fifo_pipe_state->pipe_##addr[i] = NV_READ(NV10_PGRAPH_PIPE_DATA); \ + } while (0) + + PIPE_SAVE(0x4400); + PIPE_SAVE(0x0200); + PIPE_SAVE(0x6400); + PIPE_SAVE(0x6800); + PIPE_SAVE(0x6c00); + PIPE_SAVE(0x7000); + PIPE_SAVE(0x7400); + PIPE_SAVE(0x7800); + PIPE_SAVE(0x0040); + PIPE_SAVE(0x0000); + +#undef PIPE_SAVE +} + +static void nv10_graph_load_pipe(struct nouveau_channel *chan) { + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct pipe_state *fifo_pipe_state = pipe_state + chan->id; + int i; + uint32_t xfmode0, xfmode1; +#define PIPE_RESTORE(addr) \ + do { \ + NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, addr); \ + for (i=0; i < sizeof(fifo_pipe_state->pipe_##addr)/sizeof(fifo_pipe_state->pipe_##addr[0]); i++) \ + NV_WRITE(NV10_PGRAPH_PIPE_DATA, fifo_pipe_state->pipe_##addr[i]); \ + } while (0) + nouveau_wait_for_idle(dev); /* XXX check haiku comments */ + xfmode0 = NV_READ(NV10_PGRAPH_XFMODE0); + xfmode1 = NV_READ(NV10_PGRAPH_XFMODE1); NV_WRITE(NV10_PGRAPH_XFMODE0, 0x10000000); NV_WRITE(NV10_PGRAPH_XFMODE1, 0x00000000); NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0); @@ -43,7 +98,6 @@ static void nv10_praph_pipe(struct drm_device *dev) { NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0); - for (i = 0; i < 3; i++) NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x3f800000); @@ -54,131 +108,176 @@ static void nv10_praph_pipe(struct drm_device *dev) { NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00000040); NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000008); - NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00000200); - for (i = 0; i < 48; i++) - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); + PIPE_RESTORE(0x0200); nouveau_wait_for_idle(dev); - NV_WRITE(NV10_PGRAPH_XFMODE0, 0x00000000); - NV_WRITE(NV10_PGRAPH_XFMODE1, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00006400); + /* restore XFMODE */ + NV_WRITE(NV10_PGRAPH_XFMODE0, xfmode0); + NV_WRITE(NV10_PGRAPH_XFMODE1, xfmode1); + PIPE_RESTORE(0x6400); + PIPE_RESTORE(0x6800); + PIPE_RESTORE(0x6c00); + PIPE_RESTORE(0x7000); + PIPE_RESTORE(0x7400); + PIPE_RESTORE(0x7800); + PIPE_RESTORE(0x4400); + PIPE_RESTORE(0x0000); + PIPE_RESTORE(0x0040); + nouveau_wait_for_idle(dev); + +#undef PIPE_RESTORE +} + +static void nv10_graph_create_pipe(struct nouveau_channel *chan) { + struct pipe_state *fifo_pipe_state = pipe_state + chan->id; + uint32_t *fifo_pipe_state_addr; + int i; +#define PIPE_INIT(addr) \ + do { \ + fifo_pipe_state_addr = fifo_pipe_state->pipe_##addr; \ + } while (0) +#define PIPE_INIT_END(addr) \ + do { \ + if (fifo_pipe_state_addr != \ + sizeof(fifo_pipe_state->pipe_##addr)/sizeof(fifo_pipe_state->pipe_##addr[0]) + fifo_pipe_state->pipe_##addr) \ + DRM_ERROR("incomplete pipe init for 0x%x : %p/%p\n", addr, fifo_pipe_state_addr, \ + sizeof(fifo_pipe_state->pipe_##addr)/sizeof(fifo_pipe_state->pipe_##addr[0]) + fifo_pipe_state->pipe_##addr); \ + } while (0) +#define NV_WRITE_PIPE_INIT(value) *(fifo_pipe_state_addr++) = value + + PIPE_INIT(0x0200); + for (i = 0; i < 48; i++) + NV_WRITE_PIPE_INIT(0x00000000); + 
PIPE_INIT_END(0x0200); + + PIPE_INIT(0x6400); for (i = 0; i < 211; i++) - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x3f800000); + NV_WRITE_PIPE_INIT(0x40000000); + NV_WRITE_PIPE_INIT(0x40000000); + NV_WRITE_PIPE_INIT(0x40000000); + NV_WRITE_PIPE_INIT(0x40000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x3f800000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x3f000000); + NV_WRITE_PIPE_INIT(0x3f000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x3f800000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x3f800000); + NV_WRITE_PIPE_INIT(0x3f800000); + NV_WRITE_PIPE_INIT(0x3f800000); + NV_WRITE_PIPE_INIT(0x3f800000); + PIPE_INIT_END(0x6400); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x3f800000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x40000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x40000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x40000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x40000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x3f800000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x3f000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x3f000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x3f800000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x3f800000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x3f800000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x3f800000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x3f800000); - - NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00006800); + PIPE_INIT(0x6800); for (i = 0; i < 162; i++) - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x3f800000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x3f800000); for (i = 0; i < 25; i++) - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + PIPE_INIT_END(0x6800); - NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00006c00); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0xbf800000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00007000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - 
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x7149f2ca); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x7149f2ca); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x7149f2ca); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x7149f2ca); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x7149f2ca); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x7149f2ca); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x7149f2ca); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x7149f2ca); + PIPE_INIT(0x6c00); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0xbf800000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + PIPE_INIT_END(0x6c00); + + PIPE_INIT(0x7000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x7149f2ca); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x7149f2ca); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x7149f2ca); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x7149f2ca); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x7149f2ca); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x7149f2ca); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x7149f2ca); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x7149f2ca); for (i = 0; i < 35; i++) - 
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + PIPE_INIT_END(0x7000); - - NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00007400); + PIPE_INIT(0x7400); for (i = 0; i < 48; i++) - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + PIPE_INIT_END(0x7400); - NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00007800); + PIPE_INIT(0x7800); for (i = 0; i < 48; i++) - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + PIPE_INIT_END(0x7800); - NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00004400); + PIPE_INIT(0x4400); for (i = 0; i < 32; i++) - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + PIPE_INIT_END(0x4400); - NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00000000); + PIPE_INIT(0x0000); for (i = 0; i < 16; i++) - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + PIPE_INIT_END(0x0000); - NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00000040); + PIPE_INIT(0x0040); for (i = 0; i < 4; i++) - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + PIPE_INIT_END(0x0040); - nouveau_wait_for_idle(dev); +#undef PIPE_INIT +#undef PIPE_INIT_END +#undef NV_WRITE_PIPE_INIT } static int nv10_graph_ctx_regs [] = { @@ -555,6 +654,8 @@ int nv10_graph_load_context(struct nouveau_channel *chan) } NV_WRITE(NV10_PGRAPH_CTX_USER, chan->id << 24); + nv10_graph_load_pipe(chan); + return 0; } @@ -571,6 +672,8 @@ int nv10_graph_save_context(struct nouveau_channel *chan) chan->pgraph_ctx[i] = NV_READ(nv17_graph_ctx_regs[j]); } + nv10_graph_save_pipe(chan); + return 0; } @@ -609,12 +712,13 @@ void nouveau_nv10_context_switch(struct drm_device *dev) DRM_DEBUG("WARNING: Invalid last channel, switch to %x\n", next->id); } else { - DRM_INFO("NV: PGRAPH context switch interrupt channel %x -> %x\n", + DRM_DEBUG("NV: PGRAPH context switch interrupt channel %x -> %x\n", last->id, next->id); } NV_WRITE(NV04_PGRAPH_FIFO,0x0); if (last) { + nouveau_wait_for_idle(dev); nv10_graph_save_context(last); } @@ -693,14 +797,7 @@ int nv10_graph_create_context(struct nouveau_channel *chan) { NV_WRITE_CTX(0x00400ed0, 0x00000080); } - /* for the first channel init the regs */ - if (dev_priv->fifo_alloc_count == 0) - nv10_graph_load_context(chan); - - - //XXX should be saved/restored for each fifo - //we supposed here we have X fifo and only one 3D fifo. 
- nv10_praph_pipe(dev); + nv10_graph_create_pipe(chan); return 0; } From 00bb534a546a4ca4bb6e167f5b387fa8156f4ca7 Mon Sep 17 00:00:00 2001 From: Matthieu Castet Date: Sun, 9 Sep 2007 15:49:33 +0200 Subject: [PATCH 307/437] nouveau : nv10 fix NV10_PGRAPH_CTX_USER save/load --- shared-core/nv10_graph.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/shared-core/nv10_graph.c b/shared-core/nv10_graph.c index f90ba05b..456bc5d3 100644 --- a/shared-core/nv10_graph.c +++ b/shared-core/nv10_graph.c @@ -652,7 +652,6 @@ int nv10_graph_load_context(struct nouveau_channel *chan) for (j = 0; j < sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0]); i++,j++) NV_WRITE(nv17_graph_ctx_regs[j], chan->pgraph_ctx[i]); } - NV_WRITE(NV10_PGRAPH_CTX_USER, chan->id << 24); nv10_graph_load_pipe(chan); @@ -725,14 +724,12 @@ void nouveau_nv10_context_switch(struct drm_device *dev) nouveau_wait_for_idle(dev); NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10000000); - NV_WRITE(NV10_PGRAPH_CTX_USER, (NV_READ(NV10_PGRAPH_CTX_USER) & 0xffffff) | (0x1f << 24)); nouveau_wait_for_idle(dev); nv10_graph_load_context(next); NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10010100); - //NV_WRITE(NV10_PGRAPH_CTX_USER, next->id << 24); NV_WRITE(NV10_PGRAPH_FFINTFC_ST2, NV_READ(NV10_PGRAPH_FFINTFC_ST2)&0xCFFFFFFF); NV_WRITE(NV04_PGRAPH_FIFO,0x1); } @@ -796,6 +793,7 @@ int nv10_graph_create_context(struct nouveau_channel *chan) { NV_WRITE_CTX(0x00400ec0, 0x00000080); NV_WRITE_CTX(0x00400ed0, 0x00000080); } + NV_WRITE_CTX(NV10_PGRAPH_CTX_USER, chan->id << 24); nv10_graph_create_pipe(chan); return 0; From 0bd8752a0cb8afb7f29a5f659c3459aab42d9955 Mon Sep 17 00:00:00 2001 From: Patrice Mandin Date: Mon, 10 Sep 2007 18:52:17 +0200 Subject: [PATCH 308/437] nouveau: nv10: add combiner registers --- shared-core/nouveau_reg.h | 12 ++++++++++++ shared-core/nv10_graph.c | 24 ++++++++++++------------ 2 files changed, 24 insertions(+), 12 deletions(-) diff --git a/shared-core/nouveau_reg.h b/shared-core/nouveau_reg.h index a1895c34..21133d98 100644 --- a/shared-core/nouveau_reg.h +++ b/shared-core/nouveau_reg.h @@ -319,6 +319,18 @@ #define NV47_PGRAPH_TSTATUS0(i) 0x00400D0C #define NV04_PGRAPH_V_RAM 0x00400D40 #define NV04_PGRAPH_W_RAM 0x00400D80 +#define NV10_PGRAPH_COMBINER0_IN_ALPHA 0x00400E40 +#define NV10_PGRAPH_COMBINER1_IN_ALPHA 0x00400E44 +#define NV10_PGRAPH_COMBINER0_IN_RGB 0x00400E48 +#define NV10_PGRAPH_COMBINER1_IN_RGB 0x00400E4C +#define NV10_PGRAPH_COMBINER_COLOR0 0x00400E50 +#define NV10_PGRAPH_COMBINER_COLOR1 0x00400E54 +#define NV10_PGRAPH_COMBINER0_OUT_ALPHA 0x00400E58 +#define NV10_PGRAPH_COMBINER1_OUT_ALPHA 0x00400E5C +#define NV10_PGRAPH_COMBINER0_OUT_RGB 0x00400E60 +#define NV10_PGRAPH_COMBINER1_OUT_RGB 0x00400E64 +#define NV10_PGRAPH_COMBINER_FINAL0 0x00400E68 +#define NV10_PGRAPH_COMBINER_FINAL1 0x00400E6C #define NV10_PGRAPH_WINDOWCLIP_HORIZONTAL 0x00400F00 #define NV10_PGRAPH_WINDOWCLIP_VERTICAL 0x00400F20 #define NV10_PGRAPH_XFMODE0 0x00400F40 diff --git a/shared-core/nv10_graph.c b/shared-core/nv10_graph.c index 456bc5d3..1fd185a0 100644 --- a/shared-core/nv10_graph.c +++ b/shared-core/nv10_graph.c @@ -558,18 +558,18 @@ NV03_PGRAPH_CLIPX_0, NV03_PGRAPH_CLIPX_1, NV03_PGRAPH_CLIPY_0, NV03_PGRAPH_CLIPY_1, -0x00400e40, -0x00400e44, -0x00400e48, -0x00400e4c, -0x00400e50, -0x00400e54, -0x00400e58, -0x00400e5c, -0x00400e60, -0x00400e64, -0x00400e68, -0x00400e6c, +NV10_PGRAPH_COMBINER0_IN_ALPHA, +NV10_PGRAPH_COMBINER1_IN_ALPHA, +NV10_PGRAPH_COMBINER0_IN_RGB, +NV10_PGRAPH_COMBINER1_IN_RGB, +NV10_PGRAPH_COMBINER_COLOR0, 
+NV10_PGRAPH_COMBINER_COLOR1, +NV10_PGRAPH_COMBINER0_OUT_ALPHA, +NV10_PGRAPH_COMBINER1_OUT_ALPHA, +NV10_PGRAPH_COMBINER0_OUT_RGB, +NV10_PGRAPH_COMBINER1_OUT_RGB, +NV10_PGRAPH_COMBINER_FINAL0, +NV10_PGRAPH_COMBINER_FINAL1, 0x00400e00, 0x00400e04, 0x00400e08, From 3cb8acd5abcb410ab2982f55aec94b5a793a47d6 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Tue, 11 Sep 2007 03:48:46 -0700 Subject: [PATCH 309/437] Disambiguate planes & pipes for swap operations This mod makes the SAREA track plane to pipe mappings and corrects the name of the plane info variables (they were mislabeled as pipe info since until now all code assumed a direct mapping between planes and pipes). It also updates the flip ioctl argument to take a set of planes rather than pipes, since planes are flipped while pipes generate vblank events. --- shared-core/i915_dma.c | 46 ++++++++++++++--------------- shared-core/i915_drm.h | 21 ++++++++------ shared-core/i915_drv.h | 5 ++-- shared-core/i915_irq.c | 66 ++++++++++++++++++++++-------------------- 4 files changed, 73 insertions(+), 65 deletions(-) diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c index 9f18feee..daa03df8 100644 --- a/shared-core/i915_dma.c +++ b/shared-core/i915_dma.c @@ -572,11 +572,11 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev, return 0; } -static void i915_do_dispatch_flip(struct drm_device * dev, int pipe, int sync) +static void i915_do_dispatch_flip(struct drm_device * dev, int plane, int sync) { drm_i915_private_t *dev_priv = dev->dev_private; u32 num_pages, current_page, next_page, dspbase; - int shift = 2 * pipe, x, y; + int shift = 2 * plane, x, y; RING_LOCALS; /* Calculate display base offset */ @@ -597,25 +597,25 @@ static void i915_do_dispatch_flip(struct drm_device * dev, int pipe, int sync) break; } - if (pipe == 0) { - x = dev_priv->sarea_priv->pipeA_x; - y = dev_priv->sarea_priv->pipeA_y; + if (plane == 0) { + x = dev_priv->sarea_priv->planeA_x; + y = dev_priv->sarea_priv->planeA_y; } else { - x = dev_priv->sarea_priv->pipeB_x; - y = dev_priv->sarea_priv->pipeB_y; + x = dev_priv->sarea_priv->planeB_x; + y = dev_priv->sarea_priv->planeB_y; } dspbase += (y * dev_priv->sarea_priv->pitch + x) * dev_priv->cpp; - DRM_DEBUG("pipe=%d current_page=%d dspbase=0x%x\n", pipe, current_page, + DRM_DEBUG("plane=%d current_page=%d dspbase=0x%x\n", plane, current_page, dspbase); BEGIN_LP_RING(4); OUT_RING(sync ? 0 : - (MI_WAIT_FOR_EVENT | (pipe ? MI_WAIT_FOR_PLANE_B_FLIP : + (MI_WAIT_FOR_EVENT | (plane ? MI_WAIT_FOR_PLANE_B_FLIP : MI_WAIT_FOR_PLANE_A_FLIP))); OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | (sync ? 0 : ASYNC_FLIP) | - (pipe ? DISPLAY_PLANE_B : DISPLAY_PLANE_A)); + (plane ? 
DISPLAY_PLANE_B : DISPLAY_PLANE_A)); OUT_RING(dev_priv->sarea_priv->pitch * dev_priv->cpp); OUT_RING(dspbase); ADVANCE_LP_RING(); @@ -624,19 +624,19 @@ static void i915_do_dispatch_flip(struct drm_device * dev, int pipe, int sync) dev_priv->sarea_priv->pf_current_page |= next_page << shift; } -void i915_dispatch_flip(struct drm_device * dev, int pipes, int sync) +void i915_dispatch_flip(struct drm_device * dev, int planes, int sync) { drm_i915_private_t *dev_priv = dev->dev_private; int i; - DRM_DEBUG("%s: pipes=0x%x pfCurrentPage=%d\n", + DRM_DEBUG("%s: planes=0x%x pfCurrentPage=%d\n", __FUNCTION__, - pipes, dev_priv->sarea_priv->pf_current_page); + planes, dev_priv->sarea_priv->pf_current_page); i915_emit_mi_flush(dev, MI_READ_FLUSH | MI_EXE_FLUSH); for (i = 0; i < 2; i++) - if (pipes & (1 << i)) + if (planes & (1 << i)) i915_do_dispatch_flip(dev, i, sync); i915_emit_breadcrumb(dev); @@ -728,21 +728,21 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data, static int i915_do_cleanup_pageflip(struct drm_device * dev) { drm_i915_private_t *dev_priv = dev->dev_private; - int i, pipes, num_pages = dev_priv->sarea_priv->third_handle ? 3 : 2; + int i, planes, num_pages = dev_priv->sarea_priv->third_handle ? 3 : 2; DRM_DEBUG("%s\n", __FUNCTION__); - for (i = 0, pipes = 0; i < 2; i++) + for (i = 0, planes = 0; i < 2; i++) if (dev_priv->sarea_priv->pf_current_page & (0x3 << (2 * i))) { dev_priv->sarea_priv->pf_current_page = (dev_priv->sarea_priv->pf_current_page & ~(0x3 << (2 * i))) | (num_pages - 1) << (2 * i); - pipes |= 1 << i; + planes |= 1 << i; } - if (pipes) - i915_dispatch_flip(dev, pipes, 0); + if (planes) + i915_dispatch_flip(dev, planes, 0); return 0; } @@ -755,13 +755,13 @@ static int i915_flip_bufs(struct drm_device *dev, void *data, struct drm_file *f LOCK_TEST_WITH_RETURN(dev, file_priv); - if (param->pipes & ~0x3) { - DRM_ERROR("Invalid pipes 0x%x, only <= 0x3 is valid\n", - param->pipes); + if (param->planes & ~0x3) { + DRM_ERROR("Invalid planes 0x%x, only <= 0x3 is valid\n", + param->planes); return -EINVAL; } - i915_dispatch_flip(dev, param->pipes, 0); + i915_dispatch_flip(dev, param->planes, 0); return 0; } diff --git a/shared-core/i915_drm.h b/shared-core/i915_drm.h index 3a90df6e..575b182a 100644 --- a/shared-core/i915_drm.h +++ b/shared-core/i915_drm.h @@ -105,14 +105,17 @@ typedef struct _drm_i915_sarea { unsigned int rotated_tiled; unsigned int rotated2_tiled; - int pipeA_x; - int pipeA_y; - int pipeA_w; - int pipeA_h; - int pipeB_x; - int pipeB_y; - int pipeB_w; - int pipeB_h; + int planeA_x; + int planeA_y; + int planeA_w; + int planeA_h; + int planeB_x; + int planeB_y; + int planeB_w; + int planeB_h; + + int planeA_pipe; + int planeB_pipe; /* Triple buffering */ drm_handle_t third_handle; @@ -182,7 +185,7 @@ typedef struct _drm_i915_sarea { /* Asynchronous page flipping: */ typedef struct drm_i915_flip { - int pipes; + int planes; } drm_i915_flip_t; /* Allow drivers to submit batchbuffers directly to hardware, relying diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h index aff03bee..c5f51897 100644 --- a/shared-core/i915_drv.h +++ b/shared-core/i915_drv.h @@ -55,10 +55,11 @@ * - Support vertical blank on secondary display pipe * 1.8: New ioctl for ARB_Occlusion_Query * 1.9: Usable page flipping and triple buffering + * 1.10: Plane/pipe disentangling */ #define DRIVER_MAJOR 1 #if defined(I915_HAVE_FENCE) && defined(I915_HAVE_BUFFER) -#define DRIVER_MINOR 9 +#define DRIVER_MINOR 10 #else #define DRIVER_MINOR 6 #endif @@ -87,7 +88,7 @@ struct 
mem_block { typedef struct _drm_i915_vbl_swap { struct list_head head; drm_drawable_t drw_id; - unsigned int pipe; + unsigned int plane; unsigned int sequence; int flip; } drm_i915_vbl_swap_t; diff --git a/shared-core/i915_irq.c b/shared-core/i915_irq.c index 1056b3e6..72c61876 100644 --- a/shared-core/i915_irq.c +++ b/shared-core/i915_irq.c @@ -44,28 +44,28 @@ */ static void i915_dispatch_vsync_flip(struct drm_device *dev, struct drm_drawable_info *drw, - int pipe) + int plane) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv; u16 x1, y1, x2, y2; - int pf_pipes = 1 << pipe; + int pf_planes = 1 << plane; DRM_SPINLOCK_ASSERT(&dev->drw_lock); - /* If the window is visible on the other pipe, we have to flip on that - * pipe as well. + /* If the window is visible on the other plane, we have to flip on that + * plane as well. */ - if (pipe == 1) { - x1 = sarea_priv->pipeA_x; - y1 = sarea_priv->pipeA_y; - x2 = x1 + sarea_priv->pipeA_w; - y2 = y1 + sarea_priv->pipeA_h; + if (plane == 1) { + x1 = sarea_priv->planeA_x; + y1 = sarea_priv->planeA_y; + x2 = x1 + sarea_priv->planeA_w; + y2 = y1 + sarea_priv->planeA_h; } else { - x1 = sarea_priv->pipeB_x; - y1 = sarea_priv->pipeB_y; - x2 = x1 + sarea_priv->pipeB_w; - y2 = y1 + sarea_priv->pipeB_h; + x1 = sarea_priv->planeB_x; + y1 = sarea_priv->planeB_y; + x2 = x1 + sarea_priv->planeB_w; + y2 = y1 + sarea_priv->planeB_h; } if (x2 > 0 && y2 > 0) { @@ -75,13 +75,13 @@ i915_dispatch_vsync_flip(struct drm_device *dev, struct drm_drawable_info *drw, for (i = 0; i < num_rects; i++) if (!(rect[i].x1 >= x2 || rect[i].y1 >= y2 || rect[i].x2 <= x1 || rect[i].y2 <= y1)) { - pf_pipes = 0x3; + pf_planes = 0x3; break; } } - i915_dispatch_flip(dev, pf_pipes, 1); + i915_dispatch_flip(dev, pf_planes, 1); } /** @@ -124,8 +124,10 @@ static void i915_vblank_tasklet(struct drm_device *dev) list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) { drm_i915_vbl_swap_t *vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head); + int pipe = vbl_swap->plane ? 
sarea_priv->planeB_pipe : + sarea_priv->planeA_pipe; - if ((counter[vbl_swap->pipe] - vbl_swap->sequence) > (1<<23)) + if ((counter[pipe] - vbl_swap->sequence) > (1<<23)) continue; list_del(list); @@ -176,10 +178,10 @@ static void i915_vblank_tasklet(struct drm_device *dev) i915_kernel_lost_context(dev); upper[0] = upper[1] = 0; - slice[0] = max(sarea_priv->pipeA_h / nhits, 1); - slice[1] = max(sarea_priv->pipeB_h / nhits, 1); - lower[0] = sarea_priv->pipeA_y + slice[0]; - lower[1] = sarea_priv->pipeB_y + slice[0]; + slice[0] = max(sarea_priv->planeA_h / nhits, 1); + slice[1] = max(sarea_priv->planeB_h / nhits, 1); + lower[0] = sarea_priv->planeA_y + slice[0]; + lower[1] = sarea_priv->planeB_y + slice[0]; offsets[0] = sarea_priv->front_offset; offsets[1] = sarea_priv->back_offset; @@ -205,7 +207,7 @@ static void i915_vblank_tasklet(struct drm_device *dev) drm_i915_vbl_swap_t *swap_hit = list_entry(hit, drm_i915_vbl_swap_t, head); struct drm_clip_rect *rect; - int num_rects, pipe, front, back; + int num_rects, plane, front, back; unsigned short top, bottom; drw = drm_get_drawable_info(dev, swap_hit->drw_id); @@ -213,10 +215,10 @@ static void i915_vblank_tasklet(struct drm_device *dev) if (!drw) continue; - pipe = swap_hit->pipe; + plane = swap_hit->plane; if (swap_hit->flip) { - i915_dispatch_vsync_flip(dev, drw, pipe); + i915_dispatch_vsync_flip(dev, drw, plane); continue; } @@ -238,11 +240,11 @@ static void i915_vblank_tasklet(struct drm_device *dev) } rect = drw->rects; - top = upper[pipe]; - bottom = lower[pipe]; + top = upper[plane]; + bottom = lower[plane]; front = (dev_priv->sarea_priv->pf_current_page >> - (2 * pipe)) & 0x3; + (2 * plane)) & 0x3; back = (front + 1) % num_pages; for (num_rects = drw->num_rects; num_rects--; rect++) { @@ -560,9 +562,10 @@ int i915_vblank_swap(struct drm_device *dev, void *data, drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_vblank_swap_t *swap = data; drm_i915_vbl_swap_t *vbl_swap; - unsigned int pipe, seqtype, curseq; + unsigned int pipe, seqtype, curseq, plane; unsigned long irqflags; struct list_head *list; + drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv; if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __func__); @@ -581,7 +584,8 @@ int i915_vblank_swap(struct drm_device *dev, void *data, return -EINVAL; } - pipe = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0; + plane = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0; + pipe = plane ? 
sarea_priv->planeB_pipe : sarea_priv->planeA_pipe; seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE); @@ -624,7 +628,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data, return -EINVAL; } - i915_dispatch_vsync_flip(dev, drw, pipe); + i915_dispatch_vsync_flip(dev, drw, plane); DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock, irqflags); @@ -638,7 +642,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data, vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head); if (vbl_swap->drw_id == swap->drawable && - vbl_swap->pipe == pipe && + vbl_swap->plane == plane && vbl_swap->sequence == swap->sequence) { vbl_swap->flip = (swap->seqtype & _DRM_VBLANK_FLIP); DRM_SPINUNLOCK_IRQRESTORE(&dev_priv->swaps_lock, irqflags); @@ -664,7 +668,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data, DRM_DEBUG("\n"); vbl_swap->drw_id = swap->drawable; - vbl_swap->pipe = pipe; + vbl_swap->plane = plane; vbl_swap->sequence = swap->sequence; vbl_swap->flip = (swap->seqtype & _DRM_VBLANK_FLIP); From 852232fb803bef92b12136be2766ddee3e3613b2 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Wed, 12 Sep 2007 08:55:33 -0700 Subject: [PATCH 310/437] Remove plane->pipe mapping from SAREA private after all We can figure out which pipe a given plane is mapped to by looking at the display control registers instead of tracking it in a new SAREA private field. If this becomes a performance problem, we could move to an ioctl based solution by adding a new parameter for the DDX to set (defaulting to the old behavior if the param was never set of course). --- shared-core/i915_drm.h | 3 --- shared-core/i915_drv.h | 5 +++++ shared-core/i915_irq.c | 26 ++++++++++++++++++++++---- 3 files changed, 27 insertions(+), 7 deletions(-) diff --git a/shared-core/i915_drm.h b/shared-core/i915_drm.h index 575b182a..a57ffa73 100644 --- a/shared-core/i915_drm.h +++ b/shared-core/i915_drm.h @@ -114,9 +114,6 @@ typedef struct _drm_i915_sarea { int planeB_w; int planeB_h; - int planeA_pipe; - int planeB_pipe; - /* Triple buffering */ drm_handle_t third_handle; int third_offset; diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h index c5f51897..899817ec 100644 --- a/shared-core/i915_drv.h +++ b/shared-core/i915_drv.h @@ -490,6 +490,11 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); #define DISPLAY_PLANE_A (0<<20) #define DISPLAY_PLANE_B (1<<20) +/* Display regs */ +#define DSPACNTR 0x70180 +#define DSPBCNTR 0x71180 +#define DISPPLANE_SEL_PIPE_MASK (1<<24) + /* Define the region of interest for the binner: */ #define CMD_OP_BIN_CONTROL ((0x3<<29)|(0x1d<<24)|(0x84<<16)|4) diff --git a/shared-core/i915_irq.c b/shared-core/i915_irq.c index 72c61876..804e3fb1 100644 --- a/shared-core/i915_irq.c +++ b/shared-core/i915_irq.c @@ -37,6 +37,26 @@ #define MAX_NOPID ((u32)~0) +/** + * i915_get_pipe - return the the pipe associated with a given plane + * @dev: DRM device + * @plane: plane to look for + * + * We need to get the pipe associated with a given plane to correctly perform + * vblank driven swapping, and they may not always be equal. So look up the + * pipe associated with @plane here. + */ +static int +i915_get_pipe(struct drm_device *dev, int plane) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + u32 dspcntr; + + dspcntr = plane ? I915_READ(DSPBCNTR) : I915_READ(DSPACNTR); + + return dspcntr & DISPPLANE_SEL_PIPE_MASK ? 1 : 0; +} + /** * Emit a synchronous flip. 
* @@ -124,8 +144,7 @@ static void i915_vblank_tasklet(struct drm_device *dev) list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) { drm_i915_vbl_swap_t *vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head); - int pipe = vbl_swap->plane ? sarea_priv->planeB_pipe : - sarea_priv->planeA_pipe; + int pipe = i915_get_pipe(dev, vbl_swap->plane); if ((counter[pipe] - vbl_swap->sequence) > (1<<23)) continue; @@ -565,7 +584,6 @@ int i915_vblank_swap(struct drm_device *dev, void *data, unsigned int pipe, seqtype, curseq, plane; unsigned long irqflags; struct list_head *list; - drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv; if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __func__); @@ -585,7 +603,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data, } plane = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0; - pipe = plane ? sarea_priv->planeB_pipe : sarea_priv->planeA_pipe; + pipe = i915_get_pipe(dev, plane); seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE); From c453135789597648ef5aa641c4e59bb5b5e320de Mon Sep 17 00:00:00 2001 From: Brian Date: Wed, 12 Sep 2007 11:48:48 -0600 Subject: [PATCH 311/437] Added idr_replace() function which was apparently added in Linux 2.6.18 Someone should probably double-check my work here since this is the first time I've touched drm_compat.[ch] --- linux-core/drm_compat.c | 47 +++++++++++++++++++++++++++++++++++++++++ linux-core/drm_compat.h | 5 +++++ 2 files changed, 52 insertions(+) diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c index 9a6da7e9..e51aedb7 100644 --- a/linux-core/drm_compat.c +++ b/linux-core/drm_compat.c @@ -678,4 +678,51 @@ void idr_remove_all(struct idr *idp) idp->layers = 0; } EXPORT_SYMBOL(idr_remove_all); + +#endif /* DRM_IDR_COMPAT_FN */ + + + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)) +/** + * idr_replace - replace pointer for given id + * @idp: idr handle + * @ptr: pointer you want associated with the id + * @id: lookup key + * + * Replace the pointer registered with an id and return the old value. + * A -ENOENT return indicates that @id was not found. + * A -EINVAL return indicates that @id was not within valid constraints. + * + * The caller must serialize vs idr_find(), idr_get_new(), and idr_remove(). + */ +void *idr_replace(struct idr *idp, void *ptr, int id) +{ + int n; + struct idr_layer *p, *old_p; + + n = idp->layers * IDR_BITS; + p = idp->top; + + id &= MAX_ID_MASK; + + if (id >= (1 << n)) + return ERR_PTR(-EINVAL); + + n -= IDR_BITS; + while ((n > 0) && p) { + p = p->ary[(id >> n) & IDR_MASK]; + n -= IDR_BITS; + } + + n = id & IDR_MASK; + if (unlikely(p == NULL || !test_bit(n, &p->bitmap))) + return ERR_PTR(-ENOENT); + + old_p = p->ary[n]; + p->ary[n] = ptr; + + return (void *)old_p; +} +EXPORT_SYMBOL(idr_replace); #endif diff --git a/linux-core/drm_compat.h b/linux-core/drm_compat.h index 0b00ba47..94db8533 100644 --- a/linux-core/drm_compat.h +++ b/linux-core/drm_compat.h @@ -316,4 +316,9 @@ int idr_for_each(struct idr *idp, void idr_remove_all(struct idr *idp); #endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)) +void *idr_replace(struct idr *idp, void *ptr, int id); +#endif + #endif From 41345b95a2cdc1e509171d31fc8aed8cecb43dbd Mon Sep 17 00:00:00 2001 From: Brian Date: Wed, 12 Sep 2007 12:05:15 -0600 Subject: [PATCH 312/437] Added bool typedef added in kernel 2.6.19 This allows the xgi code to compile with older kernels. 
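Kernels before 2.6.19 do not provide the bool typedef, so drm_compat.h
now supplies one behind a version check. Hypothetical usage, only to
illustrate that callers need no changes of their own (the function
name below is made up for the example):

	#include "drm_compat.h"

	static bool xgi_engine_is_idle(u32 pending)
	{
		return pending == 0;
	}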
--- linux-core/drm_compat.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/linux-core/drm_compat.h b/linux-core/drm_compat.h index 94db8533..870f8b73 100644 --- a/linux-core/drm_compat.h +++ b/linux-core/drm_compat.h @@ -321,4 +321,8 @@ void idr_remove_all(struct idr *idp); void *idr_replace(struct idr *idp, void *ptr, int id); #endif +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) +typedef _Bool bool; +#endif + #endif From e7d4a26913ba3a4949ac36280925062948ee21ce Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Tue, 18 Sep 2007 11:03:08 -0700 Subject: [PATCH 313/437] Fix ioc32 compat layer Previously any ioctls that weren't explicitly listed in the compat ioctl table would fail with ENOTTY. If the incoming ioctl number is outside the range of the table, assume that it Just Works, and pass it off to drm_ioctl. This make the fence related ioctls work on 64-bit PowerPC. --- linux-core/drm_ioc32.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/linux-core/drm_ioc32.c b/linux-core/drm_ioc32.c index 558376de..0188154e 100644 --- a/linux-core/drm_ioc32.c +++ b/linux-core/drm_ioc32.c @@ -1051,8 +1051,13 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) drm_ioctl_compat_t *fn; int ret; + + /* Assume that ioctls without an explicit compat routine will "just + * work". This may not always be a good assumption, but it's better + * than always failing. + */ if (nr >= DRM_ARRAY_SIZE(drm_compat_ioctls)) - return -ENOTTY; + return drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg); fn = drm_compat_ioctls[nr]; From a3881ad2fef99aaf0a863609a847020ea822798c Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Tue, 18 Sep 2007 11:03:49 -0700 Subject: [PATCH 314/437] Add ioc32 compat layer for XGI DRM. --- linux-core/Makefile.kernel | 3 +- linux-core/xgi_drv.c | 5 +- linux-core/xgi_drv.h | 7 +- linux-core/xgi_fb.c | 7 ++ linux-core/xgi_ioc32.c | 140 +++++++++++++++++++++++++++++++++++++ shared-core/xgi_drm.h | 2 +- 6 files changed, 159 insertions(+), 5 deletions(-) create mode 100644 linux-core/xgi_ioc32.c diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel index ac77941e..b282bd05 100644 --- a/linux-core/Makefile.kernel +++ b/linux-core/Makefile.kernel @@ -48,6 +48,7 @@ mga-objs += mga_ioc32.o r128-objs += r128_ioc32.o i915-objs += i915_ioc32.o nouveau-objs += nouveau_ioc32.o +xgi-objs += xgi_ioc32.o endif obj-m += drm.o @@ -64,4 +65,4 @@ obj-$(CONFIG_DRM_VIA) += via.o obj-$(CONFIG_DRM_MACH64)+= mach64.o obj-$(CONFIG_DRM_NV) += nv.o obj-$(CONFIG_DRM_NOUVEAU) += nouveau.o -obj-$(CONFIG_DRM_XGI) += xgi.o \ No newline at end of file +obj-$(CONFIG_DRM_XGI) += xgi.o diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index 84547f62..bc6873a9 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -48,7 +48,7 @@ static struct drm_fence_driver xgi_fence_driver = { .has_irq = xgi_fence_has_irq }; -static int xgi_bootstrap(struct drm_device *, void *, struct drm_file *); +int xgi_bootstrap(struct drm_device *, void *, struct drm_file *); static struct drm_ioctl_desc xgi_ioctls[] = { DRM_IOCTL_DEF(DRM_XGI_BOOTSTRAP, xgi_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), @@ -97,6 +97,9 @@ static struct drm_driver driver = { .mmap = drm_mmap, .poll = drm_poll, .fasync = drm_fasync, +#if defined(CONFIG_COMPAT) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9) + .compat_ioctl = xgi_compat_ioctl, +#endif }, .pci_driver = { diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 88ade64d..a68dc03b 100644 --- a/linux-core/xgi_drv.h 
+++ b/linux-core/xgi_drv.h @@ -35,10 +35,10 @@ #define DRIVER_NAME "xgi" #define DRIVER_DESC "XGI XP5 / XP10 / XG47" -#define DRIVER_DATE "20070906" +#define DRIVER_DATE "20070918" #define DRIVER_MAJOR 1 -#define DRIVER_MINOR 0 +#define DRIVER_MINOR 1 #define DRIVER_PATCHLEVEL 0 #include "xgi_cmdlist.h" @@ -78,6 +78,9 @@ struct xgi_info { unsigned next_sequence; }; +extern long xgi_compat_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg); + extern int xgi_fb_heap_init(struct xgi_info * info); extern int xgi_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, diff --git a/linux-core/xgi_fb.c b/linux-core/xgi_fb.c index 40f39fbc..2e2d0094 100644 --- a/linux-core/xgi_fb.c +++ b/linux-core/xgi_fb.c @@ -65,6 +65,13 @@ int xgi_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, alloc->hw_addr = alloc->offset; alloc->index = block->user_hash.key; + if (block->user_hash.key != (unsigned long) alloc->index) { + DRM_ERROR("%s truncated handle %lx for pool %d " + "offset %x\n", + __func__, block->user_hash.key, + alloc->location, alloc->offset); + } + if (alloc->location == XGI_MEMLOC_NON_LOCAL) { alloc->hw_addr += info->pcie.base; } diff --git a/linux-core/xgi_ioc32.c b/linux-core/xgi_ioc32.c new file mode 100644 index 00000000..c54044fa --- /dev/null +++ b/linux-core/xgi_ioc32.c @@ -0,0 +1,140 @@ +/* + * (C) Copyright IBM Corporation 2007 + * Copyright (C) Paul Mackerras 2005. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * on the rights to use, copy, modify, merge, publish, distribute, sub + * license, and/or sell copies of the Software, and to permit persons to whom + * the Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: + * Ian Romanick + */ + +#include + +#include "drmP.h" +#include "drm.h" + +#include "xgi_drm.h" + +/* This is copied from drm_ioc32.c. 
+ */ +struct drm_map32 { + u32 offset; /**< Requested physical address (0 for SAREA)*/ + u32 size; /**< Requested physical size (bytes) */ + enum drm_map_type type; /**< Type of memory to map */ + enum drm_map_flags flags; /**< Flags */ + u32 handle; /**< User-space: "Handle" to pass to mmap() */ + int mtrr; /**< MTRR slot used */ +}; + +struct drm32_xgi_bootstrap { + struct drm_map32 gart; +}; + + +extern int xgi_bootstrap(struct drm_device *, void *, struct drm_file *); + +static int compat_xgi_bootstrap(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + struct drm32_xgi_bootstrap __user *const argp = (void __user *)arg; + struct drm32_xgi_bootstrap bs32; + struct xgi_bootstrap __user *bs; + int err; + void *handle; + + + if (copy_from_user(&bs32, argp, sizeof(bs32))) { + return -EFAULT; + } + + bs = compat_alloc_user_space(sizeof(*bs)); + if (!access_ok(VERIFY_WRITE, bs, sizeof(*bs))) { + return -EFAULT; + } + + if (__put_user(bs32.gart.offset, &bs->gart.offset) + || __put_user(bs32.gart.size, &bs->gart.size) + || __put_user(bs32.gart.type, &bs->gart.type) + || __put_user(bs32.gart.flags, &bs->gart.flags)) { + return -EFAULT; + } + + err = drm_ioctl(filp->f_dentry->d_inode, filp, XGI_IOCTL_BOOTSTRAP, + (unsigned long)bs); + if (err) { + return err; + } + + if (__get_user(bs32.gart.offset, &bs->gart.offset) + || __get_user(bs32.gart.mtrr, &bs->gart.mtrr) + || __get_user(handle, &bs->gart.handle)) { + return -EFAULT; + } + + bs32.gart.handle = (unsigned long)handle; + if (bs32.gart.handle != (unsigned long)handle && printk_ratelimit()) { + printk(KERN_ERR "%s truncated handle %p for type %d " + "offset %x\n", + __func__, handle, bs32.gart.type, bs32.gart.offset); + } + + if (copy_to_user(argp, &bs32, sizeof(bs32))) { + return -EFAULT; + } + + return 0; +} + + +drm_ioctl_compat_t *xgi_compat_ioctls[] = { + [DRM_XGI_BOOTSTRAP] = compat_xgi_bootstrap, +}; + +/** + * Called whenever a 32-bit process running under a 64-bit kernel + * performs an ioctl on /dev/dri/card. + * + * \param filp file pointer. + * \param cmd command. + * \param arg user argument. + * \return zero on success or negative number on failure. + */ +long xgi_compat_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + const unsigned int nr = DRM_IOCTL_NR(cmd); + drm_ioctl_compat_t *fn = NULL; + int ret; + + if (nr < DRM_COMMAND_BASE) + return drm_compat_ioctl(filp, cmd, arg); + + if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(xgi_compat_ioctls)) + fn = xgi_compat_ioctls[nr - DRM_COMMAND_BASE]; + + lock_kernel(); + ret = (fn != NULL) + ? (*fn)(filp, cmd, arg) + : drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg); + unlock_kernel(); + + return ret; +} diff --git a/shared-core/xgi_drm.h b/shared-core/xgi_drm.h index d8715df5..de0fb532 100644 --- a/shared-core/xgi_drm.h +++ b/shared-core/xgi_drm.h @@ -90,7 +90,7 @@ struct xgi_mem_alloc { * * See also DRM_XGI_FREE ioctl. */ - unsigned long index; + __u32 index; }; enum xgi_batch_type { From 78d111fa967d18e7f9f9b2acd26aff20b884eb6c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michel=20D=C3=A4nzer?= Date: Tue, 18 Sep 2007 20:55:43 +0100 Subject: [PATCH 315/437] i915: Fix scheduled buffer swaps. One instance of unlocking a spinlock was converted incorrectly when this code was fixed to build on BSD. 
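The swap-list walk in i915_vblank_tasklet() is entered and left holding
swaps_lock, while drw_lock is only held transiently inside the loop
body, so the unlock after the loop must be on swaps_lock. Rough sketch
of the intended pairing (simplified; the IRQ-saving lock variants are
omitted):

	DRM_SPINLOCK(&dev_priv->swaps_lock);
	list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
		/* ... check whether this swap is due ... */
		DRM_SPINUNLOCK(&dev_priv->swaps_lock);
		DRM_SPINLOCK(&dev->drw_lock);
		/* ... drawable work for this swap ... */
		DRM_SPINUNLOCK(&dev->drw_lock);
		DRM_SPINLOCK(&dev_priv->swaps_lock);
	}
	DRM_SPINUNLOCK(&dev_priv->swaps_lock);	/* was mistakenly &dev->drw_lock */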
--- shared-core/i915_irq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/shared-core/i915_irq.c b/shared-core/i915_irq.c index 804e3fb1..ea84c708 100644 --- a/shared-core/i915_irq.c +++ b/shared-core/i915_irq.c @@ -188,7 +188,7 @@ static void i915_vblank_tasklet(struct drm_device *dev) DRM_SPINLOCK(&dev_priv->swaps_lock); } - DRM_SPINUNLOCK(&dev->drw_lock); + DRM_SPINUNLOCK(&dev_priv->swaps_lock); if (nhits == 0) { return; From e349b58b4a6ebfe299720cb921039a600c145e65 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michel=20D=C3=A4nzer?= Date: Tue, 18 Sep 2007 21:03:22 +0100 Subject: [PATCH 316/437] i915: Reinstate check that drawable has valid information in i915_vblank_swap. --- shared-core/i915_irq.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/shared-core/i915_irq.c b/shared-core/i915_irq.c index ea84c708..7baa23c0 100644 --- a/shared-core/i915_irq.c +++ b/shared-core/i915_irq.c @@ -612,6 +612,21 @@ int i915_vblank_swap(struct drm_device *dev, void *data, return -EINVAL; } + DRM_SPINLOCK_IRQSAVE(&dev->drw_lock, irqflags); + + /* It makes no sense to schedule a swap for a drawable that doesn't have + * valid information at this point. E.g. this could mean that the X + * server is too old to push drawable information to the DRM, in which + * case all such swaps would become ineffective. + */ + if (!drm_get_drawable_info(dev, swap->drawable)) { + DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock, irqflags); + DRM_DEBUG("Invalid drawable ID %d\n", swap->drawable); + return -EINVAL; + } + + DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock, irqflags); + curseq = atomic_read(pipe ? &dev->vbl_received2 : &dev->vbl_received); if (seqtype == _DRM_VBLANK_RELATIVE) From bc5423f16838257a040a55b88df9588d268fda06 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Thu, 20 Sep 2007 14:01:29 +1000 Subject: [PATCH 317/437] drm_sysfs: update sysfs code from kernel --- linux-core/drmP.h | 9 +-- linux-core/drm_stub.c | 2 +- linux-core/drm_sysfs.c | 163 +++++++++++++---------------------------- 3 files changed, 54 insertions(+), 120 deletions(-) diff --git a/linux-core/drmP.h b/linux-core/drmP.h index aa562225..f8ca3f4b 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -1130,7 +1130,7 @@ extern int drm_put_head(struct drm_head * head); extern unsigned int drm_debug; /* 1 to enable debug output */ extern unsigned int drm_cards_limit; extern struct drm_head **drm_heads; -extern struct drm_sysfs_class *drm_class; +extern struct class *drm_class; extern struct proc_dir_entry *drm_proc_root; extern drm_local_map_t *drm_getsarea(struct drm_device *dev); @@ -1163,10 +1163,9 @@ extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah); /* sysfs support (drm_sysfs.c) */ struct drm_sysfs_class; -extern struct drm_sysfs_class *drm_sysfs_create(struct module *owner, - char *name); -extern void drm_sysfs_destroy(struct drm_sysfs_class *cs); -extern struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs, +extern struct class *drm_sysfs_create(struct module *owner, char *name); +extern void drm_sysfs_destroy(struct class *cs); +extern struct class_device *drm_sysfs_device_add(struct class *cs, struct drm_head * head); extern void drm_sysfs_device_remove(struct class_device *class_dev); diff --git a/linux-core/drm_stub.c b/linux-core/drm_stub.c index eba6deed..07ea91e0 100644 --- a/linux-core/drm_stub.c +++ b/linux-core/drm_stub.c @@ -51,7 +51,7 @@ module_param_named(cards_limit, drm_cards_limit, int, 0444); module_param_named(debug, drm_debug, int, 0600); struct drm_head 
**drm_heads; -struct drm_sysfs_class *drm_class; +struct class *drm_class; struct proc_dir_entry *drm_proc_root; static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev, diff --git a/linux-core/drm_sysfs.c b/linux-core/drm_sysfs.c index 1090e719..cf4349b0 100644 --- a/linux-core/drm_sysfs.c +++ b/linux-core/drm_sysfs.c @@ -1,3 +1,4 @@ + /* * drm_sysfs.c - Modifications to drm_sysfs_class.c to support * extra sysfs attribute from DRM. Normal drm_sysfs_class @@ -15,38 +16,8 @@ #include #include -#include "drmP.h" #include "drm_core.h" - -struct drm_sysfs_class { - struct class_device_attribute attr; - struct class class; -}; -#define to_drm_sysfs_class(d) container_of(d, struct drm_sysfs_class, class) - -struct simple_dev { - dev_t dev; - struct class_device class_dev; -}; -#define to_simple_dev(d) container_of(d, struct simple_dev, class_dev) - -static void release_simple_dev(struct class_device *class_dev) -{ - struct simple_dev *s_dev = to_simple_dev(class_dev); - kfree(s_dev); -} - -static ssize_t show_dev(struct class_device *class_dev, char *buf) -{ - struct simple_dev *s_dev = to_simple_dev(class_dev); - return print_dev_t(buf, s_dev->dev); -} - -static void drm_sysfs_class_release(struct class *class) -{ - struct drm_sysfs_class *cs = to_drm_sysfs_class(class); - kfree(cs); -} +#include "drmP.h" /* Display the version of drm_core. This doesn't work right in current design */ static ssize_t version_show(struct class *dev, char *buf) @@ -68,42 +39,27 @@ static CLASS_ATTR(version, S_IRUGO, version_show, NULL); * Note, the pointer created here is to be destroyed when finished by making a * call to drm_sysfs_destroy(). */ -struct drm_sysfs_class *drm_sysfs_create(struct module *owner, char *name) +struct class *drm_sysfs_create(struct module *owner, char *name) { - struct drm_sysfs_class *cs; - int retval; + struct class *class; + int err; - cs = kmalloc(sizeof(*cs), GFP_KERNEL); - if (!cs) { - retval = -ENOMEM; - goto error; + class = class_create(owner, name); + if (IS_ERR(class)) { + err = PTR_ERR(class); + goto err_out; } - memset(cs, 0x00, sizeof(*cs)); - cs->class.name = name; - cs->class.class_release = drm_sysfs_class_release; - cs->class.release = release_simple_dev; + err = class_create_file(class, &class_attr_version); + if (err) + goto err_out_class; - cs->attr.attr.name = "dev"; - cs->attr.attr.mode = S_IRUGO; - cs->attr.attr.owner = owner; - cs->attr.show = show_dev; - cs->attr.store = NULL; + return class; - retval = class_register(&cs->class); - if (retval) - goto error; - retval = class_create_file(&cs->class, &class_attr_version); - if (retval) - goto error_with_class; - - return cs; - - error_with_class: - class_unregister(&cs->class); - error: - kfree(cs); - return ERR_PTR(retval); +err_out_class: + class_destroy(class); +err_out: + return ERR_PTR(err); } /** @@ -113,12 +69,13 @@ struct drm_sysfs_class *drm_sysfs_create(struct module *owner, char *name) * Note, the pointer to be destroyed must have been created with a call to * drm_sysfs_create(). 
*/ -void drm_sysfs_destroy(struct drm_sysfs_class *cs) +void drm_sysfs_destroy(struct class *class) { - if ((cs == NULL) || (IS_ERR(cs))) + if ((class == NULL) || (IS_ERR(class))) return; - class_unregister(&cs->class); + class_remove_file(class, &class_attr_version); + class_destroy(class); } static ssize_t show_dri(struct class_device *class_device, char *buf) @@ -135,7 +92,7 @@ static struct class_device_attribute class_device_attrs[] = { /** * drm_sysfs_device_add - adds a class device to sysfs for a character driver - * @cs: pointer to the struct drm_sysfs_class that this device should be registered to. + * @cs: pointer to the struct class that this device should be registered to. * @dev: the dev_t for the device to be added. * @device: a pointer to a struct device that is assiociated with this class device. * @fmt: string for the class device's name @@ -144,62 +101,42 @@ static struct class_device_attribute class_device_attrs[] = { * class. A "dev" file will be created, showing the dev_t for the device. The * pointer to the struct class_device will be returned from the call. Any further * sysfs files that might be required can be created using this pointer. - * Note: the struct drm_sysfs_class passed to this function must have previously been + * Note: the struct class passed to this function must have previously been * created with a call to drm_sysfs_create(). */ -struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs, - struct drm_head * head) +struct class_device *drm_sysfs_device_add(struct class *cs, struct drm_head *head) { - struct simple_dev *s_dev = NULL; - int i, retval; + struct class_device *class_dev; + int i, j, err; - if ((cs == NULL) || (IS_ERR(cs))) { - retval = -ENODEV; - goto error; + class_dev = class_device_create(cs, NULL, + MKDEV(DRM_MAJOR, head->minor), + &(head->dev->pdev)->dev, + "card%d", head->minor); + if (IS_ERR(class_dev)) { + err = PTR_ERR(class_dev); + goto err_out; } - s_dev = kmalloc(sizeof(*s_dev), GFP_KERNEL); - if (!s_dev) { - retval = -ENOMEM; - goto error; - } - memset(s_dev, 0x00, sizeof(*s_dev)); - - s_dev->dev = MKDEV(DRM_MAJOR, head->minor); - s_dev->class_dev.dev = &head->dev->pdev->dev; - s_dev->class_dev.class = &cs->class; - - snprintf(s_dev->class_dev.class_id, BUS_ID_SIZE, "card%d", head->minor); - retval = class_device_register(&s_dev->class_dev); - if (retval) - goto error; - - retval = class_device_create_file(&s_dev->class_dev, &cs->attr); - if (retval) - goto error_with_device; - - class_set_devdata(&s_dev->class_dev, head); + class_set_devdata(class_dev, head); for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++) { - retval = class_device_create_file(&s_dev->class_dev, - &class_device_attrs[i]); - if (retval) - goto error_with_files; + err = class_device_create_file(class_dev, + &class_device_attrs[i]); + if (err) + goto err_out_files; } - return &s_dev->class_dev; + return class_dev; - error_with_files: - while (i > 0) - class_device_remove_file(&s_dev->class_dev, - &class_device_attrs[--i]); - class_device_remove_file(&s_dev->class_dev, &cs->attr); - error_with_device: - class_device_unregister(&s_dev->class_dev); - error: - kfree(s_dev); - - return ERR_PTR(retval); +err_out_files: + if (i > 0) + for (j = 0; j < i; j++) + class_device_remove_file(class_dev, + &class_device_attrs[i]); + class_device_unregister(class_dev); +err_out: + return ERR_PTR(err); } /** @@ -211,11 +148,9 @@ struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs, */ void drm_sysfs_device_remove(struct class_device *class_dev) 
{ - struct simple_dev *s_dev = to_simple_dev(class_dev); int i; for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++) - class_device_remove_file(&s_dev->class_dev, &class_device_attrs[i]); - - class_device_unregister(&s_dev->class_dev); + class_device_remove_file(class_dev, &class_device_attrs[i]); + class_device_unregister(class_dev); } From 74c6f2f47a9977fef8fcc7c698862d5bd2f54336 Mon Sep 17 00:00:00 2001 From: Stephane Marchesin Date: Fri, 21 Sep 2007 22:04:45 +0200 Subject: [PATCH 318/437] nouveau: add some checks to the nv04 graph switching code. --- shared-core/nv04_graph.c | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/shared-core/nv04_graph.c b/shared-core/nv04_graph.c index f1117cd6..2cf052cf 100644 --- a/shared-core/nv04_graph.c +++ b/shared-core/nv04_graph.c @@ -352,13 +352,38 @@ void nouveau_nv04_context_switch(struct drm_device *dev) struct nouveau_channel *next, *last; int chid; + if (!dev) { + DRM_DEBUG("Invalid drm_device\n"); + return; + } + dev_priv = dev->dev_private; + if (!dev_priv) { + DRM_DEBUG("Invalid drm_nouveau_private\n"); + return; + } + if (!dev_priv->fifos) { + DRM_DEBUG("Invalid drm_nouveau_private->fifos\n"); + return; + } + chid = NV_READ(NV03_PFIFO_CACHE1_PUSH1)&(nouveau_fifo_number(dev)-1); next = dev_priv->fifos[chid]; + if (!next) { + DRM_DEBUG("Invalid next channel\n"); + return; + } + chid = (NV_READ(NV04_PGRAPH_CTX_USER) >> 24) & (nouveau_fifo_number(dev)-1); last = dev_priv->fifos[chid]; - DRM_INFO("NV: PGRAPH context switch interrupt channel %x -> %x\n",last->id, next->id); + if (!last) { + DRM_DEBUG("WARNING: Invalid last channel, switch to %x\n", + next->id); + } else { + DRM_INFO("NV: PGRAPH context switch interrupt channel %x -> %x\n", + last->id, next->id); + } /* NV_WRITE(NV03_PFIFO_CACHES, 0x0); NV_WRITE(NV04_PFIFO_CACHE0_PULL0, 0x0); From dc60c452e6ac72ebc4e5c73153d4d9d8c9edfae5 Mon Sep 17 00:00:00 2001 From: Stephane Marchesin Date: Fri, 21 Sep 2007 22:27:53 +0200 Subject: [PATCH 319/437] nouveau: fix notifiers on PPC. --- shared-core/nouveau_notifier.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/shared-core/nouveau_notifier.c b/shared-core/nouveau_notifier.c index 71b8cbe1..fbd4b56f 100644 --- a/shared-core/nouveau_notifier.c +++ b/shared-core/nouveau_notifier.c @@ -37,9 +37,12 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan) int flags, ret; /*TODO: PCI notifier blocks */ +#if defined(__powerpc__) if (dev_priv->agp_heap) flags = NOUVEAU_MEM_AGP; - else if (dev_priv->pci_heap) + else +#endif + if (dev_priv->pci_heap) flags = NOUVEAU_MEM_PCI; else flags = NOUVEAU_MEM_FB; From 7587e9682c1b70930c015915d588b42ccd00c7c4 Mon Sep 17 00:00:00 2001 From: Stephane Marchesin Date: Fri, 21 Sep 2007 22:42:39 +0200 Subject: [PATCH 320/437] nouveau: fix ppc and get it right this time. 
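The previous commit wrapped the AGP branch in #if defined(__powerpc__),
which inverted the intent: on PowerPC the AGP path should be skipped
(falling back to PCI or VRAM notifier blocks), while other
architectures keep it. With the corrected guard the selection in
nouveau_notifier_init_channel() reads (directives indented here only
for the sketch):

	#ifndef __powerpc__
		if (dev_priv->agp_heap)
			flags = NOUVEAU_MEM_AGP;
		else
	#endif
		if (dev_priv->pci_heap)
			flags = NOUVEAU_MEM_PCI;
		else
			flags = NOUVEAU_MEM_FB;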
--- shared-core/nouveau_notifier.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/shared-core/nouveau_notifier.c b/shared-core/nouveau_notifier.c index fbd4b56f..d3b79683 100644 --- a/shared-core/nouveau_notifier.c +++ b/shared-core/nouveau_notifier.c @@ -37,7 +37,7 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan) int flags, ret; /*TODO: PCI notifier blocks */ -#if defined(__powerpc__) +#ifndef __powerpc__ if (dev_priv->agp_heap) flags = NOUVEAU_MEM_AGP; else From 3c995c2c4d2530e5bd01548764b20c4d062fd7a5 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Fri, 21 Sep 2007 15:58:02 -0700 Subject: [PATCH 321/437] Fix mapCount refcounting on unmap, even though the value is unused. --- libdrm/xf86drm.c | 1 + 1 file changed, 1 insertion(+) diff --git a/libdrm/xf86drm.c b/libdrm/xf86drm.c index 63242bce..0849f896 100644 --- a/libdrm/xf86drm.c +++ b/libdrm/xf86drm.c @@ -2874,6 +2874,7 @@ int drmBOUnmap(int fd, drmBO *buf) if (ioctl(fd, DRM_IOCTL_BO_UNMAP, &arg)) { return -errno; } + buf->mapCount--; return 0; } From e7bfeb3031374653f7e55d67cc1b5c823849359f Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Fri, 21 Sep 2007 16:14:22 -0700 Subject: [PATCH 322/437] Add some more verbosity to drm_bo_set_pin_req comments. --- shared-core/drm.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/shared-core/drm.h b/shared-core/drm.h index 28275169..30c7a1a3 100644 --- a/shared-core/drm.h +++ b/shared-core/drm.h @@ -792,7 +792,9 @@ struct drm_bo_set_pin_req { unsigned int handle; /** * - 0: Unpin the given buffer object. - * - 1: Pin the given buffer object. + * - 1: Pin the given buffer object, requiring that its offset and + * memory area stay constant until unpin. The intended use is for + * scanout buffers. */ unsigned int pin; }; From da63f4ba0f15c3ae614eba92c8219670c674727e Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Sat, 22 Sep 2007 13:34:33 +0200 Subject: [PATCH 323/437] Add fence error member. Modify the TTM backend bind arguments. Export a number of functions needed for driver-specific super-ioctls. Add a function to map buffer objects from the kernel, regardless of where they're currently placed. A number of error fixes. --- linux-core/drm_agpsupport.c | 15 +-- linux-core/drm_bo.c | 229 +++++++++++++++++++++++++----------- linux-core/drm_bo_move.c | 197 ++++++++++++++++++++++++++++++- linux-core/drm_fence.c | 154 ++++++++++++++---------- linux-core/drm_object.c | 4 + linux-core/drm_objects.h | 146 ++++++++++++++++++----- linux-core/drm_ttm.c | 12 +- linux-core/i915_buffer.c | 9 +- linux-core/i915_fence.c | 7 +- linux-core/nouveau_sgdma.c | 16 ++- linux-core/via_buffer.c | 3 +- linux-core/via_fence.c | 3 +- linux-core/xgi_fence.c | 2 +- shared-core/drm.h | 3 +- shared-core/i915_drv.h | 3 +- shared-core/via_drv.h | 3 +- 16 files changed, 611 insertions(+), 195 deletions(-) diff --git a/linux-core/drm_agpsupport.c b/linux-core/drm_agpsupport.c index 4618823c..b68efc64 100644 --- a/linux-core/drm_agpsupport.c +++ b/linux-core/drm_agpsupport.c @@ -535,23 +535,23 @@ static int drm_agp_populate(struct drm_ttm_backend *backend, unsigned long num_p } static int drm_agp_bind_ttm(struct drm_ttm_backend *backend, - unsigned long offset, - int cached) + struct drm_bo_mem_reg *bo_mem) { - struct drm_agp_ttm_backend *agp_be = + struct drm_agp_ttm_backend *agp_be = container_of(backend, struct drm_agp_ttm_backend, backend); DRM_AGP_MEM *mem = agp_be->mem; int ret; DRM_DEBUG("drm_agp_bind_ttm\n"); mem->is_flushed = TRUE; - mem->type = (cached) ? 
AGP_USER_CACHED_MEMORY : + mem->type = (bo_mem->flags & DRM_BO_FLAG_CACHED) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY; - ret = drm_agp_bind_memory(mem, offset); + ret = drm_agp_bind_memory(mem, bo_mem->mm_node->start); if (ret) { DRM_ERROR("AGP Bind memory failed\n"); } - DRM_FLAG_MASKED(backend->flags, (cached) ? DRM_BE_FLAG_BOUND_CACHED : 0, + DRM_FLAG_MASKED(backend->flags, (bo_mem->flags & DRM_BO_FLAG_CACHED) ? + DRM_BE_FLAG_BOUND_CACHED : 0, DRM_BE_FLAG_BOUND_CACHED); return ret; } @@ -643,7 +643,8 @@ struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev) agp_be->bridge = dev->agp->bridge; agp_be->populated = FALSE; agp_be->backend.func = &agp_ttm_backend; - agp_be->backend.mem_type = DRM_BO_MEM_TT; + // agp_be->backend.mem_type = DRM_BO_MEM_TT; + agp_be->backend.dev = dev; return &agp_be->backend; } diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index b46d0361..1913df44 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -142,12 +142,8 @@ static int drm_bo_add_ttm(struct drm_buffer_object * bo) switch (bo->type) { case drm_bo_type_dc: - bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT); - if (!bo->ttm) - ret = -ENOMEM; - break; case drm_bo_type_kernel: - bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT); + bo->ttm = drm_ttm_init(dev, bo->num_pages << PAGE_SHIFT); if (!bo->ttm) ret = -ENOMEM; break; @@ -175,7 +171,8 @@ static int drm_bo_handle_move_mem(struct drm_buffer_object * bo, struct drm_mem_type_manager *new_man = &bm->man[mem->mem_type]; int ret = 0; - if (old_is_pci || new_is_pci) + if (old_is_pci || new_is_pci || + ((mem->flags ^ bo->mem.flags) & DRM_BO_FLAG_CACHED)) ret = drm_bo_vm_pre_move(bo, old_is_pci); if (ret) return ret; @@ -190,9 +187,7 @@ static int drm_bo_handle_move_mem(struct drm_buffer_object * bo, goto out_err; if (mem->mem_type != DRM_BO_MEM_LOCAL) { - ret = drm_bind_ttm(bo->ttm, new_man->flags & - DRM_BO_FLAG_CACHED, - mem->mm_node->start); + ret = drm_bind_ttm(bo->ttm, mem); if (ret) goto out_err; } @@ -242,7 +237,9 @@ static int drm_bo_handle_move_mem(struct drm_buffer_object * bo, _DRM_BO_FLAG_EVICTED); if (bo->mem.mm_node) - bo->offset = bo->mem.mm_node->start << PAGE_SHIFT; + bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) + + bm->man[bo->mem.mem_type].gpu_offset; + return 0; @@ -290,6 +287,7 @@ int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals, } return 0; } +EXPORT_SYMBOL(drm_bo_wait); static int drm_bo_expire_fence(struct drm_buffer_object * bo, int allow_errors) { @@ -417,7 +415,7 @@ static void drm_bo_destroy_locked(struct drm_buffer_object * bo) atomic_dec(&bm->count); - BUG_ON(!list_empty(&bo->base.list)); + // BUG_ON(!list_empty(&bo->base.list)); drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ); return; @@ -503,6 +501,7 @@ void drm_bo_usage_deref_locked(struct drm_buffer_object ** bo) drm_bo_destroy_locked(tmp_bo); } } +EXPORT_SYMBOL(drm_bo_usage_deref_locked); static void drm_bo_base_deref_locked(struct drm_file * file_priv, struct drm_user_object * uo) @@ -531,38 +530,76 @@ void drm_bo_usage_deref_unlocked(struct drm_buffer_object ** bo) } EXPORT_SYMBOL(drm_bo_usage_deref_unlocked); +void drm_putback_buffer_objects(struct drm_device *dev) +{ + struct drm_buffer_manager *bm = &dev->bm; + struct list_head *list = &bm->unfenced; + struct drm_buffer_object *entry, *next; + + mutex_lock(&dev->struct_mutex); + list_for_each_entry_safe(entry, next, list, lru) { + atomic_inc(&entry->usage); + mutex_unlock(&dev->struct_mutex); + + mutex_lock(&entry->mutex); + 
BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED)); + mutex_lock(&dev->struct_mutex); + + list_del_init(&entry->lru); + DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); + DRM_WAKEUP(&entry->event_queue); + + /* + * FIXME: Might want to put back on head of list + * instead of tail here. + */ + + drm_bo_add_to_lru(entry); + mutex_unlock(&entry->mutex); + drm_bo_usage_deref_locked(&entry); + } + mutex_unlock(&dev->struct_mutex); +} +EXPORT_SYMBOL(drm_putback_buffer_objects); + + /* * Note. The caller has to register (if applicable) * and deregister fence object usage. */ -int drm_fence_buffer_objects(struct drm_file * file_priv, +int drm_fence_buffer_objects(struct drm_device *dev, struct list_head *list, uint32_t fence_flags, struct drm_fence_object * fence, struct drm_fence_object ** used_fence) { - struct drm_device *dev = file_priv->head->dev; struct drm_buffer_manager *bm = &dev->bm; - struct drm_buffer_object *entry; uint32_t fence_type = 0; + uint32_t fence_class = ~0; int count = 0; int ret = 0; struct list_head *l; - LIST_HEAD(f_list); mutex_lock(&dev->struct_mutex); if (!list) list = &bm->unfenced; + if (fence) + fence_class = fence->class; + list_for_each_entry(entry, list, lru) { BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED)); - fence_type |= entry->fence_type; - if (entry->fence_class != 0) { - DRM_ERROR("Fence class %d is not implemented yet.\n", - entry->fence_class); + fence_type |= entry->new_fence_type; + if (fence_class == ~0) + fence_class = entry->new_fence_class; + else if (entry->new_fence_class != fence_class) { + DRM_ERROR("Unmatching fence classes on unfenced list: " + "%d and %d.\n", + fence_class, + entry->new_fence_class); ret = -EINVAL; goto out; } @@ -574,14 +611,6 @@ int drm_fence_buffer_objects(struct drm_file * file_priv, goto out; } - /* - * Transfer to a local list before we release the dev->struct_mutex; - * This is so we don't get any new unfenced objects while fencing - * the ones we already have.. 
- */ - - list_splice_init(list, &f_list); - if (fence) { if ((fence_type & fence->type) != fence_type) { DRM_ERROR("Given fence doesn't match buffers " @@ -591,7 +620,7 @@ int drm_fence_buffer_objects(struct drm_file * file_priv, } } else { mutex_unlock(&dev->struct_mutex); - ret = drm_fence_object_create(dev, 0, fence_type, + ret = drm_fence_object_create(dev, fence_class, fence_type, fence_flags | DRM_FENCE_FLAG_EMIT, &fence); mutex_lock(&dev->struct_mutex); @@ -600,8 +629,8 @@ int drm_fence_buffer_objects(struct drm_file * file_priv, } count = 0; - l = f_list.next; - while (l != &f_list) { + l = list->next; + while (l != list) { prefetch(l->next); entry = list_entry(l, struct drm_buffer_object, lru); atomic_inc(&entry->usage); @@ -614,6 +643,8 @@ int drm_fence_buffer_objects(struct drm_file * file_priv, if (entry->fence) drm_fence_usage_deref_locked(&entry->fence); entry->fence = drm_fence_reference_locked(fence); + entry->fence_class = entry->new_fence_class; + entry->fence_type = entry->new_fence_type; DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); DRM_WAKEUP(&entry->event_queue); @@ -621,7 +652,7 @@ int drm_fence_buffer_objects(struct drm_file * file_priv, } mutex_unlock(&entry->mutex); drm_bo_usage_deref_locked(&entry); - l = f_list.next; + l = list->next; } DRM_DEBUG("Fenced %d buffers\n", count); out: @@ -629,7 +660,6 @@ int drm_fence_buffer_objects(struct drm_file * file_priv, *used_fence = fence; return ret; } - EXPORT_SYMBOL(drm_fence_buffer_objects); /* @@ -944,6 +974,7 @@ struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv, atomic_inc(&bo->usage); return bo; } +EXPORT_SYMBOL(drm_lookup_buffer_object); /* * Call bo->mutex locked. @@ -1079,9 +1110,12 @@ static int drm_bo_wait_unfenced(struct drm_buffer_object * bo, int no_wait, static void drm_bo_fill_rep_arg(struct drm_buffer_object * bo, struct drm_bo_info_rep *rep) { + if (!rep) + return; + rep->handle = bo->base.hash.key; rep->flags = bo->mem.flags; - rep->size = bo->mem.num_pages * PAGE_SIZE; + rep->size = bo->num_pages * PAGE_SIZE; rep->offset = bo->offset; rep->arg_handle = bo->map_list.user_token; rep->mask = bo->mem.mask; @@ -1260,7 +1294,7 @@ int drm_bo_move_buffer(struct drm_buffer_object * bo, uint32_t new_mem_flags, if (ret) return ret; - mem.num_pages = bo->mem.num_pages; + mem.num_pages = bo->num_pages; mem.size = mem.num_pages << PAGE_SHIFT; mem.mask = new_mem_flags; mem.page_alignment = bo->mem.page_alignment; @@ -1308,7 +1342,7 @@ static int drm_bo_mem_compat(struct drm_bo_mem_reg * mem) if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0) return 0; if ((flag_diff & DRM_BO_FLAG_CACHED) && - (!(mem->mask & DRM_BO_FLAG_CACHED) || + (/* !(mem->mask & DRM_BO_FLAG_CACHED) ||*/ (mem->mask & DRM_BO_FLAG_FORCE_CACHING))) { return 0; } @@ -1375,7 +1409,7 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo, (unsigned long long) bo->mem.mask, (unsigned long long) bo->mem.flags); - ret = driver->fence_type(bo, &ftype); + ret = driver->fence_type(bo, &fence_class, &ftype); if (ret) { DRM_ERROR("Driver did not support given buffer permissions\n"); @@ -1404,13 +1438,15 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo, return ret; } - - bo->fence_class = fence_class; - bo->fence_type = ftype; - ret = drm_bo_wait_unmapped(bo, no_wait); - if (ret) - return ret; + bo->new_fence_class = fence_class; + bo->new_fence_type = ftype; + + ret = drm_bo_wait_unmapped(bo, no_wait); + if (ret) { + DRM_ERROR("Timed out waiting for buffer unmap.\n"); + return 
ret; + } if (bo->type == drm_bo_type_fake) { ret = drm_bo_check_fake(dev, &bo->mem); if (ret) @@ -1465,11 +1501,52 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo, return 0; } -static int drm_bo_handle_validate(struct drm_file *file_priv, - uint32_t handle, - uint32_t fence_class, - uint64_t flags, uint64_t mask, uint32_t hint, - struct drm_bo_info_rep *rep) +int drm_bo_do_validate(struct drm_buffer_object *bo, + uint64_t flags, uint64_t mask, uint32_t hint, + uint32_t fence_class, + int no_wait, + struct drm_bo_info_rep *rep) +{ + int ret; + + mutex_lock(&bo->mutex); + ret = drm_bo_wait_unfenced(bo, no_wait, 0); + + if (ret) + goto out; + + if ((mask & flags & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) { + DRM_ERROR + ("DRM_BO_FLAG_NO_EVICT is only available to priviliged " + "processes\n"); + return -EPERM; + } + + + DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask); + ret = drm_bo_new_mask(bo, flags, hint); + if (ret) + goto out; + + ret = drm_buffer_object_validate(bo, + fence_class, + !(hint & DRM_BO_HINT_DONT_FENCE), + no_wait); +out: + if (rep) + drm_bo_fill_rep_arg(bo, rep); + + mutex_unlock(&bo->mutex); + return ret; +} +EXPORT_SYMBOL(drm_bo_do_validate); + + +int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle, + uint32_t fence_class, + uint64_t flags, uint64_t mask, uint32_t hint, + struct drm_bo_info_rep * rep, + struct drm_buffer_object **bo_rep) { struct drm_device *dev = file_priv->head->dev; struct drm_buffer_object *bo; @@ -1479,34 +1556,22 @@ static int drm_bo_handle_validate(struct drm_file *file_priv, mutex_lock(&dev->struct_mutex); bo = drm_lookup_buffer_object(file_priv, handle, 1); mutex_unlock(&dev->struct_mutex); + if (!bo) { return -EINVAL; } - mutex_lock(&bo->mutex); - ret = drm_bo_wait_unfenced(bo, no_wait, 0); + ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class, + no_wait, rep); - if (ret) - goto out; + if (!ret && bo_rep) + *bo_rep = bo; + else + drm_bo_usage_deref_unlocked(&bo); - DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask); - ret = drm_bo_new_mask(bo, flags, hint); - if (ret) - goto out; - - ret = - drm_buffer_object_validate(bo, fence_class, - !(hint & DRM_BO_HINT_DONT_FENCE), - no_wait); - drm_bo_fill_rep_arg(bo, rep); - - out: - - mutex_unlock(&bo->mutex); - - drm_bo_usage_deref_unlocked(&bo); return ret; } +EXPORT_SYMBOL(drm_bo_handle_validate); /** * Fills out the generic buffer object ioctl reply with the information for @@ -1612,8 +1677,9 @@ int drm_buffer_object_create(struct drm_device *dev, #endif bo->dev = dev; bo->type = type; + bo->num_pages = num_pages; bo->mem.mem_type = DRM_BO_MEM_LOCAL; - bo->mem.num_pages = num_pages; + bo->mem.num_pages = bo->num_pages; bo->mem.mm_node = NULL; bo->mem.page_alignment = page_alignment; if (bo->type == drm_bo_type_fake) { @@ -1706,6 +1772,7 @@ int drm_bo_op_ioctl(struct drm_device *dev, void *data, struct drm_file *file_pr struct drm_bo_op_arg *arg = data; struct drm_bo_op_req *req = &arg->d.req; struct drm_bo_info_rep rep; + struct drm_buffer_object *dummy; unsigned long next = 0; void __user *curuserarg = NULL; int ret; @@ -1742,7 +1809,7 @@ int drm_bo_op_ioctl(struct drm_device *dev, void *data, struct drm_file *file_pr req->bo_req.flags, req->bo_req.mask, req->bo_req.hint, - &rep); + &rep, &dummy); break; case drm_bo_fence: ret = -EINVAL; @@ -2092,9 +2159,30 @@ static void drm_bo_clean_unfenced(struct drm_device *dev) struct drm_buffer_manager *bm = &dev->bm; struct list_head *head, *list; struct drm_buffer_object *entry; + struct drm_fence_object 
*fence; head = &bm->unfenced; + if (list_empty(head)) + return; + + DRM_ERROR("Clean unfenced\n"); + + if (drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence)) { + + /* + * Fixme: Should really wait here. + */ + } + + if (fence) + drm_fence_usage_deref_locked(&fence); + + if (list_empty(head)) + return; + + DRM_ERROR("Really clean unfenced\n"); + list = head->next; while(list != head) { prefetch(list->next); @@ -2254,7 +2342,7 @@ int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type) if (!man->has_type) { DRM_ERROR("Trying to take down uninitialized " - "memory manager type\n"); + "memory manager type %u\n", mem_type); return ret; } man->use_type = 0; @@ -2276,6 +2364,7 @@ int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type) return ret; } +EXPORT_SYMBOL(drm_bo_clean_mm); /** *Evict all buffers of a particular mem_type, but leave memory manager diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c index 1a613916..dae99181 100644 --- a/linux-core/drm_bo_move.c +++ b/linux-core/drm_bo_move.c @@ -71,9 +71,7 @@ int drm_bo_move_ttm(struct drm_buffer_object * bo, save_flags = old_mem->flags; } if (new_mem->mem_type != DRM_BO_MEM_LOCAL) { - ret = drm_bind_ttm(ttm, - new_mem->flags & DRM_BO_FLAG_CACHED, - new_mem->mm_node->start); + ret = drm_bind_ttm(ttm, new_mem); if (ret) return ret; } @@ -344,6 +342,7 @@ int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo, ret = drm_fence_object_create(dev, fence_class, fence_type, fence_flags | DRM_FENCE_FLAG_EMIT, &bo->fence); + bo->fence_type = fence_type; if (ret) return ret; @@ -410,3 +409,195 @@ int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo, } EXPORT_SYMBOL(drm_bo_move_accel_cleanup); + +int drm_bo_same_page(unsigned long offset, + unsigned long offset2) +{ + return (offset & PAGE_MASK) == (offset2 & PAGE_MASK); +} +EXPORT_SYMBOL(drm_bo_same_page); + +unsigned long drm_bo_offset_end(unsigned long offset, + unsigned long end) +{ + + offset = (offset + PAGE_SIZE) & PAGE_MASK; + return (end < offset) ? end : offset; +} +EXPORT_SYMBOL(drm_bo_offset_end); + + +static pgprot_t drm_kernel_io_prot(uint32_t map_type) +{ + pgprot_t tmp = PAGE_KERNEL; + +#if defined(__i386__) || defined(__x86_64__) +#ifdef USE_PAT_WC +#warning using pat + if (drm_use_pat() && map_type == _DRM_TTM) { + pgprot_val(tmp) |= _PAGE_PAT; + return tmp; + } +#endif + if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) { + pgprot_val(tmp) |= _PAGE_PCD; + pgprot_val(tmp) &= ~_PAGE_PWT; + } +#elif defined(__powerpc__) + pgprot_val(tmp) |= _PAGE_NO_CACHE; + if (map_type == _DRM_REGISTERS) + pgprot_val(tmp) |= _PAGE_GUARDED; +#endif +#if defined(__ia64__) + if (map_type == _DRM_TTM) + tmp = pgprot_writecombine(tmp); + else + tmp = pgprot_noncached(tmp); +#endif + return tmp; +} + +static int drm_bo_ioremap(struct drm_buffer_object *bo, unsigned long bus_base, + unsigned long bus_offset, unsigned long bus_size, + struct drm_bo_kmap_obj *map) +{ + struct drm_device *dev = bo->dev; + struct drm_bo_mem_reg *mem = &bo->mem; + struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type]; + + if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP)) { + map->bo_kmap_type = bo_map_premapped; + map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset); + } else { + map->bo_kmap_type = bo_map_iomap; + map->virtual = ioremap_nocache(bus_base + bus_offset, bus_size); + } + return (!map->virtual) ? 
-ENOMEM : 0; +} + +static int drm_bo_kmap_ttm(struct drm_buffer_object *bo, unsigned long start_page, + unsigned long num_pages, struct drm_bo_kmap_obj *map) +{ + struct drm_device *dev = bo->dev; + struct drm_bo_mem_reg *mem = &bo->mem; + struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type]; + pgprot_t prot; + struct drm_ttm *ttm = bo->ttm; + struct page *d; + int i; + + BUG_ON(!ttm); + + /* + * Populate the part we're mapping; + */ + + for (i=start_page; i< num_pages; ++i) { + d = drm_ttm_get_page(ttm, i); + if (!d) + return -ENOMEM; + } + + if (num_pages == 1 && (mem->flags & DRM_BO_FLAG_CACHED)) { + + /* + * We're mapping a single page, and the desired + * page protection is consistent with the bo. + */ + + map->bo_kmap_type = bo_map_kmap; + map->page = drm_ttm_get_page(ttm, start_page); + map->virtual = kmap(map->page); + } else { + + /* + * We need to use vmap to get the desired page protection + * or to make the buffer object look contigous. + */ + + prot = (mem->flags & DRM_BO_FLAG_CACHED) ? + PAGE_KERNEL : + drm_kernel_io_prot(man->drm_bus_maptype); + map->bo_kmap_type = bo_map_vmap; + map->virtual = vmap(ttm->pages + start_page, + num_pages, 0, prot); + } + return (!map->virtual) ? -ENOMEM : 0; +} + +/* + * This function is to be used for kernel mapping of buffer objects. + * It chooses the appropriate mapping method depending on the memory type + * and caching policy the buffer currently has. + * Mapping multiple pages or buffers that live in io memory is a bit slow and + * consumes vmalloc space. Be restrictive with such mappings. + * Mapping single pages usually returns the logical kernel address, (which is fast) + * BUG may use slower temporary mappings for high memory pages or + * uncached / write-combined pages. + * + * The function fills in a drm_bo_kmap_obj which can be used to return the + * kernel virtual address of the buffer. + * + * Code servicing a non-priviliged user request is only allowed to map one + * page at a time. We might need to implement a better scheme to stop such + * processes from consuming all vmalloc space. 
+ */ + +int drm_bo_kmap(struct drm_buffer_object *bo, unsigned long start_page, + unsigned long num_pages, struct drm_bo_kmap_obj *map) +{ + int ret; + unsigned long bus_base; + unsigned long bus_offset; + unsigned long bus_size; + + map->virtual = NULL; + + if (num_pages > bo->num_pages) + return -EINVAL; + if (start_page > bo->num_pages) + return -EINVAL; +#if 0 + if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC)) + return -EPERM; +#endif + ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base, + &bus_offset, &bus_size); + + if (ret) + return ret; + + if (bus_size == 0) { + return drm_bo_kmap_ttm(bo, start_page, num_pages, map); + } else { + bus_offset += start_page << PAGE_SHIFT; + bus_size = num_pages << PAGE_SHIFT; + return drm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map); + } +} +EXPORT_SYMBOL(drm_bo_kmap); + +void drm_bo_kunmap(struct drm_bo_kmap_obj *map) +{ + if (!map->virtual) + return; + + switch(map->bo_kmap_type) { + case bo_map_iomap: + iounmap(map->virtual); + break; + case bo_map_vmap: + vunmap(map->virtual); + break; + case bo_map_kmap: + kunmap(map->page); + break; + case bo_map_premapped: + break; + default: + BUG(); + } + map->virtual = NULL; + map->page = NULL; +} +EXPORT_SYMBOL(drm_bo_kunmap); diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c index 2f16f7ef..d228547c 100644 --- a/linux-core/drm_fence.c +++ b/linux-core/drm_fence.c @@ -35,7 +35,7 @@ */ void drm_fence_handler(struct drm_device * dev, uint32_t class, - uint32_t sequence, uint32_t type) + uint32_t sequence, uint32_t type, uint32_t error) { int wake = 0; uint32_t diff; @@ -49,6 +49,7 @@ void drm_fence_handler(struct drm_device * dev, uint32_t class, int is_exe = (type & DRM_FENCE_TYPE_EXE); int ge_last_exe; + diff = (sequence - fc->exe_flush_sequence) & driver->sequence_mask; if (fc->pending_exe_flush && is_exe && diff < driver->wrap_diff) @@ -57,9 +58,6 @@ void drm_fence_handler(struct drm_device * dev, uint32_t class, diff = (sequence - fc->last_exe_flush) & driver->sequence_mask; ge_last_exe = diff < driver->wrap_diff; - if (ge_last_exe) - fc->pending_flush &= ~type; - if (is_exe && ge_last_exe) { fc->last_exe_flush = sequence; } @@ -75,36 +73,66 @@ void drm_fence_handler(struct drm_device * dev, uint32_t class, } } + fc->pending_flush &= ~type; head = (found) ? &fence->ring : &fc->ring; list_for_each_entry_safe_reverse(fence, next, head, ring) { if (&fence->ring == &fc->ring) break; + if (error) { + fence->error = error; + fence->signaled = fence->type; + fence->submitted_flush = fence->type; + fence->flush_mask = fence->type; + list_del_init(&fence->ring); + wake = 1; + break; + } + type |= fence->native_type; relevant = type & fence->type; if ((fence->signaled | relevant) != fence->signaled) { fence->signaled |= relevant; + fence->flush_mask |= relevant; + fence->submitted_flush |= relevant; DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n", fence->base.hash.key, fence->signaled); - fence->submitted_flush |= relevant; wake = 1; } relevant = fence->flush_mask & - ~(fence->signaled | fence->submitted_flush); + ~(fence->submitted_flush | fence->signaled); - if (relevant) { - fc->pending_flush |= relevant; - fence->submitted_flush = fence->flush_mask; - } + fc->pending_flush |= relevant; + fence->submitted_flush |= relevant; if (!(fence->type & ~fence->signaled)) { DRM_DEBUG("Fence completely signaled 0x%08lx\n", fence->base.hash.key); list_del_init(&fence->ring); } + + } + + /* + * Reinstate lost flush flags. 
+ */ + + if ((fc->pending_flush & type) != type) { + head = head->prev; + list_for_each_entry(fence, head, ring) { + if (&fence->ring == &fc->ring) + break; + diff = (fc->last_exe_flush - fence->sequence) & + driver->sequence_mask; + if (diff > driver->wrap_diff) + break; + + relevant = fence->submitted_flush & ~fence->signaled; + fc->pending_flush |= relevant; + } } if (wake) { @@ -141,6 +169,7 @@ void drm_fence_usage_deref_locked(struct drm_fence_object ** fence) drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE); } } +EXPORT_SYMBOL(drm_fence_usage_deref_locked); void drm_fence_usage_deref_unlocked(struct drm_fence_object ** fence) { @@ -160,6 +189,7 @@ void drm_fence_usage_deref_unlocked(struct drm_fence_object ** fence) mutex_unlock(&dev->struct_mutex); } } +EXPORT_SYMBOL(drm_fence_usage_deref_unlocked); struct drm_fence_object *drm_fence_reference_locked(struct drm_fence_object *src) @@ -178,7 +208,7 @@ void drm_fence_reference_unlocked(struct drm_fence_object **dst, atomic_inc(&src->usage); mutex_unlock(&src->dev->struct_mutex); } - +EXPORT_SYMBOL(drm_fence_reference_unlocked); static void drm_fence_object_destroy(struct drm_file *priv, struct drm_user_object * base) { @@ -206,6 +236,7 @@ int drm_fence_object_signaled(struct drm_fence_object * fence, return signaled; } +EXPORT_SYMBOL(drm_fence_object_signaled); static void drm_fence_flush_exe(struct drm_fence_class_manager * fc, struct drm_fence_driver * driver, uint32_t sequence) @@ -241,7 +272,8 @@ int drm_fence_object_flush(struct drm_fence_object * fence, write_lock_irqsave(&fm->lock, flags); fence->flush_mask |= type; - if (fence->submitted_flush == fence->signaled) { + if ((fence->submitted_flush & fence->signaled) + == fence->submitted_flush) { if ((fence->type & DRM_FENCE_TYPE_EXE) && !(fence->submitted_flush & DRM_FENCE_TYPE_EXE)) { drm_fence_flush_exe(fc, driver, fence->sequence); @@ -329,7 +361,15 @@ static int drm_fence_lazy_wait(struct drm_fence_object *fence, if (ret == -EBUSY) { DRM_ERROR("Fence timeout. " "GPU lockup or fence driver was " - "taken down.\n"); + "taken down. %d 0x%08x 0x%02x 0x%02x 0x%02x\n", + fence->class, + fence->sequence, + fence->type, + mask, + fence->signaled); + DRM_ERROR("Pending exe flush %d 0x%08x\n", + fc->pending_exe_flush, + fc->exe_flush_sequence); } return ((ret == -EINTR) ? 
-EAGAIN : ret); } @@ -348,6 +388,7 @@ int drm_fence_object_wait(struct drm_fence_object * fence, if (mask & ~fence->type) { DRM_ERROR("Wait trying to extend fence type" " 0x%08x 0x%08x\n", mask, fence->type); + BUG(); return -EINVAL; } @@ -402,6 +443,8 @@ int drm_fence_object_wait(struct drm_fence_object * fence, return 0; } +EXPORT_SYMBOL(drm_fence_object_wait); + int drm_fence_object_emit(struct drm_fence_object * fence, uint32_t fence_flags, uint32_t class, uint32_t type) @@ -434,6 +477,7 @@ int drm_fence_object_emit(struct drm_fence_object * fence, write_unlock_irqrestore(&fm->lock, flags); return 0; } +EXPORT_SYMBOL(drm_fence_object_emit); static int drm_fence_object_init(struct drm_device * dev, uint32_t class, uint32_t type, @@ -545,6 +589,23 @@ void drm_fence_manager_init(struct drm_device * dev) write_unlock_irqrestore(&fm->lock, flags); } +void drm_fence_fill_arg(struct drm_fence_object *fence, struct drm_fence_arg *arg) +{ + struct drm_device *dev = fence->dev; + struct drm_fence_manager *fm = &dev->fm; + unsigned long irq_flags; + + read_lock_irqsave(&fm->lock, irq_flags); + arg->handle = fence->base.hash.key; + arg->class = fence->class; + arg->type = fence->type; + arg->signaled = fence->signaled; + arg->error = fence->error; + read_unlock_irqrestore(&fm->lock, irq_flags); +} +EXPORT_SYMBOL(drm_fence_fill_arg); + + void drm_fence_manager_takedown(struct drm_device * dev) { } @@ -572,7 +633,6 @@ int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file * struct drm_fence_manager *fm = &dev->fm; struct drm_fence_arg *arg = data; struct drm_fence_object *fence; - unsigned long flags; ret = 0; if (!fm->initialized) { @@ -597,14 +657,10 @@ int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file * /* * usage > 0. 
No need to lock dev->struct_mutex; */ - + arg->handle = fence->base.hash.key; - read_lock_irqsave(&fm->lock, flags); - arg->class = fence->class; - arg->type = fence->type; - arg->signaled = fence->signaled; - read_unlock_irqrestore(&fm->lock, flags); + drm_fence_fill_arg(fence, arg); drm_fence_usage_deref_unlocked(&fence); return ret; @@ -642,7 +698,6 @@ int drm_fence_reference_ioctl(struct drm_device *dev, void *data, struct drm_fil struct drm_fence_arg *arg = data; struct drm_fence_object *fence; struct drm_user_object *uo; - unsigned long flags; ret = 0; if (!fm->initialized) { @@ -654,12 +709,7 @@ int drm_fence_reference_ioctl(struct drm_device *dev, void *data, struct drm_fil if (ret) return ret; fence = drm_lookup_fence_object(file_priv, arg->handle); - - read_lock_irqsave(&fm->lock, flags); - arg->class = fence->class; - arg->type = fence->type; - arg->signaled = fence->signaled; - read_unlock_irqrestore(&fm->lock, flags); + drm_fence_fill_arg(fence, arg); drm_fence_usage_deref_unlocked(&fence); return ret; @@ -687,7 +737,6 @@ int drm_fence_signaled_ioctl(struct drm_device *dev, void *data, struct drm_file struct drm_fence_manager *fm = &dev->fm; struct drm_fence_arg *arg = data; struct drm_fence_object *fence; - unsigned long flags; ret = 0; if (!fm->initialized) { @@ -699,11 +748,7 @@ int drm_fence_signaled_ioctl(struct drm_device *dev, void *data, struct drm_file if (!fence) return -EINVAL; - read_lock_irqsave(&fm->lock, flags); - arg->class = fence->class; - arg->type = fence->type; - arg->signaled = fence->signaled; - read_unlock_irqrestore(&fm->lock, flags); + drm_fence_fill_arg(fence, arg); drm_fence_usage_deref_unlocked(&fence); return ret; @@ -715,7 +760,6 @@ int drm_fence_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *f struct drm_fence_manager *fm = &dev->fm; struct drm_fence_arg *arg = data; struct drm_fence_object *fence; - unsigned long flags; ret = 0; if (!fm->initialized) { @@ -728,11 +772,7 @@ int drm_fence_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *f return -EINVAL; ret = drm_fence_object_flush(fence, arg->type); - read_lock_irqsave(&fm->lock, flags); - arg->class = fence->class; - arg->type = fence->type; - arg->signaled = fence->signaled; - read_unlock_irqrestore(&fm->lock, flags); + drm_fence_fill_arg(fence, arg); drm_fence_usage_deref_unlocked(&fence); return ret; @@ -745,7 +785,6 @@ int drm_fence_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *fi struct drm_fence_manager *fm = &dev->fm; struct drm_fence_arg *arg = data; struct drm_fence_object *fence; - unsigned long flags; ret = 0; if (!fm->initialized) { @@ -760,11 +799,7 @@ int drm_fence_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *fi arg->flags & DRM_FENCE_FLAG_WAIT_LAZY, 0, arg->type); - read_lock_irqsave(&fm->lock, flags); - arg->class = fence->class; - arg->type = fence->type; - arg->signaled = fence->signaled; - read_unlock_irqrestore(&fm->lock, flags); + drm_fence_fill_arg(fence, arg); drm_fence_usage_deref_unlocked(&fence); return ret; @@ -777,7 +812,6 @@ int drm_fence_emit_ioctl(struct drm_device *dev, void *data, struct drm_file *fi struct drm_fence_manager *fm = &dev->fm; struct drm_fence_arg *arg = data; struct drm_fence_object *fence; - unsigned long flags; ret = 0; if (!fm->initialized) { @@ -792,11 +826,7 @@ int drm_fence_emit_ioctl(struct drm_device *dev, void *data, struct drm_file *fi ret = drm_fence_object_emit(fence, arg->flags, arg->class, arg->type); - read_lock_irqsave(&fm->lock, flags); - arg->class = 
fence->class; - arg->type = fence->type; - arg->signaled = fence->signaled; - read_unlock_irqrestore(&fm->lock, flags); + drm_fence_fill_arg(fence, arg); drm_fence_usage_deref_unlocked(&fence); return ret; @@ -808,7 +838,6 @@ int drm_fence_buffers_ioctl(struct drm_device *dev, void *data, struct drm_file struct drm_fence_manager *fm = &dev->fm; struct drm_fence_arg *arg = data; struct drm_fence_object *fence; - unsigned long flags; ret = 0; if (!fm->initialized) { @@ -821,23 +850,22 @@ int drm_fence_buffers_ioctl(struct drm_device *dev, void *data, struct drm_file return -EINVAL; } LOCK_TEST_WITH_RETURN(dev, file_priv); - ret = drm_fence_buffer_objects(file_priv, NULL, arg->flags, + ret = drm_fence_buffer_objects(dev, NULL, arg->flags, NULL, &fence); if (ret) return ret; - ret = drm_fence_add_user_object(file_priv, fence, - arg->flags & - DRM_FENCE_FLAG_SHAREABLE); - if (ret) - return ret; + + if (!(arg->flags & DRM_FENCE_FLAG_NO_USER)) { + ret = drm_fence_add_user_object(file_priv, fence, + arg->flags & + DRM_FENCE_FLAG_SHAREABLE); + if (ret) + return ret; + } arg->handle = fence->base.hash.key; - read_lock_irqsave(&fm->lock, flags); - arg->class = fence->class; - arg->type = fence->type; - arg->signaled = fence->signaled; - read_unlock_irqrestore(&fm->lock, flags); + drm_fence_fill_arg(fence, arg); drm_fence_usage_deref_unlocked(&fence); return ret; diff --git a/linux-core/drm_object.c b/linux-core/drm_object.c index 3d866333..6bd89b1d 100644 --- a/linux-core/drm_object.c +++ b/linux-core/drm_object.c @@ -50,6 +50,7 @@ int drm_add_user_object(struct drm_file * priv, struct drm_user_object * item, list_add_tail(&item->list, &priv->user_objects); return 0; } +EXPORT_SYMBOL(drm_add_user_object); struct drm_user_object *drm_lookup_user_object(struct drm_file * priv, uint32_t key) { @@ -76,6 +77,7 @@ struct drm_user_object *drm_lookup_user_object(struct drm_file * priv, uint32_t } return item; } +EXPORT_SYMBOL(drm_lookup_user_object); static void drm_deref_user_object(struct drm_file * priv, struct drm_user_object * item) { @@ -104,6 +106,7 @@ int drm_remove_user_object(struct drm_file * priv, struct drm_user_object * item drm_deref_user_object(priv, item); return 0; } +EXPORT_SYMBOL(drm_remove_user_object); static int drm_object_ref_action(struct drm_file * priv, struct drm_user_object * ro, enum drm_ref_type action) @@ -196,6 +199,7 @@ struct drm_ref_object *drm_lookup_ref_object(struct drm_file * priv, return drm_hash_entry(hash, struct drm_ref_object, hash); } +EXPORT_SYMBOL(drm_lookup_ref_object); static void drm_remove_other_references(struct drm_file * priv, struct drm_user_object * ro) diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index 096041d7..25072dbe 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -32,6 +32,7 @@ #define _DRM_OBJECTS_H struct drm_device; +struct drm_bo_mem_reg; /*************************************************** * User space objects. (drm_object.c) @@ -42,10 +43,14 @@ struct drm_device; enum drm_object_type { drm_fence_type, drm_buffer_type, - drm_ttm_type /* * Add other user space object types here. 
*/ + drm_driver_type0 = 256, + drm_driver_type1, + drm_driver_type2, + drm_driver_type3, + drm_driver_type4 }; /* @@ -156,6 +161,7 @@ struct drm_fence_object { uint32_t sequence; uint32_t flush_mask; uint32_t submitted_flush; + uint32_t error; }; #define _DRM_FENCE_CLASSES 8 @@ -192,7 +198,7 @@ struct drm_fence_driver { }; extern void drm_fence_handler(struct drm_device *dev, uint32_t class, - uint32_t sequence, uint32_t type); + uint32_t sequence, uint32_t type, uint32_t error); extern void drm_fence_manager_init(struct drm_device *dev); extern void drm_fence_manager_takedown(struct drm_device *dev); extern void drm_fence_flush_old(struct drm_device *dev, uint32_t class, @@ -210,6 +216,12 @@ extern int drm_fence_object_wait(struct drm_fence_object * fence, extern int drm_fence_object_create(struct drm_device *dev, uint32_t type, uint32_t fence_flags, uint32_t class, struct drm_fence_object ** c_fence); +extern int drm_fence_object_emit(struct drm_fence_object * fence, + uint32_t fence_flags, uint32_t class, + uint32_t type); +extern void drm_fence_fill_arg(struct drm_fence_object *fence, + struct drm_fence_arg *arg); + extern int drm_fence_add_user_object(struct drm_file * priv, struct drm_fence_object * fence, int shareable); @@ -258,23 +270,22 @@ struct drm_ttm_backend_func { unsigned long num_pages, struct page ** pages); void (*clear) (struct drm_ttm_backend * backend); int (*bind) (struct drm_ttm_backend * backend, - unsigned long offset, int cached); + struct drm_bo_mem_reg * bo_mem); int (*unbind) (struct drm_ttm_backend * backend); void (*destroy) (struct drm_ttm_backend * backend); }; -struct drm_ttm_backend { - uint32_t flags; - int mem_type; - struct drm_ttm_backend_func *func; -}; +typedef struct drm_ttm_backend { + struct drm_device *dev; + uint32_t flags; + struct drm_ttm_backend_func *func; +} drm_ttm_backend_t; struct drm_ttm { struct page **pages; uint32_t page_flags; unsigned long num_pages; - unsigned long aper_offset; atomic_t vma_count; struct drm_device *dev; int destroy; @@ -290,11 +301,13 @@ struct drm_ttm { }; extern struct drm_ttm *drm_ttm_init(struct drm_device *dev, unsigned long size); -extern int drm_bind_ttm(struct drm_ttm * ttm, int cached, unsigned long aper_offset); +extern int drm_bind_ttm(struct drm_ttm * ttm, struct drm_bo_mem_reg *bo_mem); extern void drm_ttm_unbind(struct drm_ttm * ttm); extern void drm_ttm_evict(struct drm_ttm * ttm); extern void drm_ttm_fixup_caching(struct drm_ttm * ttm); extern struct page *drm_ttm_get_page(struct drm_ttm * ttm, int index); +extern void drm_ttm_cache_flush(void); +extern int drm_ttm_populate(struct drm_ttm * ttm); /* * Destroy a ttm. 
The user normally calls drmRmMap or a similar IOCTL to do this, @@ -333,6 +346,8 @@ struct drm_bo_mem_reg { uint32_t mem_type; uint64_t flags; uint64_t mask; + uint32_t desired_tile_stride; + uint32_t hw_tile_stride; }; struct drm_buffer_object { @@ -356,10 +371,13 @@ struct drm_buffer_object { uint32_t fence_type; uint32_t fence_class; + uint32_t new_fence_type; + uint32_t new_fence_class; struct drm_fence_object *fence; uint32_t priv_flags; wait_queue_head_t event_queue; struct mutex mutex; + unsigned long num_pages; /* For pinned buffers */ int pinned; @@ -368,7 +386,6 @@ struct drm_buffer_object { struct list_head pinned_lru; /* For vm */ - struct drm_ttm *ttm; struct drm_map_list map_list; uint32_t memory_type; @@ -395,6 +412,7 @@ struct drm_mem_type_manager { struct list_head pinned; uint32_t flags; uint32_t drm_bus_maptype; + unsigned long gpu_offset; unsigned long io_offset; unsigned long io_size; void *io_addr; @@ -434,7 +452,8 @@ struct drm_bo_driver { uint32_t num_mem_busy_prio; struct drm_ttm_backend *(*create_ttm_backend_entry) (struct drm_device * dev); - int (*fence_type) (struct drm_buffer_object *bo, uint32_t * type); + int (*fence_type) (struct drm_buffer_object *bo, uint32_t *fclass, + uint32_t * type); int (*invalidate_caches) (struct drm_device * dev, uint64_t flags); int (*init_mem_type) (struct drm_device * dev, uint32_t type, struct drm_mem_type_manager * man); @@ -472,32 +491,44 @@ extern int drm_bo_pci_offset(struct drm_device *dev, extern int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg * mem); extern void drm_bo_usage_deref_locked(struct drm_buffer_object ** bo); -extern int drm_fence_buffer_objects(struct drm_file * priv, +extern void drm_bo_usage_deref_unlocked(struct drm_buffer_object ** bo); +extern void drm_putback_buffer_objects(struct drm_device *dev); +extern int drm_fence_buffer_objects(struct drm_device * dev, struct list_head *list, uint32_t fence_flags, struct drm_fence_object * fence, struct drm_fence_object ** used_fence); extern void drm_bo_add_to_lru(struct drm_buffer_object * bo); +extern int drm_buffer_object_create(struct drm_device *dev, unsigned long size, + enum drm_bo_type type, uint64_t mask, + uint32_t hint, uint32_t page_alignment, + unsigned long buffer_start, + struct drm_buffer_object **bo); extern int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals, int no_wait); extern int drm_bo_mem_space(struct drm_buffer_object * bo, struct drm_bo_mem_reg * mem, int no_wait); extern int drm_bo_move_buffer(struct drm_buffer_object * bo, uint32_t new_mem_flags, int no_wait, int move_unfenced); -extern int drm_buffer_object_create(struct drm_device *dev, unsigned long size, - enum drm_bo_type type, uint64_t mask, - uint32_t hint, uint32_t page_alignment, - unsigned long buffer_start, - struct drm_buffer_object **bo); -extern int drm_bo_init_mm(struct drm_device *dev, unsigned type, +extern int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type); +extern int drm_bo_init_mm(struct drm_device * dev, unsigned type, unsigned long p_offset, unsigned long p_size); -extern int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type); -extern int drm_bo_add_user_object(struct drm_file *file_priv, - struct drm_buffer_object *bo, int sharable); -extern void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo); +extern int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle, + uint32_t fence_class, uint64_t flags, + uint64_t mask, uint32_t hint, + struct drm_bo_info_rep * rep, + struct 
drm_buffer_object **bo_rep); +extern struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file * file_priv, + uint32_t handle, + int check_owner); +extern int drm_bo_do_validate(struct drm_buffer_object *bo, + uint64_t flags, uint64_t mask, uint32_t hint, + uint32_t fence_class, + int no_wait, + struct drm_bo_info_rep *rep); /* - * Buffer object memory move helpers. + * Buffer object memory move- and map helpers. * drm_bo_move.c */ @@ -513,11 +544,69 @@ extern int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo, uint32_t fence_type, uint32_t fence_flags, struct drm_bo_mem_reg * new_mem); +extern int drm_bo_same_page(unsigned long offset, unsigned long offset2); +extern unsigned long drm_bo_offset_end(unsigned long offset, + unsigned long end); -extern int drm_mem_reg_ioremap(struct drm_device *dev, - struct drm_bo_mem_reg *mem, void **virtual); -extern void drm_mem_reg_iounmap(struct drm_device *dev, - struct drm_bo_mem_reg *mem, void *virtual); +struct drm_bo_kmap_obj { + void *virtual; + struct page *page; + enum { + bo_map_iomap, + bo_map_vmap, + bo_map_kmap, + bo_map_premapped, + } bo_kmap_type; +}; + +static inline void *drm_bmo_virtual(struct drm_bo_kmap_obj *map, int *is_iomem) +{ + *is_iomem = (map->bo_kmap_type == bo_map_iomap || + map->bo_kmap_type == bo_map_premapped); + return map->virtual; +} +extern void drm_bo_kunmap(struct drm_bo_kmap_obj *map); +extern int drm_bo_kmap(struct drm_buffer_object *bo, unsigned long start_page, + unsigned long num_pages, struct drm_bo_kmap_obj *map); + + +/* + * drm_regman.c + */ + +struct drm_reg { + struct list_head head; + struct drm_fence_object *fence; + uint32_t fence_type; + uint32_t new_fence_type; +}; + +struct drm_reg_manager { + struct list_head free; + struct list_head lru; + struct list_head unfenced; + + int (*reg_reusable)(const struct drm_reg *reg, const void *data); + void (*reg_destroy)(struct drm_reg *reg); +}; + +extern int drm_regs_alloc(struct drm_reg_manager *manager, + const void *data, + uint32_t fence_class, + uint32_t fence_type, + int interruptible, + int no_wait, + struct drm_reg **reg); + +extern void drm_regs_fence(struct drm_reg_manager *regs, + struct drm_fence_object *fence); + +extern void drm_regs_free(struct drm_reg_manager *manager); +extern void drm_regs_add(struct drm_reg_manager *manager, struct drm_reg *reg); +extern void drm_regs_init(struct drm_reg_manager *manager, + int (*reg_reusable)(const struct drm_reg *, + const void *), + void (*reg_destroy)(struct drm_reg *)); #ifdef CONFIG_DEBUG_MUTEXES #define DRM_ASSERT_LOCKED(_mutex) \ @@ -526,5 +615,4 @@ extern void drm_mem_reg_iounmap(struct drm_device *dev, #else #define DRM_ASSERT_LOCKED(_mutex) #endif - #endif diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c index 60c64cba..33bbe1d4 100644 --- a/linux-core/drm_ttm.c +++ b/linux-core/drm_ttm.c @@ -35,11 +35,12 @@ static void drm_ttm_ipi_handler(void *null) flush_agp_cache(); } -static void drm_ttm_cache_flush(void) +void drm_ttm_cache_flush(void) { if (on_each_cpu(drm_ttm_ipi_handler, NULL, 1, 1) != 0) DRM_ERROR("Timed out waiting for drm cache flush.\n"); } +EXPORT_SYMBOL(drm_ttm_cache_flush); /* * Use kmalloc if possible. Otherwise fall back to vmalloc. 
@@ -207,7 +208,7 @@ struct page *drm_ttm_get_page(struct drm_ttm * ttm, int index) return p; } -static int drm_ttm_populate(struct drm_ttm * ttm) +int drm_ttm_populate(struct drm_ttm * ttm) { struct page *page; unsigned long i; @@ -308,7 +309,7 @@ void drm_ttm_unbind(struct drm_ttm * ttm) drm_ttm_fixup_caching(ttm); } -int drm_bind_ttm(struct drm_ttm * ttm, int cached, unsigned long aper_offset) +int drm_bind_ttm(struct drm_ttm * ttm, struct drm_bo_mem_reg *bo_mem) { int ret = 0; @@ -325,17 +326,16 @@ int drm_bind_ttm(struct drm_ttm * ttm, int cached, unsigned long aper_offset) if (ret) return ret; - if (ttm->state == ttm_unbound && !cached) { + if (ttm->state == ttm_unbound && !(bo_mem->flags & DRM_BO_FLAG_CACHED)) { drm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED); } - if ((ret = be->func->bind(be, aper_offset, cached))) { + if ((ret = be->func->bind(be, bo_mem))) { ttm->state = ttm_evicted; DRM_ERROR("Couldn't bind backend.\n"); return ret; } - ttm->aper_offset = aper_offset; ttm->state = ttm_bound; return 0; diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c index bf500cc6..75763e71 100644 --- a/linux-core/i915_buffer.c +++ b/linux-core/i915_buffer.c @@ -38,7 +38,9 @@ struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device * dev) return drm_agp_init_ttm(dev); } -int i915_fence_types(struct drm_buffer_object *bo, uint32_t * type) +int i915_fence_types(struct drm_buffer_object *bo, + uint32_t * fclass, + uint32_t * type) { if (bo->mem.flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE)) *type = 3; @@ -71,6 +73,7 @@ int i915_init_mem_type(struct drm_device * dev, uint32_t type, man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | _DRM_FLAG_MEMTYPE_CACHED; man->drm_bus_maptype = 0; + man->gpu_offset = 0; break; case DRM_BO_MEM_TT: if (!(drm_core_has_AGP(dev) && dev->agp)) { @@ -84,6 +87,7 @@ int i915_init_mem_type(struct drm_device * dev, uint32_t type, man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_NEEDS_IOREMAP; man->drm_bus_maptype = _DRM_AGP; + man->gpu_offset = 0; break; case DRM_BO_MEM_PRIV0: if (!(drm_core_has_AGP(dev) && dev->agp)) { @@ -97,6 +101,7 @@ int i915_init_mem_type(struct drm_device * dev, uint32_t type, man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | _DRM_FLAG_MEMTYPE_FIXED | _DRM_FLAG_NEEDS_IOREMAP; man->drm_bus_maptype = _DRM_AGP; + man->gpu_offset = 0; break; default: DRM_ERROR("Unsupported memory type %u\n", (unsigned)type); @@ -196,7 +201,7 @@ static int i915_move_flip(struct drm_buffer_object * bo, if (ret) return ret; - ret = drm_bind_ttm(bo->ttm, 1, tmp_mem.mm_node->start); + ret = drm_bind_ttm(bo->ttm, &tmp_mem); if (ret) goto out_cleanup; diff --git a/linux-core/i915_fence.c b/linux-core/i915_fence.c index 6f0de2ca..5a1653e9 100644 --- a/linux-core/i915_fence.c +++ b/linux-core/i915_fence.c @@ -63,7 +63,8 @@ static void i915_perform_flush(struct drm_device * dev) diff = (sequence - fc->last_exe_flush) & BREADCRUMB_MASK; if (diff < driver->wrap_diff && diff != 0) { - drm_fence_handler(dev, 0, sequence, DRM_FENCE_TYPE_EXE); + drm_fence_handler(dev, 0, sequence, + DRM_FENCE_TYPE_EXE, 0); } if (dev_priv->fence_irq_on && !fc->pending_exe_flush) { @@ -82,7 +83,7 @@ static void i915_perform_flush(struct drm_device * dev) flush_flags = dev_priv->flush_flags; flush_sequence = dev_priv->flush_sequence; dev_priv->flush_pending = 0; - drm_fence_handler(dev, 0, flush_sequence, flush_flags); + drm_fence_handler(dev, 0, flush_sequence, flush_flags, 0); } } @@ -103,7 +104,7 @@ static void i915_perform_flush(struct drm_device * dev) 
flush_flags = dev_priv->flush_flags; flush_sequence = dev_priv->flush_sequence; dev_priv->flush_pending = 0; - drm_fence_handler(dev, 0, flush_sequence, flush_flags); + drm_fence_handler(dev, 0, flush_sequence, flush_flags, 0); } } diff --git a/linux-core/nouveau_sgdma.c b/linux-core/nouveau_sgdma.c index 97d5330b..b86c5d7c 100644 --- a/linux-core/nouveau_sgdma.c +++ b/linux-core/nouveau_sgdma.c @@ -80,16 +80,16 @@ nouveau_sgdma_clear(struct drm_ttm_backend *be) } static int -nouveau_sgdma_bind(struct drm_ttm_backend *be, unsigned long pg_start, - int cached) +nouveau_sgdma_bind(struct drm_ttm_backend *be, struct drm_bo_mem_reg *mem) { struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private; struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma; - uint64_t offset = (pg_start << PAGE_SHIFT); + uint64_t offset = (mem->mm_node->start << PAGE_SHIFT); uint32_t i; - DRM_DEBUG("pg=0x%lx (0x%llx), cached=%d\n", pg_start, offset, cached); + DRM_DEBUG("pg=0x%lx (0x%llx), cached=%d\n", mem->mm_node->start, + offset, (mem->flags & DRM_BO_FLAG_CACHED) == 1); if (offset & NV_CTXDMA_PAGE_MASK) return -EINVAL; @@ -188,7 +188,6 @@ nouveau_sgdma_init_ttm(struct drm_device *dev) nvbe->dev = dev; nvbe->backend.func = &nouveau_sgdma_backend; - nvbe->backend.mem_type = DRM_BO_MEM_TT; return &nvbe->backend; } @@ -278,6 +277,8 @@ nouveau_sgdma_nottm_hack_init(struct drm_device *dev) struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_ttm_backend *be; struct drm_scatter_gather sgreq; + struct drm_mm_node mm_node; + struct drm_bo_mem_reg mem; int ret; dev_priv->gart_info.sg_be = nouveau_sgdma_init_ttm(dev); @@ -303,7 +304,10 @@ nouveau_sgdma_nottm_hack_init(struct drm_device *dev) return ret; } - if ((ret = be->func->bind(be, 0, 0))) { + mm_node.start = 0; + mem.mm_node = &mm_node; + + if ((ret = be->func->bind(be, &mem))) { DRM_ERROR("failed bind: %d\n", ret); return ret; } diff --git a/linux-core/via_buffer.c b/linux-core/via_buffer.c index eb5ea826..a6c59832 100644 --- a/linux-core/via_buffer.c +++ b/linux-core/via_buffer.c @@ -37,7 +37,8 @@ struct drm_ttm_backend *via_create_ttm_backend_entry(struct drm_device * dev) return drm_agp_init_ttm(dev); } -int via_fence_types(struct drm_buffer_object *bo, uint32_t * type) +int via_fence_types(struct drm_buffer_object *bo, uint32_t * fclass, + uint32_t * type) { *type = 3; return 0; diff --git a/linux-core/via_fence.c b/linux-core/via_fence.c index a6d4ece9..4576dc90 100644 --- a/linux-core/via_fence.c +++ b/linux-core/via_fence.c @@ -98,7 +98,8 @@ static uint32_t via_perform_flush(struct drm_device *dev, uint32_t class) drm_idlelock_release(&dev->lock); dev_priv->have_idlelock = 0; } - drm_fence_handler(dev, 0, dev_priv->emit_0_sequence, signaled_flush_types); + drm_fence_handler(dev, 0, dev_priv->emit_0_sequence, + signaled_flush_types, 0); } } diff --git a/linux-core/xgi_fence.c b/linux-core/xgi_fence.c index adedf300..721cc1a9 100644 --- a/linux-core/xgi_fence.c +++ b/linux-core/xgi_fence.c @@ -60,7 +60,7 @@ static uint32_t xgi_do_flush(struct drm_device * dev, uint32_t class) if (signaled_flush_types) { drm_fence_handler(dev, 0, info->complete_sequence, - signaled_flush_types); + signaled_flush_types, 0); } } diff --git a/shared-core/drm.h b/shared-core/drm.h index 30c7a1a3..21386a57 100644 --- a/shared-core/drm.h +++ b/shared-core/drm.h @@ -639,6 +639,7 @@ struct drm_set_version { #define DRM_FENCE_FLAG_SHAREABLE 0x00000002 #define DRM_FENCE_FLAG_WAIT_LAZY 0x00000004 
 #define DRM_FENCE_FLAG_WAIT_IGNORE_SIGNALS 0x00000008
+#define DRM_FENCE_FLAG_NO_USER             0x00000010
 
 /* Reserved for driver use */
 #define DRM_FENCE_MASK_DRIVER              0xFF000000
 
@@ -651,7 +652,7 @@ struct drm_fence_arg {
 	unsigned int type;
 	unsigned int flags;
 	unsigned int signaled;
-	unsigned int pad64;
+	unsigned int error;
 	uint64_t expand_pad[3]; /*Future expansion */
 };
 
diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h
index 899817ec..3b26040f 100644
--- a/shared-core/i915_drv.h
+++ b/shared-core/i915_drv.h
@@ -214,7 +214,8 @@ extern int i915_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t f
 #ifdef I915_HAVE_BUFFER
 /* i915_buffer.c */
 extern struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device *dev);
-extern int i915_fence_types(struct drm_buffer_object *bo, uint32_t *type);
+extern int i915_fence_types(struct drm_buffer_object *bo, uint32_t *fclass,
+			    uint32_t *type);
 extern int i915_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags);
 extern int i915_init_mem_type(struct drm_device *dev, uint32_t type,
 			      struct drm_mem_type_manager *man);
diff --git a/shared-core/via_drv.h b/shared-core/via_drv.h
index 15e65950..0b474844 100644
--- a/shared-core/via_drv.h
+++ b/shared-core/via_drv.h
@@ -206,7 +206,8 @@ extern int via_fence_has_irq(struct drm_device * dev, uint32_t class,
 #ifdef VIA_HAVE_BUFFER
 extern struct drm_ttm_backend *via_create_ttm_backend_entry(struct drm_device *dev);
-extern int via_fence_types(struct drm_buffer_object *bo, uint32_t *type);
+extern int via_fence_types(struct drm_buffer_object *bo, uint32_t *fclass,
+			   uint32_t *type);
 extern int via_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags);
 extern int via_init_mem_type(struct drm_device *dev, uint32_t type,
 			     struct drm_mem_type_manager *man);

From bea727b8387f3094b9921004d7686a2d77184466 Mon Sep 17 00:00:00 2001
From: Thomas Hellstrom
Date: Sat, 22 Sep 2007 13:38:36 +0200
Subject: [PATCH 324/437] Make nouveau compile on older kernels.

---
 linux-core/drm_compat.h | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/linux-core/drm_compat.h b/linux-core/drm_compat.h
index 870f8b73..f74f4bc2 100644
--- a/linux-core/drm_compat.h
+++ b/linux-core/drm_compat.h
@@ -193,7 +193,10 @@ extern void drm_clear_vma(struct vm_area_struct *vma,
 extern pgprot_t vm_get_page_prot(unsigned long vm_flags);
 
 #ifndef GFP_DMA32
-#define GFP_DMA32 0
+#define GFP_DMA32 GFP_KERNEL
+#endif
+#ifndef __GFP_DMA32
+#define __GFP_DMA32 GFP_KERNEL
 #endif
 
 #if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))

From 0774090d5b7d3eba734086b437021039bc19c365 Mon Sep 17 00:00:00 2001
From: Thomas Hellstrom
Date: Sat, 22 Sep 2007 13:59:56 +0200
Subject: [PATCH 325/437] Fix drm_bo.c compiling.
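
Patch 323 above exports drm_bo_kmap() and drm_bo_kunmap() so that drivers can
map a buffer object into kernel space no matter where it currently resides.
The sketch below is an illustration only, not part of any patch in this
series: the helper name and error handling are invented, but the types and
calls match those added in patch 323. A caller should honour the is_iomem
result from drm_bmo_virtual() and use the I/O accessors for io memory instead
of a plain dereference.

/* Hypothetical example: read the first 32 bits of a buffer object through
 * the kernel-mapping helpers introduced in patch 323. */
static int example_peek_bo(struct drm_buffer_object *bo, uint32_t *value)
{
	struct drm_bo_kmap_obj map;
	int is_iomem;
	uint32_t *virtual;
	int ret;

	ret = drm_bo_kmap(bo, 0, 1, &map);	/* map a single page */
	if (ret)
		return ret;

	virtual = drm_bmo_virtual(&map, &is_iomem);
	if (is_iomem)
		*value = ioread32((void __iomem *)virtual);
	else
		*value = *virtual;

	drm_bo_kunmap(&map);
	return 0;
}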
--- linux-core/drm_bo.c | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 1913df44..49a57a85 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1515,13 +1515,6 @@ int drm_bo_do_validate(struct drm_buffer_object *bo, if (ret) goto out; - if ((mask & flags & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) { - DRM_ERROR - ("DRM_BO_FLAG_NO_EVICT is only available to priviliged " - "processes\n"); - return -EPERM; - } - DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask); ret = drm_bo_new_mask(bo, flags, hint); @@ -1706,7 +1699,7 @@ int drm_buffer_object_create(struct drm_device *dev, } bo->fence_class = 0; - ret = driver->fence_type(bo, &bo->fence_type); + ret = driver->fence_type(bo, &bo->fence_class, &bo->fence_type); if (ret) { DRM_ERROR("Driver did not support given buffer permissions\n"); goto out_err; From 54df1b9ff3b79097fedd8ed7bf54aca30a660cbd Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Sat, 22 Sep 2007 14:30:55 +0200 Subject: [PATCH 326/437] Fix pinned buffer fence class. --- linux-core/drm_bo.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index b46d0361..717e5dab 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1995,7 +1995,7 @@ drm_bo_set_pin(struct drm_device *dev, struct drm_buffer_object *bo, /* Validate the buffer into its pinned location, with no * pending fence. */ - ret = drm_buffer_object_validate(bo, 0, 0, 0); + ret = drm_buffer_object_validate(bo, bo->fence_class, 0, 0); if (ret) { mutex_unlock(&bo->mutex); return ret; From bb5516f4f47d16d5d59797fa170abd50d35377a7 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Wed, 12 Sep 2007 23:50:38 +1000 Subject: [PATCH 327/437] drm/ttm: fixup fence class naming and interfaces This is some code for nouveau that Ben Skeggs worked on, and also fixes the naming (having class in a system header file == C++ keyword == bad plan) --- linux-core/drm_bo.c | 19 ++++++-------- linux-core/drm_fence.c | 53 ++++++++++++++++++++-------------------- linux-core/drm_objects.h | 18 +++++++------- linux-core/i915_fence.c | 2 +- linux-core/via_fence.c | 4 +-- linux-core/xgi_fence.c | 2 +- 6 files changed, 48 insertions(+), 50 deletions(-) diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 717e5dab..a2f66dc6 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -538,7 +538,7 @@ EXPORT_SYMBOL(drm_bo_usage_deref_unlocked); int drm_fence_buffer_objects(struct drm_file * file_priv, struct list_head *list, - uint32_t fence_flags, + uint32_t fence_class, uint32_t fence_flags, struct drm_fence_object * fence, struct drm_fence_object ** used_fence) { @@ -560,13 +560,8 @@ int drm_fence_buffer_objects(struct drm_file * file_priv, list_for_each_entry(entry, list, lru) { BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED)); fence_type |= entry->fence_type; - if (entry->fence_class != 0) { - DRM_ERROR("Fence class %d is not implemented yet.\n", - entry->fence_class); - ret = -EINVAL; - goto out; - } - count++; + if (entry->fence_class == fence_class) + count++; } if (!count) { @@ -583,7 +578,8 @@ int drm_fence_buffer_objects(struct drm_file * file_priv, list_splice_init(list, &f_list); if (fence) { - if ((fence_type & fence->type) != fence_type) { + if ((fence_type & fence->type) != fence_type || + (fence->fence_class != fence_class)) { DRM_ERROR("Given fence doesn't match buffers " "on unfenced list.\n"); ret = -EINVAL; @@ -591,7 +587,7 @@ int drm_fence_buffer_objects(struct drm_file * 
file_priv, } } else { mutex_unlock(&dev->struct_mutex); - ret = drm_fence_object_create(dev, 0, fence_type, + ret = drm_fence_object_create(dev, fence_class, fence_type, fence_flags | DRM_FENCE_FLAG_EMIT, &fence); mutex_lock(&dev->struct_mutex); @@ -609,7 +605,8 @@ int drm_fence_buffer_objects(struct drm_file * file_priv, mutex_lock(&entry->mutex); mutex_lock(&dev->struct_mutex); list_del_init(l); - if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) { + if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED && + entry->fence_class == fence_class) { count++; if (entry->fence) drm_fence_usage_deref_locked(&entry->fence); diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c index 2f16f7ef..a6787b09 100644 --- a/linux-core/drm_fence.c +++ b/linux-core/drm_fence.c @@ -34,14 +34,14 @@ * Typically called by the IRQ handler. */ -void drm_fence_handler(struct drm_device * dev, uint32_t class, +void drm_fence_handler(struct drm_device * dev, uint32_t fence_class, uint32_t sequence, uint32_t type) { int wake = 0; uint32_t diff; uint32_t relevant; struct drm_fence_manager *fm = &dev->fm; - struct drm_fence_class_manager *fc = &fm->class[class]; + struct drm_fence_class_manager *fc = &fm->fence_class[fence_class]; struct drm_fence_driver *driver = dev->driver->fence_driver; struct list_head *head; struct drm_fence_object *fence, *next; @@ -198,7 +198,7 @@ int drm_fence_object_signaled(struct drm_fence_object * fence, struct drm_fence_driver *driver = dev->driver->fence_driver; if (poke_flush) - driver->poke_flush(dev, fence->class); + driver->poke_flush(dev, fence->fence_class); read_lock_irqsave(&fm->lock, flags); signaled = (fence->type & mask & fence->signaled) == (fence->type & mask); @@ -229,7 +229,7 @@ int drm_fence_object_flush(struct drm_fence_object * fence, { struct drm_device *dev = fence->dev; struct drm_fence_manager *fm = &dev->fm; - struct drm_fence_class_manager *fc = &fm->class[fence->class]; + struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class]; struct drm_fence_driver *driver = dev->driver->fence_driver; unsigned long flags; @@ -253,7 +253,7 @@ int drm_fence_object_flush(struct drm_fence_object * fence, } } write_unlock_irqrestore(&fm->lock, flags); - driver->poke_flush(dev, fence->class); + driver->poke_flush(dev, fence->fence_class); return 0; } @@ -262,10 +262,10 @@ int drm_fence_object_flush(struct drm_fence_object * fence, * wrapped around and reused. 
*/ -void drm_fence_flush_old(struct drm_device * dev, uint32_t class, uint32_t sequence) +void drm_fence_flush_old(struct drm_device * dev, uint32_t fence_class, uint32_t sequence) { struct drm_fence_manager *fm = &dev->fm; - struct drm_fence_class_manager *fc = &fm->class[class]; + struct drm_fence_class_manager *fc = &fm->fence_class[fence_class]; struct drm_fence_driver *driver = dev->driver->fence_driver; uint32_t old_sequence; unsigned long flags; @@ -308,7 +308,7 @@ static int drm_fence_lazy_wait(struct drm_fence_object *fence, { struct drm_device *dev = fence->dev; struct drm_fence_manager *fm = &dev->fm; - struct drm_fence_class_manager *fc = &fm->class[fence->class]; + struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class]; int signaled; unsigned long _end = jiffies + 3*DRM_HZ; int ret = 0; @@ -366,7 +366,7 @@ int drm_fence_object_wait(struct drm_fence_object * fence, } else { - if (driver->has_irq(dev, fence->class, + if (driver->has_irq(dev, fence->fence_class, DRM_FENCE_TYPE_EXE)) { ret = drm_fence_lazy_wait(fence, ignore_signals, DRM_FENCE_TYPE_EXE); @@ -374,7 +374,7 @@ int drm_fence_object_wait(struct drm_fence_object * fence, return ret; } - if (driver->has_irq(dev, fence->class, + if (driver->has_irq(dev, fence->fence_class, mask & ~DRM_FENCE_TYPE_EXE)) { ret = drm_fence_lazy_wait(fence, ignore_signals, mask); @@ -409,7 +409,7 @@ int drm_fence_object_emit(struct drm_fence_object * fence, struct drm_device *dev = fence->dev; struct drm_fence_manager *fm = &dev->fm; struct drm_fence_driver *driver = dev->driver->fence_driver; - struct drm_fence_class_manager *fc = &fm->class[fence->class]; + struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class]; unsigned long flags; uint32_t sequence; uint32_t native_type; @@ -421,7 +421,7 @@ int drm_fence_object_emit(struct drm_fence_object * fence, return ret; write_lock_irqsave(&fm->lock, flags); - fence->class = class; + fence->fence_class = class; fence->type = type; fence->flush_mask = 0x00; fence->submitted_flush = 0x00; @@ -456,7 +456,7 @@ static int drm_fence_object_init(struct drm_device * dev, uint32_t class, */ INIT_LIST_HEAD(&fence->base.list); - fence->class = class; + fence->fence_class = class; fence->type = type; fence->flush_mask = 0; fence->submitted_flush = 0; @@ -466,7 +466,7 @@ static int drm_fence_object_init(struct drm_device * dev, uint32_t class, write_unlock_irqrestore(&fm->lock, flags); if (fence_flags & DRM_FENCE_FLAG_EMIT) { ret = drm_fence_object_emit(fence, fence_flags, - fence->class, type); + fence->fence_class, type); } return ret; } @@ -533,7 +533,7 @@ void drm_fence_manager_init(struct drm_device * dev) BUG_ON(fm->num_classes > _DRM_FENCE_CLASSES); for (i=0; inum_classes; ++i) { - class = &fm->class[i]; + class = &fm->fence_class[i]; INIT_LIST_HEAD(&class->ring); class->pending_flush = 0; @@ -582,7 +582,7 @@ int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file * if (arg->flags & DRM_FENCE_FLAG_EMIT) LOCK_TEST_WITH_RETURN(dev, file_priv); - ret = drm_fence_object_create(dev, arg->class, + ret = drm_fence_object_create(dev, arg->fence_class, arg->type, arg->flags, &fence); if (ret) return ret; @@ -601,7 +601,7 @@ int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file * arg->handle = fence->base.hash.key; read_lock_irqsave(&fm->lock, flags); - arg->class = fence->class; + arg->fence_class = fence->fence_class; arg->type = fence->type; arg->signaled = fence->signaled; read_unlock_irqrestore(&fm->lock, flags); @@ -656,7 
+656,7 @@ int drm_fence_reference_ioctl(struct drm_device *dev, void *data, struct drm_fil fence = drm_lookup_fence_object(file_priv, arg->handle); read_lock_irqsave(&fm->lock, flags); - arg->class = fence->class; + arg->fence_class = fence->fence_class; arg->type = fence->type; arg->signaled = fence->signaled; read_unlock_irqrestore(&fm->lock, flags); @@ -700,7 +700,7 @@ int drm_fence_signaled_ioctl(struct drm_device *dev, void *data, struct drm_file return -EINVAL; read_lock_irqsave(&fm->lock, flags); - arg->class = fence->class; + arg->fence_class = fence->fence_class; arg->type = fence->type; arg->signaled = fence->signaled; read_unlock_irqrestore(&fm->lock, flags); @@ -729,7 +729,7 @@ int drm_fence_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *f ret = drm_fence_object_flush(fence, arg->type); read_lock_irqsave(&fm->lock, flags); - arg->class = fence->class; + arg->fence_class = fence->fence_class; arg->type = fence->type; arg->signaled = fence->signaled; read_unlock_irqrestore(&fm->lock, flags); @@ -761,7 +761,7 @@ int drm_fence_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *fi 0, arg->type); read_lock_irqsave(&fm->lock, flags); - arg->class = fence->class; + arg->fence_class = fence->fence_class; arg->type = fence->type; arg->signaled = fence->signaled; read_unlock_irqrestore(&fm->lock, flags); @@ -789,11 +789,11 @@ int drm_fence_emit_ioctl(struct drm_device *dev, void *data, struct drm_file *fi fence = drm_lookup_fence_object(file_priv, arg->handle); if (!fence) return -EINVAL; - ret = drm_fence_object_emit(fence, arg->flags, arg->class, + ret = drm_fence_object_emit(fence, arg->flags, arg->fence_class, arg->type); read_lock_irqsave(&fm->lock, flags); - arg->class = fence->class; + arg->fence_class = fence->fence_class; arg->type = fence->type; arg->signaled = fence->signaled; read_unlock_irqrestore(&fm->lock, flags); @@ -821,8 +821,8 @@ int drm_fence_buffers_ioctl(struct drm_device *dev, void *data, struct drm_file return -EINVAL; } LOCK_TEST_WITH_RETURN(dev, file_priv); - ret = drm_fence_buffer_objects(file_priv, NULL, arg->flags, - NULL, &fence); + ret = drm_fence_buffer_objects(file_priv, NULL, arg->fence_class, + arg->flags, NULL, &fence); if (ret) return ret; ret = drm_fence_add_user_object(file_priv, fence, @@ -834,9 +834,10 @@ int drm_fence_buffers_ioctl(struct drm_device *dev, void *data, struct drm_file arg->handle = fence->base.hash.key; read_lock_irqsave(&fm->lock, flags); - arg->class = fence->class; + arg->fence_class = fence->fence_class; arg->type = fence->type; arg->signaled = fence->signaled; + arg->sequence = fence->sequence; read_unlock_irqrestore(&fm->lock, flags); drm_fence_usage_deref_unlocked(&fence); diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index 096041d7..b2f1ae17 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -149,7 +149,7 @@ struct drm_fence_object { */ struct list_head ring; - int class; + int fence_class; uint32_t native_type; uint32_t type; uint32_t signaled; @@ -173,7 +173,7 @@ struct drm_fence_class_manager { struct drm_fence_manager { int initialized; rwlock_t lock; - struct drm_fence_class_manager class[_DRM_FENCE_CLASSES]; + struct drm_fence_class_manager fence_class[_DRM_FENCE_CLASSES]; uint32_t num_classes; atomic_t count; }; @@ -184,18 +184,18 @@ struct drm_fence_driver { uint32_t flush_diff; uint32_t sequence_mask; int lazy_capable; - int (*has_irq) (struct drm_device * dev, uint32_t class, + int (*has_irq) (struct drm_device * dev, uint32_t fence_class, 
uint32_t flags); - int (*emit) (struct drm_device * dev, uint32_t class, uint32_t flags, + int (*emit) (struct drm_device * dev, uint32_t fence_class, uint32_t flags, uint32_t * breadcrumb, uint32_t * native_type); - void (*poke_flush) (struct drm_device * dev, uint32_t class); + void (*poke_flush) (struct drm_device * dev, uint32_t fence_class); }; -extern void drm_fence_handler(struct drm_device *dev, uint32_t class, +extern void drm_fence_handler(struct drm_device *dev, uint32_t fence_class, uint32_t sequence, uint32_t type); extern void drm_fence_manager_init(struct drm_device *dev); extern void drm_fence_manager_takedown(struct drm_device *dev); -extern void drm_fence_flush_old(struct drm_device *dev, uint32_t class, +extern void drm_fence_flush_old(struct drm_device *dev, uint32_t fence_class, uint32_t sequence); extern int drm_fence_object_flush(struct drm_fence_object * fence, uint32_t type); extern int drm_fence_object_signaled(struct drm_fence_object * fence, @@ -208,7 +208,7 @@ extern void drm_fence_reference_unlocked(struct drm_fence_object **dst, extern int drm_fence_object_wait(struct drm_fence_object * fence, int lazy, int ignore_signals, uint32_t mask); extern int drm_fence_object_create(struct drm_device *dev, uint32_t type, - uint32_t fence_flags, uint32_t class, + uint32_t fence_flags, uint32_t fence_class, struct drm_fence_object ** c_fence); extern int drm_fence_add_user_object(struct drm_file * priv, struct drm_fence_object * fence, int shareable); @@ -474,7 +474,7 @@ extern int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg * me extern void drm_bo_usage_deref_locked(struct drm_buffer_object ** bo); extern int drm_fence_buffer_objects(struct drm_file * priv, struct list_head *list, - uint32_t fence_flags, + uint32_t fence_class, uint32_t fence_flags, struct drm_fence_object * fence, struct drm_fence_object ** used_fence); extern void drm_bo_add_to_lru(struct drm_buffer_object * bo); diff --git a/linux-core/i915_fence.c b/linux-core/i915_fence.c index 6f0de2ca..89830333 100644 --- a/linux-core/i915_fence.c +++ b/linux-core/i915_fence.c @@ -42,7 +42,7 @@ static void i915_perform_flush(struct drm_device * dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; struct drm_fence_manager *fm = &dev->fm; - struct drm_fence_class_manager *fc = &fm->class[0]; + struct drm_fence_class_manager *fc = &fm->fence_class[0]; struct drm_fence_driver *driver = dev->driver->fence_driver; uint32_t flush_flags = 0; uint32_t flush_sequence = 0; diff --git a/linux-core/via_fence.c b/linux-core/via_fence.c index a6d4ece9..8d60afa6 100644 --- a/linux-core/via_fence.c +++ b/linux-core/via_fence.c @@ -42,7 +42,7 @@ static uint32_t via_perform_flush(struct drm_device *dev, uint32_t class) { drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; - struct drm_fence_class_manager *fc = &dev->fm.class[class]; + struct drm_fence_class_manager *fc = &dev->fm.fence_class[class]; uint32_t pending_flush_types = 0; uint32_t signaled_flush_types = 0; uint32_t status; @@ -204,7 +204,7 @@ void via_fence_timer(unsigned long data) drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; struct drm_fence_manager *fm = &dev->fm; uint32_t pending_flush; - struct drm_fence_class_manager *fc = &dev->fm.class[0]; + struct drm_fence_class_manager *fc = &dev->fm.fence_class[0]; if (!dev_priv) return; diff --git a/linux-core/xgi_fence.c b/linux-core/xgi_fence.c index adedf300..22e1dced 100644 --- a/linux-core/xgi_fence.c +++ b/linux-core/xgi_fence.c 
@@ -33,7 +33,7 @@ static uint32_t xgi_do_flush(struct drm_device * dev, uint32_t class) { struct xgi_info * info = dev->dev_private; - struct drm_fence_class_manager * fc = &dev->fm.class[class]; + struct drm_fence_class_manager * fc = &dev->fm.fence_class[class]; uint32_t pending_flush_types = 0; uint32_t signaled_flush_types = 0; From 03c47f1420bf17a1e0f2b86be500656ae5a4c95b Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 25 Sep 2007 16:16:14 +1000 Subject: [PATCH 328/437] drm: use fence_class as name instead of class --- libdrm/xf86drm.c | 29 ++++++++++++++++------------- libdrm/xf86mm.h | 9 +++++---- shared-core/drm.h | 4 ++-- 3 files changed, 23 insertions(+), 19 deletions(-) diff --git a/libdrm/xf86drm.c b/libdrm/xf86drm.c index 0849f896..dc18d6f9 100644 --- a/libdrm/xf86drm.c +++ b/libdrm/xf86drm.c @@ -2345,7 +2345,7 @@ int drmCommandWriteRead(int fd, unsigned long drmCommandIndex, void *data, * DRM_FENCE_MASK_DRIVER */ -int drmFenceCreate(int fd, unsigned flags, int class, unsigned type, +int drmFenceCreate(int fd, unsigned flags, int fence_class, unsigned type, drmFence *fence) { drm_fence_arg_t arg; @@ -2353,11 +2353,12 @@ int drmFenceCreate(int fd, unsigned flags, int class, unsigned type, memset(&arg, 0, sizeof(arg)); arg.flags = flags; arg.type = type; - arg.class = class; + arg.fence_class = fence_class; + if (ioctl(fd, DRM_IOCTL_FENCE_CREATE, &arg)) return -errno; fence->handle = arg.handle; - fence->class = arg.class; + fence->fence_class = arg.fence_class; fence->type = arg.type; fence->flags = arg.flags; fence->signaled = 0; @@ -2370,19 +2371,21 @@ int drmFenceCreate(int fd, unsigned flags, int class, unsigned type, * DRM_FENCE_MASK_DRIVER */ -int drmFenceBuffers(int fd, unsigned flags, drmFence *fence) +int drmFenceBuffers(int fd, unsigned flags, uint32_t fence_class, drmFence *fence) { drm_fence_arg_t arg; memset(&arg, 0, sizeof(arg)); arg.flags = flags; + arg.fence_class = fence_class; if (ioctl(fd, DRM_IOCTL_FENCE_BUFFERS, &arg)) return -errno; fence->handle = arg.handle; - fence->class = arg.class; + fence->fence_class = arg.fence_class; fence->type = arg.type; fence->flags = arg.flags; + fence->sequence = arg.sequence; fence->signaled = 0; return 0; } @@ -2409,7 +2412,7 @@ int drmFenceReference(int fd, unsigned handle, drmFence *fence) if (ioctl(fd, DRM_IOCTL_FENCE_REFERENCE, &arg)) return -errno; fence->handle = arg.handle; - fence->class = arg.class; + fence->fence_class = arg.fence_class; fence->type = arg.type; fence->flags = arg.flags; fence->signaled = arg.signaled; @@ -2438,7 +2441,7 @@ int drmFenceFlush(int fd, drmFence *fence, unsigned flush_type) if (ioctl(fd, DRM_IOCTL_FENCE_FLUSH, &arg)) return -errno; - fence->class = arg.class; + fence->fence_class = arg.fence_class; fence->type = arg.type; fence->signaled = arg.signaled; return 0; @@ -2453,7 +2456,7 @@ int drmFenceUpdate(int fd, drmFence *fence) if (ioctl(fd, DRM_IOCTL_FENCE_SIGNALED, &arg)) return -errno; - fence->class = arg.class; + fence->fence_class = arg.fence_class; fence->type = arg.type; fence->signaled = arg.signaled; return 0; @@ -2486,14 +2489,14 @@ int drmFenceEmit(int fd, unsigned flags, drmFence *fence, unsigned emit_type) drm_fence_arg_t arg; memset(&arg, 0, sizeof(arg)); - arg.class = fence->class; + arg.fence_class = fence->fence_class; arg.flags = flags; arg.handle = fence->handle; arg.type = emit_type; if (ioctl(fd, DRM_IOCTL_FENCE_EMIT, &arg)) return -errno; - fence->class = arg.class; + fence->fence_class = arg.fence_class; fence->type = arg.type; fence->signaled = 
arg.signaled; return 0; @@ -2532,7 +2535,7 @@ int drmFenceWait(int fd, unsigned flags, drmFence *fence, unsigned flush_type) if (ret) return -errno; - fence->class = arg.class; + fence->fence_class = arg.fence_class; fence->type = arg.type; fence->signaled = arg.signaled; return 0; @@ -2878,7 +2881,7 @@ int drmBOUnmap(int fd, drmBO *buf) return 0; } -int drmBOValidate(int fd, drmBO *buf, +int drmBOValidate(int fd, drmBO *buf, uint32_t fence_class, uint64_t flags, uint64_t mask, unsigned hint) { @@ -2892,7 +2895,7 @@ int drmBOValidate(int fd, drmBO *buf, req->bo_req.flags = flags; req->bo_req.mask = mask; req->bo_req.hint = hint; - req->bo_req.fence_class = 0; /* Backwards compatibility. */ + req->bo_req.fence_class = fence_class; req->op = drm_bo_validate; do{ diff --git a/libdrm/xf86mm.h b/libdrm/xf86mm.h index d86644ca..cacd13af 100644 --- a/libdrm/xf86mm.h +++ b/libdrm/xf86mm.h @@ -96,10 +96,11 @@ typedef struct _drmMMListHead typedef struct _drmFence { unsigned handle; - int class; + int fence_class; unsigned type; unsigned flags; unsigned signaled; + uint32_t sequence; unsigned pad[4]; /* for future expansion */ } drmFence; @@ -148,7 +149,7 @@ typedef struct _drmBOList { * Fence functions. */ -extern int drmFenceCreate(int fd, unsigned flags, int class, +extern int drmFenceCreate(int fd, unsigned flags, int fence_class, unsigned type, drmFence *fence); extern int drmFenceDestroy(int fd, const drmFence *fence); extern int drmFenceReference(int fd, unsigned handle, drmFence *fence); @@ -160,7 +161,7 @@ extern int drmFenceWait(int fd, unsigned flags, drmFence *fence, unsigned flush_type); extern int drmFenceEmit(int fd, unsigned flags, drmFence *fence, unsigned emit_type); -extern int drmFenceBuffers(int fd, unsigned flags, drmFence *fence); +extern int drmFenceBuffers(int fd, unsigned flags, uint32_t fence_class, drmFence *fence); /* @@ -188,7 +189,7 @@ extern int drmBOUnReference(int fd, drmBO *buf); extern int drmBOMap(int fd, drmBO *buf, unsigned mapFlags, unsigned mapHint, void **address); extern int drmBOUnmap(int fd, drmBO *buf); -extern int drmBOValidate(int fd, drmBO *buf, uint64_t flags, +extern int drmBOValidate(int fd, drmBO *buf, uint32_t fence_class, uint64_t flags, uint64_t mask, unsigned hint); extern int drmBOFence(int fd, drmBO *buf, unsigned flags, unsigned fenceHandle); diff --git a/shared-core/drm.h b/shared-core/drm.h index 30c7a1a3..b4754ead 100644 --- a/shared-core/drm.h +++ b/shared-core/drm.h @@ -647,11 +647,11 @@ struct drm_set_version { struct drm_fence_arg { unsigned int handle; - unsigned int class; + unsigned int fence_class; unsigned int type; unsigned int flags; unsigned int signaled; - unsigned int pad64; + unsigned int sequence; uint64_t expand_pad[3]; /*Future expansion */ }; From 6671ad1917698b6174a1af314b63b3800d75248c Mon Sep 17 00:00:00 2001 From: Alan Hourihane Date: Wed, 26 Sep 2007 15:38:54 +0100 Subject: [PATCH 329/437] don't copy back if an error was returned. 
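
The reasoning, spelled out: if the ioctl handler fails, it may never have filled in the kernel-side kdata buffer, so copying that buffer back would overwrite the caller's struct with stale garbage. Below is a toy userspace model of the guarded copy-back idea, not the DRM dispatcher itself; all names in it are invented for illustration.

    /* Toy model, not kernel code: only publish handler output on success. */
    #include <stdio.h>
    #include <string.h>

    struct args { int value; };

    static int handler(struct args *out)
    {
        (void)out;        /* pretend we failed before writing the result */
        return -22;       /* -EINVAL */
    }

    int main(void)
    {
        struct args kdata;                   /* kernel-side scratch buffer */
        struct args user = { .value = 7 };   /* what "userspace" passed in */
        int retcode;

        memset(&kdata, 0xaa, sizeof(kdata)); /* stale junk */

        retcode = handler(&kdata);
        if (retcode == 0)                    /* the guard this patch adds */
            memcpy(&user, &kdata, sizeof(user));

        printf("retcode=%d, user.value=%d\n", retcode, user.value);
        return 0;
    }

The hunk below applies the same guard, retcode == 0, around the copy_to_user() in drm_unlocked_ioctl().
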
--- linux-core/drm_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index cedb6d50..8513a28f 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -645,7 +645,7 @@ long drm_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retcode = func(dev, kdata, file_priv); } - if (cmd & IOC_OUT) { + if ((retcode == 0) && cmd & IOC_OUT) { if (copy_to_user((void __user *)arg, kdata, _IOC_SIZE(cmd)) != 0) retcode = -EACCES; From b44925b2a553df6a611db320b553336a946aa1a8 Mon Sep 17 00:00:00 2001 From: Alan Hourihane Date: Wed, 26 Sep 2007 16:18:19 +0100 Subject: [PATCH 330/437] Add brackets --- linux-core/drm_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index 8513a28f..73598892 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -645,7 +645,7 @@ long drm_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retcode = func(dev, kdata, file_priv); } - if ((retcode == 0) && cmd & IOC_OUT) { + if ((retcode == 0) && (cmd & IOC_OUT)) { if (copy_to_user((void __user *)arg, kdata, _IOC_SIZE(cmd)) != 0) retcode = -EACCES; From 24cdd2f8c494573e1f84a752ae4eccec8890347a Mon Sep 17 00:00:00 2001 From: Keith Packard Date: Wed, 26 Sep 2007 14:25:10 -0700 Subject: [PATCH 331/437] Allow parallel module compile --- linux-core/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/linux-core/Makefile b/linux-core/Makefile index 1cdf3b30..f2519ed5 100644 --- a/linux-core/Makefile +++ b/linux-core/Makefile @@ -163,7 +163,7 @@ endif all: modules modules: includes - make -C $(LINUXDIR) $(GETCONFIG) SUBDIRS=`pwd` DRMSRCDIR=`pwd` modules + +make -C $(LINUXDIR) $(GETCONFIG) SUBDIRS=`pwd` DRMSRCDIR=`pwd` modules ifeq ($(HEADERFROMBOOT),1) From 0bb2395a8be0c33cc687dfd6aae7df81a82ed8e5 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Fri, 28 Sep 2007 10:10:08 -0700 Subject: [PATCH 332/437] Revert drm_i915_flip_t braindamage I should not have renamed this field. I should not have renamed this field. I should not have renamed this field. On the plus side, it was at least binary compatible. --- shared-core/i915_dma.c | 7 ++++--- shared-core/i915_drm.h | 8 +++++++- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c index daa03df8..3a9ecab2 100644 --- a/shared-core/i915_dma.c +++ b/shared-core/i915_dma.c @@ -755,13 +755,14 @@ static int i915_flip_bufs(struct drm_device *dev, void *data, struct drm_file *f LOCK_TEST_WITH_RETURN(dev, file_priv); - if (param->planes & ~0x3) { + /* This is really planes */ + if (param->pipes & ~0x3) { DRM_ERROR("Invalid planes 0x%x, only <= 0x3 is valid\n", - param->planes); + param->pipes); return -EINVAL; } - i915_dispatch_flip(dev, param->planes, 0); + i915_dispatch_flip(dev, param->pipes, 0); return 0; } diff --git a/shared-core/i915_drm.h b/shared-core/i915_drm.h index a57ffa73..9976804e 100644 --- a/shared-core/i915_drm.h +++ b/shared-core/i915_drm.h @@ -182,7 +182,13 @@ typedef struct _drm_i915_sarea { /* Asynchronous page flipping: */ typedef struct drm_i915_flip { - int planes; + /* + * This is really talking about planes, and we could rename it + * except for the fact that some of the duplicated i915_drm.h files + * out there check for HAVE_I915_FLIP and so might pick up this + * version. 
+ */ + int pipes; } drm_i915_flip_t; /* Allow drivers to submit batchbuffers directly to hardware, relying From 72134e939eda578bc53746bf43f7096cbeaf9b7b Mon Sep 17 00:00:00 2001 From: Matthieu Castet Date: Fri, 28 Sep 2007 21:28:47 +0200 Subject: [PATCH 333/437] nouveau : clean chan->pgraph_ctx stuff. We now do a static init of the array. This avoid hardcoding pgraph_ctx size and potential buffer overflow. --- shared-core/nv10_graph.c | 543 ++++++++++++++++++++------------------- 1 file changed, 282 insertions(+), 261 deletions(-) diff --git a/shared-core/nv10_graph.c b/shared-core/nv10_graph.c index 1fd185a0..311e0e91 100644 --- a/shared-core/nv10_graph.c +++ b/shared-core/nv10_graph.c @@ -42,244 +42,6 @@ struct pipe_state { uint32_t pipe_0x7800[0x0c0/4]; }; -/* TODO dynamic allocation ??? */ -static struct pipe_state pipe_state[NV10_FIFO_NUMBER]; - -static void nv10_graph_save_pipe(struct nouveau_channel *chan) { - struct drm_device *dev = chan->dev; - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct pipe_state *fifo_pipe_state = pipe_state + chan->id; - int i; -#define PIPE_SAVE(addr) \ - do { \ - NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, addr); \ - for (i=0; i < sizeof(fifo_pipe_state->pipe_##addr)/sizeof(fifo_pipe_state->pipe_##addr[0]); i++) \ - fifo_pipe_state->pipe_##addr[i] = NV_READ(NV10_PGRAPH_PIPE_DATA); \ - } while (0) - - PIPE_SAVE(0x4400); - PIPE_SAVE(0x0200); - PIPE_SAVE(0x6400); - PIPE_SAVE(0x6800); - PIPE_SAVE(0x6c00); - PIPE_SAVE(0x7000); - PIPE_SAVE(0x7400); - PIPE_SAVE(0x7800); - PIPE_SAVE(0x0040); - PIPE_SAVE(0x0000); - -#undef PIPE_SAVE -} - -static void nv10_graph_load_pipe(struct nouveau_channel *chan) { - struct drm_device *dev = chan->dev; - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct pipe_state *fifo_pipe_state = pipe_state + chan->id; - int i; - uint32_t xfmode0, xfmode1; -#define PIPE_RESTORE(addr) \ - do { \ - NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, addr); \ - for (i=0; i < sizeof(fifo_pipe_state->pipe_##addr)/sizeof(fifo_pipe_state->pipe_##addr[0]); i++) \ - NV_WRITE(NV10_PGRAPH_PIPE_DATA, fifo_pipe_state->pipe_##addr[i]); \ - } while (0) - - - nouveau_wait_for_idle(dev); - /* XXX check haiku comments */ - xfmode0 = NV_READ(NV10_PGRAPH_XFMODE0); - xfmode1 = NV_READ(NV10_PGRAPH_XFMODE1); - NV_WRITE(NV10_PGRAPH_XFMODE0, 0x10000000); - NV_WRITE(NV10_PGRAPH_XFMODE1, 0x00000000); - NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0); - for (i = 0; i < 4; i++) - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x3f800000); - for (i = 0; i < 4; i++) - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - - NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0); - for (i = 0; i < 3; i++) - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x3f800000); - - NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80); - for (i = 0; i < 3; i++) - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); - - NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00000040); - NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000008); - - - PIPE_RESTORE(0x0200); - nouveau_wait_for_idle(dev); - - /* restore XFMODE */ - NV_WRITE(NV10_PGRAPH_XFMODE0, xfmode0); - NV_WRITE(NV10_PGRAPH_XFMODE1, xfmode1); - PIPE_RESTORE(0x6400); - PIPE_RESTORE(0x6800); - PIPE_RESTORE(0x6c00); - PIPE_RESTORE(0x7000); - PIPE_RESTORE(0x7400); - PIPE_RESTORE(0x7800); - PIPE_RESTORE(0x4400); - PIPE_RESTORE(0x0000); - PIPE_RESTORE(0x0040); - nouveau_wait_for_idle(dev); - -#undef PIPE_RESTORE -} - -static void nv10_graph_create_pipe(struct nouveau_channel *chan) { - struct pipe_state *fifo_pipe_state = pipe_state + chan->id; - uint32_t *fifo_pipe_state_addr; - int i; -#define 
PIPE_INIT(addr) \ - do { \ - fifo_pipe_state_addr = fifo_pipe_state->pipe_##addr; \ - } while (0) -#define PIPE_INIT_END(addr) \ - do { \ - if (fifo_pipe_state_addr != \ - sizeof(fifo_pipe_state->pipe_##addr)/sizeof(fifo_pipe_state->pipe_##addr[0]) + fifo_pipe_state->pipe_##addr) \ - DRM_ERROR("incomplete pipe init for 0x%x : %p/%p\n", addr, fifo_pipe_state_addr, \ - sizeof(fifo_pipe_state->pipe_##addr)/sizeof(fifo_pipe_state->pipe_##addr[0]) + fifo_pipe_state->pipe_##addr); \ - } while (0) -#define NV_WRITE_PIPE_INIT(value) *(fifo_pipe_state_addr++) = value - - PIPE_INIT(0x0200); - for (i = 0; i < 48; i++) - NV_WRITE_PIPE_INIT(0x00000000); - PIPE_INIT_END(0x0200); - - PIPE_INIT(0x6400); - for (i = 0; i < 211; i++) - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x3f800000); - NV_WRITE_PIPE_INIT(0x40000000); - NV_WRITE_PIPE_INIT(0x40000000); - NV_WRITE_PIPE_INIT(0x40000000); - NV_WRITE_PIPE_INIT(0x40000000); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x3f800000); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x3f000000); - NV_WRITE_PIPE_INIT(0x3f000000); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x3f800000); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x3f800000); - NV_WRITE_PIPE_INIT(0x3f800000); - NV_WRITE_PIPE_INIT(0x3f800000); - NV_WRITE_PIPE_INIT(0x3f800000); - PIPE_INIT_END(0x6400); - - PIPE_INIT(0x6800); - for (i = 0; i < 162; i++) - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x3f800000); - for (i = 0; i < 25; i++) - NV_WRITE_PIPE_INIT(0x00000000); - PIPE_INIT_END(0x6800); - - PIPE_INIT(0x6c00); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0xbf800000); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x00000000); - PIPE_INIT_END(0x6c00); - - PIPE_INIT(0x7000); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x7149f2ca); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x7149f2ca); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x7149f2ca); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x7149f2ca); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x7149f2ca); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x7149f2ca); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x00000000); - 
NV_WRITE_PIPE_INIT(0x7149f2ca); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x00000000); - NV_WRITE_PIPE_INIT(0x7149f2ca); - for (i = 0; i < 35; i++) - NV_WRITE_PIPE_INIT(0x00000000); - PIPE_INIT_END(0x7000); - - PIPE_INIT(0x7400); - for (i = 0; i < 48; i++) - NV_WRITE_PIPE_INIT(0x00000000); - PIPE_INIT_END(0x7400); - - PIPE_INIT(0x7800); - for (i = 0; i < 48; i++) - NV_WRITE_PIPE_INIT(0x00000000); - PIPE_INIT_END(0x7800); - - PIPE_INIT(0x4400); - for (i = 0; i < 32; i++) - NV_WRITE_PIPE_INIT(0x00000000); - PIPE_INIT_END(0x4400); - - PIPE_INIT(0x0000); - for (i = 0; i < 16; i++) - NV_WRITE_PIPE_INIT(0x00000000); - PIPE_INIT_END(0x0000); - - PIPE_INIT(0x0040); - for (i = 0; i < 4; i++) - NV_WRITE_PIPE_INIT(0x00000000); - PIPE_INIT_END(0x0040); - -#undef PIPE_INIT -#undef PIPE_INIT_END -#undef NV_WRITE_PIPE_INIT -} - static int nv10_graph_ctx_regs [] = { NV10_PGRAPH_CTX_SWITCH1, NV10_PGRAPH_CTX_SWITCH2, @@ -623,20 +385,270 @@ NV10_PGRAPH_DEBUG_4, 0x00400a04, }; +struct graph_state { + int nv10[sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0])]; + int nv17[sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0])]; +}; + +/* TODO dynamic allocation ??? */ +static struct pipe_state pipe_state[NV10_FIFO_NUMBER]; +static struct graph_state graph_state[NV10_FIFO_NUMBER]; + + +static void nv10_graph_save_pipe(struct nouveau_channel *chan) { + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct pipe_state *fifo_pipe_state = pipe_state + chan->id; + int i; +#define PIPE_SAVE(addr) \ + do { \ + NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, addr); \ + for (i=0; i < sizeof(fifo_pipe_state->pipe_##addr)/sizeof(fifo_pipe_state->pipe_##addr[0]); i++) \ + fifo_pipe_state->pipe_##addr[i] = NV_READ(NV10_PGRAPH_PIPE_DATA); \ + } while (0) + + PIPE_SAVE(0x4400); + PIPE_SAVE(0x0200); + PIPE_SAVE(0x6400); + PIPE_SAVE(0x6800); + PIPE_SAVE(0x6c00); + PIPE_SAVE(0x7000); + PIPE_SAVE(0x7400); + PIPE_SAVE(0x7800); + PIPE_SAVE(0x0040); + PIPE_SAVE(0x0000); + +#undef PIPE_SAVE +} + +static void nv10_graph_load_pipe(struct nouveau_channel *chan) { + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct pipe_state *fifo_pipe_state = pipe_state + chan->id; + int i; + uint32_t xfmode0, xfmode1; +#define PIPE_RESTORE(addr) \ + do { \ + NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, addr); \ + for (i=0; i < sizeof(fifo_pipe_state->pipe_##addr)/sizeof(fifo_pipe_state->pipe_##addr[0]); i++) \ + NV_WRITE(NV10_PGRAPH_PIPE_DATA, fifo_pipe_state->pipe_##addr[i]); \ + } while (0) + + + nouveau_wait_for_idle(dev); + /* XXX check haiku comments */ + xfmode0 = NV_READ(NV10_PGRAPH_XFMODE0); + xfmode1 = NV_READ(NV10_PGRAPH_XFMODE1); + NV_WRITE(NV10_PGRAPH_XFMODE0, 0x10000000); + NV_WRITE(NV10_PGRAPH_XFMODE1, 0x00000000); + NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0); + for (i = 0; i < 4; i++) + NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x3f800000); + for (i = 0; i < 4; i++) + NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); + + NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0); + for (i = 0; i < 3; i++) + NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x3f800000); + + NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80); + for (i = 0; i < 3; i++) + NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); + + NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00000040); + NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000008); + + + PIPE_RESTORE(0x0200); + nouveau_wait_for_idle(dev); + + /* restore XFMODE */ + NV_WRITE(NV10_PGRAPH_XFMODE0, xfmode0); + 
NV_WRITE(NV10_PGRAPH_XFMODE1, xfmode1); + PIPE_RESTORE(0x6400); + PIPE_RESTORE(0x6800); + PIPE_RESTORE(0x6c00); + PIPE_RESTORE(0x7000); + PIPE_RESTORE(0x7400); + PIPE_RESTORE(0x7800); + PIPE_RESTORE(0x4400); + PIPE_RESTORE(0x0000); + PIPE_RESTORE(0x0040); + nouveau_wait_for_idle(dev); + +#undef PIPE_RESTORE +} + +static void nv10_graph_create_pipe(struct nouveau_channel *chan) { + struct pipe_state *fifo_pipe_state = pipe_state + chan->id; + uint32_t *fifo_pipe_state_addr; + int i; +#define PIPE_INIT(addr) \ + do { \ + fifo_pipe_state_addr = fifo_pipe_state->pipe_##addr; \ + } while (0) +#define PIPE_INIT_END(addr) \ + do { \ + if (fifo_pipe_state_addr != \ + sizeof(fifo_pipe_state->pipe_##addr)/sizeof(fifo_pipe_state->pipe_##addr[0]) + fifo_pipe_state->pipe_##addr) \ + DRM_ERROR("incomplete pipe init for 0x%x : %p/%p\n", addr, fifo_pipe_state_addr, \ + sizeof(fifo_pipe_state->pipe_##addr)/sizeof(fifo_pipe_state->pipe_##addr[0]) + fifo_pipe_state->pipe_##addr); \ + } while (0) +#define NV_WRITE_PIPE_INIT(value) *(fifo_pipe_state_addr++) = value + + PIPE_INIT(0x0200); + for (i = 0; i < 48; i++) + NV_WRITE_PIPE_INIT(0x00000000); + PIPE_INIT_END(0x0200); + + PIPE_INIT(0x6400); + for (i = 0; i < 211; i++) + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x3f800000); + NV_WRITE_PIPE_INIT(0x40000000); + NV_WRITE_PIPE_INIT(0x40000000); + NV_WRITE_PIPE_INIT(0x40000000); + NV_WRITE_PIPE_INIT(0x40000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x3f800000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x3f000000); + NV_WRITE_PIPE_INIT(0x3f000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x3f800000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x3f800000); + NV_WRITE_PIPE_INIT(0x3f800000); + NV_WRITE_PIPE_INIT(0x3f800000); + NV_WRITE_PIPE_INIT(0x3f800000); + PIPE_INIT_END(0x6400); + + PIPE_INIT(0x6800); + for (i = 0; i < 162; i++) + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x3f800000); + for (i = 0; i < 25; i++) + NV_WRITE_PIPE_INIT(0x00000000); + PIPE_INIT_END(0x6800); + + PIPE_INIT(0x6c00); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0xbf800000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + PIPE_INIT_END(0x6c00); + + PIPE_INIT(0x7000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x7149f2ca); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x7149f2ca); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x7149f2ca); + 
NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x7149f2ca); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x7149f2ca); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x7149f2ca); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x7149f2ca); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x7149f2ca); + for (i = 0; i < 35; i++) + NV_WRITE_PIPE_INIT(0x00000000); + PIPE_INIT_END(0x7000); + + PIPE_INIT(0x7400); + for (i = 0; i < 48; i++) + NV_WRITE_PIPE_INIT(0x00000000); + PIPE_INIT_END(0x7400); + + PIPE_INIT(0x7800); + for (i = 0; i < 48; i++) + NV_WRITE_PIPE_INIT(0x00000000); + PIPE_INIT_END(0x7800); + + PIPE_INIT(0x4400); + for (i = 0; i < 32; i++) + NV_WRITE_PIPE_INIT(0x00000000); + PIPE_INIT_END(0x4400); + + PIPE_INIT(0x0000); + for (i = 0; i < 16; i++) + NV_WRITE_PIPE_INIT(0x00000000); + PIPE_INIT_END(0x0000); + + PIPE_INIT(0x0040); + for (i = 0; i < 4; i++) + NV_WRITE_PIPE_INIT(0x00000000); + PIPE_INIT_END(0x0040); + +#undef PIPE_INIT +#undef PIPE_INIT_END +#undef NV_WRITE_PIPE_INIT +} + static int nv10_graph_ctx_regs_find_offset(struct drm_device *dev, int reg) { - struct drm_nouveau_private *dev_priv = dev->dev_private; - int i, j; + int i; for (i = 0; i < sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0]); i++) { if (nv10_graph_ctx_regs[i] == reg) return i; } - if (dev_priv->chipset>=0x17) { - for (j = 0; j < sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0]); i++,j++) { - if (nv17_graph_ctx_regs[j] == reg) - return i; - } + DRM_ERROR("unknow offset nv10_ctx_regs %d\n", reg); + return -1; +} + +static int nv17_graph_ctx_regs_find_offset(struct drm_device *dev, int reg) +{ + int i; + for (i = 0; i < sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0]); i++) { + if (nv17_graph_ctx_regs[i] == reg) + return i; } + DRM_ERROR("unknow offset nv17_ctx_regs %d\n", reg); return -1; } @@ -644,13 +656,14 @@ int nv10_graph_load_context(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - int i, j; + struct graph_state* pgraph_ctx = graph_state + chan->id; + int i; for (i = 0; i < sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0]); i++) - NV_WRITE(nv10_graph_ctx_regs[i], chan->pgraph_ctx[i]); + NV_WRITE(nv10_graph_ctx_regs[i], pgraph_ctx->nv10[i]); if (dev_priv->chipset>=0x17) { - for (j = 0; j < sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0]); i++,j++) - NV_WRITE(nv17_graph_ctx_regs[j], chan->pgraph_ctx[i]); + for (i = 0; i < sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0]); i++) + NV_WRITE(nv17_graph_ctx_regs[i], pgraph_ctx->nv17[i]); } nv10_graph_load_pipe(chan); @@ -662,13 +675,14 @@ int nv10_graph_save_context(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - int i, j; + struct graph_state* pgraph_ctx = graph_state + chan->id; + int i; for (i = 0; i < sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0]); i++) - chan->pgraph_ctx[i] = NV_READ(nv10_graph_ctx_regs[i]); + pgraph_ctx->nv10[i] = NV_READ(nv10_graph_ctx_regs[i]); if (dev_priv->chipset>=0x17) { - for (j = 0; j < 
sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0]); i++,j++) - chan->pgraph_ctx[i] = NV_READ(nv17_graph_ctx_regs[j]); + for (i = 0; i < sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0]); i++) + pgraph_ctx->nv17[i] = NV_READ(nv17_graph_ctx_regs[i]); } nv10_graph_save_pipe(chan); @@ -737,16 +751,23 @@ void nouveau_nv10_context_switch(struct drm_device *dev) #define NV_WRITE_CTX(reg, val) do { \ int offset = nv10_graph_ctx_regs_find_offset(dev, reg); \ if (offset > 0) \ - chan->pgraph_ctx[offset] = val; \ + pgraph_ctx->nv10[offset] = val; \ + } while (0) + +#define NV17_WRITE_CTX(reg, val) do { \ + int offset = nv17_graph_ctx_regs_find_offset(dev, reg); \ + if (offset > 0) \ + pgraph_ctx->nv17[offset] = val; \ } while (0) int nv10_graph_create_context(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; + struct graph_state* pgraph_ctx = graph_state + chan->id; DRM_DEBUG("nv10_graph_context_create %d\n", chan->id); - memset(chan->pgraph_ctx, 0, sizeof(chan->pgraph_ctx)); + memset(pgraph_ctx, 0, sizeof(*pgraph_ctx)); /* mmio trace suggest that should be done in ddx with methods/objects */ #if 0 @@ -786,12 +807,12 @@ int nv10_graph_create_context(struct nouveau_channel *chan) { NV_WRITE_CTX(0x00400e34, 0x00080008); if (dev_priv->chipset>=0x17) { /* is it really needed ??? */ - NV_WRITE_CTX(NV10_PGRAPH_DEBUG_4, NV_READ(NV10_PGRAPH_DEBUG_4)); - NV_WRITE_CTX(0x004006b0, NV_READ(0x004006b0)); - NV_WRITE_CTX(0x00400eac, 0x0fff0000); - NV_WRITE_CTX(0x00400eb0, 0x0fff0000); - NV_WRITE_CTX(0x00400ec0, 0x00000080); - NV_WRITE_CTX(0x00400ed0, 0x00000080); + NV17_WRITE_CTX(NV10_PGRAPH_DEBUG_4, NV_READ(NV10_PGRAPH_DEBUG_4)); + NV17_WRITE_CTX(0x004006b0, NV_READ(0x004006b0)); + NV17_WRITE_CTX(0x00400eac, 0x0fff0000); + NV17_WRITE_CTX(0x00400eb0, 0x0fff0000); + NV17_WRITE_CTX(0x00400ec0, 0x00000080); + NV17_WRITE_CTX(0x00400ed0, 0x00000080); } NV_WRITE_CTX(NV10_PGRAPH_CTX_USER, chan->id << 24); From 215eab6ccfb6d3a22218f996c8215a7dcaf65d01 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michel=20D=C3=A4nzer?= Date: Thu, 27 Sep 2007 08:01:58 +0200 Subject: [PATCH 334/437] Don't build without any optimization on Linux. Building without optimization causes the drm module not to link correctly on ppc. --- linux-core/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/linux-core/Makefile b/linux-core/Makefile index f2519ed5..6eb5bf5c 100644 --- a/linux-core/Makefile +++ b/linux-core/Makefile @@ -269,7 +269,7 @@ PAGE_AGP := $(shell cat $(LINUXDIR)/include/asm/agp.h 2>/dev/null | \ ifneq ($(PAGE_AGP),0) EXTRA_CFLAGS += -DHAVE_PAGE_AGP endif -EXTRA_CFLAGS += -g -O0 +EXTRA_CFLAGS += -g # Start with all modules turned off. CONFIG_DRM_GAMMA := n From f863d23e01bf0b851c2c7addedfaec77ef951a0c Mon Sep 17 00:00:00 2001 From: chaohong guo Date: Sat, 29 Sep 2007 18:06:47 +0200 Subject: [PATCH 335/437] radeon: Commit the ring after each partial texture upload blit. This makes sure each blit starts as early as possible, which may improve texture upload performance in some cases. 
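
Roughly, ADVANCE_RING() only finishes building the packet in the host-side ring buffer, while COMMIT_RING() is what publishes the new write pointer to the hardware (these are the macros named in the hunks below). The following is a toy model of why committing per chunk helps, in plain userspace C with invented names, not the driver's real macros.

    /* Toy model: publishing the write pointer after every chunk lets the
     * consumer start on chunk 0 while later chunks are still being built,
     * instead of only seeing work after the whole upload loop. */
    #include <stdio.h>

    #define RING_SIZE 64

    static unsigned int ring[RING_SIZE];
    static unsigned int wptr;               /* host-side write cursor */
    static volatile unsigned int hw_wptr;   /* what the "GPU" would poll */

    static void emit(unsigned int cmd)
    {
        ring[wptr++ % RING_SIZE] = cmd;
    }

    static void commit_ring(void)           /* analogous to COMMIT_RING() */
    {
        hw_wptr = wptr;
    }

    int main(void)
    {
        for (unsigned int chunk = 0; chunk < 4; chunk++) {
            emit(0x1000 + chunk);            /* one partial texture blit */
            commit_ring();                   /* kick per chunk, as in this patch */
            printf("chunk %u visible at hw_wptr=%u\n", chunk, hw_wptr);
        }
        return 0;
    }
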
--- shared-core/radeon_state.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/shared-core/radeon_state.c b/shared-core/radeon_state.c index ac7f6011..e3aadfb9 100644 --- a/shared-core/radeon_state.c +++ b/shared-core/radeon_state.c @@ -1861,6 +1861,7 @@ static int radeon_cp_dispatch_texture(struct drm_device * dev, OUT_RING((image->width << 16) | height); RADEON_WAIT_UNTIL_2D_IDLE(); ADVANCE_RING(); + COMMIT_RING(); radeon_cp_discard_buffer(dev, buf); @@ -1878,6 +1879,8 @@ static int radeon_cp_dispatch_texture(struct drm_device * dev, RADEON_FLUSH_CACHE(); RADEON_WAIT_UNTIL_2D_IDLE(); ADVANCE_RING(); + COMMIT_RING(); + return 0; } @@ -2401,7 +2404,6 @@ static int radeon_cp_texture(struct drm_device *dev, void *data, struct drm_file ret = radeon_cp_dispatch_texture(dev, file_priv, tex, &image); - COMMIT_RING(); return ret; } From 097db7a9b0cd0e53fb82dffa57c662f327c19670 Mon Sep 17 00:00:00 2001 From: Matthieu Castet Date: Sat, 29 Sep 2007 23:05:44 +0200 Subject: [PATCH 336/437] nouveau : nv1x fix strange corruption that appears when running glxgears and nouveau demo --- shared-core/nv10_graph.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/shared-core/nv10_graph.c b/shared-core/nv10_graph.c index 311e0e91..c115ba7f 100644 --- a/shared-core/nv10_graph.c +++ b/shared-core/nv10_graph.c @@ -714,7 +714,7 @@ void nouveau_nv10_context_switch(struct drm_device *dev) next = dev_priv->fifos[chid]; if (!next) { - DRM_DEBUG("Invalid next channel\n"); + DRM_ERROR("Invalid next channel\n"); return; } @@ -722,7 +722,7 @@ void nouveau_nv10_context_switch(struct drm_device *dev) last = dev_priv->fifos[chid]; if (!last) { - DRM_DEBUG("WARNING: Invalid last channel, switch to %x\n", + DRM_INFO("WARNING: Invalid last channel, switch to %x\n", next->id); } else { DRM_DEBUG("NV: PGRAPH context switch interrupt channel %x -> %x\n", @@ -827,6 +827,9 @@ void nv10_graph_destroy_context(struct nouveau_channel *chan) int chid; chid = (NV_READ(NV10_PGRAPH_CTX_USER) >> 24) & (nouveau_fifo_number(dev)-1); + /* This code seems to corrupt the 3D pipe, but blob seems to do similar things ???? + */ +#if 0 /* does this avoid a potential context switch while we are written graph * reg, or we should mask graph interrupt ??? 
*/ @@ -838,7 +841,12 @@ void nv10_graph_destroy_context(struct nouveau_channel *chan) nv10_graph_create_context(chan); nv10_graph_load_context(chan); } - NV_WRITE(NV04_PGRAPH_FIFO,0x1); + NV_WRITE(NV04_PGRAPH_FIFO, 0x1); +#else + if (chid == chan->id) { + DRM_INFO("cleanning a channel with graph in current context\n"); + } +#endif } int nv10_graph_init(struct drm_device *dev) { From f8f31f04574cd6986d7d9ef2215bbb23e0f44b73 Mon Sep 17 00:00:00 2001 From: Matthieu Castet Date: Sat, 29 Sep 2007 23:06:29 +0200 Subject: [PATCH 337/437] nouveau : stop the fifo of the channel we are deleting --- shared-core/nouveau_fifo.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/shared-core/nouveau_fifo.c b/shared-core/nouveau_fifo.c index 437c84f2..f82d130b 100644 --- a/shared-core/nouveau_fifo.c +++ b/shared-core/nouveau_fifo.c @@ -403,7 +403,19 @@ void nouveau_fifo_free(struct nouveau_channel *chan) /* disable the fifo caches */ NV_WRITE(NV03_PFIFO_CACHES, 0x00000000); + NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH)&(~0x1)); + NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000000); + NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000); + /* stop the fifo, otherwise it could be running and + * it will crash when removing gpu objects */ + if (dev_priv->card_type < NV_50) { + NV_WRITE(NV03_FIFO_REGS_DMAPUT(chan->id), chan->pushbuf_base); + NV_WRITE(NV03_FIFO_REGS_DMAGET(chan->id), chan->pushbuf_base); + } else { + NV_WRITE(NV50_FIFO_REGS_DMAPUT(chan->id), chan->pushbuf_base); + NV_WRITE(NV50_FIFO_REGS_DMAGET(chan->id), chan->pushbuf_base); + } // FIXME XXX needs more code engine->fifo.destroy_context(chan); @@ -412,6 +424,10 @@ void nouveau_fifo_free(struct nouveau_channel *chan) engine->graph.destroy_context(chan); /* reenable the fifo caches */ + NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, + NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) | 1); + NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000001); + NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000001); NV_WRITE(NV03_PFIFO_CACHES, 0x00000001); /* Deallocate push buffer */ From c76e04828bd5849f526fae5af7deaf1cbc9f4c55 Mon Sep 17 00:00:00 2001 From: Matthieu Castet Date: Sun, 30 Sep 2007 14:21:47 +0200 Subject: [PATCH 338/437] nouveau : nv04 don't use chan->pgraph_ctx array This commit is a first step to dynamic alloc pgraph context on nv04, nv10. --- shared-core/nv04_graph.c | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/shared-core/nv04_graph.c b/shared-core/nv04_graph.c index 2cf052cf..b07bcfb9 100644 --- a/shared-core/nv04_graph.c +++ b/shared-core/nv04_graph.c @@ -346,6 +346,13 @@ static uint32_t nv04_graph_ctx_regs [] = { }; +struct graph_state { + int nv04[sizeof(nv04_graph_ctx_regs)/sizeof(nv04_graph_ctx_regs[0])]; +}; + +/* TODO dynamic allocation ??? */ +static struct graph_state graph_state[16]; + void nouveau_nv04_context_switch(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; @@ -414,12 +421,13 @@ void nouveau_nv04_context_switch(struct drm_device *dev) } int nv04_graph_create_context(struct nouveau_channel *chan) { + struct graph_state* pgraph_ctx = graph_state + chan->id; DRM_DEBUG("nv04_graph_context_create %d\n", chan->id); - memset(chan->pgraph_ctx, 0, sizeof(chan->pgraph_ctx)); + memset(pgraph_ctx, 0, sizeof(*pgraph_ctx)); //dev_priv->fifos[channel].pgraph_ctx_user = channel << 24; - chan->pgraph_ctx[0] = 0x0001ffff; + pgraph_ctx->nv04[0] = 0x0001ffff; /* is it really needed ??? 
*/ //dev_priv->fifos[channel].pgraph_ctx[1] = NV_READ(NV_PGRAPH_DEBUG_4); //dev_priv->fifos[channel].pgraph_ctx[2] = NV_READ(0x004006b0); @@ -435,10 +443,11 @@ int nv04_graph_load_context(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; + struct graph_state* pgraph_ctx = graph_state + chan->id; int i; for (i = 0; i < sizeof(nv04_graph_ctx_regs)/sizeof(nv04_graph_ctx_regs[0]); i++) - NV_WRITE(nv04_graph_ctx_regs[i], chan->pgraph_ctx[i]); + NV_WRITE(nv04_graph_ctx_regs[i], pgraph_ctx->nv04[i]); return 0; } @@ -447,10 +456,11 @@ int nv04_graph_save_context(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; + struct graph_state* pgraph_ctx = graph_state + chan->id; int i; for (i = 0; i < sizeof(nv04_graph_ctx_regs)/sizeof(nv04_graph_ctx_regs[0]); i++) - chan->pgraph_ctx[i] = NV_READ(nv04_graph_ctx_regs[i]); + pgraph_ctx->nv04[i] = NV_READ(nv04_graph_ctx_regs[i]); return 0; } @@ -467,10 +477,6 @@ int nv04_graph_init(struct drm_device *dev) { NV_WRITE(NV03_PGRAPH_INTR, 0xFFFFFFFF); NV_WRITE(NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); - // check the context is big enough - if ( sizeof(nv04_graph_ctx_regs)>sizeof(dev_priv->fifos[0]->pgraph_ctx) ) - DRM_ERROR("pgraph_ctx too small\n"); - NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x000001FF); NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x1231c000); NV_WRITE(NV04_PGRAPH_DEBUG_1, 0xf2d91100); From fb3ed99fb110a540d16923417c33ff581721ba3a Mon Sep 17 00:00:00 2001 From: Matthieu Castet Date: Sun, 30 Sep 2007 14:50:22 +0200 Subject: [PATCH 339/437] nouveau : pgraph_ctx dynamic alloc for nv04, nv10 --- shared-core/nouveau_drv.h | 3 ++- shared-core/nv04_graph.c | 19 ++++++++++++------- shared-core/nv10_graph.c | 35 ++++++++++++++++++++++------------- 3 files changed, 36 insertions(+), 21 deletions(-) diff --git a/shared-core/nouveau_drv.h b/shared-core/nouveau_drv.h index e96c8fad..02a4ee75 100644 --- a/shared-core/nouveau_drv.h +++ b/shared-core/nouveau_drv.h @@ -120,8 +120,9 @@ struct nouveau_channel struct nouveau_gpuobj_ref *ramfc; /* PGRAPH context */ + /* XXX may be merge 2 pointers as private data ??? */ struct nouveau_gpuobj_ref *ramin_grctx; - uint32_t pgraph_ctx [340]; /* XXX dynamic alloc ? */ + void *pgraph_ctx; /* NV50 VM */ struct nouveau_gpuobj *vm_pd; diff --git a/shared-core/nv04_graph.c b/shared-core/nv04_graph.c index b07bcfb9..33dd0a86 100644 --- a/shared-core/nv04_graph.c +++ b/shared-core/nv04_graph.c @@ -350,9 +350,6 @@ struct graph_state { int nv04[sizeof(nv04_graph_ctx_regs)/sizeof(nv04_graph_ctx_regs[0])]; }; -/* TODO dynamic allocation ??? 
*/ -static struct graph_state graph_state[16]; - void nouveau_nv04_context_switch(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; @@ -421,10 +418,14 @@ void nouveau_nv04_context_switch(struct drm_device *dev) } int nv04_graph_create_context(struct nouveau_channel *chan) { - struct graph_state* pgraph_ctx = graph_state + chan->id; + struct graph_state* pgraph_ctx; DRM_DEBUG("nv04_graph_context_create %d\n", chan->id); - memset(pgraph_ctx, 0, sizeof(*pgraph_ctx)); + chan->pgraph_ctx = pgraph_ctx = drm_calloc(1, sizeof(*pgraph_ctx), + DRM_MEM_DRIVER); + + if (pgraph_ctx == NULL) + return -ENOMEM; //dev_priv->fifos[channel].pgraph_ctx_user = channel << 24; pgraph_ctx->nv04[0] = 0x0001ffff; @@ -437,13 +438,17 @@ int nv04_graph_create_context(struct nouveau_channel *chan) { void nv04_graph_destroy_context(struct nouveau_channel *chan) { + struct graph_state* pgraph_ctx = chan->pgraph_ctx; + + drm_free(pgraph_ctx, sizeof(*pgraph_ctx), DRM_MEM_DRIVER); + chan->pgraph_ctx = NULL; } int nv04_graph_load_context(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct graph_state* pgraph_ctx = graph_state + chan->id; + struct graph_state* pgraph_ctx = chan->pgraph_ctx; int i; for (i = 0; i < sizeof(nv04_graph_ctx_regs)/sizeof(nv04_graph_ctx_regs[0]); i++) @@ -456,7 +461,7 @@ int nv04_graph_save_context(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct graph_state* pgraph_ctx = graph_state + chan->id; + struct graph_state* pgraph_ctx = chan->pgraph_ctx; int i; for (i = 0; i < sizeof(nv04_graph_ctx_regs)/sizeof(nv04_graph_ctx_regs[0]); i++) diff --git a/shared-core/nv10_graph.c b/shared-core/nv10_graph.c index c115ba7f..c6319b8f 100644 --- a/shared-core/nv10_graph.c +++ b/shared-core/nv10_graph.c @@ -388,17 +388,14 @@ NV10_PGRAPH_DEBUG_4, struct graph_state { int nv10[sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0])]; int nv17[sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0])]; + struct pipe_state pipe_state; }; -/* TODO dynamic allocation ??? 
*/ -static struct pipe_state pipe_state[NV10_FIFO_NUMBER]; -static struct graph_state graph_state[NV10_FIFO_NUMBER]; - - static void nv10_graph_save_pipe(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct pipe_state *fifo_pipe_state = pipe_state + chan->id; + struct graph_state* pgraph_ctx = chan->pgraph_ctx; + struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state; int i; #define PIPE_SAVE(addr) \ do { \ @@ -424,7 +421,8 @@ static void nv10_graph_save_pipe(struct nouveau_channel *chan) { static void nv10_graph_load_pipe(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct pipe_state *fifo_pipe_state = pipe_state + chan->id; + struct graph_state* pgraph_ctx = chan->pgraph_ctx; + struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state; int i; uint32_t xfmode0, xfmode1; #define PIPE_RESTORE(addr) \ @@ -480,7 +478,8 @@ static void nv10_graph_load_pipe(struct nouveau_channel *chan) { } static void nv10_graph_create_pipe(struct nouveau_channel *chan) { - struct pipe_state *fifo_pipe_state = pipe_state + chan->id; + struct graph_state* pgraph_ctx = chan->pgraph_ctx; + struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state; uint32_t *fifo_pipe_state_addr; int i; #define PIPE_INIT(addr) \ @@ -656,7 +655,7 @@ int nv10_graph_load_context(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct graph_state* pgraph_ctx = graph_state + chan->id; + struct graph_state* pgraph_ctx = chan->pgraph_ctx; int i; for (i = 0; i < sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0]); i++) @@ -675,7 +674,7 @@ int nv10_graph_save_context(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct graph_state* pgraph_ctx = graph_state + chan->id; + struct graph_state* pgraph_ctx = chan->pgraph_ctx; int i; for (i = 0; i < sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0]); i++) @@ -763,11 +762,15 @@ void nouveau_nv10_context_switch(struct drm_device *dev) int nv10_graph_create_context(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct graph_state* pgraph_ctx = graph_state + chan->id; + struct graph_state* pgraph_ctx; DRM_DEBUG("nv10_graph_context_create %d\n", chan->id); - memset(pgraph_ctx, 0, sizeof(*pgraph_ctx)); + chan->pgraph_ctx = pgraph_ctx = drm_calloc(1, sizeof(*pgraph_ctx), + DRM_MEM_DRIVER); + + if (pgraph_ctx == NULL) + return -ENOMEM; /* mmio trace suggest that should be done in ddx with methods/objects */ #if 0 @@ -824,7 +827,12 @@ void nv10_graph_destroy_context(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; + struct graph_state* pgraph_ctx = chan->pgraph_ctx; int chid; + + drm_free(pgraph_ctx, sizeof(*pgraph_ctx), DRM_MEM_DRIVER); + chan->pgraph_ctx = NULL; + chid = (NV_READ(NV10_PGRAPH_CTX_USER) >> 24) & (nouveau_fifo_number(dev)-1); /* This code seems to corrupt the 3D pipe, but blob seems to do similar things ???? 
@@ -838,7 +846,8 @@ void nv10_graph_destroy_context(struct nouveau_channel *chan) DRM_INFO("cleanning a channel with graph in current context\n"); nouveau_wait_for_idle(dev); DRM_INFO("reseting current graph context\n"); - nv10_graph_create_context(chan); + /* can't be call here because of dynamic mem alloc */ + //nv10_graph_create_context(chan); nv10_graph_load_context(chan); } NV_WRITE(NV04_PGRAPH_FIFO, 0x1); From a45fce77125aafc42c2cae6b5a896526ec4ab630 Mon Sep 17 00:00:00 2001 From: Pekka Paalanen Date: Sat, 29 Sep 2007 21:07:46 +0300 Subject: [PATCH 340/437] nouveau: NV30 should never call nouveau_nv20_context_switch(). --- shared-core/nouveau_irq.c | 1 - 1 file changed, 1 deletion(-) diff --git a/shared-core/nouveau_irq.c b/shared-core/nouveau_irq.c index e64677ed..c0199c60 100644 --- a/shared-core/nouveau_irq.c +++ b/shared-core/nouveau_irq.c @@ -364,7 +364,6 @@ static void nouveau_pgraph_irq_handler(struct drm_device *dev) nouveau_nv10_context_switch(dev); break; case NV_20: - case NV_30: nouveau_nv20_context_switch(dev); break; default: From 88bdb38cea60cea918b6e6a1ca97a7ec3de5b832 Mon Sep 17 00:00:00 2001 From: Pekka Paalanen Date: Sat, 29 Sep 2007 21:09:09 +0300 Subject: [PATCH 341/437] nouveau: Change couple constants to symbols. --- shared-core/nv30_graph.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/shared-core/nv30_graph.c b/shared-core/nv30_graph.c index 590a5c33..2210abdb 100644 --- a/shared-core/nv30_graph.c +++ b/shared-core/nv30_graph.c @@ -2769,11 +2769,11 @@ nouveau_graph_wait_idle(struct drm_device *dev) int tv = 1000; while (tv--) { - if (NV_READ(0x400700) == 0) + if (NV_READ(NV04_PGRAPH_STATUS) == 0) break; } - if (NV_READ(0x400700)) { + if (NV_READ(NV04_PGRAPH_STATUS)) { DRM_ERROR("timeout!\n"); return -EBUSY; } @@ -2859,7 +2859,7 @@ int nv30_graph_init(struct drm_device *dev) NV_WRITE(0x400B84, 0x0c000000); NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x62ff0f7f); NV_WRITE(0x4000c0, 0x00000016); - NV_WRITE(0x400780, 0x000014e4); + NV_WRITE(NV10_PGRAPH_CHANNEL_CTX_TABLE, 0x000014e4); /* copy tile info from PFB */ for (i=0; i Date: Sat, 29 Sep 2007 23:06:29 +0300 Subject: [PATCH 342/437] nouveau: Make nv20 use the nv30 PGRAPH ctx functions. 
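
nouveau selects its per-generation PGRAPH context routines by filling in a table of function pointers at init time (see the nouveau_init_engine_ptrs() hunk below), so pointing the NV20 entries at the nv30_graph_* implementations is enough to switch over. Below is a hypothetical, self-contained sketch of that dispatch pattern; the struct and function names are invented, not the driver's real ones.

    #include <stdio.h>

    struct graph_ops {
        int (*create_context)(int chan_id);
        int (*load_context)(int chan_id);
    };

    static int nv30_create_context(int chan_id)
    {
        printf("nv30-style create for channel %d\n", chan_id);
        return 0;
    }

    static int nv30_load_context(int chan_id)
    {
        printf("nv30-style load for channel %d\n", chan_id);
        return 0;
    }

    int main(void)
    {
        /* An NV20 card simply gets the NV30 routines assigned here. */
        struct graph_ops nv20_engine = {
            .create_context = nv30_create_context,
            .load_context   = nv30_load_context,
        };

        return nv20_engine.create_context(0) || nv20_engine.load_context(0);
    }
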
--- shared-core/nouveau_state.c | 8 ++++---- shared-core/nv20_graph.c | 2 ++ shared-core/nv30_graph.c | 25 +++++++++++++++++++++++-- 3 files changed, 29 insertions(+), 6 deletions(-) diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c index e73b4878..f8dd3ad5 100644 --- a/shared-core/nouveau_state.c +++ b/shared-core/nouveau_state.c @@ -166,10 +166,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->fb.takedown = nv10_fb_takedown; engine->graph.init = nv20_graph_init; engine->graph.takedown = nv20_graph_takedown; - engine->graph.create_context = nv20_graph_create_context; - engine->graph.destroy_context = nv20_graph_destroy_context; - engine->graph.load_context = nv20_graph_load_context; - engine->graph.save_context = nv20_graph_save_context; + engine->graph.create_context = nv30_graph_create_context; + engine->graph.destroy_context = nv30_graph_destroy_context; + engine->graph.load_context = nv30_graph_load_context; + engine->graph.save_context = nv30_graph_save_context; engine->fifo.init = nouveau_fifo_init; engine->fifo.takedown = nouveau_stub_takedown; engine->fifo.create_context = nv10_fifo_create_context; diff --git a/shared-core/nv20_graph.c b/shared-core/nv20_graph.c index c163daf9..252d90b5 100644 --- a/shared-core/nv20_graph.c +++ b/shared-core/nv20_graph.c @@ -29,6 +29,7 @@ #define NV20_GRCTX_SIZE (3529*4) +#if 0 int nv20_graph_create_context(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; @@ -56,6 +57,7 @@ void nv20_graph_destroy_context(struct nouveau_channel *chan) { INSTANCE_WR(dev_priv->ctx_table->gpuobj, chan->id, 0); } +#endif /* 0 */ static void nv20_graph_rdi(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; diff --git a/shared-core/nv30_graph.c b/shared-core/nv30_graph.c index 2210abdb..108412ed 100644 --- a/shared-core/nv30_graph.c +++ b/shared-core/nv30_graph.c @@ -23,11 +23,23 @@ * */ +/*#define NV20_GRCTX_SIZE (3529*4)*/ + +#define NV28_GRCTX_SIZE (3529*4) #define NV30_31_GRCTX_SIZE (22392) #define NV34_GRCTX_SIZE (18140) #define NV35_36_GRCTX_SIZE (22396) + +static void nv28_graph_context_init(struct drm_device *dev, + struct nouveau_gpuobj *ctx) +{ + int i; + (void)dev; + +} + static void nv30_31_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) { struct drm_nouveau_private *dev_priv = dev->dev_private; @@ -2715,6 +2727,10 @@ int nv30_graph_create_context(struct nouveau_channel *chan) int ret; switch (dev_priv->chipset) { + case 0x28: + ctx_size = NV28_GRCTX_SIZE; + ctx_init = nv28_graph_context_init; + break; case 0x30: case 0x31: ctx_size = NV30_31_GRCTX_SIZE; @@ -2732,7 +2748,9 @@ int nv30_graph_create_context(struct nouveau_channel *chan) default: ctx_size = 0; ctx_init = nv35_36_graph_context_init; - DRM_ERROR("Please contact the devs if you want your NV%x card to work\n",dev_priv->chipset); + DRM_ERROR("Please contact the devs if you want your NV%x" + " card to work\n", dev_priv->chipset); + return -ENOSYS; break; } @@ -2744,7 +2762,10 @@ int nv30_graph_create_context(struct nouveau_channel *chan) /* Initialise default context values */ ctx_init(dev, chan->ramin_grctx->gpuobj); - INSTANCE_WR(chan->ramin_grctx->gpuobj, 0x28/4, (chan->id<<24)|0x1); /* CTX_USER */ + /* nv20: INSTANCE_WR(chan->ramin_grctx->gpuobj, 10, chan->id<<24); */ + INSTANCE_WR(chan->ramin_grctx->gpuobj, 0x28/4, (chan->id<<24)|0x1); + /* CTX_USER */ + INSTANCE_WR(dev_priv->ctx_table->gpuobj, chan->id, 
chan->ramin_grctx->instance >> 4); From 8ad605a2644251a400700e6f0e25ef76a1c80628 Mon Sep 17 00:00:00 2001 From: Pekka Paalanen Date: Sat, 29 Sep 2007 23:17:19 +0300 Subject: [PATCH 343/437] nouveau: let nv20 hardware do ctx switching automatically. --- shared-core/nouveau_irq.c | 3 --- shared-core/nv20_graph.c | 26 +++++++++++++------------- 2 files changed, 13 insertions(+), 16 deletions(-) diff --git a/shared-core/nouveau_irq.c b/shared-core/nouveau_irq.c index c0199c60..45ae6edf 100644 --- a/shared-core/nouveau_irq.c +++ b/shared-core/nouveau_irq.c @@ -363,9 +363,6 @@ static void nouveau_pgraph_irq_handler(struct drm_device *dev) case NV_17: nouveau_nv10_context_switch(dev); break; - case NV_20: - nouveau_nv20_context_switch(dev); - break; default: DRM_ERROR("Context switch not implemented\n"); break; diff --git a/shared-core/nv20_graph.c b/shared-core/nv20_graph.c index 252d90b5..de97a591 100644 --- a/shared-core/nv20_graph.c +++ b/shared-core/nv20_graph.c @@ -57,18 +57,6 @@ void nv20_graph_destroy_context(struct nouveau_channel *chan) { INSTANCE_WR(dev_priv->ctx_table->gpuobj, chan->id, 0); } -#endif /* 0 */ - -static void nv20_graph_rdi(struct drm_device *dev) { - struct drm_nouveau_private *dev_priv = dev->dev_private; - int i; - - NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x2c80000); - for (i = 0; i < 32; i++) - NV_WRITE(NV10_PGRAPH_RDI_DATA, 0); - - nouveau_wait_for_idle(dev); -} /* Save current context (from PGRAPH) into the channel's context */ @@ -145,6 +133,18 @@ void nouveau_nv20_context_switch(struct drm_device *dev) NV_WRITE(NV04_PGRAPH_FIFO,0x1); } +#endif /* 0 */ + +static void nv20_graph_rdi(struct drm_device *dev) { + struct drm_nouveau_private *dev_priv = dev->dev_private; + int i; + + NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x2c80000); + for (i = 0; i < 32; i++) + NV_WRITE(NV10_PGRAPH_RDI_DATA, 0); + + nouveau_wait_for_idle(dev); +} int nv20_graph_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = @@ -177,7 +177,7 @@ int nv20_graph_init(struct drm_device *dev) { NV_WRITE(NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF); NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x00000000); NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x00118700); - NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xF20E0431); + NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xF20E0435); /* 0x4 = auto ctx switch */ NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x00000000); NV_WRITE(0x40009C , 0x00000040); From aa2c3379914fc6fea63bfcfd8579ab6cd8d70a68 Mon Sep 17 00:00:00 2001 From: Pekka Paalanen Date: Sun, 30 Sep 2007 12:03:22 +0300 Subject: [PATCH 344/437] nouveau: nv28 graph context init --- shared-core/nv30_graph.c | 160 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 156 insertions(+), 4 deletions(-) diff --git a/shared-core/nv30_graph.c b/shared-core/nv30_graph.c index 108412ed..951947df 100644 --- a/shared-core/nv30_graph.c +++ b/shared-core/nv30_graph.c @@ -35,12 +35,162 @@ static void nv28_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) { + struct drm_nouveau_private *dev_priv = dev->dev_private; int i; - (void)dev; +/* +write32 #1 block at +0x00740a7c NV_PRAMIN.GRCTX0+0x35c of 173 (0xad) elements: ++0x00740a7c: ffff0000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 ++0x00740a9c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 ++0x00740abc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 ++0x00740adc: 00000000 0fff0000 0fff0000 00000000 00000000 00000000 00000000 00000000 ++0x00740afc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 ++0x00740b1c: 00000000 00000000 
00000000 00000000 00000000 00000000 00000000 00000000 ++0x00740b3c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 ++0x00740b5c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 ++0x00740b7c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 ++0x00740b9c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 ++0x00740bbc: 00000101 00000000 00000000 00000000 00000000 00000111 00000000 00000000 ++0x00740bdc: 00000000 00000000 00000000 00000080 ffff0000 00000001 00000000 00000000 ++0x00740bfc: 00000000 00000000 44400000 00000000 00000000 00000000 00000000 00000000 ++0x00740c1c: 4b800000 00000000 00000000 00000000 00000000 00030303 00030303 00030303 ++0x00740c3c: 00030303 00000000 00000000 00000000 00000000 00080000 00080000 00080000 ++0x00740c5c: 00080000 00000000 00000000 01012000 01012000 01012000 01012000 000105b8 + ++0x00740c7c: 000105b8 000105b8 000105b8 00080008 00080008 00080008 00080008 00000000 ++0x00740c9c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 07ff0000 ++0x00740cbc: 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 ++0x00740cdc: 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 00000000 ++0x00740cfc: 00000000 4b7fffff 00000000 00000000 00000000 00000000 00000000 00000000 ++0x00740d1c: 00000000 00000000 00000000 00000000 00000000 +*/ + INSTANCE_WR(ctx, (0x35c/4)+0, 0xffff0000); + INSTANCE_WR(ctx, (0x35c/4)+25, 0x0fff0000); + INSTANCE_WR(ctx, (0x35c/4)+26, 0x0fff0000); + INSTANCE_WR(ctx, (0x35c/4)+80, 0x00000101); + INSTANCE_WR(ctx, (0x35c/4)+85, 0x00000111); + INSTANCE_WR(ctx, (0x35c/4)+91, 0x00000080); + INSTANCE_WR(ctx, (0x35c/4)+92, 0xffff0000); + INSTANCE_WR(ctx, (0x35c/4)+93, 0x00000001); + INSTANCE_WR(ctx, (0x35c/4)+98, 0x44400000); + INSTANCE_WR(ctx, (0x35c/4)+104, 0x4b800000); + INSTANCE_WR(ctx, (0x35c/4)+109, 0x00030303); + INSTANCE_WR(ctx, (0x35c/4)+110, 0x00030303); + INSTANCE_WR(ctx, (0x35c/4)+111, 0x00030303); + INSTANCE_WR(ctx, (0x35c/4)+112, 0x00030303); + INSTANCE_WR(ctx, (0x35c/4)+117, 0x00080000); + INSTANCE_WR(ctx, (0x35c/4)+118, 0x00080000); + INSTANCE_WR(ctx, (0x35c/4)+119, 0x00080000); + INSTANCE_WR(ctx, (0x35c/4)+120, 0x00080000); + INSTANCE_WR(ctx, (0x35c/4)+123, 0x01012000); + INSTANCE_WR(ctx, (0x35c/4)+124, 0x01012000); + INSTANCE_WR(ctx, (0x35c/4)+125, 0x01012000); + INSTANCE_WR(ctx, (0x35c/4)+126, 0x01012000); + INSTANCE_WR(ctx, (0x35c/4)+127, 0x000105b8); + INSTANCE_WR(ctx, (0x35c/4)+128, 0x000105b8); + INSTANCE_WR(ctx, (0x35c/4)+129, 0x000105b8); + INSTANCE_WR(ctx, (0x35c/4)+130, 0x000105b8); + INSTANCE_WR(ctx, (0x35c/4)+131, 0x00080008); + INSTANCE_WR(ctx, (0x35c/4)+132, 0x00080008); + INSTANCE_WR(ctx, (0x35c/4)+133, 0x00080008); + INSTANCE_WR(ctx, (0x35c/4)+134, 0x00080008); + for (i=0; i<16; ++i) + INSTANCE_WR(ctx, (0x35c/4)+143+i, 0x07ff0000); + INSTANCE_WR(ctx, (0x35c/4)+161, 0x4b7ffff); + +/* +write32 #1 block at +0x00740d34 NV_PRAMIN.GRCTX0+0x614 of 3136 (0xc40) elements: ++0x00740d34: 00000000 00000000 00000000 00000080 30201000 70605040 b0a09080 f0e0d0c0 ++0x00740d54: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 ++0x00740d74: 00000000 00000000 00000000 00000000 00000001 00000000 00004000 00000000 ++0x00740d94: 00000000 00000001 00000000 00040000 00010000 00000000 00000000 00000000 ++0x00740db4: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +... 
++0x00742214: 00000000 00000000 00000000 00000000 10700ff9 0436086c 000c001b 00000000 ++0x00742234: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 ++0x00742254: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 ++0x00742274: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 +... ++0x00742a34: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 ++0x00742a54: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 ++0x00742a74: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 ++0x00742a94: 10700ff9 0436086c 000c001b 00000000 00000000 00000000 00000000 00000000 ++0x00742ab4: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 ++0x00742ad4: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +*/ + INSTANCE_WR(ctx, (0x614/4)+3, 0x00000080); + INSTANCE_WR(ctx, (0x614/4)+4, 0x30201000); + INSTANCE_WR(ctx, (0x614/4)+5, 0x70605040); + INSTANCE_WR(ctx, (0x614/4)+6, 0xb0a09080); + INSTANCE_WR(ctx, (0x614/4)+7, 0xf0e0d0c0); + INSTANCE_WR(ctx, (0x614/4)+20, 0x00000001); + INSTANCE_WR(ctx, (0x614/4)+22, 0x00004000); + INSTANCE_WR(ctx, (0x614/4)+25, 0x00000001); + INSTANCE_WR(ctx, (0x614/4)+27, 0x00040000); + INSTANCE_WR(ctx, (0x614/4)+28, 0x00010000); + for (i=0; i<0x880; i+=4) { + INSTANCE_WR(ctx, (0x1b04/4)+i+0, 0x10700ff9); + INSTANCE_WR(ctx, (0x1b04/4)+i+1, 0x0436086c); + INSTANCE_WR(ctx, (0x1b04/4)+i+2, 0x000c001b); + INSTANCE_WR(ctx, (0x1b04/4)+i+3, 0x00000000); + } + +/* +write32 #1 block at +0x00742e24 NV_PRAMIN.GRCTX0+0x2704 of 4 (0x4) elements: ++0x00742e24: 3f800000 00000000 00000000 00000000 +*/ + INSTANCE_WR(ctx, (0x2704/4), 0x3f800000); + +/* +write32 #1 block at +0x00742e64 NV_PRAMIN.GRCTX0+0x2744 of 12 (0xc) elements: ++0x00742e64: 40000000 3f800000 3f000000 00000000 40000000 3f800000 00000000 bf800000 ++0x00742e84: 00000000 bf800000 00000000 00000000 +*/ + INSTANCE_WR(ctx, (0x2744/4)+0, 0x40000000); + INSTANCE_WR(ctx, (0x2744/4)+1, 0x3f800000); + INSTANCE_WR(ctx, (0x2744/4)+2, 0x3f000000); + INSTANCE_WR(ctx, (0x2744/4)+4, 0x40000000); + INSTANCE_WR(ctx, (0x2744/4)+5, 0x3f800000); + INSTANCE_WR(ctx, (0x2744/4)+7, 0xbf800000); + INSTANCE_WR(ctx, (0x2744/4)+9, 0xbf800000); + +/* +write32 #1 block at +0x00742e34 NV_PRAMIN.GRCTX0+0x2714 of 4 (0x4) elements: ++0x00742e34: 00000000 3f800000 00000000 00000000 +*/ + INSTANCE_WR(ctx, (0x2714/4)+1, 0x3f800000); + +/* +write32 #1 block at +0x00742e94 NV_PRAMIN.GRCTX0+0x2774 of 4 (0x4) elements: ++0x00742e94: 00000000 00000000 00000000 00000000 +write32 #1 block at +0x00743804 NV_PRAMIN.GRCTX0+0x30e4 of 4 (0x4) elements: ++0x00743804: 00000000 00000000 00000000 00000000 +write32 #1 block at +0x007437a4 NV_PRAMIN.GRCTX0+0x3084 of 8 (0x8) elements: ++0x007437a4: 00000000 00000000 000fe000 00000000 00000000 00000000 00000000 00000000 +*/ + INSTANCE_WR(ctx, (0x3084/4)+2, 0x000fe000); + +/* +write32 #1 block at +0x007437d4 NV_PRAMIN.GRCTX0+0x30b4 of 4 (0x4) elements: ++0x007437d4: 00000000 00000000 00000000 00000000 +write32 #1 block at +0x00743824 NV_PRAMIN.GRCTX0+0x3104 of 4 (0x4) elements: ++0x00743824: 00000000 000003f8 00000000 00000000 +*/ + INSTANCE_WR(ctx, (0x3104/4)+1, 0x000003f8); + +/* write32 #1 NV_PRAMIN.GRCTX0+0x3468 <- 0x002fe000 */ + INSTANCE_WR(ctx, 0x3468/4, 0x002fe000); + +/* +write32 #1 block at +0x00743ba4 NV_PRAMIN.GRCTX0+0x3484 of 8 (0x8) elements: ++0x00743ba4: 001c527c 001c527c 001c527c 001c527c 001c527c 001c527c 001c527c 001c527c +*/ + for (i=0; i<8; ++i) + 
INSTANCE_WR(ctx, (0x3484/4)+i, 0x001c527c); } -static void nv30_31_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) +static void nv30_31_graph_context_init(struct drm_device *dev, + struct nouveau_gpuobj *ctx) { struct drm_nouveau_private *dev_priv = dev->dev_private; int i; @@ -933,7 +1083,8 @@ static void nv30_31_graph_context_init(struct drm_device *dev, struct nouveau_gp INSTANCE_WR(ctx, 0x386c/4, 0xbf800000); } -static void nv34_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) +static void nv34_graph_context_init(struct drm_device *dev, + struct nouveau_gpuobj *ctx) { struct drm_nouveau_private *dev_priv = dev->dev_private; int i; @@ -1826,7 +1977,8 @@ static void nv34_graph_context_init(struct drm_device *dev, struct nouveau_gpuob INSTANCE_WR(ctx, 0x2f00/4, 0xbf800000); } -static void nv35_36_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) +static void nv35_36_graph_context_init(struct drm_device *dev, + struct nouveau_gpuobj *ctx) { struct drm_nouveau_private *dev_priv = dev->dev_private; int i; From a67060c810613059b71c14e9fa91ea114fcf0106 Mon Sep 17 00:00:00 2001 From: Pekka Paalanen Date: Sun, 30 Sep 2007 18:14:24 +0300 Subject: [PATCH 345/437] nouveau: graph ctx init nv25 According to mmio_trace_900XGL.tar.bz2 by Evan Fraser the nv25 init is exactly the same as nv28 init. --- shared-core/nv30_graph.c | 1 + 1 file changed, 1 insertion(+) diff --git a/shared-core/nv30_graph.c b/shared-core/nv30_graph.c index 951947df..0ec4f096 100644 --- a/shared-core/nv30_graph.c +++ b/shared-core/nv30_graph.c @@ -2879,6 +2879,7 @@ int nv30_graph_create_context(struct nouveau_channel *chan) int ret; switch (dev_priv->chipset) { + case 0x25: case 0x28: ctx_size = NV28_GRCTX_SIZE; ctx_init = nv28_graph_context_init; From 205403aea8213ffc0e36f4103d78d62bf1584a69 Mon Sep 17 00:00:00 2001 From: Pekka Paalanen Date: Sun, 30 Sep 2007 21:10:06 +0300 Subject: [PATCH 346/437] nouveau: nv30 graph function renames, removed nv20_graph.c All nv30 functions in nv30_graph.c that can be used on nv20 are renamed as accordingly. nv20 specific parts from nv20_graph.c are moved into nv30_graph.c. 
--- linux-core/Makefile.kernel | 2 +- linux-core/nv20_graph.c | 1 - shared-core/nouveau_drv.h | 14 +- shared-core/nouveau_state.c | 18 +-- shared-core/nv20_graph.c | 251 ------------------------------------ shared-core/nv30_graph.c | 144 ++++++++++++++++++--- 6 files changed, 137 insertions(+), 293 deletions(-) delete mode 120000 linux-core/nv20_graph.c delete mode 100644 shared-core/nv20_graph.c diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel index b282bd05..6a06d867 100644 --- a/linux-core/Makefile.kernel +++ b/linux-core/Makefile.kernel @@ -27,7 +27,7 @@ nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \ nv04_mc.o nv40_mc.o nv50_mc.o \ nv04_fb.o nv10_fb.o nv40_fb.o \ nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \ - nv04_graph.o nv10_graph.o nv20_graph.o nv30_graph.o \ + nv04_graph.o nv10_graph.o nv30_graph.o \ nv40_graph.o nv50_graph.o \ nv04_instmem.o nv50_instmem.o radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o diff --git a/linux-core/nv20_graph.c b/linux-core/nv20_graph.c deleted file mode 120000 index 73049914..00000000 --- a/linux-core/nv20_graph.c +++ /dev/null @@ -1 +0,0 @@ -../shared-core/nv20_graph.c \ No newline at end of file diff --git a/shared-core/nouveau_drv.h b/shared-core/nouveau_drv.h index 02a4ee75..85a2dd57 100644 --- a/shared-core/nouveau_drv.h +++ b/shared-core/nouveau_drv.h @@ -490,22 +490,14 @@ extern void nv10_graph_destroy_context(struct nouveau_channel *); extern int nv10_graph_load_context(struct nouveau_channel *); extern int nv10_graph_save_context(struct nouveau_channel *); -/* nv20_graph.c */ -extern void nouveau_nv20_context_switch(struct drm_device *); -extern int nv20_graph_init(struct drm_device *); -extern void nv20_graph_takedown(struct drm_device *); +/* nv30_graph.c */ extern int nv20_graph_create_context(struct nouveau_channel *); extern void nv20_graph_destroy_context(struct nouveau_channel *); extern int nv20_graph_load_context(struct nouveau_channel *); extern int nv20_graph_save_context(struct nouveau_channel *); - -/* nv30_graph.c */ +extern int nv20_graph_init(struct drm_device *); +extern void nv20_graph_takedown(struct drm_device *); extern int nv30_graph_init(struct drm_device *); -extern void nv30_graph_takedown(struct drm_device *); -extern int nv30_graph_create_context(struct nouveau_channel *); -extern void nv30_graph_destroy_context(struct nouveau_channel *); -extern int nv30_graph_load_context(struct nouveau_channel *); -extern int nv30_graph_save_context(struct nouveau_channel *); /* nv40_graph.c */ extern int nv40_graph_init(struct drm_device *); diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c index f8dd3ad5..cba12b57 100644 --- a/shared-core/nouveau_state.c +++ b/shared-core/nouveau_state.c @@ -166,10 +166,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->fb.takedown = nv10_fb_takedown; engine->graph.init = nv20_graph_init; engine->graph.takedown = nv20_graph_takedown; - engine->graph.create_context = nv30_graph_create_context; - engine->graph.destroy_context = nv30_graph_destroy_context; - engine->graph.load_context = nv30_graph_load_context; - engine->graph.save_context = nv30_graph_save_context; + engine->graph.create_context = nv20_graph_create_context; + engine->graph.destroy_context = nv20_graph_destroy_context; + engine->graph.load_context = nv20_graph_load_context; + engine->graph.save_context = nv20_graph_save_context; engine->fifo.init = nouveau_fifo_init; 
engine->fifo.takedown = nouveau_stub_takedown; engine->fifo.create_context = nv10_fifo_create_context; @@ -192,11 +192,11 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->fb.init = nv10_fb_init; engine->fb.takedown = nv10_fb_takedown; engine->graph.init = nv30_graph_init; - engine->graph.takedown = nv30_graph_takedown; - engine->graph.create_context = nv30_graph_create_context; - engine->graph.destroy_context = nv30_graph_destroy_context; - engine->graph.load_context = nv30_graph_load_context; - engine->graph.save_context = nv30_graph_save_context; + engine->graph.takedown = nv20_graph_takedown; + engine->graph.create_context = nv20_graph_create_context; + engine->graph.destroy_context = nv20_graph_destroy_context; + engine->graph.load_context = nv20_graph_load_context; + engine->graph.save_context = nv20_graph_save_context; engine->fifo.init = nouveau_fifo_init; engine->fifo.takedown = nouveau_stub_takedown; engine->fifo.create_context = nv10_fifo_create_context; diff --git a/shared-core/nv20_graph.c b/shared-core/nv20_graph.c deleted file mode 100644 index de97a591..00000000 --- a/shared-core/nv20_graph.c +++ /dev/null @@ -1,251 +0,0 @@ -/* - * Copyright 2007 Matthieu CASTET - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "drmP.h" -#include "drm.h" -#include "nouveau_drv.h" -#include "nouveau_drm.h" - -#define NV20_GRCTX_SIZE (3529*4) - -#if 0 -int nv20_graph_create_context(struct nouveau_channel *chan) { - struct drm_device *dev = chan->dev; - struct drm_nouveau_private *dev_priv = dev->dev_private; - unsigned int ctx_size = NV20_GRCTX_SIZE; - int ret; - - if ((ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, ctx_size, 16, - NVOBJ_FLAG_ZERO_ALLOC, - &chan->ramin_grctx))) - return ret; - - /* Initialise default context values */ - INSTANCE_WR(chan->ramin_grctx->gpuobj, 10, chan->id<<24); /* CTX_USER */ - - INSTANCE_WR(dev_priv->ctx_table->gpuobj, chan->id, - chan->ramin_grctx->instance >> 4); - return 0; -} - -void nv20_graph_destroy_context(struct nouveau_channel *chan) { - struct drm_device *dev = chan->dev; - struct drm_nouveau_private *dev_priv = dev->dev_private; - - nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx); - - INSTANCE_WR(dev_priv->ctx_table->gpuobj, chan->id, 0); -} - -/* Save current context (from PGRAPH) into the channel's context - */ -int nv20_graph_save_context(struct nouveau_channel *chan) { - struct drm_device *dev = chan->dev; - struct drm_nouveau_private *dev_priv = dev->dev_private; - uint32_t instance; - - instance = INSTANCE_RD(dev_priv->ctx_table->gpuobj, chan->id); - if (!instance) { - return -EINVAL; - } - if (instance != (chan->ramin_grctx->instance >> 4)) - DRM_ERROR("nv20_graph_save_context : bad instance\n"); - - NV_WRITE(NV10_PGRAPH_CHANNEL_CTX_SIZE, instance); - NV_WRITE(NV10_PGRAPH_CHANNEL_CTX_POINTER, 2 /* save ctx */); - return 0; -} - - -/* Restore the context for a specific channel into PGRAPH - */ -int nv20_graph_load_context(struct nouveau_channel *chan) { - struct drm_device *dev = chan->dev; - struct drm_nouveau_private *dev_priv = dev->dev_private; - uint32_t instance; - - instance = INSTANCE_RD(dev_priv->ctx_table->gpuobj, chan->id); - if (!instance) { - return -EINVAL; - } - if (instance != (chan->ramin_grctx->instance >> 4)) - DRM_ERROR("nv20_graph_load_context_current : bad instance\n"); - - NV_WRITE(NV10_PGRAPH_CTX_USER, chan->id << 24); - NV_WRITE(NV10_PGRAPH_CHANNEL_CTX_SIZE, instance); - NV_WRITE(NV10_PGRAPH_CHANNEL_CTX_POINTER, 1 /* restore ctx */); - return 0; -} - -void nouveau_nv20_context_switch(struct drm_device *dev) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_channel *next, *last; - int chid; - - chid = NV_READ(NV03_PFIFO_CACHE1_PUSH1)&(nouveau_fifo_number(dev)-1); - next = dev_priv->fifos[chid]; - - chid = (NV_READ(NV10_PGRAPH_CTX_USER) >> 24) & (nouveau_fifo_number(dev)-1); - last = dev_priv->fifos[chid]; - - DRM_DEBUG("NV: PGRAPH context switch interrupt channel %x -> %x\n", - last->id, next->id); - - NV_WRITE(NV04_PGRAPH_FIFO,0x0); - - nv20_graph_save_context(last); - - nouveau_wait_for_idle(dev); - - NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10000000); - - nv20_graph_load_context(next); - - nouveau_wait_for_idle(dev); - - if ((NV_READ(NV10_PGRAPH_CTX_USER) >> 24) != next->id) - DRM_ERROR("nouveau_nv20_context_switch : wrong channel restored %x %x!!!\n", next->id, NV_READ(NV10_PGRAPH_CTX_USER) >> 24); - - NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10010100); - NV_WRITE(NV10_PGRAPH_FFINTFC_ST2, NV_READ(NV10_PGRAPH_FFINTFC_ST2)&0xCFFFFFFF); - - NV_WRITE(NV04_PGRAPH_FIFO,0x1); -} -#endif /* 0 */ - -static void nv20_graph_rdi(struct drm_device *dev) { - struct drm_nouveau_private *dev_priv = dev->dev_private; - int i; - - NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x2c80000); - for (i = 0; i < 32; i++) - 
NV_WRITE(NV10_PGRAPH_RDI_DATA, 0); - - nouveau_wait_for_idle(dev); -} - -int nv20_graph_init(struct drm_device *dev) { - struct drm_nouveau_private *dev_priv = - (struct drm_nouveau_private *)dev->dev_private; - uint32_t tmp, vramsz; - int ret, i; - - NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) & - ~NV_PMC_ENABLE_PGRAPH); - NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) | - NV_PMC_ENABLE_PGRAPH); - - /* Create Context Pointer Table */ - dev_priv->ctx_table_size = 32 * 4; - if ((ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, - dev_priv->ctx_table_size, 16, - NVOBJ_FLAG_ZERO_ALLOC, - &dev_priv->ctx_table))) - return ret; - - NV_WRITE(NV10_PGRAPH_CHANNEL_CTX_TABLE, - dev_priv->ctx_table->instance >> 4); - - //XXX need to be done and save/restore for each fifo ??? - nv20_graph_rdi(dev); - - NV_WRITE(NV03_PGRAPH_INTR , 0xFFFFFFFF); - NV_WRITE(NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); - - NV_WRITE(NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF); - NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x00000000); - NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x00118700); - NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xF20E0435); /* 0x4 = auto ctx switch */ - NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x00000000); - NV_WRITE(0x40009C , 0x00000040); - - if (dev_priv->chipset >= 0x25) { - NV_WRITE(0x400890, 0x00080000); - NV_WRITE(0x400610, 0x304B1FB6); - NV_WRITE(0x400B80, 0x18B82880); - NV_WRITE(0x400B84, 0x44000000); - NV_WRITE(0x400098, 0x40000080); - NV_WRITE(0x400B88, 0x000000ff); - } else { - NV_WRITE(0x400880, 0x00080000); - NV_WRITE(0x400094, 0x00000005); - NV_WRITE(0x400B80, 0x45CAA208); - NV_WRITE(0x400B84, 0x24000000); - NV_WRITE(0x400098, 0x00000040); - NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00E00038); - NV_WRITE(NV10_PGRAPH_RDI_DATA , 0x00000030); - NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00E10038); - NV_WRITE(NV10_PGRAPH_RDI_DATA , 0x00000030); - } - - /* copy tile info from PFB */ - for (i=0; idev_private; - - nouveau_gpuobj_ref_del(dev, &dev_priv->ctx_table); -} - diff --git a/shared-core/nv30_graph.c b/shared-core/nv30_graph.c index 0ec4f096..8ca1f84c 100644 --- a/shared-core/nv30_graph.c +++ b/shared-core/nv30_graph.c @@ -1,7 +1,3 @@ -/* - * Based on nv40_graph.c - * Someday this will all go away... 
- */ #include "drmP.h" #include "drm.h" #include "nouveau_drv.h" @@ -25,14 +21,14 @@ /*#define NV20_GRCTX_SIZE (3529*4)*/ -#define NV28_GRCTX_SIZE (3529*4) +#define NV25_GRCTX_SIZE (3529*4) #define NV30_31_GRCTX_SIZE (22392) #define NV34_GRCTX_SIZE (18140) #define NV35_36_GRCTX_SIZE (22396) -static void nv28_graph_context_init(struct drm_device *dev, +static void nv25_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) { struct drm_nouveau_private *dev_priv = dev->dev_private; @@ -2868,9 +2864,10 @@ static void nv35_36_graph_context_init(struct drm_device *dev, INSTANCE_WR(ctx, 0x385c/4, 0x40000000); INSTANCE_WR(ctx, 0x3860/4, 0x3f800000); INSTANCE_WR(ctx, 0x3868/4, 0xbf800000); - INSTANCE_WR(ctx, 0x3870/4, 0xbf800000);} + INSTANCE_WR(ctx, 0x3870/4, 0xbf800000); +} -int nv30_graph_create_context(struct nouveau_channel *chan) +int nv20_graph_create_context(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; @@ -2881,8 +2878,8 @@ int nv30_graph_create_context(struct nouveau_channel *chan) switch (dev_priv->chipset) { case 0x25: case 0x28: - ctx_size = NV28_GRCTX_SIZE; - ctx_init = nv28_graph_context_init; + ctx_size = NV25_GRCTX_SIZE; + ctx_init = nv25_graph_context_init; break; case 0x30: case 0x31: @@ -2925,7 +2922,7 @@ int nv30_graph_create_context(struct nouveau_channel *chan) return 0; } -void nv30_graph_destroy_context(struct nouveau_channel *chan) +void nv20_graph_destroy_context(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; @@ -2954,7 +2951,7 @@ nouveau_graph_wait_idle(struct drm_device *dev) return 0; } -int nv30_graph_load_context(struct nouveau_channel *chan) +int nv20_graph_load_context(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; @@ -2971,7 +2968,7 @@ int nv30_graph_load_context(struct nouveau_channel *chan) return nouveau_graph_wait_idle(dev); } -int nv30_graph_save_context(struct nouveau_channel *chan) +int nv20_graph_save_context(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; @@ -2988,6 +2985,120 @@ int nv30_graph_save_context(struct nouveau_channel *chan) return nouveau_graph_wait_idle(dev); } +static void nv20_graph_rdi(struct drm_device *dev) { + struct drm_nouveau_private *dev_priv = dev->dev_private; + int i; + + NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x2c80000); + for (i = 0; i < 32; i++) + NV_WRITE(NV10_PGRAPH_RDI_DATA, 0); + + nouveau_wait_for_idle(dev); +} + +int nv20_graph_init(struct drm_device *dev) { + struct drm_nouveau_private *dev_priv = + (struct drm_nouveau_private *)dev->dev_private; + uint32_t tmp, vramsz; + int ret, i; + + NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) & + ~NV_PMC_ENABLE_PGRAPH); + NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) | + NV_PMC_ENABLE_PGRAPH); + + /* Create Context Pointer Table */ + dev_priv->ctx_table_size = 32 * 4; + if ((ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, + dev_priv->ctx_table_size, 16, + NVOBJ_FLAG_ZERO_ALLOC, + &dev_priv->ctx_table))) + return ret; + + NV_WRITE(NV10_PGRAPH_CHANNEL_CTX_TABLE, + dev_priv->ctx_table->instance >> 4); + + //XXX need to be done and save/restore for each fifo ??? 
+ nv20_graph_rdi(dev); + + NV_WRITE(NV03_PGRAPH_INTR , 0xFFFFFFFF); + NV_WRITE(NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); + + NV_WRITE(NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF); + NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x00000000); + NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x00118700); + NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xF20E0435); /* 0x4 = auto ctx switch */ + NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x00000000); + NV_WRITE(0x40009C , 0x00000040); + + if (dev_priv->chipset >= 0x25) { + NV_WRITE(0x400890, 0x00080000); + NV_WRITE(0x400610, 0x304B1FB6); + NV_WRITE(0x400B80, 0x18B82880); + NV_WRITE(0x400B84, 0x44000000); + NV_WRITE(0x400098, 0x40000080); + NV_WRITE(0x400B88, 0x000000ff); + } else { + NV_WRITE(0x400880, 0x00080000); + NV_WRITE(0x400094, 0x00000005); + NV_WRITE(0x400B80, 0x45CAA208); + NV_WRITE(0x400B84, 0x24000000); + NV_WRITE(0x400098, 0x00000040); + NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00E00038); + NV_WRITE(NV10_PGRAPH_RDI_DATA , 0x00000030); + NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00E10038); + NV_WRITE(NV10_PGRAPH_RDI_DATA , 0x00000030); + } + + /* copy tile info from PFB */ + for (i=0; idev_private; + + nouveau_gpuobj_ref_del(dev, &dev_priv->ctx_table); +} + int nv30_graph_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; @@ -3077,10 +3188,3 @@ int nv30_graph_init(struct drm_device *dev) return 0; } -void nv30_graph_takedown(struct drm_device *dev) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - - nouveau_gpuobj_ref_del(dev, &dev_priv->ctx_table); -} - From aa135ba8e86d43a738973a25d638b7dc4cdddc55 Mon Sep 17 00:00:00 2001 From: Pekka Paalanen Date: Sun, 30 Sep 2007 22:04:53 +0300 Subject: [PATCH 347/437] nouveau: rename nv30_graph.c to nv20_graph.c --- linux-core/Makefile.kernel | 2 +- linux-core/nv20_graph.c | 1 + linux-core/nv30_graph.c | 1 - shared-core/nouveau_drv.h | 2 +- shared-core/{nv30_graph.c => nv20_graph.c} | 0 5 files changed, 3 insertions(+), 3 deletions(-) create mode 120000 linux-core/nv20_graph.c delete mode 120000 linux-core/nv30_graph.c rename shared-core/{nv30_graph.c => nv20_graph.c} (100%) diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel index 6a06d867..0eb10783 100644 --- a/linux-core/Makefile.kernel +++ b/linux-core/Makefile.kernel @@ -27,7 +27,7 @@ nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \ nv04_mc.o nv40_mc.o nv50_mc.o \ nv04_fb.o nv10_fb.o nv40_fb.o \ nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \ - nv04_graph.o nv10_graph.o nv30_graph.o \ + nv04_graph.o nv10_graph.o nv20_graph.o \ nv40_graph.o nv50_graph.o \ nv04_instmem.o nv50_instmem.o radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o diff --git a/linux-core/nv20_graph.c b/linux-core/nv20_graph.c new file mode 120000 index 00000000..73049914 --- /dev/null +++ b/linux-core/nv20_graph.c @@ -0,0 +1 @@ +../shared-core/nv20_graph.c \ No newline at end of file diff --git a/linux-core/nv30_graph.c b/linux-core/nv30_graph.c deleted file mode 120000 index 25568ecb..00000000 --- a/linux-core/nv30_graph.c +++ /dev/null @@ -1 +0,0 @@ -../shared-core/nv30_graph.c \ No newline at end of file diff --git a/shared-core/nouveau_drv.h b/shared-core/nouveau_drv.h index 85a2dd57..e5cef075 100644 --- a/shared-core/nouveau_drv.h +++ b/shared-core/nouveau_drv.h @@ -490,7 +490,7 @@ extern void nv10_graph_destroy_context(struct nouveau_channel *); extern int nv10_graph_load_context(struct nouveau_channel *); extern int nv10_graph_save_context(struct nouveau_channel *); -/* nv30_graph.c */ +/* nv20_graph.c */ extern int 
nv20_graph_create_context(struct nouveau_channel *); extern void nv20_graph_destroy_context(struct nouveau_channel *); extern int nv20_graph_load_context(struct nouveau_channel *); diff --git a/shared-core/nv30_graph.c b/shared-core/nv20_graph.c similarity index 100% rename from shared-core/nv30_graph.c rename to shared-core/nv20_graph.c From 9cd6ece3079373eddff320a1d3e09bfe2a35be83 Mon Sep 17 00:00:00 2001 From: Matthieu Castet Date: Sun, 30 Sep 2007 23:09:30 +0200 Subject: [PATCH 348/437] nouveau : nv20_graph replace nouveau_graph_wait_idle by nouveau_wait_for_idle Also clean PGRAPH_CHANNEL macros --- shared-core/nouveau_reg.h | 4 +--- shared-core/nv20_graph.c | 30 +++++++----------------------- 2 files changed, 8 insertions(+), 26 deletions(-) diff --git a/shared-core/nouveau_reg.h b/shared-core/nouveau_reg.h index 21133d98..59b69547 100644 --- a/shared-core/nouveau_reg.h +++ b/shared-core/nouveau_reg.h @@ -286,10 +286,8 @@ #define NV10_PGRAPH_DMA_PITCH 0x00400770 #define NV10_PGRAPH_DVD_COLORFMT 0x00400774 #define NV10_PGRAPH_SCALED_FORMAT 0x00400778 -#define NV10_PGRAPH_CHANNEL_CTX_TABLE 0x00400780 -#define NV10_PGRAPH_CHANNEL_CTX_SIZE 0x00400784 +#define NV20_PGRAPH_CHANNEL_CTX_TABLE 0x00400780 #define NV20_PGRAPH_CHANNEL_CTX_POINTER 0x00400784 -#define NV10_PGRAPH_CHANNEL_CTX_POINTER 0x00400788 #define NV20_PGRAPH_CHANNEL_CTX_XFER 0x00400788 #define NV20_PGRAPH_CHANNEL_CTX_XFER_LOAD 0x00000001 #define NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE 0x00000002 diff --git a/shared-core/nv20_graph.c b/shared-core/nv20_graph.c index 8ca1f84c..f87d3138 100644 --- a/shared-core/nv20_graph.c +++ b/shared-core/nv20_graph.c @@ -2933,24 +2933,6 @@ void nv20_graph_destroy_context(struct nouveau_channel *chan) INSTANCE_WR(dev_priv->ctx_table->gpuobj, chan->id, 0); } -static int -nouveau_graph_wait_idle(struct drm_device *dev) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - int tv = 1000; - - while (tv--) { - if (NV_READ(NV04_PGRAPH_STATUS) == 0) - break; - } - - if (NV_READ(NV04_PGRAPH_STATUS)) { - DRM_ERROR("timeout!\n"); - return -EBUSY; - } - return 0; -} - int nv20_graph_load_context(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; @@ -2965,7 +2947,8 @@ int nv20_graph_load_context(struct nouveau_channel *chan) NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_XFER, NV20_PGRAPH_CHANNEL_CTX_XFER_LOAD); - return nouveau_graph_wait_idle(dev); + nouveau_wait_for_idle(dev); + return 0; } int nv20_graph_save_context(struct nouveau_channel *chan) @@ -2982,7 +2965,8 @@ int nv20_graph_save_context(struct nouveau_channel *chan) NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_XFER, NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE); - return nouveau_graph_wait_idle(dev); + nouveau_wait_for_idle(dev); + return 0; } static void nv20_graph_rdi(struct drm_device *dev) { @@ -3015,7 +2999,7 @@ int nv20_graph_init(struct drm_device *dev) { &dev_priv->ctx_table))) return ret; - NV_WRITE(NV10_PGRAPH_CHANNEL_CTX_TABLE, + NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_TABLE, dev_priv->ctx_table->instance >> 4); //XXX need to be done and save/restore for each fifo ??? 
@@ -3118,7 +3102,7 @@ int nv30_graph_init(struct drm_device *dev) &dev_priv->ctx_table))) return ret; - NV_WRITE(NV10_PGRAPH_CHANNEL_CTX_TABLE, + NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_TABLE, dev_priv->ctx_table->instance >> 4); NV_WRITE(NV03_PGRAPH_INTR , 0xFFFFFFFF); @@ -3144,7 +3128,7 @@ int nv30_graph_init(struct drm_device *dev) NV_WRITE(0x400B84, 0x0c000000); NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x62ff0f7f); NV_WRITE(0x4000c0, 0x00000016); - NV_WRITE(NV10_PGRAPH_CHANNEL_CTX_TABLE, 0x000014e4); + NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_TABLE, 0x000014e4); /* copy tile info from PFB */ for (i=0; i Date: Sun, 30 Sep 2007 23:19:39 +0200 Subject: [PATCH 349/437] nouveau : nv30 remove harcoded NV20_PGRAPH_CHANNEL_CTX_TABLE --- shared-core/nv20_graph.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/shared-core/nv20_graph.c b/shared-core/nv20_graph.c index f87d3138..233afd37 100644 --- a/shared-core/nv20_graph.c +++ b/shared-core/nv20_graph.c @@ -3002,7 +3002,6 @@ int nv20_graph_init(struct drm_device *dev) { NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_TABLE, dev_priv->ctx_table->instance >> 4); - //XXX need to be done and save/restore for each fifo ??? nv20_graph_rdi(dev); NV_WRITE(NV03_PGRAPH_INTR , 0xFFFFFFFF); @@ -3094,16 +3093,16 @@ int nv30_graph_init(struct drm_device *dev) NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH); - /* Create Context Pointer Table */ - dev_priv->ctx_table_size = 32 * 4; + /* Create Context Pointer Table */ + dev_priv->ctx_table_size = 32 * 4; if ((ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, dev_priv->ctx_table_size, 16, NVOBJ_FLAG_ZERO_ALLOC, &dev_priv->ctx_table))) return ret; - NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_TABLE, - dev_priv->ctx_table->instance >> 4); + NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_TABLE, + dev_priv->ctx_table->instance >> 4); NV_WRITE(NV03_PGRAPH_INTR , 0xFFFFFFFF); NV_WRITE(NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); @@ -3128,7 +3127,6 @@ int nv30_graph_init(struct drm_device *dev) NV_WRITE(0x400B84, 0x0c000000); NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x62ff0f7f); NV_WRITE(0x4000c0, 0x00000016); - NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_TABLE, 0x000014e4); /* copy tile info from PFB */ for (i=0; i Date: Mon, 1 Oct 2007 03:28:10 +0200 Subject: [PATCH 350/437] nouveau: flip the ctx switch bit on. it seems to be ignored on nv34 but causes nv30 issues. --- shared-core/nv20_graph.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/shared-core/nv20_graph.c b/shared-core/nv20_graph.c index 233afd37..8291f214 100644 --- a/shared-core/nv20_graph.c +++ b/shared-core/nv20_graph.c @@ -3111,7 +3111,7 @@ int nv30_graph_init(struct drm_device *dev) NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x00000000); NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x401287c0); NV_WRITE(0x400890, 0x01b463ff); - NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xf3de0471); + NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xf3de0475); NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x00008000); NV_WRITE(NV04_PGRAPH_LIMIT_VIOL_PIX, 0xf04bdff6); NV_WRITE(0x400B80, 0x1003d888); From 69fcfb413e72ad2204d306f20af6547819e040da Mon Sep 17 00:00:00 2001 From: Maarten Maathuis Date: Mon, 1 Oct 2007 22:21:23 +0200 Subject: [PATCH 351/437] nouveau: Fix dereferencing a NULL pointer when erroring out during initialization. 
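Background on the crash: if driver load aborts early, the DRM core still calls the lastclose hook, and the old code dereferenced dev->dev_private unconditionally. Below is a minimal sketch of the guarded-teardown pattern the fix adopts; the type and function names are simplified stand-ins for illustration, not the driver's real ones.

	struct priv_like {
		int card_type;    /* stays 0 until the card is fully brought up */
		int fb_mtrr;
	};

	static void card_takedown(struct priv_like *p) { (void)p; }

	static void lastclose_like(struct priv_like *dev_priv)
	{
		/* dev_priv may be NULL, or only partially initialised, when
		 * initialisation failed, so check before touching it. */
		if (dev_priv && dev_priv->card_type) {
			card_takedown(dev_priv);
			if (dev_priv->fb_mtrr > 0)
				dev_priv->fb_mtrr = 0;
		}
	}
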
--- shared-core/nouveau_state.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c index cba12b57..cb19c880 100644 --- a/shared-core/nouveau_state.c +++ b/shared-core/nouveau_state.c @@ -423,12 +423,15 @@ void nouveau_lastclose(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - nouveau_card_takedown(dev); + /* In the case of an error dev_priv may not be be allocated yet */ + if (dev_priv && dev_priv->card_type) { + nouveau_card_takedown(dev); - if(dev_priv->fb_mtrr>0) - { - drm_mtrr_del(dev_priv->fb_mtrr, drm_get_resource_start(dev, 1),nouveau_mem_fb_amount(dev), DRM_MTRR_WC); - dev_priv->fb_mtrr=0; + if(dev_priv->fb_mtrr>0) + { + drm_mtrr_del(dev_priv->fb_mtrr, drm_get_resource_start(dev, 1),nouveau_mem_fb_amount(dev), DRM_MTRR_WC); + dev_priv->fb_mtrr=0; + } } } From b0473699ed7bef4efd0742e0a350d345a7cc9a0c Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 2 Oct 2007 15:48:28 +1000 Subject: [PATCH 352/437] ttm: returning into dummy causes a buffer object leak as nobody ever derefs dummy, however not returning does the deref correctly. --- linux-core/drm_bo.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index bcbcc662..4e735770 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1767,7 +1767,6 @@ int drm_bo_op_ioctl(struct drm_device *dev, void *data, struct drm_file *file_pr struct drm_bo_op_arg *arg = data; struct drm_bo_op_req *req = &arg->d.req; struct drm_bo_info_rep rep; - struct drm_buffer_object *dummy; unsigned long next = 0; void __user *curuserarg = NULL; int ret; @@ -1804,7 +1803,7 @@ int drm_bo_op_ioctl(struct drm_device *dev, void *data, struct drm_file *file_pr req->bo_req.flags, req->bo_req.mask, req->bo_req.hint, - &rep, &dummy); + &rep, NULL); break; case drm_bo_fence: ret = -EINVAL; From ffa3173ec4bb5a310b3f8539bb6c2f8589ce2ed5 Mon Sep 17 00:00:00 2001 From: Stuart Bennett Date: Tue, 2 Oct 2007 15:45:30 +0100 Subject: [PATCH 353/437] nouveau: nv20 graph context init --- shared-core/nv20_graph.c | 140 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 138 insertions(+), 2 deletions(-) diff --git a/shared-core/nv20_graph.c b/shared-core/nv20_graph.c index 8291f214..213d60cc 100644 --- a/shared-core/nv20_graph.c +++ b/shared-core/nv20_graph.c @@ -19,14 +19,146 @@ * */ -/*#define NV20_GRCTX_SIZE (3529*4)*/ - +#define NV20_GRCTX_SIZE (3580*4) #define NV25_GRCTX_SIZE (3529*4) #define NV30_31_GRCTX_SIZE (22392) #define NV34_GRCTX_SIZE (18140) #define NV35_36_GRCTX_SIZE (22396) +static void nv20_graph_context_init(struct drm_device *dev, + struct nouveau_gpuobj *ctx) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + int i; +/* +write32 #1 block at +0x00740adc NV_PRAMIN+0x40adc of 3369 (0xd29) elements: ++0x00740adc: ffff0000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 ++0x00740afc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 ++0x00740b1c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 ++0x00740b3c: 00000000 0fff0000 0fff0000 00000000 00000000 00000000 00000000 00000000 ++0x00740b5c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 ++0x00740b7c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 ++0x00740b9c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 ++0x00740bbc: 00000000 00000000 00000000 00000000 00000000 00000000 
00000000 00000000 ++0x00740bdc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 ++0x00740bfc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 + ++0x00740c1c: 00000101 00000000 00000000 00000000 00000000 00000111 00000000 00000000 ++0x00740c3c: 00000000 00000000 00000000 44400000 00000000 00000000 00000000 00000000 ++0x00740c5c: 00000000 00000000 00000000 00000000 00000000 00000000 00030303 00030303 ++0x00740c7c: 00030303 00030303 00000000 00000000 00000000 00000000 00080000 00080000 ++0x00740c9c: 00080000 00080000 00000000 00000000 01012000 01012000 01012000 01012000 ++0x00740cbc: 000105b8 000105b8 000105b8 000105b8 00080008 00080008 00080008 00080008 ++0x00740cdc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 ++0x00740cfc: 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 ++0x00740d1c: 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 ++0x00740d3c: 00000000 00000000 4b7fffff 00000000 00000000 00000000 00000000 00000000 + ++0x00740d5c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 ++0x00740d7c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 ++0x00740d9c: 00000001 00000000 00004000 00000000 00000000 00000001 00000000 00040000 ++0x00740dbc: 00010000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 ++0x00740ddc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +... +*/ + INSTANCE_WR(ctx, (0x33c/4)+0, 0xffff0000); + INSTANCE_WR(ctx, (0x33c/4)+25, 0x0fff0000); + INSTANCE_WR(ctx, (0x33c/4)+26, 0x0fff0000); + INSTANCE_WR(ctx, (0x33c/4)+80, 0x00000101); + INSTANCE_WR(ctx, (0x33c/4)+85, 0x00000111); + INSTANCE_WR(ctx, (0x33c/4)+91, 0x44400000); + for (i = 0; i < 4; ++i) + INSTANCE_WR(ctx, (0x33c/4)+102+i, 0x00030303); + for (i = 0; i < 4; ++i) + INSTANCE_WR(ctx, (0x33c/4)+110+i, 0x00080000); + for (i = 0; i < 4; ++i) + INSTANCE_WR(ctx, (0x33c/4)+116+i, 0x01012000); + for (i = 0; i < 4; ++i) + INSTANCE_WR(ctx, (0x33c/4)+120+i, 0x000105b8); + for (i = 0; i < 4; ++i) + INSTANCE_WR(ctx, (0x33c/4)+124+i, 0x00080008); + for (i = 0; i < 16; ++i) + INSTANCE_WR(ctx, (0x33c/4)+136+i, 0x07ff0000); + INSTANCE_WR(ctx, (0x33c/4)+154, 0x4b7ffff); + INSTANCE_WR(ctx, (0x33c/4)+176, 0x00000001); + INSTANCE_WR(ctx, (0x33c/4)+178, 0x00004000); + INSTANCE_WR(ctx, (0x33c/4)+181, 0x00000001); + INSTANCE_WR(ctx, (0x33c/4)+183, 0x00004000); + INSTANCE_WR(ctx, (0x33c/4)+184, 0x00010000); + +/* +... ++0x0074239c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 ++0x007423bc: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 ++0x007423dc: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 ++0x007423fc: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 +... ++0x00742bdc: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 ++0x00742bfc: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 ++0x00742c1c: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 ++0x00742c3c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +... 
+*/ + for (i = 0; i < 0x880; i += 0x10) { + INSTANCE_WR(ctx, ((0x1c1c + i)/4)+0, 0x10700ff9); + INSTANCE_WR(ctx, ((0x1c1c + i)/4)+1, 0x0436086c); + INSTANCE_WR(ctx, ((0x1c1c + i)/4)+2, 0x000c001b); + } + +/* +write32 #1 block at +0x00742fbc NV_PRAMIN+0x42fbc of 4 (0x4) elements: ++0x00742fbc: 3f800000 00000000 00000000 00000000 +*/ + INSTANCE_WR(ctx, (0x281c/4), 0x3f800000); + +/* +write32 #1 block at +0x00742ffc NV_PRAMIN+0x42ffc of 12 (0xc) elements: ++0x00742ffc: 40000000 3f800000 3f000000 00000000 40000000 3f800000 00000000 bf800000 ++0x0074301c: 00000000 bf800000 00000000 00000000 +*/ + INSTANCE_WR(ctx, (0x285c/4)+0, 0x40000000); + INSTANCE_WR(ctx, (0x285c/4)+1, 0x3f800000); + INSTANCE_WR(ctx, (0x285c/4)+2, 0x3f000000); + INSTANCE_WR(ctx, (0x285c/4)+4, 0x40000000); + INSTANCE_WR(ctx, (0x285c/4)+5, 0x3f800000); + INSTANCE_WR(ctx, (0x285c/4)+7, 0xbf800000); + INSTANCE_WR(ctx, (0x285c/4)+9, 0xbf800000); + +/* +write32 #1 block at +0x00742fcc NV_PRAMIN+0x42fcc of 4 (0x4) elements: ++0x00742fcc: 00000000 3f800000 00000000 00000000 +*/ + INSTANCE_WR(ctx, (0x282c/4)+1, 0x3f800000); + +/* +write32 #1 block at +0x0074302c NV_PRAMIN+0x4302c of 4 (0x4) elements: ++0x0074302c: 00000000 00000000 00000000 00000000 +write32 #1 block at +0x00743c9c NV_PRAMIN+0x43c9c of 4 (0x4) elements: ++0x00743c9c: 00000000 00000000 00000000 00000000 +write32 #1 block at +0x00743c3c NV_PRAMIN+0x43c3c of 8 (0x8) elements: ++0x00743c3c: 00000000 00000000 000fe000 00000000 00000000 00000000 00000000 00000000 +*/ + INSTANCE_WR(ctx, (0x349c/4)+2, 0x000fe000); + +/* +write32 #1 block at +0x00743c6c NV_PRAMIN+0x43c6c of 4 (0x4) elements: ++0x00743c6c: 00000000 00000000 00000000 00000000 +write32 #1 block at +0x00743ccc NV_PRAMIN+0x43ccc of 4 (0x4) elements: ++0x00743ccc: 00000000 000003f8 00000000 00000000 +*/ + INSTANCE_WR(ctx, (0x352c/4)+1, 0x000003f8); + +/* write32 #1 NV_PRAMIN+0x43ce0 <- 0x002fe000 */ + INSTANCE_WR(ctx, 0x3540/4, 0x002fe000); + +/* +write32 #1 block at +0x00743cfc NV_PRAMIN+0x43cfc of 8 (0x8) elements: ++0x00743cfc: 001c527c 001c527c 001c527c 001c527c 001c527c 001c527c 001c527c 001c527c +*/ + for (i = 0; i < 8; ++i) + INSTANCE_WR(ctx, (0x355c/4)+i, 0x001c527c); +} static void nv25_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) @@ -2876,6 +3008,10 @@ int nv20_graph_create_context(struct nouveau_channel *chan) int ret; switch (dev_priv->chipset) { + case 0x20: + ctx_size = NV20_GRCTX_SIZE; + ctx_init = nv20_graph_context_init; + break; case 0x25: case 0x28: ctx_size = NV25_GRCTX_SIZE; From afc57ef1dfb5bdf17411505d4dfbb03863a870bf Mon Sep 17 00:00:00 2001 From: Pekka Paalanen Date: Tue, 2 Oct 2007 21:51:14 +0300 Subject: [PATCH 354/437] nouveau: fix nv25_graph_context_init It was writing 4x the data in a loop. 
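To spell out the arithmetic behind the "4x": INSTANCE_WR() takes an index in 32-bit words, and the traced block starting at context offset 0x1b04 is 0x880 bytes long, i.e. 0x220 words or 0x88 four-word groups. The old loop used the byte length as the bound on a word index, for (i = 0; i < 0x880; i += 4), so it ran 0x220 iterations and wrote 0x220 * 4 = 0x880 words (0x2200 bytes), four times the intended region. Bounding the loop at 0x880/4 words gives 0x88 iterations, exactly one per four-word group. The explicit zero store to the fourth word of each group can also be dropped, presumably because the context object is created with NVOBJ_FLAG_ZERO_ALLOC and is already zero-filled.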
--- shared-core/nv20_graph.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/shared-core/nv20_graph.c b/shared-core/nv20_graph.c index 213d60cc..6b4c25e0 100644 --- a/shared-core/nv20_graph.c +++ b/shared-core/nv20_graph.c @@ -256,11 +256,10 @@ write32 #1 block at +0x00740d34 NV_PRAMIN.GRCTX0+0x614 of 3136 (0xc40) elements: INSTANCE_WR(ctx, (0x614/4)+25, 0x00000001); INSTANCE_WR(ctx, (0x614/4)+27, 0x00040000); INSTANCE_WR(ctx, (0x614/4)+28, 0x00010000); - for (i=0; i<0x880; i+=4) { + for (i=0; i < 0x880/4; i+=4) { INSTANCE_WR(ctx, (0x1b04/4)+i+0, 0x10700ff9); INSTANCE_WR(ctx, (0x1b04/4)+i+1, 0x0436086c); INSTANCE_WR(ctx, (0x1b04/4)+i+2, 0x000c001b); - INSTANCE_WR(ctx, (0x1b04/4)+i+3, 0x00000000); } /* From a72eb27fbc7a66e35018ffbcb5137cfaaf4049aa Mon Sep 17 00:00:00 2001 From: Pekka Paalanen Date: Tue, 2 Oct 2007 21:56:01 +0300 Subject: [PATCH 355/437] nouveau: nv20 graph_create_context difference nv20 writes the chan->id to a different place than nv28. This still does not make nv20 run nv10_demo. --- shared-core/nv20_graph.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/shared-core/nv20_graph.c b/shared-core/nv20_graph.c index 6b4c25e0..aba5a7e4 100644 --- a/shared-core/nv20_graph.c +++ b/shared-core/nv20_graph.c @@ -3004,12 +3004,14 @@ int nv20_graph_create_context(struct nouveau_channel *chan) struct drm_nouveau_private *dev_priv = dev->dev_private; void (*ctx_init)(struct drm_device *, struct nouveau_gpuobj *); unsigned int ctx_size; + unsigned int idoffs = 0x28/4; int ret; switch (dev_priv->chipset) { case 0x20: ctx_size = NV20_GRCTX_SIZE; ctx_init = nv20_graph_context_init; + idoffs = 0; break; case 0x25: case 0x28: @@ -3048,7 +3050,7 @@ int nv20_graph_create_context(struct nouveau_channel *chan) ctx_init(dev, chan->ramin_grctx->gpuobj); /* nv20: INSTANCE_WR(chan->ramin_grctx->gpuobj, 10, chan->id<<24); */ - INSTANCE_WR(chan->ramin_grctx->gpuobj, 0x28/4, (chan->id<<24)|0x1); + INSTANCE_WR(chan->ramin_grctx->gpuobj, idoffs, (chan->id<<24)|0x1); /* CTX_USER */ INSTANCE_WR(dev_priv->ctx_table->gpuobj, chan->id, From 7f99fd5d7aa1f0d2463907d9d8c483b6249ac831 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Wed, 3 Oct 2007 14:08:18 -0700 Subject: [PATCH 356/437] First round of byte-ordering fixes for PowerPC. This isn't 100% as command submission via PCI-e GART buffers doesn't work. I've hacked around that for the time being. This is essentially the code that was used at the POWER.org event to show Bimini. 
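The recurring transformation in this patch: values read and written through DRM_READ32()/DRM_WRITE32() are little-endian on the bus, so on big-endian PowerPC each 32-bit access gets wrapped in le32_to_cpu()/cpu_to_le32(). On little-endian x86 those macros compile away to nothing, which is why the missing swaps went unnoticed there. A sketch of the accessor pattern, assuming the usual DRM and kernel headers; the helper names are invented for illustration, and the patch itself open-codes the conversion at each call site.

	static inline u32 xgi_rd32(struct drm_map *map, u32 addr)
	{
		/* Register data arrives in little-endian layout; convert
		 * it to CPU byte order before acting on it. */
		return le32_to_cpu(DRM_READ32(map, addr));
	}

	static inline void xgi_wr32(struct drm_map *map, u32 addr, u32 val)
	{
		/* Convert the CPU-order value to the little-endian layout
		 * the hardware expects before it goes out on the bus. */
		DRM_WRITE32(map, addr, cpu_to_le32(val));
	}
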
--- linux-core/xgi_cmdlist.c | 50 +++++++++++++++++++++++++++++----------- linux-core/xgi_drv.c | 6 ++--- linux-core/xgi_drv.h | 4 ++-- linux-core/xgi_fence.c | 4 ++-- linux-core/xgi_misc.c | 24 +++++++++---------- linux-core/xgi_pcie.c | 3 ++- 6 files changed, 58 insertions(+), 33 deletions(-) diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index 261f4e13..35f7e1bd 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -45,7 +45,7 @@ static inline void dwWriteReg(struct drm_map * map, u32 addr, u32 data) DRM_INFO("mmio_map->handle = 0x%p, addr = 0x%x, data = 0x%x\n", map->handle, addr, data); #endif - DRM_WRITE32(map, addr, data); + DRM_WRITE32(map, addr, cpu_to_le32(data)); } @@ -98,6 +98,25 @@ int xgi_submit_cmdlist(struct drm_device * dev, void * data, const struct xgi_cmd_info *const pCmdInfo = (struct xgi_cmd_info *) data; const unsigned int cmd = get_batch_command(pCmdInfo->type); +#if __BIG_ENDIAN + const u32 *const ptr = xgi_find_pcie_virt(info, pCmdInfo->hw_addr); + unsigned i; + unsigned j; + + xgi_waitfor_pci_idle(info); + for (j = 4; j < pCmdInfo->size; j += 4) { + u32 reg = ptr[j]; + + for (i = 1; i < 4; i++) { + if ((reg & 1) != 0) { + const unsigned r = 0x2100 | (reg & 0x0fe); + DRM_WRITE32(info->mmio_map, r, ptr[j + i]); + } + + reg >>= 8; + } + } +#else u32 begin[4]; @@ -138,16 +157,17 @@ int xgi_submit_cmdlist(struct drm_device * dev, void * data, xgi_emit_flush(info, FALSE); } - info->cmdring.last_ptr[1] = begin[1]; - info->cmdring.last_ptr[2] = begin[2]; - info->cmdring.last_ptr[3] = begin[3]; + info->cmdring.last_ptr[1] = cpu_to_le32(begin[1]); + info->cmdring.last_ptr[2] = cpu_to_le32(begin[2]); + info->cmdring.last_ptr[3] = cpu_to_le32(begin[3]); DRM_WRITEMEMORYBARRIER(); - info->cmdring.last_ptr[0] = begin[0]; + info->cmdring.last_ptr[0] = cpu_to_le32(begin[0]); triggerHWCommandList(info); } info->cmdring.last_ptr = xgi_find_pcie_virt(info, pCmdInfo->hw_addr); +#endif drm_fence_flush_old(info->dev, 0, info->next_sequence); return 0; } @@ -258,6 +278,8 @@ void xgi_emit_flush(struct xgi_info * info, bool stop) const unsigned int flush_size = sizeof(flush_command); u32 *batch_addr; u32 hw_addr; + unsigned int i; + /* check buf is large enough to contain a new flush batch */ if ((info->cmdring.ring_offset + flush_size) >= info->cmdring.size) { @@ -269,18 +291,20 @@ void xgi_emit_flush(struct xgi_info * info, bool stop) batch_addr = info->cmdring.ptr + (info->cmdring.ring_offset / 4); - (void) memcpy(batch_addr, flush_command, flush_size); - - if (stop) { - *batch_addr |= BEGIN_STOP_STORE_CURRENT_POINTER_MASK; + for (i = 0; i < (flush_size / 4); i++) { + batch_addr[i] = cpu_to_le32(flush_command[i]); } - info->cmdring.last_ptr[1] = BEGIN_LINK_ENABLE_MASK | (flush_size / 4); - info->cmdring.last_ptr[2] = hw_addr >> 4; + if (stop) { + *batch_addr |= cpu_to_le32(BEGIN_STOP_STORE_CURRENT_POINTER_MASK); + } + + info->cmdring.last_ptr[1] = cpu_to_le32(BEGIN_LINK_ENABLE_MASK | (flush_size / 4)); + info->cmdring.last_ptr[2] = cpu_to_le32(hw_addr >> 4); info->cmdring.last_ptr[3] = 0; DRM_WRITEMEMORYBARRIER(); - info->cmdring.last_ptr[0] = (get_batch_command(BTYPE_CTRL) << 24) - | (BEGIN_VALID_MASK); + info->cmdring.last_ptr[0] = cpu_to_le32((get_batch_command(BTYPE_CTRL) << 24) + | (BEGIN_VALID_MASK)); triggerHWCommandList(info); diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index bc6873a9..4e66197e 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -351,9 +351,9 @@ irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS) { struct 
drm_device *dev = (struct drm_device *) arg; struct xgi_info *info = dev->dev_private; - const u32 irq_bits = DRM_READ32(info->mmio_map, + const u32 irq_bits = le32_to_cpu(DRM_READ32(info->mmio_map, (0x2800 - + M2REG_AUTO_LINK_STATUS_ADDRESS)) + + M2REG_AUTO_LINK_STATUS_ADDRESS))) & (M2REG_ACTIVE_TIMER_INTERRUPT_MASK | M2REG_ACTIVE_INTERRUPT_0_MASK | M2REG_ACTIVE_INTERRUPT_2_MASK @@ -363,7 +363,7 @@ irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS) if (irq_bits != 0) { DRM_WRITE32(info->mmio_map, 0x2800 + M2REG_AUTO_LINK_SETTING_ADDRESS, - M2REG_AUTO_LINK_SETTING_COMMAND | irq_bits); + cpu_to_le32(M2REG_AUTO_LINK_SETTING_COMMAND | irq_bits)); xgi_fence_handler(dev); return IRQ_HANDLED; } else { diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index a68dc03b..d9a94f5f 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -35,11 +35,11 @@ #define DRIVER_NAME "xgi" #define DRIVER_DESC "XGI XP5 / XP10 / XG47" -#define DRIVER_DATE "20070918" +#define DRIVER_DATE "20071003" #define DRIVER_MAJOR 1 #define DRIVER_MINOR 1 -#define DRIVER_PATCHLEVEL 0 +#define DRIVER_PATCHLEVEL 3 #include "xgi_cmdlist.h" #include "xgi_drm.h" diff --git a/linux-core/xgi_fence.c b/linux-core/xgi_fence.c index 22e1dced..a98a8422 100644 --- a/linux-core/xgi_fence.c +++ b/linux-core/xgi_fence.c @@ -48,8 +48,8 @@ static uint32_t xgi_do_flush(struct drm_device * dev, uint32_t class) if (pending_flush_types) { if (pending_flush_types & DRM_FENCE_TYPE_EXE) { - const u32 begin_id = DRM_READ32(info->mmio_map, - 0x2820) + const u32 begin_id = le32_to_cpu(DRM_READ32(info->mmio_map, + 0x2820)) & BEGIN_BEGIN_IDENTIFICATION_MASK; if (begin_id != info->complete_sequence) { diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index 50a721c0..f39b3bb5 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -38,12 +38,12 @@ static unsigned int s_invalid_begin = 0; static bool xgi_validate_signal(struct drm_map * map) { - if (DRM_READ32(map, 0x2800) & 0x001c0000) { + if (le32_to_cpu(DRM_READ32(map, 0x2800) & 0x001c0000)) { u16 check; /* Check Read back status */ DRM_WRITE8(map, 0x235c, 0x80); - check = DRM_READ16(map, 0x2360); + check = le16_to_cpu(DRM_READ16(map, 0x2360)); if ((check & 0x3f) != ((check & 0x3f00) >> 8)) { return FALSE; @@ -51,28 +51,28 @@ static bool xgi_validate_signal(struct drm_map * map) /* Check RO channel */ DRM_WRITE8(map, 0x235c, 0x83); - check = DRM_READ16(map, 0x2360); + check = le16_to_cpu(DRM_READ16(map, 0x2360)); if ((check & 0x0f) != ((check & 0xf0) >> 4)) { return FALSE; } /* Check RW channel */ DRM_WRITE8(map, 0x235c, 0x88); - check = DRM_READ16(map, 0x2360); + check = le16_to_cpu(DRM_READ16(map, 0x2360)); if ((check & 0x0f) != ((check & 0xf0) >> 4)) { return FALSE; } /* Check RO channel outstanding */ DRM_WRITE8(map, 0x235c, 0x8f); - check = DRM_READ16(map, 0x2360); + check = le16_to_cpu(DRM_READ16(map, 0x2360)); if (0 != (check & 0x3ff)) { return FALSE; } /* Check RW channel outstanding */ DRM_WRITE8(map, 0x235c, 0x90); - check = DRM_READ16(map, 0x2360); + check = le16_to_cpu(DRM_READ16(map, 0x2360)); if (0 != (check & 0x3ff)) { return FALSE; } @@ -89,7 +89,7 @@ static void xgi_ge_hang_reset(struct drm_map * map) int time_out = 0xffff; DRM_WRITE8(map, 0xb057, 8); - while (0 != (DRM_READ32(map, 0x2800) & 0xf0000000)) { + while (0 != le32_to_cpu(DRM_READ32(map, 0x2800) & 0xf0000000)) { while (0 != ((--time_out) & 0xfff)) /* empty */ ; @@ -100,7 +100,7 @@ static void xgi_ge_hang_reset(struct drm_map * map) u8 old_36; DRM_INFO("Can not reset back 0x%x!\n", - DRM_READ32(map, 0x2800)); 
+ le32_to_cpu(DRM_READ32(map, 0x2800))); DRM_WRITE8(map, 0xb057, 0); @@ -137,7 +137,7 @@ static void xgi_ge_hang_reset(struct drm_map * map) bool xgi_ge_irq_handler(struct xgi_info * info) { - const u32 int_status = DRM_READ32(info->mmio_map, 0x2810); + const u32 int_status = le32_to_cpu(DRM_READ32(info->mmio_map, 0x2810)); bool is_support_auto_reset = FALSE; /* Check GE on/off */ @@ -146,7 +146,7 @@ bool xgi_ge_irq_handler(struct xgi_info * info) /* We got GE stall interrupt. */ DRM_WRITE32(info->mmio_map, 0x2810, - int_status | 0x04000000); + cpu_to_le32(int_status | 0x04000000)); if (is_support_auto_reset) { static cycles_t last_tick; @@ -176,7 +176,7 @@ bool xgi_ge_irq_handler(struct xgi_info * info) } else if (0 != (0x1 & int_status)) { s_invalid_begin++; DRM_WRITE32(info->mmio_map, 0x2810, - (int_status & ~0x01) | 0x04000000); + cpu_to_le32((int_status & ~0x01) | 0x04000000)); } return TRUE; @@ -326,7 +326,7 @@ void xgi_waitfor_pci_idle(struct xgi_info * info) unsigned int same_count = 0; while (idleCount < 5) { - const u32 status = DRM_READ32(info->mmio_map, WHOLD_GE_STATUS) + const u32 status = le32_to_cpu(DRM_READ32(info->mmio_map, WHOLD_GE_STATUS)) & IDLE_MASK; if (status == old_status) { diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index a7d3ea24..4becf35b 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -40,7 +40,8 @@ void xgi_gart_flush(struct drm_device *dev) DRM_WRITE8(info->mmio_map, 0xB00C, temp & ~0x02); /* Set GART base address to HW */ - DRM_WRITE32(info->mmio_map, 0xB034, info->gart_info.bus_addr); + DRM_WRITE32(info->mmio_map, 0xB034, + cpu_to_le32(info->gart_info.bus_addr)); /* Flush GART table. */ DRM_WRITE8(info->mmio_map, 0xB03F, 0x40); From 0379919e99542bc50cf9d0a8a3996b2896ec4e64 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Wed, 3 Oct 2007 14:12:16 -0700 Subject: [PATCH 357/437] Use 'ifdef __BIG_ENDIAN' instead of 'if __BIG_ENDIAN' --- linux-core/xgi_cmdlist.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index 35f7e1bd..c25b0e0d 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -98,7 +98,7 @@ int xgi_submit_cmdlist(struct drm_device * dev, void * data, const struct xgi_cmd_info *const pCmdInfo = (struct xgi_cmd_info *) data; const unsigned int cmd = get_batch_command(pCmdInfo->type); -#if __BIG_ENDIAN +#ifdef __BIG_ENDIAN const u32 *const ptr = xgi_find_pcie_virt(info, pCmdInfo->hw_addr); unsigned i; unsigned j; From 7fbd10d93310345164d1e65da281848b05493797 Mon Sep 17 00:00:00 2001 From: Stephane Marchesin Date: Thu, 4 Oct 2007 03:43:59 +0200 Subject: [PATCH 358/437] nouveau: nv2a drm context switch support. 
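As background for the family comment this patch updates, here is a rough sketch of how the PCI device ID ranges map onto the NV2x chips; the helper name and return convention are invented for illustration and are not part of the patch:

#include <stdint.h>

/* Illustration only: decode the NV2x family from the PCI device ID
 * ranges listed in the nv20_graph.c comment below. */
static int nv20_family_from_device(uint16_t device)
{
	if ((device & 0x0ff0) == 0x0200)	/* 0x10de:0x020x */
		return 0x20;			/* NV20 */
	if ((device & 0x0ff0) == 0x0250 ||	/* 0x10de:0x025x */
	    (device & 0x0ff0) == 0x0280)	/* 0x10de:0x028x */
		return 0x25;			/* NV25/NV28 */
	if (device == 0x02a0)			/* 0x10de:0x02A0 */
		return 0x2a;			/* NV2A */
	return -1;				/* not an NV2x part */
}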
--- shared-core/nv20_graph.c | 74 +++++++++++++++++++++++++++++++++++++--- 1 file changed, 69 insertions(+), 5 deletions(-) diff --git a/shared-core/nv20_graph.c b/shared-core/nv20_graph.c index aba5a7e4..9edab594 100644 --- a/shared-core/nv20_graph.c +++ b/shared-core/nv20_graph.c @@ -4,14 +4,19 @@ #include "nouveau_drm.h" /* + * NV20 + * ----- * There are 3 families : - * NV30 is 0x10de:0x030* - * NV31 is 0x10de:0x031* + * NV20 is 0x10de:0x020* + * NV25/28 is 0x10de:0x025* / 0x10de:0x028* + * NV2A is 0x10de:0x02A0 * + * NV30 + * ----- + * There are 3 families : + * NV30/31 is 0x10de:0x030* / 0x10de:0x031* * NV34 is 0x10de:0x032* - * - * NV35 is 0x10de:0x033* (NV35 and NV36 are the same) - * NV36 is 0x10de:0x034* + * NV35/36 is 0x10de:0x033* / 0x10de:0x034* * * Not seen in the wild, no dumps (probably NV35) : * NV37 is 0x10de:0x00fc, 0x10de:0x00fd @@ -21,6 +26,7 @@ #define NV20_GRCTX_SIZE (3580*4) #define NV25_GRCTX_SIZE (3529*4) +#define NV2A_GRCTX_SIZE (3500*4) #define NV30_31_GRCTX_SIZE (22392) #define NV34_GRCTX_SIZE (18140) @@ -160,6 +166,59 @@ write32 #1 block at +0x00743cfc NV_PRAMIN+0x43cfc of 8 (0x8) elements: INSTANCE_WR(ctx, (0x355c/4)+i, 0x001c527c); } +static void nv2a_graph_context_init(struct drm_device *dev, + struct nouveau_gpuobj *ctx) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + int i; + + INSTANCE_WR(ctx, 0x33c/4, 0xffff0000); + for(i = 0x3a0; i< 0x3a8; i += 4) + INSTANCE_WR(ctx, i/4, 0x0fff0000); + INSTANCE_WR(ctx, 0x47c/4, 0x00000101); + INSTANCE_WR(ctx, 0x490/4, 0x00000111); + INSTANCE_WR(ctx, 0x4a8/4, 0x44400000); + for(i = 0x4d4; i< 0x4e4; i += 4) + INSTANCE_WR(ctx, i/4, 0x00030303); + for(i = 0x4f4; i< 0x504; i += 4) + INSTANCE_WR(ctx, i/4, 0x00080000); + for(i = 0x50c; i< 0x51c; i += 4) + INSTANCE_WR(ctx, i/4, 0x01012000); + for(i = 0x51c; i< 0x52c; i += 4) + INSTANCE_WR(ctx, i/4, 0x000105b8); + for(i = 0x52c; i< 0x53c; i += 4) + INSTANCE_WR(ctx, i/4, 0x00080008); + for(i = 0x55c; i< 0x59c; i += 4) + INSTANCE_WR(ctx, i/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x5a4/4, 0x4b7fffff); + INSTANCE_WR(ctx, 0x5fc/4, 0x00000001); + INSTANCE_WR(ctx, 0x604/4, 0x00004000); + INSTANCE_WR(ctx, 0x610/4, 0x00000001); + INSTANCE_WR(ctx, 0x618/4, 0x00040000); + INSTANCE_WR(ctx, 0x61c/4, 0x00010000); + + for (i=0x1a9c; i <= 0x22fc/4; i += 32) { + INSTANCE_WR(ctx, i/4 , 0x10700ff9); + INSTANCE_WR(ctx, i/4 + 1, 0x0436086c); + INSTANCE_WR(ctx, i/4 + 2, 0x000c001b); + } + + INSTANCE_WR(ctx, 0x269c/4, 0x3f800000); + INSTANCE_WR(ctx, 0x26b0/4, 0x3f800000); + INSTANCE_WR(ctx, 0x26dc/4, 0x40000000); + INSTANCE_WR(ctx, 0x26e0/4, 0x3f800000); + INSTANCE_WR(ctx, 0x26e4/4, 0x3f000000); + INSTANCE_WR(ctx, 0x26ec/4, 0x40000000); + INSTANCE_WR(ctx, 0x26f0/4, 0x3f800000); + INSTANCE_WR(ctx, 0x26f8/4, 0xbf800000); + INSTANCE_WR(ctx, 0x2700/4, 0xbf800000); + INSTANCE_WR(ctx, 0x3024/4, 0x000fe000); + INSTANCE_WR(ctx, 0x30a0/4, 0x000003f8); + INSTANCE_WR(ctx, 0x33fc/4, 0x002fe000); + for(i = 0x341c; i< 0x343c; i += 4) + INSTANCE_WR(ctx, i/4, 0x001c527c); +} + static void nv25_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) { @@ -3018,6 +3077,11 @@ int nv20_graph_create_context(struct nouveau_channel *chan) ctx_size = NV25_GRCTX_SIZE; ctx_init = nv25_graph_context_init; break; + case 0x2a: + ctx_size = NV2A_GRCTX_SIZE; + ctx_init = nv2a_graph_context_init; + idoffs = 0; + break; case 0x30: case 0x31: ctx_size = NV30_31_GRCTX_SIZE; From 495bbbaadc93c574eb98dd2ad64bdca4d91d4152 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Thu, 4 Oct 2007 16:13:22 +1000 Subject: 
[PATCH 359/437] drm: fix page count calculation Also no need to do pre-populate work on single page --- linux-core/drm_bo_move.c | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c index dae99181..2a35d45b 100644 --- a/linux-core/drm_bo_move.c +++ b/linux-core/drm_bo_move.c @@ -488,16 +488,6 @@ static int drm_bo_kmap_ttm(struct drm_buffer_object *bo, unsigned long start_pag BUG_ON(!ttm); - /* - * Populate the part we're mapping; - */ - - for (i=start_page; i< num_pages; ++i) { - d = drm_ttm_get_page(ttm, i); - if (!d) - return -ENOMEM; - } - if (num_pages == 1 && (mem->flags & DRM_BO_FLAG_CACHED)) { /* @@ -509,6 +499,15 @@ static int drm_bo_kmap_ttm(struct drm_buffer_object *bo, unsigned long start_pag map->page = drm_ttm_get_page(ttm, start_page); map->virtual = kmap(map->page); } else { + /* + * Populate the part we're mapping; + */ + + for (i = start_page; i< start_page + num_pages; ++i) { + d = drm_ttm_get_page(ttm, i); + if (!d) + return -ENOMEM; + } /* * We need to use vmap to get the desired page protection From 5ca12104f8a3eebecae6d238c1c456c8e6540ae3 Mon Sep 17 00:00:00 2001 From: Maarten Maathuis Date: Tue, 2 Oct 2007 21:54:37 +0200 Subject: [PATCH 360/437] linux-drm: Obey device class requirements when detecting devices. --- linux-core/drm_drv.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index 73598892..a09fa96e 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -321,6 +321,11 @@ int drm_init(struct drm_driver *driver, while ((pdev = pci_get_subsys(pid->vendor, pid->device, pid->subvendor, pid->subdevice, pdev))) { + /* Are there device class requirements? */ + if ((pid->class != 0) + && ((pdev->class & pid->class_mask) != pid->class)) { + continue; + } /* is there already a driver loaded, or (short circuit saves work) */ /* does something like VesaFB have control of the memory region? */ if (pci_dev_driver(pdev) @@ -347,6 +352,11 @@ int drm_init(struct drm_driver *driver, pci_get_subsys(pid->vendor, pid->device, pid->subvendor, pid->subdevice, pdev))) { + /* Are there device class requirements? */ + if ((pid->class != 0) + && ((pdev->class & pid->class_mask) != pid->class)) { + continue; + } /* stealth mode requires a manual probe */ pci_dev_get(pdev); if ((rc = drm_get_dev(pdev, &pciidlist[i], driver))) { From b510517d59efcb45cc7079743be967bee122b251 Mon Sep 17 00:00:00 2001 From: Maarten Maathuis Date: Thu, 4 Oct 2007 09:31:46 +0200 Subject: [PATCH 361/437] nouveau: Switch over to using PMC_BOOT_0 for card detection. 
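The nouveau_load() hunk below stops trusting the PCI ID table and instead derives the card's architecture from the PMC_BOOT_0 register. Restated as a standalone helper, purely for illustration (the function name is made up):

#include <stdint.h>

/* Sketch of the PMC_BOOT_0 decode introduced in this patch. */
static int architecture_from_boot0(uint32_t boot0)
{
	if ((boot0 & 0x0f000000) != 0)
		/* NV10 and newer: bits 27-20 hold the architecture in hex */
		return (boot0 & 0x0ff00000) >> 20;
	if ((boot0 & 0xff00fff0) == 0x20004000)
		/* NV04/NV05 report this fixed pattern instead */
		return 0x04;
	return 0;	/* unknown */
}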
--- linux-core/nouveau_drv.c | 11 +- shared-core/drm_pciids.txt | 255 ------------------------------------ shared-core/nouveau_state.c | 74 ++++++++++- 3 files changed, 78 insertions(+), 262 deletions(-) diff --git a/linux-core/nouveau_drv.c b/linux-core/nouveau_drv.c index 6c73b0d3..01de67de 100644 --- a/linux-core/nouveau_drv.c +++ b/linux-core/nouveau_drv.c @@ -29,7 +29,16 @@ #include "drm_pciids.h" static struct pci_device_id pciidlist[] = { - nouveau_PCI_IDS + { + PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID), + .class = PCI_BASE_CLASS_DISPLAY << 16, + .class_mask = 0xff << 16, + }, + { + PCI_DEVICE(PCI_VENDOR_ID_NVIDIA_SGS, PCI_ANY_ID), + .class = PCI_BASE_CLASS_DISPLAY << 16, + .class_mask = 0xff << 16, + } }; extern struct drm_ioctl_desc nouveau_ioctls[]; diff --git a/shared-core/drm_pciids.txt b/shared-core/drm_pciids.txt index 74e7e75a..05d32f2e 100644 --- a/shared-core/drm_pciids.txt +++ b/shared-core/drm_pciids.txt @@ -481,261 +481,6 @@ 0x10DE 0x009D NV40 "NVidia Quadro FX 4500" 0x10DE 0x009E NV40 "NVidia 0x009E" -[nouveau] -0x10de 0x0020 NV_04 "RIVA TNT" -0x10de 0x0028 NV_04 "RIVA TNT2/TNT2 Pro" -0x10de 0x0029 NV_04 "RIVA TNT2 Ultra" -0x10de 0x002a NV_04 "Riva TnT2" -0x10de 0x002b NV_04 "Riva TnT2" -0x10de 0x002c NV_04 "Vanta/Vanta LT" -0x10de 0x002d NV_04 "RIVA TNT2 Model 64/Model 64 Pro" -0x10de 0x002e NV_04 "Vanta" -0x10de 0x002f NV_04 "Vanta" -0x10de 0x0040 NV_40 "GeForce 6800 Ultra" -0x10de 0x0041 NV_40 "GeForce 6800" -0x10de 0x0042 NV_40 "GeForce 6800 LE" -0x10de 0x0043 NV_40 "NV40.3" -0x10de 0x0044 NV_40 "GeForce 6800 XT" -0x10de 0x0045 NV_40 "GeForce 6800 GT" -0x10de 0x0046 NV_40 "GeForce 6800 GT" -0x10de 0x0047 NV_40 "GeForce 6800 GS" -0x10de 0x0048 NV_40 "GeForce 6800 XT" -0x10de 0x0049 NV_40 "NV40GL" -0x10de 0x004d NV_40 "Quadro FX 4000" -0x10de 0x004e NV_40 "Quadro FX 4000" -0x10de 0x0090 NV_40 "GeForce 7800 GTX" -0x10de 0x0091 NV_40 "GeForce 7800 GTX" -0x10de 0x0092 NV_40 "GeForce 7800 GT" -0x10de 0x0093 NV_40 "GeForce 7800 GS" -0x10de 0x0095 NV_40 "GeForce 7800 SLI" -0x10de 0x0098 NV_40 "GeForce Go 7800" -0x10de 0x0099 NV_40 "GeForce Go 7800 GTX" -0x10de 0x009d NV_40 "Quadro FX4500" -0x10de 0x00a0 NV_04 "Aladdin TNT2" -0x10de 0x00c0 NV_40 "GeForce 6800 GS" -0x10de 0x00c1 NV_40 "GeForce 6800" -0x10de 0x00c2 NV_40 "GeForce 6800 LE" -0x10de 0x00c3 NV_40 "Geforce 6800 XT" -0x10de 0x00c8 NV_40 "GeForce Go 6800" -0x10de 0x00c9 NV_40 "GeForce Go 6800 Ultra" -0x10de 0x00cc NV_40 "Quadro FX Go1400" -0x10de 0x00cd NV_40 "Quadro FX 3450/4000 SDI" -0x10de 0x00ce NV_40 "Quadro FX 1400" -0x10de 0x00f0 NV_40 "GeForce 6800/GeForce 6800 Ultra" -0x10de 0x00f1 NV_40 "GeForce 6600/GeForce 6600 GT" -0x10de 0x00f2 NV_40 "GeForce 6600/GeForce 6600 GT" -0x10de 0x00f3 NV_40 "GeForce 6200" -0x10de 0x00f4 NV_40 "GeForce 6600 LE" -0x10de 0x00f5 NV_40 "GeForce 7800 GS" -0x10de 0x00f6 NV_40 "GeForce 6600 GS" -0x10de 0x00f8 NV_40 "Quadro FX 3400/4400" -0x10de 0x00f9 NV_40 "GeForce 6800 Ultra/GeForce 6800 GT" -0x10de 0x00fa NV_30 "GeForce PCX 5750" -0x10de 0x00fb NV_30 "GeForce PCX 5900" -0x10de 0x00fc NV_30 "Quadro FX 330/GeForce PCX 5300" -0x10de 0x00fd NV_30 "Quadro FX 330/Quadro NVS280" -0x10de 0x00fe NV_30 "Quadro FX 1300" -0x10de 0x00ff NV_17 "GeForce PCX 4300" -0x10de 0x0100 NV_10 "GeForce 256 SDR" -0x10de 0x0101 NV_10 "GeForce 256 DDR" -0x10de 0x0103 NV_10 "Quadro" -0x10de 0x0110 NV_11 "GeForce2 MX/MX 400" -0x10de 0x0111 NV_11 "GeForce2 MX 100 DDR/200 DDR" -0x10de 0x0112 NV_11 "GeForce2 Go" -0x10de 0x0113 NV_11 "Quadro2 MXR/EX/Go" -0x10de 0x0140 NV_40 "GeForce 6600 GT" -0x10de 0x0141 
NV_40 "GeForce 6600" -0x10de 0x0142 NV_40 "GeForce 6600 LE" -0x10de 0x0143 NV_40 "GeForce 6600 VE" -0x10de 0x0144 NV_40 "GeForce Go 6600" -0x10de 0x0145 NV_40 "GeForce 6610 XL" -0x10de 0x0146 NV_40 "Geforce Go 6600TE/6200TE" -0x10de 0x0147 NV_40 "GeForce 6700 XL" -0x10de 0x0148 NV_40 "GeForce Go 6600" -0x10de 0x0149 NV_40 "GeForce Go 6600 GT" -0x10de 0x014a NV_40 "Quadro NVS 440" -0x10de 0x014c NV_40 "Quadro FX 550" -0x10de 0x014d NV_17 "Quadro FX 550" -0x10de 0x014e NV_40 "Quadro FX 540" -0x10de 0x014f NV_40 "GeForce 6200" -0x10de 0x0150 NV_15 "GeForce2 GTS/Pro" -0x10de 0x0151 NV_15 "GeForce2 Ti" -0x10de 0x0152 NV_15 "GeForce2 Ultra, Bladerunner" -0x10de 0x0153 NV_15 "Quadro2 Pro" -0x10de 0x0160 NV_44 "GeForce 6500" -0x10de 0x0161 NV_44 "GeForce 6200 TurboCache(TM)" -0x10de 0x0162 NV_44 "GeForce 6200 SE TurboCache (TM)" -0x10de 0x0163 NV_44 "GeForce 6200 LE" -0x10de 0x0164 NV_44 "GeForce Go 6200" -0x10de 0x0165 NV_44 "Quadro NVS 285" -0x10de 0x0166 NV_44 "GeForce Go 6400" -0x10de 0x0167 NV_44 "GeForce Go 6200 TurboCache" -0x10de 0x0168 NV_44 "GeForce Go 6200 TurboCache" -0x10de 0x0169 NV_44 "GeForce 6250" -0x10de 0x0170 NV_17 "GeForce4 MX 460" -0x10de 0x0171 NV_17 "GeForce4 MX 440" -0x10de 0x0172 NV_17 "GeForce4 MX 420" -0x10de 0x0173 NV_17 "GeForce4 MX 440-SE" -0x10de 0x0174 NV_17 "GeForce4 440 Go" -0x10de 0x0175 NV_17 "GeForce4 420 Go" -0x10de 0x0176 NV_17 "GeForce4 420 Go 32M" -0x10de 0x0177 NV_17 "GeForce4 460 Go" -0x10de 0x0178 NV_17 "Quadro4 550 XGL" -0x10de 0x0179 NV_17 "GeForce4 420 Go 32M" -0x10de 0x017a NV_17 "Quadro4 200/400 NVS" -0x10de 0x017b NV_17 "Quadro4 550 XGL" -0x10de 0x017c NV_17 "Quadro4 500 GoGL" -0x10de 0x017d NV_17 "GeForce4 410 Go 16M" -0x10de 0x0181 NV_17 "GeForce4 MX 440 AGP 8x" -0x10de 0x0182 NV_17 "GeForce4 MX 440SE AGP 8x" -0x10de 0x0183 NV_17 "GeForce4 MX 420 AGP 8x" -0x10de 0x0185 NV_17 "GeForce4 MX 4000 AGP 8x" -0x10de 0x0186 NV_17 "GeForce4 448 Go" -0x10de 0x0187 NV_17 "GeForce4 488 Go" -0x10de 0x0188 NV_17 "Quadro4 580 XGL" -0x10de 0x018a NV_17 "Quadro4 NVS AGP 8x" -0x10de 0x018b NV_17 "Quadro4 380 XGL" -0x10de 0x018c NV_17 "Quadro NVS 50 PCI" -0x10de 0x018d NV_17 "GeForce4 448 Go" -0x10de 0x0191 NV_50 "GeForce 8800 GTX" -0x10de 0x0193 NV_50 "GeForce 8800 GTS" -0x10de 0x0194 NV_50 "GeForce 8800 Ultra" -0x10de 0x019d NV_50 "Quadro FX 5600" -0x10de 0x019e NV_50 "Quadro FX 4600" -0x10de 0x01a0 NV_11|NV_NFORCE "GeForce2 MX Integrated Graphics" -0x10de 0x01d1 NV_44 "GeForce 7300 LE" -0x10de 0x01d3 NV_44 "Geforce 7300 SE" -0x10de 0x01d6 NV_44 "GeForce Go 7200" -0x10de 0x01d7 NV_44 "Quadro NVS 110M / GeForce Go 7300" -0x10de 0x01d8 NV_44 "GeForce Go 7400" -0x10de 0x01d9 NV_44 "GeForce Go 7400 GS" -0x10de 0x01da NV_44 "Quadro NVS 110M" -0x10de 0x01db NV_44 "Quadro NVS 120M" -0x10de 0x01dc NV_44 "Quadro FX 350M" -0x10de 0x01dd NV_44 "GeForce 7500 LE" -0x10de 0x01de NV_44 "Quadro FX 350" -0x10de 0x01df NV_44 "GeForce 7300 GS" -0x10de 0x01f0 NV_17|NV_NFORCE2 "GeForce4 MX - nForce GPU" -0x10de 0x0200 NV_20 "GeForce3" -0x10de 0x0201 NV_20 "GeForce3 Ti 200" -0x10de 0x0202 NV_20 "GeForce3 Ti 500" -0x10de 0x0203 NV_20 "Quadro DCC" -0x10de 0x0211 NV_40 "GeForce 6800" -0x10de 0x0212 NV_40 "GeForce 6800 LE" -0x10de 0x0215 NV_40 "GeForce 6800 GT" -0x10de 0x0218 NV_40 "GeForce 6800 XT" -0x10de 0x0221 NV_44 "GeForce 6200" -0x10de 0x0222 NV_44 "GeForce 6200 A-LE" -0x10de 0x0240 NV_44 "GeForce 6150" -0x10de 0x0241 NV_44 "GeForce 6150 LE" -0x10de 0x0242 NV_44 "GeForce 6100" -0x10de 0x0244 NV_44 "GeForce Go 6150" -0x10de 0x0247 NV_44 "GeForce Go 6100" -0x10de 0x0250 NV_25 
"GeForce4 Ti 4600" -0x10de 0x0251 NV_25 "GeForce4 Ti 4400" -0x10de 0x0252 NV_25 "GeForce4 Ti" -0x10de 0x0253 NV_25 "GeForce4 Ti 4200" -0x10de 0x0258 NV_25 "Quadro4 900 XGL" -0x10de 0x0259 NV_25 "Quadro4 750 XGL" -0x10de 0x025b NV_25 "Quadro4 700 XGL" -0x10de 0x0280 NV_25 "GeForce4 Ti 4800" -0x10de 0x0281 NV_25 "GeForce4 Ti 4200 AGP 8x" -0x10de 0x0282 NV_25 "GeForce4 Ti 4800 SE" -0x10de 0x0286 NV_25 "GeForce4 Ti 4200 Go AGP 8x" -0x10de 0x0288 NV_25 "Quadro4 980 XGL" -0x10de 0x0289 NV_25 "Quadro4 780 XGL" -0x10de 0x028c NV_25 "Quadro4 700 GoGL" -0x10de 0x0290 NV_40 "GeForce 7900 GTX" -0x10de 0x0291 NV_40 "GeForce 7900 GT" -0x10de 0x0292 NV_40 "GeForce 7900 GS" -0x10de 0x0298 NV_40 "GeForce Go 7900 GS" -0x10de 0x0299 NV_40 "GeForce Go 7900 GTX" -0x10de 0x029a NV_40 "Quadro FX 2500M" -0x10de 0x029b NV_40 "Quadro FX 1500M" -0x10de 0x029c NV_40 "Quadro FX 5500" -0x10de 0x029d NV_40 "Quadro FX 3500" -0x10de 0x029e NV_40 "Quadro FX 1500" -0x10de 0x029f NV_40 "Quadro FX 4500 X2" -0x10de 0x02a0 NV_20 "XGPU" -0x10de 0x02e1 NV_40 "GeForce 7600 GS" -0x10de 0x0300 NV_30 "GeForce FX" -0x10de 0x0301 NV_30 "GeForce FX 5800 Ultra" -0x10de 0x0302 NV_30 "GeForce FX 5800" -0x10de 0x0308 NV_30 "Quadro FX 2000" -0x10de 0x0309 NV_30 "Quadro FX 1000" -0x10de 0x0311 NV_30 "GeForce FX 5600 Ultra" -0x10de 0x0312 NV_30 "GeForce FX 5600" -0x10de 0x0313 NV_30 "NV31" -0x10de 0x0314 NV_30 "GeForce FX 5600XT" -0x10de 0x0316 NV_30 "NV31M" -0x10de 0x0317 NV_30 "NV31M Pro" -0x10de 0x031a NV_30 "GeForce FX Go5600" -0x10de 0x031b NV_30 "GeForce FX Go5650" -0x10de 0x031d NV_30 "NV31GLM" -0x10de 0x031e NV_30 "NV31GLM Pro" -0x10de 0x031f NV_30 "NV31GLM Pro" -0x10de 0x0320 NV_34 "GeForce FX 5200" -0x10de 0x0321 NV_34 "GeForce FX 5200 Ultra" -0x10de 0x0322 NV_34 "GeForce FX 5200" -0x10de 0x0323 NV_34 "GeForce FX 5200LE" -0x10de 0x0324 NV_34 "GeForce FX Go5200" -0x10de 0x0325 NV_34 "GeForce FX Go5250" -0x10de 0x0326 NV_34 "GeForce FX 5500" -0x10de 0x0327 NV_34 "GeForce FX 5100" -0x10de 0x0328 NV_34 "GeForce FX Go5200 32M/64M" -0x10de 0x0329 NV_34 "GeForce FX Go5200" -0x10de 0x032a NV_34 "Quadro NVS 280 PCI" -0x10de 0x032b NV_34 "Quadro FX 500/600 PCI" -0x10de 0x032c NV_34 "GeForce FX Go 5300" -0x10de 0x032d NV_34 "GeForce FX Go5100" -0x10de 0x032f NV_34 "NV34GL" -0x10de 0x0330 NV_30 "GeForce FX 5900 Ultra" -0x10de 0x0331 NV_30 "GeForce FX 5900" -0x10de 0x0332 NV_30 "GeForce FX 5900XT" -0x10de 0x0333 NV_30 "GeForce FX 5950 Ultra" -0x10de 0x0334 NV_30 "GeForce FX 5900ZT" -0x10de 0x0338 NV_30 "Quadro FX 3000" -0x10de 0x033f NV_30 "Quadro FX 700" -0x10de 0x0341 NV_30 "GeForce FX 5700 Ultra" -0x10de 0x0342 NV_30 "GeForce FX 5700" -0x10de 0x0343 NV_30 "GeForce FX 5700LE" -0x10de 0x0344 NV_30 "GeForce FX 5700VE" -0x10de 0x0345 NV_30 "NV36.5" -0x10de 0x0347 NV_30 "GeForce FX Go5700" -0x10de 0x0348 NV_30 "GeForce FX Go5700" -0x10de 0x0349 NV_30 "NV36M Pro" -0x10de 0x034b NV_30 "NV36MAP" -0x10de 0x034c NV_30 "Quadro FX Go1000" -0x10de 0x034e NV_30 "Quadro FX 1100" -0x10de 0x034f NV_30 "NV36GL" -0x10de 0x0391 NV_40 "GeForce 7600 GT" -0x10de 0x0392 NV_40 "GeForce 7600 GS" -0x10de 0x0393 NV_40 "GeForce 7300 GT" -0x10de 0x0394 NV_40 "GeForce 7600 LE" -0x10de 0x0395 NV_40 "GeForce 7300 GT" -0x10de 0x0397 NV_40 "GeForce Go 7700" -0x10de 0x0398 NV_40 "GeForce Go 7600" -0x10de 0x0399 NV_40 "GeForce Go 7600 GT" -0x10de 0x039a NV_40 "Quadro NVS 300M" -0x10de 0x039b NV_40 "GeForce Go 7900 SE" -0x10de 0x039c NV_40 "Quadro FX 550M" -0x10de 0x039e NV_40 "Quadro FX 560" -0x10de 0x03d0 NV_44 "GeForce 6100 nForce 430" -0x10de 0x03d1 NV_44 "GeForce 6100 nForce 
405" -0x10de 0x03d2 NV_44 "GeForce 6100 nForce 400" -0x10de 0x03d5 NV_44 "GeForce 6100 nForce 420" -0x10de 0x0400 NV_50 "GeForce 8600 GTS" -0x10de 0x0402 NV_50 "GeForce 8600 GT" -0x10de 0x0421 NV_50 "GeForce 8500 GT" -0x10de 0x0422 NV_50 "GeForce 8400 GS" -0x10de 0x0423 NV_50 "GeForce 8300 GS" -0x10de 0x0429 NV_50 "Quadro NVS 140" -0x12d2 0x0020 NV_04 "TNT" -0x12d2 0x0028 NV_04 "TNT2" -0x12d2 0x0029 NV_04 "UTNT2" -0x12d2 0x002c NV_04 "VTNT2" -0x12d2 0x00a0 NV_04 "ITNT2" - [xgi] 0x18ca 0x2200 0 "XP5" 0x18ca 0x0047 0 "XP10 / XG47" diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c index cb19c880..a163ae63 100644 --- a/shared-core/nouveau_state.c +++ b/shared-core/nouveau_state.c @@ -403,19 +403,81 @@ int nouveau_firstopen(struct drm_device *dev) int nouveau_load(struct drm_device *dev, unsigned long flags) { struct drm_nouveau_private *dev_priv; - - if (flags==NV_UNKNOWN) - return -EINVAL; + void __iomem *regs; + uint32_t reg0; + char architecture = 0; dev_priv = drm_calloc(1, sizeof(*dev_priv), DRM_MEM_DRIVER); - if (!dev_priv) + if (!dev_priv) return -ENOMEM; - dev_priv->card_type=flags&NOUVEAU_FAMILY; - dev_priv->flags=flags&NOUVEAU_FLAGS; + dev_priv->flags = flags & NOUVEAU_FLAGS; dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN; + DRM_DEBUG("vendor: 0x%X device: 0x%X class: 0x%X\n", dev->pci_vendor, dev->pci_device, dev->pdev->class); + + /* Time to determine the card architecture */ + regs = ioremap_nocache(pci_resource_start(dev->pdev, 0), 0x4); + if (!regs) { + DRM_ERROR("Could not ioremap to determine register\n"); + return -ENOMEM; + } + reg0 = readl(regs); + + /* We're dealing with >=NV10 */ + if ((reg0 & 0x0f000000) > 0 ) { + /* Bit 27-20 contain the architecture in hex */ + architecture = (reg0 & 0xff00000) >> 20; + /* NV04 or NV05 */ + } else if ((reg0 & 0xff00fff0) == 0x20004000) { + architecture = 0x04; + } + + iounmap(regs); + + if (architecture >= 0x50) { + dev_priv->card_type = NV_50; + } else if (architecture >= 0x44) { + dev_priv->card_type = NV_44; + } else if (architecture >= 0x40) { + dev_priv->card_type = NV_40; + } else if (architecture >= 0x34) { + dev_priv->card_type = NV_34; + } else if (architecture >= 0x30) { + dev_priv->card_type = NV_30; + } else if (architecture >= 0x25) { + dev_priv->card_type = NV_25; + } else if (architecture >= 0x20) { + dev_priv->card_type = NV_20; + } else if (architecture >= 0x17) { + dev_priv->card_type = NV_17; + } else if (architecture >= 0x15) { + dev_priv->card_type = NV_15; + } else if (architecture >= 0x11) { + dev_priv->card_type = NV_11; + } else if (architecture >= 0x10) { + dev_priv->card_type = NV_10; + } else if (architecture >= 0x04) { + dev_priv->card_type = NV_04; + } else { + dev_priv->card_type = NV_UNKNOWN; + } + + DRM_INFO("Detected an NV%d generation card\n", dev_priv->card_type); + + if (dev_priv->card_type == NV_UNKNOWN) { + return -EINVAL; + } + + /* Special flags */ + if (dev->pci_device == 0x01a0) { + dev_priv->flags |= NV_NFORCE; + } else if (dev->pci_device == 0x01f0) { + dev_priv->flags |= NV_NFORCE2; + } + dev->dev_private = (void *)dev_priv; + return 0; } From 319436c5cc51a1beb641e899987969fcf912deda Mon Sep 17 00:00:00 2001 From: Maarten Maathuis Date: Thu, 4 Oct 2007 09:39:31 +0200 Subject: [PATCH 362/437] nouveau: NV47 context switching voodoo + warning --- shared-core/nv40_graph.c | 169 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 169 insertions(+) diff --git a/shared-core/nv40_graph.c b/shared-core/nv40_graph.c index 3f3df515..7ce4273d 100644 --- 
a/shared-core/nv40_graph.c +++ b/shared-core/nv40_graph.c @@ -37,6 +37,7 @@ #define NV41_GRCTX_SIZE (92*1024) #define NV43_GRCTX_SIZE (70*1024) #define NV46_GRCTX_SIZE (70*1024) /* probably ~64KiB */ +#define NV47_GRCTX_SIZE (125*1024) #define NV49_GRCTX_SIZE (164640) #define NV4A_GRCTX_SIZE (64*1024) #define NV4B_GRCTX_SIZE (164640) @@ -565,6 +566,136 @@ nv46_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) INSTANCE_WR(ctx, i/4, 0x3f800000); } +/* This may only work on 7800 AGP cards, will include a warning */ +static void +nv47_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + int i; + + INSTANCE_WR(ctx, 0x00000000/4, ctx->im_pramin->start); + INSTANCE_WR(ctx, 0x00000024/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x00000028/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x00000030/4, 0x00000001); + INSTANCE_WR(ctx, 0x0000011c/4, 0x20010001); + INSTANCE_WR(ctx, 0x00000120/4, 0x0f73ef00); + INSTANCE_WR(ctx, 0x00000128/4, 0x02008821); + INSTANCE_WR(ctx, 0x00000178/4, 0x00000040); + INSTANCE_WR(ctx, 0x0000017c/4, 0x00000040); + INSTANCE_WR(ctx, 0x00000180/4, 0x00000040); + INSTANCE_WR(ctx, 0x00000188/4, 0x00000040); + for (i=0x00000194; i<=0x000001b0; i+=4) + INSTANCE_WR(ctx, i/4, 0x80000000); + INSTANCE_WR(ctx, 0x000001d0/4, 0x0b0b0b0c); + INSTANCE_WR(ctx, 0x00000340/4, 0x00040000); + INSTANCE_WR(ctx, 0x00000350/4, 0x55555555); + INSTANCE_WR(ctx, 0x00000354/4, 0x55555555); + INSTANCE_WR(ctx, 0x00000358/4, 0x55555555); + INSTANCE_WR(ctx, 0x0000035c/4, 0x55555555); + INSTANCE_WR(ctx, 0x00000388/4, 0x00000008); + INSTANCE_WR(ctx, 0x0000039c/4, 0x00001010); + for (i=0x000003c0; i<=0x000003fc; i+=4) + INSTANCE_WR(ctx, i/4, 0x00000111); + INSTANCE_WR(ctx, 0x00000454/4, 0x00000111); + INSTANCE_WR(ctx, 0x00000458/4, 0x00080060); + INSTANCE_WR(ctx, 0x00000474/4, 0x00000080); + INSTANCE_WR(ctx, 0x00000478/4, 0xffff0000); + INSTANCE_WR(ctx, 0x0000047c/4, 0x00000001); + INSTANCE_WR(ctx, 0x00000490/4, 0x46400000); + INSTANCE_WR(ctx, 0x000004a0/4, 0xffff0000); + for (i=0x000004a4; i<=0x000004e0; i+=4) + INSTANCE_WR(ctx, i/4, 0x88888888); + INSTANCE_WR(ctx, 0x000004f4/4, 0x0fff0000); + INSTANCE_WR(ctx, 0x000004f8/4, 0x0fff0000); + INSTANCE_WR(ctx, 0x00000500/4, 0x00011100); + for (i=0x0000051c; i<=0x00000558; i+=4) + INSTANCE_WR(ctx, i/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x00000564/4, 0x4b7fffff); + INSTANCE_WR(ctx, 0x0000058c/4, 0x30201000); + INSTANCE_WR(ctx, 0x00000590/4, 0x70605040); + INSTANCE_WR(ctx, 0x00000594/4, 0xb8a89888); + INSTANCE_WR(ctx, 0x00000598/4, 0xf8e8d8c8); + INSTANCE_WR(ctx, 0x000005ac/4, 0x40100000); + INSTANCE_WR(ctx, 0x000005c8/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x000005fc/4, 0x435185d6); + INSTANCE_WR(ctx, 0x00000600/4, 0x2155b699); + INSTANCE_WR(ctx, 0x00000604/4, 0xfedcba98); + INSTANCE_WR(ctx, 0x00000608/4, 0x00000098); + INSTANCE_WR(ctx, 0x00000618/4, 0xffffffff); + INSTANCE_WR(ctx, 0x0000061c/4, 0x00ff7000); + INSTANCE_WR(ctx, 0x00000620/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x00000630/4, 0x00ff0000); + INSTANCE_WR(ctx, 0x0000066c/4, 0x00ffff00); + for (i=0x000006b0; i<=0x000006ec; i+=4) + INSTANCE_WR(ctx, i/4, 0x00018488); + for (i=0x000006f0; i<=0x0000072c; i+=4) + INSTANCE_WR(ctx, i/4, 0x00028202); + for (i=0x00000770; i<=0x000007ac; i+=4) + INSTANCE_WR(ctx, i/4, 0x0000aae4); + for (i=0x000007b0; i<=0x000007ec; i+=4) + INSTANCE_WR(ctx, i/4, 0x01012000); + for (i=0x000007f0; i<=0x0000082c; i+=4) + INSTANCE_WR(ctx, i/4, 0x00080008); + for (i=0x00000870; i<=0x000008ac; i+=4) + INSTANCE_WR(ctx, 
i/4, 0x00100008); + INSTANCE_WR(ctx, 0x00000900/4, 0x0001bc80); + INSTANCE_WR(ctx, 0x00000904/4, 0x0001bc80); + INSTANCE_WR(ctx, 0x00000908/4, 0x0001bc80); + INSTANCE_WR(ctx, 0x0000090c/4, 0x0001bc80); + INSTANCE_WR(ctx, 0x00000910/4, 0x00000202); + INSTANCE_WR(ctx, 0x00000914/4, 0x00000202); + INSTANCE_WR(ctx, 0x00000918/4, 0x00000202); + INSTANCE_WR(ctx, 0x0000091c/4, 0x00000202); + for (i=0x00000930; i<=0x0000095c; i+=4) + INSTANCE_WR(ctx, i/4, 0x00000008); + INSTANCE_WR(ctx, 0x00000970/4, 0x00000002); + INSTANCE_WR(ctx, 0x000009a4/4, 0x00000021); + INSTANCE_WR(ctx, 0x000009a8/4, 0x030c30c3); + INSTANCE_WR(ctx, 0x000009b4/4, 0x3e020200); + INSTANCE_WR(ctx, 0x000009b8/4, 0x00ffffff); + INSTANCE_WR(ctx, 0x000009bc/4, 0x40103f00); + INSTANCE_WR(ctx, 0x000009c8/4, 0x00040000); + INSTANCE_WR(ctx, 0x00000a00/4, 0x00008100); + INSTANCE_WR(ctx, 0x00000a8c/4, 0x00000001); + INSTANCE_WR(ctx, 0x00000ad0/4, 0x00001001); + INSTANCE_WR(ctx, 0x00000adc/4, 0x00000003); + INSTANCE_WR(ctx, 0x00000ae0/4, 0x00888001); + for (i=0x00000b10; i<=0x00000b8c; i+=4) + INSTANCE_WR(ctx, i/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00000bb4/4, 0x00000005); + INSTANCE_WR(ctx, 0x00000bc0/4, 0x0000ffff); + for (i=0x00000bdc; i<=0x00000bf8; i+=4) + INSTANCE_WR(ctx, i/4, 0x00005555); + INSTANCE_WR(ctx, 0x00000bfc/4, 0x00000001); + INSTANCE_WR(ctx, 0x00000c34/4, 0x00000001); + INSTANCE_WR(ctx, 0x00000c38/4, 0x08e00001); + INSTANCE_WR(ctx, 0x00000c3c/4, 0x000e3000); + for (i=0x00003000; i<=0x00003078; i+=8) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for (i=0x00004dc0; i<=0x00006fb0; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for (i=0x00006fc0; i<=0x000073b0; i+=16) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for (i=0x00009800; i<=0x0000b9f0; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for (i=0x0000ba00; i<=0x00010430; i+=24) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for (i=0x00010440; i<=0x00010830; i+=16) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for (i=0x00012c80; i<=0x00014e70; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for (i=0x00014e80; i<=0x00015270; i+=16) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for (i=0x000176c0; i<=0x000198b0; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for (i=0x000198c0; i<=0x00019cb0; i+=16) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for (i=0x0001c100; i<=0x0001e2f0; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for (i=0x0001e300; i<=0x0001e6f0; i+=16) + INSTANCE_WR(ctx, i/4, 0x3f800000); +} + static void nv49_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) { @@ -1361,6 +1492,11 @@ nv40_graph_create_context(struct nouveau_channel *chan) ctx_size = NV46_GRCTX_SIZE; ctx_init = nv46_graph_context_init; break; + case 0x47: + DRM_INFO("NV47 warning: If your card behaves strangely, please come to the irc channel\n"); + ctx_size = NV47_GRCTX_SIZE; + ctx_init = nv47_graph_context_init; + break; case 0x49: ctx_size = NV49_GRCTX_SIZE; ctx_init = nv49_graph_context_init; @@ -1675,6 +1811,38 @@ static uint32_t nv46_ctx_voodoo[] = { 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0 }; +static uint32_t nv47_ctx_voodoo[] = { + 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, + 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409265, 0x00409606, + 0x0040a368, 0x0040198f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042, + 0x00200001, 0x0060000a, 0x00700000, 0x001040c5, 0x00401826, 0x00401968, + 0x0060000d, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080, + 0x004020e6, 0x007000a0, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d, + 0x00110158, 0x00105401, 0x0020003a, 
0x00100051, 0x001040c5, 0x0010c1c4, + 0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e, + 0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143, + 0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d12, + 0x00500060, 0x00403f87, 0x0060000d, 0x00407ce6, 0x002000f0, 0x0060000a, + 0x00200020, 0x00100620, 0x00154650, 0x00104668, 0x0017466d, 0x0011068b, + 0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6, + 0x00200022, 0x001006cc, 0x001246f0, 0x002000c0, 0x00100700, 0x0010c3d7, + 0x001043e1, 0x00500060, 0x00200268, 0x0060000a, 0x00104800, 0x00108901, + 0x00124920, 0x0020001f, 0x00100940, 0x00140965, 0x00144a00, 0x00104a19, + 0x0010ca1c, 0x00110b00, 0x00200028, 0x00100b08, 0x00134c2e, 0x0010cd00, + 0x0010cd04, 0x00120d08, 0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00, + 0x00104f06, 0x00105406, 0x00105709, 0x00200318, 0x0060000a, 0x00300000, + 0x00200680, 0x00407500, 0x00200684, 0x00800001, 0x00200b60, 0x0060000a, + 0x00209540, 0x00407b8a, 0x00201350, 0x00800041, 0x00408c00, 0x00600006, + 0x004088e6, 0x00700080, 0x0020007a, 0x0060000a, 0x00104280, 0x00200318, + 0x0060000a, 0x00200004, 0x00800001, 0x00700000, 0x00200000, 0x0060000a, + 0x00106002, 0x0040a368, 0x00700000, 0x00200000, 0x0060000a, 0x00106002, + 0x00700080, 0x00400a68, 0x00500060, 0x00600007, 0x00409688, 0x0060000f, + 0x00500060, 0x00200000, 0x0060000a, 0x00700000, 0x00106001, 0x0091a880, + 0x00901ffe, 0x10940000, 0x00200020, 0x0060000b, 0x00500069, 0x0060000c, + 0x00402168, 0x0040a506, 0x0040a605, 0x00600009, 0x00700005, 0x00700006, + 0x0060000e, ~0 +}; + //this is used for nv49 and nv4b static uint32_t nv49_4b_ctx_voodoo[] ={ 0x00400564, 0x00400505, 0x00408165, 0x00408206, 0x00409e68, 0x00200020, @@ -1835,6 +2003,7 @@ nv40_graph_init(struct drm_device *dev) case 0x43: ctx_voodoo = nv43_ctx_voodoo; break; case 0x44: ctx_voodoo = nv44_ctx_voodoo; break; case 0x46: ctx_voodoo = nv46_ctx_voodoo; break; + case 0x47: ctx_voodoo = nv47_ctx_voodoo; break; case 0x49: ctx_voodoo = nv49_4b_ctx_voodoo; break; case 0x4a: ctx_voodoo = nv4a_ctx_voodoo; break; case 0x4b: ctx_voodoo = nv49_4b_ctx_voodoo; break; From d351601899e5814d809b8e86ab6f0d6e7676f585 Mon Sep 17 00:00:00 2001 From: Maarten Maathuis Date: Thu, 4 Oct 2007 09:46:16 +0200 Subject: [PATCH 363/437] nouveau: Remove excess device classes. 
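The card types removed below were aliases of values that remain (NV_15 == NV_11 == 11, NV_25 == NV_20 == 20, NV_34 == NV_30 == 30), so code could never tell them apart at run time; that appears to be the motivation for dropping them. A small illustration of why such aliases are dead weight (not code from the tree):

/* With the alias in place, this test also matches every NV11 board,
 * so the distinction was never real. */
enum nouveau_card_type { NV_11 = 11, NV_15 = 11 /* old alias */ };

static int is_nv15(enum nouveau_card_type t)
{
	return t == NV_15;	/* identical to t == NV_11 */
}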
--- shared-core/nouveau_drm.h | 3 --- shared-core/nouveau_state.c | 6 ------ 2 files changed, 9 deletions(-) diff --git a/shared-core/nouveau_drm.h b/shared-core/nouveau_drm.h index c4f1e9a4..988d467a 100644 --- a/shared-core/nouveau_drm.h +++ b/shared-core/nouveau_drm.h @@ -123,12 +123,9 @@ enum nouveau_card_type { NV_05 =5, NV_10 =10, NV_11 =11, - NV_15 =11, NV_17 =17, NV_20 =20, - NV_25 =20, NV_30 =30, - NV_34 =30, NV_40 =40, NV_44 =44, NV_50 =50, diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c index a163ae63..dc075d0c 100644 --- a/shared-core/nouveau_state.c +++ b/shared-core/nouveau_state.c @@ -441,18 +441,12 @@ int nouveau_load(struct drm_device *dev, unsigned long flags) dev_priv->card_type = NV_44; } else if (architecture >= 0x40) { dev_priv->card_type = NV_40; - } else if (architecture >= 0x34) { - dev_priv->card_type = NV_34; } else if (architecture >= 0x30) { dev_priv->card_type = NV_30; - } else if (architecture >= 0x25) { - dev_priv->card_type = NV_25; } else if (architecture >= 0x20) { dev_priv->card_type = NV_20; } else if (architecture >= 0x17) { dev_priv->card_type = NV_17; - } else if (architecture >= 0x15) { - dev_priv->card_type = NV_15; } else if (architecture >= 0x11) { dev_priv->card_type = NV_11; } else if (architecture >= 0x10) { From 18952a167014f21545e3fda28ed2c09b09789323 Mon Sep 17 00:00:00 2001 From: Matthieu Castet Date: Sat, 6 Oct 2007 12:00:08 +0200 Subject: [PATCH 364/437] nouveau : print correct value in nouveau_graph_dump_trap_info for nv04 --- shared-core/nouveau_irq.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/shared-core/nouveau_irq.c b/shared-core/nouveau_irq.c index 45ae6edf..506e8052 100644 --- a/shared-core/nouveau_irq.c +++ b/shared-core/nouveau_irq.c @@ -303,7 +303,11 @@ nouveau_graph_dump_trap_info(struct drm_device *dev) } nsource = NV_READ(NV03_PGRAPH_NSOURCE); nstatus = NV_READ(NV03_PGRAPH_NSTATUS); - if (dev_priv->card_type < NV_50) { + if (dev_priv->card_type < NV_10) { + class = NV_READ(0x400180 + subc*4) & 0xFF; + } else if (dev_priv->card_type < NV_40) { + class = NV_READ(0x400160 + subc*4) & 0xFFF; + } else if (dev_priv->card_type < NV_50) { class = NV_READ(0x400160 + subc*4) & 0xFFFF; } else { class = NV_READ(0x400814); From 20928a2f2b3f1fa15c46edcf7e20f97566664ce8 Mon Sep 17 00:00:00 2001 From: Maarten Maathuis Date: Sun, 7 Oct 2007 19:01:56 +0200 Subject: [PATCH 365/437] nouveau: A char is signed, so it may overflow for >NV50. --- shared-core/nouveau_state.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c index dc075d0c..fd80e1e7 100644 --- a/shared-core/nouveau_state.c +++ b/shared-core/nouveau_state.c @@ -405,7 +405,7 @@ int nouveau_load(struct drm_device *dev, unsigned long flags) struct drm_nouveau_private *dev_priv; void __iomem *regs; uint32_t reg0; - char architecture = 0; + uint8_t architecture = 0; dev_priv = drm_calloc(1, sizeof(*dev_priv), DRM_MEM_DRIVER); if (!dev_priv) From 9b294bbe0ec79177298ea32746fbed03fcf62055 Mon Sep 17 00:00:00 2001 From: Stephane Marchesin Date: Wed, 10 Oct 2007 01:12:20 +0200 Subject: [PATCH 366/437] nouveau: try to fix big endian. 
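The hunks below treat a non-zero read of PMC_BOOT_1 as "the CPU is seeing byte-swapped registers": BOOT_0 is swabbed before the architecture decode, and nouveau_card_init() later writes 0x01000001 to switch the card into big-endian mode. A self-contained sketch of the un-swap step; the helper name is invented, and the kernel's ___swab32() performs the same 32-bit byte reversal:

#include <stdint.h>

/* Illustration only: undo the byte swap a big-endian host observes on
 * BOOT_0 before the card has been switched into big-endian mode. */
static uint32_t boot0_fixup(uint32_t boot0, uint32_t boot1)
{
	if (boot1 == 0)
		return boot0;		/* byte order already matches */
	return ((boot0 & 0x000000ffu) << 24) |
	       ((boot0 & 0x0000ff00u) <<  8) |
	       ((boot0 & 0x00ff0000u) >>  8) |
	       ((boot0 & 0xff000000u) >> 24);
}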
--- shared-core/nouveau_reg.h | 1 + shared-core/nouveau_state.c | 14 +++++++++++--- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/shared-core/nouveau_reg.h b/shared-core/nouveau_reg.h index 59b69547..76f5950d 100644 --- a/shared-core/nouveau_reg.h +++ b/shared-core/nouveau_reg.h @@ -57,6 +57,7 @@ # define NV50_FIFO_REGS_DMAGET(i) (NV50_FIFO_REGS(i)+0x44) #define NV03_PMC_BOOT_0 0x00000000 +#define NV03_PMC_BOOT_1 0x00000004 #define NV03_PMC_INTR_0 0x00000100 # define NV_PMC_INTR_0_PFIFO_PENDING (1<< 8) # define NV_PMC_INTR_0_PGRAPH_PENDING (1<<12) diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c index fd80e1e7..eec88fe4 100644 --- a/shared-core/nouveau_state.c +++ b/shared-core/nouveau_state.c @@ -283,6 +283,10 @@ nouveau_card_init(struct drm_device *dev) ret = nouveau_init_card_mappings(dev); if (ret) return ret; + /* Put the card in BE mode if it's not */ + if (NV_READ(NV03_PMC_BOOT_1)) + NV_WRITE(NV03_PMC_BOOT_1,0x01000001); + /* Determine exact chipset we're running on */ if (dev_priv->card_type < NV_10) dev_priv->chipset = dev_priv->card_type; @@ -404,7 +408,7 @@ int nouveau_load(struct drm_device *dev, unsigned long flags) { struct drm_nouveau_private *dev_priv; void __iomem *regs; - uint32_t reg0; + uint32_t reg0,reg1; uint8_t architecture = 0; dev_priv = drm_calloc(1, sizeof(*dev_priv), DRM_MEM_DRIVER); @@ -422,7 +426,11 @@ int nouveau_load(struct drm_device *dev, unsigned long flags) DRM_ERROR("Could not ioremap to determine register\n"); return -ENOMEM; } - reg0 = readl(regs); + + reg0 = readl(regs+NV03_PMC_BOOT_0); + reg1 = readl(regs+NV03_PMC_BOOT_1); + if (reg1) + reg0=___swab32(reg0); /* We're dealing with >=NV10 */ if ((reg0 & 0x0f000000) > 0 ) { @@ -457,7 +465,7 @@ int nouveau_load(struct drm_device *dev, unsigned long flags) dev_priv->card_type = NV_UNKNOWN; } - DRM_INFO("Detected an NV%d generation card\n", dev_priv->card_type); + DRM_INFO("Detected an NV%d generation card (0x%08x)\n", dev_priv->card_type,reg0); if (dev_priv->card_type == NV_UNKNOWN) { return -EINVAL; From d4680333dc850832258d0f38fb2a236a3f568fc8 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Wed, 10 Oct 2007 09:31:51 +0200 Subject: [PATCH 367/437] Only add native-type on EXE signals. Otherwise flush flags may get out of sync. --- linux-core/drm_fence.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c index c25ff3b8..9a29356b 100644 --- a/linux-core/drm_fence.c +++ b/linux-core/drm_fence.c @@ -90,7 +90,9 @@ void drm_fence_handler(struct drm_device * dev, uint32_t fence_class, break; } - type |= fence->native_type; + if (is_exe) + type |= fence->native_type; + relevant = type & fence->type; if ((fence->signaled | relevant) != fence->signaled) { From d912709a63c59d0b3e48458bac41fb76ea279214 Mon Sep 17 00:00:00 2001 From: Maarten Maathuis Date: Wed, 10 Oct 2007 16:41:21 +0200 Subject: [PATCH 368/437] nouveau: PMC_BOOT_1 was not mapped. 
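The previous patch started reading PMC_BOOT_1 at offset 0x4, but nouveau_load() only ioremapped 4 bytes of the register BAR, so that read landed outside the mapping; the hunk below simply grows the window to 8 bytes. Expressed against the register definitions, the size could also be written as in this sketch (illustration only):

	/* Map enough to cover the highest register read during detection. */
	regs = ioremap_nocache(pci_resource_start(dev->pdev, 0),
			       NV03_PMC_BOOT_1 + 4);	/* 0x4 + 4 == 0x8 */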
--- shared-core/nouveau_state.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c index eec88fe4..3ce9247f 100644 --- a/shared-core/nouveau_state.c +++ b/shared-core/nouveau_state.c @@ -421,7 +421,7 @@ int nouveau_load(struct drm_device *dev, unsigned long flags) DRM_DEBUG("vendor: 0x%X device: 0x%X class: 0x%X\n", dev->pci_vendor, dev->pci_device, dev->pdev->class); /* Time to determine the card architecture */ - regs = ioremap_nocache(pci_resource_start(dev->pdev, 0), 0x4); + regs = ioremap_nocache(pci_resource_start(dev->pdev, 0), 0x8); if (!regs) { DRM_ERROR("Could not ioremap to determine register\n"); return -ENOMEM; From bf126f4925bf1601935e085be2feeb004b474a05 Mon Sep 17 00:00:00 2001 From: Matthieu Castet Date: Wed, 10 Oct 2007 21:11:43 +0200 Subject: [PATCH 369/437] nouveau : nv10 and nv04 PGRAPH_NSTATUS are different --- shared-core/nouveau_irq.c | 22 +++++++++++++++++----- shared-core/nouveau_reg.h | 12 ++++++++---- 2 files changed, 25 insertions(+), 9 deletions(-) diff --git a/shared-core/nouveau_irq.c b/shared-core/nouveau_irq.c index 506e8052..dfef718b 100644 --- a/shared-core/nouveau_irq.c +++ b/shared-core/nouveau_irq.c @@ -175,10 +175,18 @@ struct nouveau_bitfield_names static struct nouveau_bitfield_names nouveau_nstatus_names[] = { - { NV03_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" }, - { NV03_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" }, - { NV03_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" }, - { NV03_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" } + { NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" }, + { NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" }, + { NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" }, + { NV04_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" } +}; + +static struct nouveau_bitfield_names nouveau_nstatus_names_nv10[] = +{ + { NV10_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" }, + { NV10_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" }, + { NV10_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" }, + { NV10_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" } }; static struct nouveau_bitfield_names nouveau_nsource_names[] = @@ -317,8 +325,12 @@ nouveau_graph_dump_trap_info(struct drm_device *dev) nouveau_print_bitfield_names(nsource, nouveau_nsource_names, ARRAY_SIZE(nouveau_nsource_names)); printk(", nStatus:"); - nouveau_print_bitfield_names(nstatus, nouveau_nstatus_names, + if (dev_priv->card_type < NV_10) + nouveau_print_bitfield_names(nstatus, nouveau_nstatus_names, ARRAY_SIZE(nouveau_nstatus_names)); + else + nouveau_print_bitfield_names(nstatus, nouveau_nstatus_names_nv10, + ARRAY_SIZE(nouveau_nstatus_names_nv10)); printk("\n"); DRM_ERROR("Channel %d/%d (class 0x%04x) - Method 0x%04x, Data 0x%08x:0x%08x\n", diff --git a/shared-core/nouveau_reg.h b/shared-core/nouveau_reg.h index 76f5950d..4dc3b7fa 100644 --- a/shared-core/nouveau_reg.h +++ b/shared-core/nouveau_reg.h @@ -119,10 +119,14 @@ #define NV10_PGRAPH_DEBUG_4 0x00400090 #define NV03_PGRAPH_INTR 0x00400100 #define NV03_PGRAPH_NSTATUS 0x00400104 -# define NV03_PGRAPH_NSTATUS_STATE_IN_USE (1<<23) -# define NV03_PGRAPH_NSTATUS_INVALID_STATE (1<<24) -# define NV03_PGRAPH_NSTATUS_BAD_ARGUMENT (1<<25) -# define NV03_PGRAPH_NSTATUS_PROTECTION_FAULT (1<<26) +# define NV04_PGRAPH_NSTATUS_STATE_IN_USE (1<<11) +# define NV04_PGRAPH_NSTATUS_INVALID_STATE (1<<12) +# define NV04_PGRAPH_NSTATUS_BAD_ARGUMENT (1<<13) +# define NV04_PGRAPH_NSTATUS_PROTECTION_FAULT (1<<14) +# define NV10_PGRAPH_NSTATUS_STATE_IN_USE 
(1<<23) +# define NV10_PGRAPH_NSTATUS_INVALID_STATE (1<<24) +# define NV10_PGRAPH_NSTATUS_BAD_ARGUMENT (1<<25) +# define NV10_PGRAPH_NSTATUS_PROTECTION_FAULT (1<<26) #define NV03_PGRAPH_NSOURCE 0x00400108 # define NV03_PGRAPH_NSOURCE_NOTIFICATION (1<< 0) # define NV03_PGRAPH_NSOURCE_DATA_ERROR (1<< 1) From 83da774b192966b8c3f00b531ecfd4ec2b5eceaa Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Wed, 10 Oct 2007 15:25:30 -0700 Subject: [PATCH 370/437] Fix command list submission on big-endian. --- linux-core/xgi_cmdlist.c | 30 +++++------------------------- linux-core/xgi_misc.c | 2 +- linux-core/xgi_pcie.c | 3 +-- 3 files changed, 7 insertions(+), 28 deletions(-) diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index c25b0e0d..69bf6465 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -45,7 +45,7 @@ static inline void dwWriteReg(struct drm_map * map, u32 addr, u32 data) DRM_INFO("mmio_map->handle = 0x%p, addr = 0x%x, data = 0x%x\n", map->handle, addr, data); #endif - DRM_WRITE32(map, addr, cpu_to_le32(data)); + DRM_WRITE32(map, addr, data); } @@ -98,25 +98,6 @@ int xgi_submit_cmdlist(struct drm_device * dev, void * data, const struct xgi_cmd_info *const pCmdInfo = (struct xgi_cmd_info *) data; const unsigned int cmd = get_batch_command(pCmdInfo->type); -#ifdef __BIG_ENDIAN - const u32 *const ptr = xgi_find_pcie_virt(info, pCmdInfo->hw_addr); - unsigned i; - unsigned j; - - xgi_waitfor_pci_idle(info); - for (j = 4; j < pCmdInfo->size; j += 4) { - u32 reg = ptr[j]; - - for (i = 1; i < 4; i++) { - if ((reg & 1) != 0) { - const unsigned r = 0x2100 | (reg & 0x0fe); - DRM_WRITE32(info->mmio_map, r, ptr[j + i]); - } - - reg >>= 8; - } - } -#else u32 begin[4]; @@ -167,7 +148,6 @@ int xgi_submit_cmdlist(struct drm_device * dev, void * data, } info->cmdring.last_ptr = xgi_find_pcie_virt(info, pCmdInfo->hw_addr); -#endif drm_fence_flush_old(info->dev, 0, info->next_sequence); return 0; } @@ -323,13 +303,13 @@ void xgi_emit_flush(struct xgi_info * info, bool stop) */ void xgi_emit_nop(struct xgi_info * info) { - info->cmdring.last_ptr[1] = BEGIN_LINK_ENABLE_MASK - | (BEGIN_BEGIN_IDENTIFICATION_MASK & info->next_sequence); + info->cmdring.last_ptr[1] = cpu_to_le32(BEGIN_LINK_ENABLE_MASK + | (BEGIN_BEGIN_IDENTIFICATION_MASK & info->next_sequence)); info->cmdring.last_ptr[2] = 0; info->cmdring.last_ptr[3] = 0; DRM_WRITEMEMORYBARRIER(); - info->cmdring.last_ptr[0] = (get_batch_command(BTYPE_CTRL) << 24) - | (BEGIN_VALID_MASK); + info->cmdring.last_ptr[0] = cpu_to_le32((get_batch_command(BTYPE_CTRL) << 24) + | (BEGIN_VALID_MASK)); triggerHWCommandList(info); diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index f39b3bb5..4a4a9844 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -326,7 +326,7 @@ void xgi_waitfor_pci_idle(struct xgi_info * info) unsigned int same_count = 0; while (idleCount < 5) { - const u32 status = le32_to_cpu(DRM_READ32(info->mmio_map, WHOLD_GE_STATUS)) + const u32 status = DRM_READ32(info->mmio_map, WHOLD_GE_STATUS) & IDLE_MASK; if (status == old_status) { diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index 4becf35b..a7d3ea24 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -40,8 +40,7 @@ void xgi_gart_flush(struct drm_device *dev) DRM_WRITE8(info->mmio_map, 0xB00C, temp & ~0x02); /* Set GART base address to HW */ - DRM_WRITE32(info->mmio_map, 0xB034, - cpu_to_le32(info->gart_info.bus_addr)); + DRM_WRITE32(info->mmio_map, 0xB034, info->gart_info.bus_addr); /* Flush GART table. 
*/ DRM_WRITE8(info->mmio_map, 0xB03F, 0x40); From fc7d4d19d36b6a12ed23d4d9e50826346258299f Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Wed, 10 Oct 2007 15:27:07 -0700 Subject: [PATCH 371/437] Eliminate trailing whitespace from last commit. --- linux-core/xgi_cmdlist.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index 69bf6465..d7b23c89 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -303,12 +303,12 @@ void xgi_emit_flush(struct xgi_info * info, bool stop) */ void xgi_emit_nop(struct xgi_info * info) { - info->cmdring.last_ptr[1] = cpu_to_le32(BEGIN_LINK_ENABLE_MASK + info->cmdring.last_ptr[1] = cpu_to_le32(BEGIN_LINK_ENABLE_MASK | (BEGIN_BEGIN_IDENTIFICATION_MASK & info->next_sequence)); info->cmdring.last_ptr[2] = 0; info->cmdring.last_ptr[3] = 0; DRM_WRITEMEMORYBARRIER(); - info->cmdring.last_ptr[0] = cpu_to_le32((get_batch_command(BTYPE_CTRL) << 24) + info->cmdring.last_ptr[0] = cpu_to_le32((get_batch_command(BTYPE_CTRL) << 24) | (BEGIN_VALID_MASK)); triggerHWCommandList(info); From f0fd53f86b30e230f3f34b49b54392d20f053a89 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Wed, 10 Oct 2007 15:31:00 -0700 Subject: [PATCH 372/437] FreeBSD: Fill in domain field when supported. --- bsd-core/drm_drv.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/bsd-core/drm_drv.c b/bsd-core/drm_drv.c index afd90351..c36b78aa 100644 --- a/bsd-core/drm_drv.c +++ b/bsd-core/drm_drv.c @@ -516,8 +516,11 @@ static int drm_load(drm_device_t *dev) DRM_DEBUG( "\n" ); dev->irq = pci_get_irq(dev->device); - /* XXX Fix domain number (alpha hoses) */ +#if defined(__FreeBSD__) && __FreeBSD_version >= 700053 + dev->pci_domain = pci_get_domain(dev->device); +#else dev->pci_domain = 0; +#endif dev->pci_bus = pci_get_bus(dev->device); dev->pci_slot = pci_get_slot(dev->device); dev->pci_func = pci_get_function(dev->device); From 604f02ff619d87d1372bcb7969c826d981fefc60 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Fri, 12 Oct 2007 09:46:11 +1000 Subject: [PATCH 373/437] i915: check mask instead of flags for buffer fence types --- linux-core/i915_buffer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c index 75763e71..f3ba7ce5 100644 --- a/linux-core/i915_buffer.c +++ b/linux-core/i915_buffer.c @@ -42,7 +42,7 @@ int i915_fence_types(struct drm_buffer_object *bo, uint32_t * fclass, uint32_t * type) { - if (bo->mem.flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE)) + if (bo->mem.mask & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE)) *type = 3; else *type = 1; From 74001c34e5ad768feec8b2fbe9a617bc598a0a4b Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Fri, 12 Oct 2007 10:54:38 +1000 Subject: [PATCH 374/437] i915: add superioctl support to i915 This adds the initial i915 superioctl interface. The interface should be sufficent even if the implementation may needs fixes/optimisations internally in the drm wrt caching etc. 
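For orientation, the relocation stream consumed by the new ioctl is laid out as documented in the i915_drm.h hunk further down: a four-dword header (relocation type in the high 16 bits of dword 0, reloc count in the low 16, then the handle of the next relocation buffer, then two spare dwords), followed by type-0 records of four dwords each. Below is a userspace-side sketch of filling such a buffer; the helper names and the plain uint32_t array standing in for a mapped buffer object are assumptions made for illustration:

#include <stdint.h>

/* Illustration only: build one type-0 relocation list as described by the
 * I915_RELOC_* comments added to i915_drm.h below. */
static void init_reloc_header(uint32_t *r, uint16_t count, uint32_t next_handle)
{
	r[0] = (0u << 16) | count;	/* I915_RELOC_TYPE_0 | number of relocs */
	r[1] = next_handle;		/* 0 terminates the chain of reloc buffers */
	r[2] = 0;			/* spare */
	r[3] = 0;			/* spare */
}

static void emit_reloc0(uint32_t *r, unsigned int idx, uint32_t offset,
			uint32_t delta, uint32_t target_index)
{
	uint32_t *rec = r + 4 /* I915_RELOC_HEADER */ + idx * 4 /* stride */;

	rec[0] = offset;	/* byte offset of the dword to patch in the buffer being relocated */
	rec[1] = delta;		/* added to the target buffer's final offset */
	rec[2] = target_index;	/* index into the validated buffer list */
	rec[3] = 0;		/* reserved */
}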
--- shared-core/i915_dma.c | 344 +++++++++++++++++++++++++++++++++++++++++ shared-core/i915_drm.h | 39 ++++- shared-core/i915_drv.h | 10 +- 3 files changed, 391 insertions(+), 2 deletions(-) diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c index 3a9ecab2..7209a8de 100644 --- a/shared-core/i915_dma.c +++ b/shared-core/i915_dma.c @@ -147,6 +147,10 @@ static int i915_initialize(struct drm_device * dev, return -EINVAL; } +#ifdef I915_HAVE_BUFFER + dev_priv->max_validate_buffers = I915_MAX_VALIDATE_BUFFERS; +#endif + dev_priv->sarea_priv = (drm_i915_sarea_t *) ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset); @@ -725,6 +729,343 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data, return 0; } +#ifdef I915_HAVE_BUFFER +struct i915_relocatee_info { + struct drm_buffer_object *buf; + unsigned long offset; + u32 *data_page; + unsigned page_offset; + struct drm_bo_kmap_obj kmap; + int is_iomem; +}; + +static void i915_dereference_buffers_locked(struct drm_buffer_object **buffers, + unsigned num_buffers) +{ + while (num_buffers--) + drm_bo_usage_deref_locked(&buffers[num_buffers]); +} + +int i915_apply_reloc(struct drm_file *file_priv, int num_buffers, + struct drm_buffer_object **buffers, + struct i915_relocatee_info *relocatee, + uint32_t *reloc) +{ + unsigned index; + unsigned long new_cmd_offset; + u32 val; + int ret; + + if (reloc[2] >= num_buffers) { + DRM_ERROR("Illegal relocation buffer %08X\n", reloc[2]); + return -EINVAL; + } + + new_cmd_offset = reloc[0]; + if (!relocatee->data_page || + !drm_bo_same_page(relocatee->offset, new_cmd_offset)) { + drm_bo_kunmap(&relocatee->kmap); + relocatee->offset = new_cmd_offset; + ret = drm_bo_kmap(relocatee->buf, new_cmd_offset >> PAGE_SHIFT, + 1, &relocatee->kmap); + if (ret) { + DRM_ERROR("Could not map command buffer to apply relocs\n %08lx", new_cmd_offset); + return ret; + } + + relocatee->data_page = drm_bmo_virtual(&relocatee->kmap, + &relocatee->is_iomem); + relocatee->page_offset = (relocatee->offset & PAGE_MASK); + } + + val = buffers[reloc[2]]->offset; + index = (reloc[0] - relocatee->page_offset) >> 2; + + /* add in validate */ + val = val + reloc[1]; + + relocatee->data_page[index] = val; + return 0; +} + +int i915_process_relocs(struct drm_file *file_priv, + uint32_t buf_handle, + uint32_t *reloc_buf_handle, + struct i915_relocatee_info *relocatee, + struct drm_buffer_object **buffers, + uint32_t num_buffers) +{ + struct drm_device *dev = file_priv->head->dev; + struct drm_buffer_object *reloc_list_object; + uint32_t cur_handle = *reloc_buf_handle; + uint32_t *reloc_page; + int ret, reloc_is_iomem, reloc_stride; + uint32_t num_relocs, reloc_offset, reloc_end, reloc_page_offset, next_offset, cur_offset; + struct drm_bo_kmap_obj reloc_kmap; + + memset(&reloc_kmap, 0, sizeof(reloc_kmap)); + + reloc_list_object = drm_lookup_buffer_object(file_priv, cur_handle, 1); + if (!reloc_list_object) + return -EINVAL; + + ret = drm_bo_kmap(reloc_list_object, 0, 1, &reloc_kmap); + if (ret) { + DRM_ERROR("Could not map relocation buffer.\n"); + goto out; + } + + reloc_page = drm_bmo_virtual(&reloc_kmap, &reloc_is_iomem); + num_relocs = reloc_page[0] & 0xffff; + + if ((reloc_page[0] >> 16) & 0xffff) { + DRM_ERROR("Unsupported relocation type requested\n"); + goto out; + } + + /* get next relocate buffer handle */ + *reloc_buf_handle = reloc_page[1]; + reloc_stride = I915_RELOC0_STRIDE * sizeof(uint32_t); /* may be different for other types of relocs */ + + DRM_DEBUG("num relocs is %d, next is %08X\n", num_relocs, 
reloc_page[1]); + + reloc_page_offset = 0; + reloc_offset = I915_RELOC_HEADER * sizeof(uint32_t); + reloc_end = reloc_offset + (num_relocs * reloc_stride); + + do { + next_offset = drm_bo_offset_end(reloc_offset, reloc_end); + + do { + cur_offset = ((reloc_offset + reloc_page_offset) & ~PAGE_MASK) / sizeof(uint32_t); + ret = i915_apply_reloc(file_priv, num_buffers, + buffers, relocatee, &reloc_page[cur_offset]); + if (ret) + goto out; + + reloc_offset += reloc_stride; + } while (reloc_offset < next_offset); + + drm_bo_kunmap(&reloc_kmap); + + reloc_offset = next_offset; + if (reloc_offset != reloc_end) { + ret = drm_bo_kmap(reloc_list_object, reloc_offset >> PAGE_SHIFT, 1, &reloc_kmap); + if (ret) { + DRM_ERROR("Could not map relocation buffer.\n"); + goto out; + } + + reloc_page = drm_bmo_virtual(&reloc_kmap, &reloc_is_iomem); + reloc_page_offset = reloc_offset & ~PAGE_MASK; + } + + } while (reloc_offset != reloc_end); +out: + drm_bo_kunmap(&reloc_kmap); + + mutex_lock(&dev->struct_mutex); + drm_bo_usage_deref_locked(&reloc_list_object); + mutex_unlock(&dev->struct_mutex); + + return ret; +} + +/* + * Validate, add fence and relocate a block of bos from a userspace list + */ +int i915_validate_buffer_list(struct drm_file *file_priv, + unsigned int fence_class, uint64_t data, + struct drm_buffer_object **buffers, + uint32_t *num_buffers) +{ + struct drm_i915_op_arg arg; + struct drm_bo_op_req *req = &arg.d.req; + struct drm_bo_arg_rep rep; + unsigned long next = 0; + int ret = 0; + unsigned buf_count = 0; + struct drm_device *dev = file_priv->head->dev; + uint32_t buf_reloc_handle, buf_handle; + struct i915_relocatee_info relocatee; + + do { + if (buf_count >= *num_buffers) { + DRM_ERROR("Buffer count exceeded %d\n.", *num_buffers); + ret = -EINVAL; + goto out_err; + } + + buffers[buf_count] = NULL; + + if (copy_from_user(&arg, (void __user *)(unsigned)data, sizeof(arg))) { + ret = -EFAULT; + goto out_err; + } + + if (arg.handled) { + data = arg.next; + buffers[buf_count] = drm_lookup_buffer_object(file_priv, req->arg_handle, 1); + buf_count++; + continue; + } + + rep.ret = 0; + if (req->op != drm_bo_validate) { + DRM_ERROR + ("Buffer object operation wasn't \"validate\".\n"); + rep.ret = -EINVAL; + goto out_err; + } + + buf_handle = req->bo_req.handle; + buf_reloc_handle = arg.reloc_handle; + + rep.ret = drm_bo_handle_validate(file_priv, req->bo_req.handle, + req->bo_req.fence_class, + req->bo_req.flags, + req->bo_req.mask, + req->bo_req.hint, + &rep.bo_info, + &buffers[buf_count]); + + if (rep.ret) { + DRM_ERROR("error on handle validate %d\n", rep.ret); + goto out_err; + } + + + next = arg.next; + arg.handled = 1; + arg.d.rep = rep; + + if (copy_to_user((void __user *)(unsigned)data, &arg, sizeof(arg))) + return -EFAULT; + + data = next; + buf_count++; + + if (buf_reloc_handle) { + memset(&relocatee, 0, sizeof(relocatee)); + + relocatee.buf = drm_lookup_buffer_object(file_priv, buf_handle, 1); + if (!relocatee.buf) { + DRM_DEBUG("relocatee buffer invalid %08x\n", buf_handle); + ret = -EINVAL; + goto out_err; + } + + while (buf_reloc_handle) { + ret = i915_process_relocs(file_priv, buf_handle, &buf_reloc_handle, &relocatee, buffers, buf_count); + if (ret) { + DRM_ERROR("process relocs failed\n"); + break; + } + } + + drm_bo_kunmap(&relocatee.kmap); + mutex_lock(&dev->struct_mutex); + drm_bo_usage_deref_locked(&relocatee.buf); + mutex_unlock(&dev->struct_mutex); + + if (ret) + goto out_err; + + } + } while (next != 0); + *num_buffers = buf_count; + return 0; +out_err: + 
mutex_lock(&dev->struct_mutex); + i915_dereference_buffers_locked(buffers, buf_count); + mutex_unlock(&dev->struct_mutex); + *num_buffers = 0; + return (ret) ? ret : rep.ret; +} + +static int i915_execbuffer(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) + dev_priv->sarea_priv; + struct drm_i915_execbuffer *exec_buf = data; + struct _drm_i915_batchbuffer *batch = &exec_buf->batch; + struct drm_fence_arg *fence_arg = &exec_buf->fence_arg; + int num_buffers; + int ret; + struct drm_buffer_object **buffers; + struct drm_fence_object *fence; + + if (!dev_priv->allow_batchbuffer) { + DRM_ERROR("Batchbuffer ioctl disabled\n"); + return -EINVAL; + } + + + LOCK_TEST_WITH_RETURN(dev, file_priv); + + if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects, + batch->num_cliprects * + sizeof(struct drm_clip_rect))) + return -EFAULT; + + if (exec_buf->num_buffers > dev_priv->max_validate_buffers) + return -EINVAL; + + num_buffers = exec_buf->num_buffers; + + buffers = drm_calloc(num_buffers, sizeof(struct drm_buffer_object *), DRM_MEM_DRIVER); + if (!buffers) + return -ENOMEM; + + /* validate buffer list + fixup relocations */ + ret = i915_validate_buffer_list(file_priv, 0, exec_buf->ops_list, + buffers, &num_buffers); + if (ret) + goto out_free; + + /* submit buffer */ + batch->start = buffers[num_buffers-1]->offset; + + DRM_DEBUG("i915 exec batchbuffer, start %x used %d cliprects %d\n", + batch->start, batch->used, batch->num_cliprects); + + ret = i915_dispatch_batchbuffer(dev, batch); + if (ret) + goto out_err0; + + sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); + + /* fence */ + ret = drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence); + if (ret) + goto out_err0; + + if (!(fence_arg->flags & DRM_FENCE_FLAG_NO_USER)) { + ret = drm_fence_add_user_object(file_priv, fence, fence_arg->flags & DRM_FENCE_FLAG_SHAREABLE); + if (!ret) { + fence_arg->handle = fence->base.hash.key; + fence_arg->fence_class = fence->fence_class; + fence_arg->type = fence->type; + fence_arg->signaled = fence->signaled; + } + } + drm_fence_usage_deref_unlocked(&fence); +out_err0: + + /* handle errors */ + mutex_lock(&dev->struct_mutex); + i915_dereference_buffers_locked(buffers, num_buffers); + mutex_unlock(&dev->struct_mutex); + +out_free: + drm_free(buffers, (exec_buf->num_buffers * sizeof(struct drm_buffer_object *)), DRM_MEM_DRIVER); + + return ret; +} +#endif + static int i915_do_cleanup_pageflip(struct drm_device * dev) { drm_i915_private_t *dev_priv = dev->dev_private; @@ -977,6 +1318,9 @@ struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH), DRM_IOCTL_DEF(DRM_I915_MMIO, i915_mmio, DRM_AUTH), DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH), +#ifdef I915_HAVE_BUFFER + DRM_IOCTL_DEF(DRM_I915_EXECBUFFER, i915_execbuffer, DRM_AUTH), +#endif }; int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); diff --git a/shared-core/i915_drm.h b/shared-core/i915_drm.h index 9976804e..a6c3cf30 100644 --- a/shared-core/i915_drm.h +++ b/shared-core/i915_drm.h @@ -160,6 +160,7 @@ typedef struct _drm_i915_sarea { #define DRM_I915_VBLANK_SWAP 0x0f #define DRM_I915_MMIO 0x10 #define DRM_I915_HWS_ADDR 0x11 +#define DRM_I915_EXECBUFFER 0x12 #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) @@ -177,7 +178,7 @@ 
typedef struct _drm_i915_sarea { #define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t) #define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t) #define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t) - +#define DRM_IOCTL_I915_EXECBUFFER DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_EXECBUFFER, struct drm_i915_execbuffer) /* Asynchronous page flipping: */ @@ -325,4 +326,40 @@ typedef struct drm_i915_hws_addr { uint64_t addr; } drm_i915_hws_addr_t; +/* + * Relocation header is 4 uint32_ts + * 0 - (16-bit relocation type << 16)| 16 bit reloc count + * 1 - buffer handle for another list of relocs + * 2-3 - spare. + */ +#define I915_RELOC_HEADER 4 + +/* + * type 0 relocation has 4-uint32_t stride + * 0 - offset into buffer + * 1 - delta to add in + * 2 - index into buffer list + * 3 - reserved (for optimisations later). + */ +#define I915_RELOC_TYPE_0 0 +#define I915_RELOC0_STRIDE 4 + +struct drm_i915_op_arg { + uint64_t next; + uint32_t reloc_handle; + int handled; + union { + struct drm_bo_op_req req; + struct drm_bo_arg_rep rep; + } d; + +}; + +struct drm_i915_execbuffer { + uint64_t ops_list; + uint32_t num_buffers; + struct _drm_i915_batchbuffer batch; + struct drm_fence_arg fence_arg; +}; + #endif /* _I915_DRM_H_ */ diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h index 3b26040f..e8f18798 100644 --- a/shared-core/i915_drv.h +++ b/shared-core/i915_drv.h @@ -56,15 +56,20 @@ * 1.8: New ioctl for ARB_Occlusion_Query * 1.9: Usable page flipping and triple buffering * 1.10: Plane/pipe disentangling + * 1.11: TTM superioctl */ #define DRIVER_MAJOR 1 #if defined(I915_HAVE_FENCE) && defined(I915_HAVE_BUFFER) -#define DRIVER_MINOR 10 +#define DRIVER_MINOR 11 #else #define DRIVER_MINOR 6 #endif #define DRIVER_PATCHLEVEL 0 +#ifdef I915_HAVE_BUFFER +#define I915_MAX_VALIDATE_BUFFERS 4096 +#endif + typedef struct _drm_i915_ring_buffer { int tail_mask; unsigned long Start; @@ -133,10 +138,13 @@ typedef struct drm_i915_private { #endif #ifdef I915_HAVE_BUFFER void *agp_iomap; + unsigned int max_validate_buffers; #endif + DRM_SPINTYPE swaps_lock; drm_i915_vbl_swap_t vbl_swaps; unsigned int swaps_pending; + } drm_i915_private_t; enum intel_chip_family { From 74ea019863c1d08d31eac81d3bfc73e97479b2c5 Mon Sep 17 00:00:00 2001 From: Arthur Huillet Date: Fri, 12 Oct 2007 22:35:39 +0200 Subject: [PATCH 375/437] nouveau: added support for software methods, and implemented those necessary for NV04 (TNT1) to start X --- shared-core/nouveau_swmthd.c | 193 +++++++++++++++++++++++++++++++++++ shared-core/nouveau_swmthd.h | 34 ++++++ 2 files changed, 227 insertions(+) create mode 100644 shared-core/nouveau_swmthd.c create mode 100644 shared-core/nouveau_swmthd.h diff --git a/shared-core/nouveau_swmthd.c b/shared-core/nouveau_swmthd.c new file mode 100644 index 00000000..66ef6233 --- /dev/null +++ b/shared-core/nouveau_swmthd.c @@ -0,0 +1,193 @@ +/* + * Copyright (C) 2007 Arthur Huillet. + * + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +/* + * Authors: + * Arthur Huillet + */ + +#include "drmP.h" +#include "drm.h" +#include "nouveau_drm.h" +#include "nouveau_drv.h" +#include "nouveau_reg.h" + +/*TODO: add a "card_type" attribute*/ +typedef struct{ + uint32_t oclass; /* object class for this software method */ + uint32_t mthd; /* method number */ + void (*method_code)(struct drm_device *dev, uint32_t oclass, uint32_t mthd); /* pointer to the function that does the work */ + } nouveau_software_method_t; + + + /* This function handles the NV04 setcontext software methods. +One function for all because they are very similar.*/ +static void nouveau_NV04_setcontext_sw_method(struct drm_device *dev, uint32_t oclass, uint32_t mthd) { + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t inst_loc = NV_READ(NV04_PGRAPH_CTX_SWITCH4) & 0xFFFF; + uint32_t value_to_set = 0, bit_to_set = 0; + + switch ( oclass ) { + case 0x4a: + switch ( mthd ) { + case 0x188 : + case 0x18c : + bit_to_set = 0; + break; + case 0x198 : + bit_to_set = 1 << 24; /*PATCH_STATUS_VALID*/ + break; + case 0x2fc : + bit_to_set = NV_READ(NV04_PGRAPH_TRAPPED_DATA) << 15; /*PATCH_CONFIG = NV04_PGRAPH_TRAPPED_DATA*/ + break; + default : ; + }; + break; + case 0x5c: + switch ( mthd ) { + case 0x184: + bit_to_set = 1 << 13; /*USER_CLIP_ENABLE*/ + break; + case 0x188: + case 0x18c: + bit_to_set = 0; + break; + case 0x198: + bit_to_set = 1 << 24; /*PATCH_STATUS_VALID*/ + break; + case 0x2fc : + bit_to_set = NV_READ(NV04_PGRAPH_TRAPPED_DATA) << 15; /*PATCH_CONFIG = NV04_PGRAPH_TRAPPED_DATA*/ + break; + }; + break; + case 0x5f: + switch ( mthd ) { + case 0x184 : + bit_to_set = 1 << 12; /*CHROMA_KEY_ENABLE*/ + break; + case 0x188 : + bit_to_set = 1 << 13; /*USER_CLIP_ENABLE*/ + break; + case 0x18c : + case 0x190 : + bit_to_set = 0; + break; + case 0x19c : + bit_to_set = 1 << 24; /*PATCH_STATUS_VALID*/ + break; + case 0x2fc : + bit_to_set = NV_READ(NV04_PGRAPH_TRAPPED_DATA) << 15; /*PATCH_CONFIG = NV04_PGRAPH_TRAPPED_DATA*/ + break; + }; + break; + case 0x61: + switch ( mthd ) { + case 0x188 : + bit_to_set = 1 << 13; /*USER_CLIP_ENABLE*/ + break; + case 0x18c : + case 0x190 : + bit_to_set = 0; + break; + case 0x19c : + bit_to_set = 1 << 24; /*PATCH_STATUS_VALID*/ + break; + case 0x2fc : + bit_to_set = NV_READ(NV04_PGRAPH_TRAPPED_DATA) << 15; /*PATCH_CONFIG = NV04_PGRAPH_TRAPPED_DATA*/ + break; + }; + break; + case 0x77: + 
switch ( mthd ) { + case 0x198 : + bit_to_set = 1 << 24; /*PATCH_STATUS_VALID*/ + break; + case 0x304 : + bit_to_set = NV_READ(NV04_PGRAPH_TRAPPED_DATA) << 15; //PATCH_CONFIG + break; + }; + break; + default :; + }; + + value_to_set = (NV_READ(0x00700000 | inst_loc << 4))| bit_to_set; + + /*RAMIN*/ + nouveau_wait_for_idle(dev); + NV_WRITE(0x00700000 | inst_loc << 4, value_to_set); + + /*DRM_DEBUG("CTX_SWITCH1 value is %#x\n", NV_READ(NV04_PGRAPH_CTX_SWITCH1));*/ + NV_WRITE(NV04_PGRAPH_CTX_SWITCH1, value_to_set); + + /*DRM_DEBUG("CTX_CACHE1 + xxx value is %#x\n", NV_READ(NV04_PGRAPH_CTX_CACHE1 + (((NV_READ(NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7) << 2)));*/ + NV_WRITE(NV04_PGRAPH_CTX_CACHE1 + (((NV_READ(NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7) << 2), value_to_set); +} + + nouveau_software_method_t nouveau_sw_methods[] = { + /*NV04 context software methods*/ + { 0x4a, 0x188, nouveau_NV04_setcontext_sw_method }, + { 0x4a, 0x18c, nouveau_NV04_setcontext_sw_method }, + { 0x4a, 0x198, nouveau_NV04_setcontext_sw_method }, + { 0x4a, 0x2fc, nouveau_NV04_setcontext_sw_method }, + { 0x5c, 0x184, nouveau_NV04_setcontext_sw_method }, + { 0x5c, 0x188, nouveau_NV04_setcontext_sw_method }, + { 0x5c, 0x18c, nouveau_NV04_setcontext_sw_method }, + { 0x5c, 0x198, nouveau_NV04_setcontext_sw_method }, + { 0x5c, 0x2fc, nouveau_NV04_setcontext_sw_method }, + { 0x5f, 0x184, nouveau_NV04_setcontext_sw_method }, + { 0x5f, 0x188, nouveau_NV04_setcontext_sw_method }, + { 0x5f, 0x18c, nouveau_NV04_setcontext_sw_method }, + { 0x5f, 0x190, nouveau_NV04_setcontext_sw_method }, + { 0x5f, 0x19c, nouveau_NV04_setcontext_sw_method }, + { 0x5f, 0x2fc, nouveau_NV04_setcontext_sw_method }, + { 0x61, 0x188, nouveau_NV04_setcontext_sw_method }, + { 0x61, 0x18c, nouveau_NV04_setcontext_sw_method }, + { 0x61, 0x190, nouveau_NV04_setcontext_sw_method }, + { 0x61, 0x19c, nouveau_NV04_setcontext_sw_method }, + { 0x61, 0x2fc, nouveau_NV04_setcontext_sw_method }, + { 0x77, 0x198, nouveau_NV04_setcontext_sw_method }, + { 0x77, 0x304, nouveau_NV04_setcontext_sw_method }, + /*terminator*/ + { 0x0, 0x0, NULL, }, + }; + + int nouveau_sw_method_execute(struct drm_device *dev, uint32_t oclass, uint32_t method) { + int i = 0; + while ( nouveau_sw_methods[ i ] . method_code != NULL ) + { + if ( nouveau_sw_methods[ i ] . oclass == oclass && nouveau_sw_methods[ i ] . mthd == method ) + { + nouveau_sw_methods[ i ] . method_code(dev, oclass, method); + return 0; + } + i ++; + } + + return 1; + } + + diff --git a/shared-core/nouveau_swmthd.h b/shared-core/nouveau_swmthd.h new file mode 100644 index 00000000..df8c7400 --- /dev/null +++ b/shared-core/nouveau_swmthd.h @@ -0,0 +1,34 @@ +/* + * Copyright (C) 2007 Arthur Huillet. + * + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +/* + * Authors: + * Arthur Huillet + */ + +int nouveau_sw_method_execute(struct drm_device *dev, uint32_t oclass, uint32_t method); /* execute the given software method, returns 0 on success */ + From 9d779e2c88a02f5f9d57618145654610f0f10e28 Mon Sep 17 00:00:00 2001 From: Arthur Huillet Date: Fri, 12 Oct 2007 22:39:58 +0200 Subject: [PATCH 376/437] nouveau: mandatory "oops I forgot half of the files" commit --- linux-core/Makefile.kernel | 2 +- linux-core/nouveau_swmthd.c | 1 + linux-core/nouveau_swmthd.h | 1 + shared-core/nouveau_irq.c | 21 +++++++++++++++------ shared-core/nv04_graph.c | 25 +++++++++++++++---------- 5 files changed, 33 insertions(+), 17 deletions(-) create mode 120000 linux-core/nouveau_swmthd.c create mode 120000 linux-core/nouveau_swmthd.h diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel index 0eb10783..715454bc 100644 --- a/linux-core/Makefile.kernel +++ b/linux-core/Makefile.kernel @@ -21,7 +21,7 @@ i810-objs := i810_drv.o i810_dma.o i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \ i915_buffer.o nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \ - nouveau_object.o nouveau_irq.o nouveau_notifier.o \ + nouveau_object.o nouveau_irq.o nouveau_notifier.o nouveau_swmthd.o \ nouveau_sgdma.o nouveau_dma.o \ nv04_timer.o \ nv04_mc.o nv40_mc.o nv50_mc.o \ diff --git a/linux-core/nouveau_swmthd.c b/linux-core/nouveau_swmthd.c new file mode 120000 index 00000000..c5390801 --- /dev/null +++ b/linux-core/nouveau_swmthd.c @@ -0,0 +1 @@ +../shared-core/nouveau_swmthd.c \ No newline at end of file diff --git a/linux-core/nouveau_swmthd.h b/linux-core/nouveau_swmthd.h new file mode 120000 index 00000000..33425dcd --- /dev/null +++ b/linux-core/nouveau_swmthd.h @@ -0,0 +1 @@ +../shared-core/nouveau_swmthd.h \ No newline at end of file diff --git a/shared-core/nouveau_irq.c b/shared-core/nouveau_irq.c index dfef718b..7ba45700 100644 --- a/shared-core/nouveau_irq.c +++ b/shared-core/nouveau_irq.c @@ -35,6 +35,7 @@ #include "nouveau_drm.h" #include "nouveau_drv.h" #include "nouveau_reg.h" +#include "nouveau_swmthd.h" void nouveau_irq_preinstall(struct drm_device *dev) { @@ -340,20 +341,27 @@ nouveau_graph_dump_trap_info(struct drm_device *dev) static void nouveau_pgraph_irq_handler(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - uint32_t status, nsource; + uint32_t status, nsource, method; + uint32_t obj_class; - status = NV_READ(NV03_PGRAPH_INTR); - if (!status) - return; + while ( (status = NV_READ(NV03_PGRAPH_INTR)) ) { nsource = NV_READ(NV03_PGRAPH_NSOURCE); if (status & NV_PGRAPH_INTR_NOTIFY) { DRM_DEBUG("PGRAPH notify interrupt\n"); - nouveau_graph_dump_trap_info(dev); + if ( dev_priv->card_type == NV_04 && (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD ) ) { + /* NV4 (nvidia TNT 1) reports software methods with PGRAPH NOTIFY ILLEGAL_MTHD*/ + method = NV_READ(NV04_PGRAPH_TRAPPED_ADDR) & 0x1FFC; + obj_class = NV_READ(NV04_PGRAPH_CTX_SWITCH1) & 0xFFF; + DRM_DEBUG("Got NV04 
software method method %x for class %#x\n", method, obj_class); + if ( nouveau_sw_method_execute(dev, obj_class, method) ) + DRM_ERROR("Unable to execute NV04 software method %x for object class %x. Please report.\n", method, obj_class); + } status &= ~NV_PGRAPH_INTR_NOTIFY; NV_WRITE(NV03_PGRAPH_INTR, NV_PGRAPH_INTR_NOTIFY); + NV_WRITE(NV04_PGRAPH_FIFO, 1); } if (status & NV_PGRAPH_INTR_ERROR) { @@ -392,8 +400,9 @@ static void nouveau_pgraph_irq_handler(struct drm_device *dev) DRM_ERROR("Unhandled PGRAPH interrupt: STAT=0x%08x\n", status); NV_WRITE(NV03_PGRAPH_INTR, status); } +NV_WRITE(NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING); + } - NV_WRITE(NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING); } static void nouveau_crtc_irq_handler(struct drm_device *dev, int crtc) diff --git a/shared-core/nv04_graph.c b/shared-core/nv04_graph.c index 33dd0a86..cffa3e4a 100644 --- a/shared-core/nv04_graph.c +++ b/shared-core/nv04_graph.c @@ -482,16 +482,22 @@ int nv04_graph_init(struct drm_device *dev) { NV_WRITE(NV03_PGRAPH_INTR, 0xFFFFFFFF); NV_WRITE(NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); - NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x000001FF); + NV_WRITE(NV04_PGRAPH_VALID1, 0); + NV_WRITE(NV04_PGRAPH_VALID2, 0); + /*NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x000001FF); + NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x001FFFFF);*/ NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x1231c000); - NV_WRITE(NV04_PGRAPH_DEBUG_1, 0xf2d91100); - NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x11d5f870); - NV_WRITE(NV04_PGRAPH_DEBUG_3, 0x0004FF31); - NV_WRITE(NV04_PGRAPH_DEBUG_3, 0x4004FF31 | - (0x00D00000) | - (1<<29) | - (1<<31)); - NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xfad4ff31); + /*1231C000 blob, 001 haiku*/ + //*V_WRITE(NV04_PGRAPH_DEBUG_1, 0xf2d91100);*/ + NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x72111100); + /*0x72111100 blob , 01 haiku*/ + /*NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x11d5f870);*/ + NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x11d5f071); + /*haiku same*/ + + /*NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xfad4ff31);*/ + NV_WRITE(NV04_PGRAPH_DEBUG_3, 0x10d4ff31); + /*haiku and blob 10d4*/ NV_WRITE(NV04_PGRAPH_STATE , 0xFFFFFFFF); NV_WRITE(NV04_PGRAPH_CTX_CONTROL , 0x10010100); @@ -507,4 +513,3 @@ int nv04_graph_init(struct drm_device *dev) { void nv04_graph_takedown(struct drm_device *dev) { } - From 0d2554f83e72cae1bc44e476fbed4fc78873264f Mon Sep 17 00:00:00 2001 From: Pekka Paalanen Date: Fri, 12 Oct 2007 23:43:31 +0300 Subject: [PATCH 377/437] nouveau: Make notifiers go into PCI memory On some hardware notifers in AGP memory just don't work. 
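In short, the per-channel notifier allocation stops probing the AGP heap and always requests PCI-backed, mapped memory (condensed from the nouveau_notifier.c hunk below):

    flags = (NOUVEAU_MEM_PCI | NOUVEAU_MEM_MAPPED |
             NOUVEAU_MEM_FB_ACCEPTABLE);
    chan->notifier_block = nouveau_mem_alloc(dev, 0, PAGE_SIZE, flags,
                                             (struct drm_file *)-2);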
--- shared-core/nouveau_notifier.c | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/shared-core/nouveau_notifier.c b/shared-core/nouveau_notifier.c index d3b79683..c361bc69 100644 --- a/shared-core/nouveau_notifier.c +++ b/shared-core/nouveau_notifier.c @@ -36,17 +36,8 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan) struct drm_nouveau_private *dev_priv = dev->dev_private; int flags, ret; - /*TODO: PCI notifier blocks */ -#ifndef __powerpc__ - if (dev_priv->agp_heap) - flags = NOUVEAU_MEM_AGP; - else -#endif - if (dev_priv->pci_heap) - flags = NOUVEAU_MEM_PCI; - else - flags = NOUVEAU_MEM_FB; - flags |= (NOUVEAU_MEM_MAPPED | NOUVEAU_MEM_FB_ACCEPTABLE); + flags = (NOUVEAU_MEM_PCI | NOUVEAU_MEM_MAPPED | + NOUVEAU_MEM_FB_ACCEPTABLE); chan->notifier_block = nouveau_mem_alloc(dev, 0, PAGE_SIZE, flags, (struct drm_file *)-2); From 50deb31e9ff556f941449bc788821eaa2e5f9e34 Mon Sep 17 00:00:00 2001 From: Stuart Bennett Date: Tue, 9 Oct 2007 20:39:10 +0100 Subject: [PATCH 378/437] nouveau: Fix typos in nv20_graph_context_init --- shared-core/nv20_graph.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/shared-core/nv20_graph.c b/shared-core/nv20_graph.c index 9edab594..6b2aa5ad 100644 --- a/shared-core/nv20_graph.c +++ b/shared-core/nv20_graph.c @@ -86,11 +86,11 @@ write32 #1 block at +0x00740adc NV_PRAMIN+0x40adc of 3369 (0xd29) elements: INSTANCE_WR(ctx, (0x33c/4)+124+i, 0x00080008); for (i = 0; i < 16; ++i) INSTANCE_WR(ctx, (0x33c/4)+136+i, 0x07ff0000); - INSTANCE_WR(ctx, (0x33c/4)+154, 0x4b7ffff); + INSTANCE_WR(ctx, (0x33c/4)+154, 0x4b7fffff); INSTANCE_WR(ctx, (0x33c/4)+176, 0x00000001); INSTANCE_WR(ctx, (0x33c/4)+178, 0x00004000); INSTANCE_WR(ctx, (0x33c/4)+181, 0x00000001); - INSTANCE_WR(ctx, (0x33c/4)+183, 0x00004000); + INSTANCE_WR(ctx, (0x33c/4)+183, 0x00040000); INSTANCE_WR(ctx, (0x33c/4)+184, 0x00010000); /* From 3ab7627651f4c48a114d91158d41e4c4f528c4cc Mon Sep 17 00:00:00 2001 From: Pekka Paalanen Date: Fri, 12 Oct 2007 23:55:59 +0300 Subject: [PATCH 379/437] nouveau: Fix a typo in nv25_graph_context_init --- shared-core/nv20_graph.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/shared-core/nv20_graph.c b/shared-core/nv20_graph.c index 6b2aa5ad..ae0e0858 100644 --- a/shared-core/nv20_graph.c +++ b/shared-core/nv20_graph.c @@ -283,7 +283,7 @@ write32 #1 block at +0x00740a7c NV_PRAMIN.GRCTX0+0x35c of 173 (0xad) elements: INSTANCE_WR(ctx, (0x35c/4)+134, 0x00080008); for (i=0; i<16; ++i) INSTANCE_WR(ctx, (0x35c/4)+143+i, 0x07ff0000); - INSTANCE_WR(ctx, (0x35c/4)+161, 0x4b7ffff); + INSTANCE_WR(ctx, (0x35c/4)+161, 0x4b7fffff); /* write32 #1 block at +0x00740d34 NV_PRAMIN.GRCTX0+0x614 of 3136 (0xc40) elements: From 8d3cb7e472ceb31a28de3acc19176e3a2d2995b1 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Sun, 14 Oct 2007 21:19:13 +1000 Subject: [PATCH 380/437] i915: fix vbl_swap allocation --- shared-core/i915_irq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/shared-core/i915_irq.c b/shared-core/i915_irq.c index 7baa23c0..db18a895 100644 --- a/shared-core/i915_irq.c +++ b/shared-core/i915_irq.c @@ -691,7 +691,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data, return -EBUSY; } - vbl_swap = drm_calloc(1, sizeof(vbl_swap), DRM_MEM_DRIVER); + vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER); if (!vbl_swap) { DRM_ERROR("Failed to allocate memory to queue swap\n"); From 811e43f9e27abdf4c8a4b36c7c287e53134fc950 Mon Sep 17 00:00:00 2001 From: Jeremy Kolb Date: Sun, 14 Oct 
2007 10:56:17 -0400 Subject: [PATCH 381/437] nouveau: fix warning. --- shared-core/nouveau_mem.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/shared-core/nouveau_mem.c b/shared-core/nouveau_mem.c index dbfba351..e2f0b38d 100644 --- a/shared-core/nouveau_mem.c +++ b/shared-core/nouveau_mem.c @@ -430,7 +430,7 @@ int nouveau_mem_init(struct drm_device *dev) sgreq.size = 16 << 20; //16MB of PCI scatter-gather zone if (drm_sg_alloc(dev, &sgreq)) { - DRM_ERROR("Unable to allocate %dMB of scatter-gather" + DRM_ERROR("Unable to allocate %ldMB of scatter-gather" " pages for PCI DMA!",sgreq.size>>20); } else { if (nouveau_mem_init_heap(&dev_priv->pci_heap, 0, From 837e364353b3137ce87b5cce9c06f05a3d603201 Mon Sep 17 00:00:00 2001 From: Jeremy Kolb Date: Sun, 14 Oct 2007 10:56:31 -0400 Subject: [PATCH 382/437] nouveau: fix warning. --- shared-core/nouveau_notifier.c | 1 - 1 file changed, 1 deletion(-) diff --git a/shared-core/nouveau_notifier.c b/shared-core/nouveau_notifier.c index c361bc69..31e2b244 100644 --- a/shared-core/nouveau_notifier.c +++ b/shared-core/nouveau_notifier.c @@ -33,7 +33,6 @@ int nouveau_notifier_init_channel(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; - struct drm_nouveau_private *dev_priv = dev->dev_private; int flags, ret; flags = (NOUVEAU_MEM_PCI | NOUVEAU_MEM_MAPPED | From 30353c8efcc026ee8940f3eadab084c42a3acd4e Mon Sep 17 00:00:00 2001 From: Stephane Marchesin Date: Sun, 14 Oct 2007 23:07:30 +0200 Subject: [PATCH 383/437] nouveau: PPC fixes. These regs are very touchy. --- shared-core/nouveau_state.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c index 3ce9247f..add2d598 100644 --- a/shared-core/nouveau_state.c +++ b/shared-core/nouveau_state.c @@ -285,7 +285,9 @@ nouveau_card_init(struct drm_device *dev) /* Put the card in BE mode if it's not */ if (NV_READ(NV03_PMC_BOOT_1)) - NV_WRITE(NV03_PMC_BOOT_1,0x01000001); + NV_WRITE(NV03_PMC_BOOT_1,0x00000001); + + DRM_MEMORYBARRIER(); /* Determine exact chipset we're running on */ if (dev_priv->card_type < NV_10) From 6398325ba11da8a01c72f6203af0a2e4b43125c2 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 16 Oct 2007 13:27:27 +1100 Subject: [PATCH 384/437] nouveau: Handle multiple PFIFO exceptions per irq, cleanup output. 
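In outline, the handler now drains PFIFO in a loop instead of servicing a single event: it disables the caches, acknowledges each recognised status bit as it is handled, reports and clears anything left over, then re-enables the caches before re-reading the status register (condensed from the nouveau_irq.c hunk below):

    while ((status = NV_READ(NV03_PFIFO_INTR_0))) {
            NV_WRITE(NV03_PFIFO_CACHES, 0);
            /* decode chid/get, handle CACHE_ERROR and DMA_PUSHER,
             * writing each handled bit back to NV03_PFIFO_INTR_0 */
            if (status) {
                    DRM_INFO("Unhandled PFIFO_INTR - 0x%8x\n", status);
                    NV_WRITE(NV03_PFIFO_INTR_0, status);
            }
            NV_WRITE(NV03_PFIFO_CACHES, 1);
    }
    NV_WRITE(NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);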
--- shared-core/nouveau_irq.c | 78 +++++++++++++++++++-------------------- shared-core/nouveau_mem.c | 3 +- 2 files changed, 39 insertions(+), 42 deletions(-) diff --git a/shared-core/nouveau_irq.c b/shared-core/nouveau_irq.c index 7ba45700..5a696d5e 100644 --- a/shared-core/nouveau_irq.c +++ b/shared-core/nouveau_irq.c @@ -63,58 +63,56 @@ void nouveau_irq_uninstall(struct drm_device *dev) static void nouveau_fifo_irq_handler(struct drm_device *dev) { - uint32_t status, chmode, chstat, channel; struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t status; - status = NV_READ(NV03_PFIFO_INTR_0); - if (!status) - return; - chmode = NV_READ(NV04_PFIFO_MODE); - chstat = NV_READ(NV04_PFIFO_DMA); - channel=NV_READ(NV03_PFIFO_CACHE1_PUSH1)&(nouveau_fifo_number(dev)-1); + while ((status = NV_READ(NV03_PFIFO_INTR_0))) { + uint32_t chid, get; - if (status & NV_PFIFO_INTR_CACHE_ERROR) { - uint32_t c1get, c1method, c1data; + NV_WRITE(NV03_PFIFO_CACHES, 0); - DRM_ERROR("PFIFO error interrupt\n"); + chid = NV_READ(NV03_PFIFO_CACHE1_PUSH1) & + (nouveau_fifo_number(dev) - 1); + get = NV_READ(NV03_PFIFO_CACHE1_GET); - c1get = NV_READ(NV03_PFIFO_CACHE1_GET) >> 2; - if (dev_priv->card_type < NV_40) { - /* Untested, so it may not work.. */ - c1method = NV_READ(NV04_PFIFO_CACHE1_METHOD(c1get)); - c1data = NV_READ(NV04_PFIFO_CACHE1_DATA(c1get)); - } else { - c1method = NV_READ(NV40_PFIFO_CACHE1_METHOD(c1get)); - c1data = NV_READ(NV40_PFIFO_CACHE1_DATA(c1get)); + if (status & NV_PFIFO_INTR_CACHE_ERROR) { + uint32_t mthd, data; + int ptr; + + ptr = get >> 2; + if (dev_priv->card_type < NV_40) { + mthd = NV_READ(NV04_PFIFO_CACHE1_METHOD(ptr)); + data = NV_READ(NV04_PFIFO_CACHE1_DATA(ptr)); + } else { + mthd = NV_READ(NV40_PFIFO_CACHE1_METHOD(ptr)); + data = NV_READ(NV40_PFIFO_CACHE1_DATA(ptr)); + } + + DRM_INFO("PFIFO_CACHE_ERROR - " + "Ch %d/%d Mthd 0x%04x Data 0x%08x\n", + chid, (mthd >> 13) & 7, mthd & 0x1ffc, data); + + status &= ~NV_PFIFO_INTR_CACHE_ERROR; + NV_WRITE(NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR); } - DRM_ERROR("Channel %d/%d - Method 0x%04x, Data 0x%08x\n", - channel, (c1method >> 13) & 7, c1method & 0x1ffc, - c1data); + if (status & NV_PFIFO_INTR_DMA_PUSHER) { + DRM_INFO("PFIFO_DMA_PUSHER - Ch %d\n", chid); - status &= ~NV_PFIFO_INTR_CACHE_ERROR; - NV_WRITE(NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR); - } + status &= ~NV_PFIFO_INTR_DMA_PUSHER; + NV_WRITE(NV03_PFIFO_INTR_0, NV_PFIFO_INTR_DMA_PUSHER); - if (status & NV_PFIFO_INTR_DMA_PUSHER) { - DRM_ERROR("PFIFO DMA pusher interrupt: ch%d, 0x%08x\n", - channel, NV_READ(NV04_PFIFO_CACHE1_DMA_GET)); - - status &= ~NV_PFIFO_INTR_DMA_PUSHER; - NV_WRITE(NV03_PFIFO_INTR_0, NV_PFIFO_INTR_DMA_PUSHER); - - NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE, 0x00000000); - if (NV_READ(NV04_PFIFO_CACHE1_DMA_PUT)!=NV_READ(NV04_PFIFO_CACHE1_DMA_GET)) - { - uint32_t getval=NV_READ(NV04_PFIFO_CACHE1_DMA_GET)+4; - NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET,getval); + NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE, 0x00000000); + if (NV_READ(NV04_PFIFO_CACHE1_DMA_PUT) != get) + NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET, get + 4); } - } - if (status) { - DRM_ERROR("Unhandled PFIFO interrupt: status=0x%08x\n", status); + if (status) { + DRM_INFO("Unhandled PFIFO_INTR - 0x%8x\n", status); + NV_WRITE(NV03_PFIFO_INTR_0, status); + } - NV_WRITE(NV03_PFIFO_INTR_0, status); + NV_WRITE(NV03_PFIFO_CACHES, 1); } NV_WRITE(NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING); diff --git a/shared-core/nouveau_mem.c b/shared-core/nouveau_mem.c index e2f0b38d..97691780 100644 --- 
a/shared-core/nouveau_mem.c +++ b/shared-core/nouveau_mem.c @@ -399,8 +399,7 @@ int nouveau_mem_init(struct drm_device *dev) } /*Note: this is *not* just NV50 code, but only used on NV50 for now */ - if (dev_priv->gart_info.type == NOUVEAU_GART_NONE && - dev_priv->card_type >= NV_50) { + if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) { ret = nouveau_sgdma_init(dev); if (!ret) { ret = nouveau_sgdma_nottm_hack_init(dev); From 3af053779cb0fe9b75a657df76c3dd0cc08966b6 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 16 Oct 2007 13:32:03 +1100 Subject: [PATCH 385/437] nouveau: Survive PFIFO_CACHE_ERROR. --- shared-core/nouveau_irq.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/shared-core/nouveau_irq.c b/shared-core/nouveau_irq.c index 5a696d5e..ea93c3fd 100644 --- a/shared-core/nouveau_irq.c +++ b/shared-core/nouveau_irq.c @@ -92,6 +92,9 @@ static void nouveau_fifo_irq_handler(struct drm_device *dev) "Ch %d/%d Mthd 0x%04x Data 0x%08x\n", chid, (mthd >> 13) & 7, mthd & 0x1ffc, data); + NV_WRITE(NV03_PFIFO_CACHE1_GET, get + 4); + NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 1); + status &= ~NV_PFIFO_INTR_CACHE_ERROR; NV_WRITE(NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR); } From 677753047f2b8a8b0b12bae348a2f4f9718682f1 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 16 Oct 2007 14:42:26 +1100 Subject: [PATCH 386/437] nouveau: Cleanup PGRAPH handler, attempt to survive PGRAPH exceptions. --- shared-core/nouveau_irq.c | 230 +++++++++++++++++++------------------- 1 file changed, 113 insertions(+), 117 deletions(-) diff --git a/shared-core/nouveau_irq.c b/shared-core/nouveau_irq.c index ea93c3fd..ac507299 100644 --- a/shared-core/nouveau_irq.c +++ b/shared-core/nouveau_irq.c @@ -37,7 +37,8 @@ #include "nouveau_reg.h" #include "nouveau_swmthd.h" -void nouveau_irq_preinstall(struct drm_device *dev) +void +nouveau_irq_preinstall(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; @@ -45,7 +46,8 @@ void nouveau_irq_preinstall(struct drm_device *dev) NV_WRITE(NV03_PMC_INTR_EN_0, 0); } -void nouveau_irq_postinstall(struct drm_device *dev) +void +nouveau_irq_postinstall(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; @@ -53,7 +55,8 @@ void nouveau_irq_postinstall(struct drm_device *dev) NV_WRITE(NV03_PMC_INTR_EN_0, NV_PMC_INTR_EN_0_MASTER_ENABLE); } -void nouveau_irq_uninstall(struct drm_device *dev) +void +nouveau_irq_uninstall(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; @@ -61,7 +64,8 @@ void nouveau_irq_uninstall(struct drm_device *dev) NV_WRITE(NV03_PMC_INTR_EN_0, 0); } -static void nouveau_fifo_irq_handler(struct drm_device *dev) +static void +nouveau_fifo_irq_handler(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; uint32_t status; @@ -121,56 +125,7 @@ static void nouveau_fifo_irq_handler(struct drm_device *dev) NV_WRITE(NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING); } -#if 0 -static void nouveau_nv04_context_switch(struct drm_device *dev) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - uint32_t channel,i; - uint32_t max=0; - NV_WRITE(NV04_PGRAPH_FIFO,0x0); - channel=NV_READ(NV03_PFIFO_CACHE1_PUSH1)&(nouveau_fifo_number(dev)-1); - //DRM_INFO("raw PFIFO_CACH1_PHS1 reg is %x\n",NV_READ(NV03_PFIFO_CACHE1_PUSH1)); - //DRM_INFO("currently on channel %d\n",channel); - for (i=0;ififos[i].used)&&(i!=channel)) { - uint32_t put,get,pending; - //put=NV_READ(dev_priv->ramfc_offset+i*32); - //get=NV_READ(dev_priv->ramfc_offset+4+i*32); - 
put=NV_READ(NV03_FIFO_REGS_DMAPUT(i)); - get=NV_READ(NV03_FIFO_REGS_DMAGET(i)); - pending=NV_READ(NV04_PFIFO_DMA); - //DRM_INFO("Channel %d (put/get %x/%x)\n",i,put,get); - /* mark all pending channels as such */ - if ((put!=get)&!(pending&(1<cur_fifo=channel; - NV_WRITE(NV04_PFIFO_NEXT_CHANNEL,channel|0x100); -#endif - //NV_WRITE(NV03_PFIFO_CACHE1_PUSH1,max|0x100); - //NV_WRITE(0x2050,max|0x100); - - NV_WRITE(NV04_PGRAPH_FIFO,0x1); - -} -#endif - - -struct nouveau_bitfield_names -{ +struct nouveau_bitfield_names { uint32_t mask; const char * name; }; @@ -290,7 +245,7 @@ nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret) } static void -nouveau_graph_dump_trap_info(struct drm_device *dev) +nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id) { struct drm_nouveau_private *dev_priv = dev->dev_private; uint32_t address; @@ -323,7 +278,7 @@ nouveau_graph_dump_trap_info(struct drm_device *dev) class = NV_READ(0x400814); } - DRM_ERROR("nSource:"); + DRM_INFO("%s - nSource:", id); nouveau_print_bitfield_names(nsource, nouveau_nsource_names, ARRAY_SIZE(nouveau_nsource_names)); printk(", nStatus:"); @@ -335,78 +290,118 @@ nouveau_graph_dump_trap_info(struct drm_device *dev) ARRAY_SIZE(nouveau_nstatus_names_nv10)); printk("\n"); - DRM_ERROR("Channel %d/%d (class 0x%04x) - Method 0x%04x, Data 0x%08x:0x%08x\n", - channel, subc, class, method, data2, data); + DRM_INFO("%s - Ch %d/%d Class 0x%04x Mthd 0x%04x Data 0x%08x:0x%08x\n", + id, channel, subc, class, method, data2, data); } -static void nouveau_pgraph_irq_handler(struct drm_device *dev) +static inline void +nouveau_pgraph_intr_notify(struct drm_device *dev, uint32_t nsource) { struct drm_nouveau_private *dev_priv = dev->dev_private; - uint32_t status, nsource, method; - uint32_t obj_class; + int handled = 0; - while ( (status = NV_READ(NV03_PGRAPH_INTR)) ) { - nsource = NV_READ(NV03_PGRAPH_NSOURCE); + DRM_DEBUG("PGRAPH notify interrupt\n"); + if (dev_priv->card_type == NV_04 && + (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD)) { + uint32_t class, mthd; - if (status & NV_PGRAPH_INTR_NOTIFY) { - DRM_DEBUG("PGRAPH notify interrupt\n"); + /* NV4 (nvidia TNT 1) reports software methods with + * PGRAPH NOTIFY ILLEGAL_MTHD + */ + mthd = NV_READ(NV04_PGRAPH_TRAPPED_ADDR) & 0x1FFC; + class = NV_READ(NV04_PGRAPH_CTX_SWITCH1) & 0xFFF; + DRM_DEBUG("Got NV04 software method method %x for class %#x\n", + mthd, class); - if ( dev_priv->card_type == NV_04 && (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD ) ) { - /* NV4 (nvidia TNT 1) reports software methods with PGRAPH NOTIFY ILLEGAL_MTHD*/ - method = NV_READ(NV04_PGRAPH_TRAPPED_ADDR) & 0x1FFC; - obj_class = NV_READ(NV04_PGRAPH_CTX_SWITCH1) & 0xFFF; - DRM_DEBUG("Got NV04 software method method %x for class %#x\n", method, obj_class); - - if ( nouveau_sw_method_execute(dev, obj_class, method) ) - DRM_ERROR("Unable to execute NV04 software method %x for object class %x. 
Please report.\n", method, obj_class); - } - status &= ~NV_PGRAPH_INTR_NOTIFY; - NV_WRITE(NV03_PGRAPH_INTR, NV_PGRAPH_INTR_NOTIFY); - NV_WRITE(NV04_PGRAPH_FIFO, 1); - } - - if (status & NV_PGRAPH_INTR_ERROR) { - DRM_ERROR("PGRAPH error interrupt\n"); - - nouveau_graph_dump_trap_info(dev); - - status &= ~NV_PGRAPH_INTR_ERROR; - NV_WRITE(NV03_PGRAPH_INTR, NV_PGRAPH_INTR_ERROR); - } - - if (status & NV_PGRAPH_INTR_CONTEXT_SWITCH) { - uint32_t channel=NV_READ(NV03_PFIFO_CACHE1_PUSH1)&(nouveau_fifo_number(dev)-1); - DRM_DEBUG("PGRAPH context switch interrupt channel %x\n",channel); - switch(dev_priv->card_type) - { - case NV_04: - case NV_05: - nouveau_nv04_context_switch(dev); - break; - case NV_10: - case NV_11: - case NV_17: - nouveau_nv10_context_switch(dev); - break; - default: - DRM_ERROR("Context switch not implemented\n"); - break; + if (nouveau_sw_method_execute(dev, class, mthd)) { + DRM_ERROR("Unable to execute NV04 software method %x " + "for object class %x. Please report.\n", + mthd, class); + } else { + handled = 1; } - - status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; - NV_WRITE(NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH); - } - - if (status) { - DRM_ERROR("Unhandled PGRAPH interrupt: STAT=0x%08x\n", status); - NV_WRITE(NV03_PGRAPH_INTR, status); - } -NV_WRITE(NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING); } + if (!handled) + nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY"); } -static void nouveau_crtc_irq_handler(struct drm_device *dev, int crtc) +static inline void +nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource) +{ + nouveau_graph_dump_trap_info(dev, "PGRAPH_ERROR"); +} + +static inline void +nouveau_pgraph_intr_context_switch(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t chid; + + chid = NV_READ(NV03_PFIFO_CACHE1_PUSH1) & (nouveau_fifo_number(dev)-1); + DRM_DEBUG("PGRAPH context switch interrupt channel %x\n", chid); + + switch(dev_priv->card_type) { + case NV_04: + case NV_05: + nouveau_nv04_context_switch(dev); + break; + case NV_10: + case NV_11: + case NV_17: + nouveau_nv10_context_switch(dev); + break; + default: + DRM_ERROR("Context switch not implemented\n"); + break; + } +} + +static void +nouveau_pgraph_irq_handler(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t status; + + while ((status = NV_READ(NV03_PGRAPH_INTR))) { + uint32_t nsource = NV_READ(NV03_PGRAPH_NSOURCE); + + if (status & NV_PGRAPH_INTR_NOTIFY) { + nouveau_pgraph_intr_notify(dev, nsource); + + status &= ~NV_PGRAPH_INTR_NOTIFY; + NV_WRITE(NV03_PGRAPH_INTR, NV_PGRAPH_INTR_NOTIFY); + } + + if (status & NV_PGRAPH_INTR_ERROR) { + nouveau_pgraph_intr_error(dev, nsource); + + status &= ~NV_PGRAPH_INTR_ERROR; + NV_WRITE(NV03_PGRAPH_INTR, NV_PGRAPH_INTR_ERROR); + } + + if (status & NV_PGRAPH_INTR_CONTEXT_SWITCH) { + nouveau_pgraph_intr_context_switch(dev); + + status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; + NV_WRITE(NV03_PGRAPH_INTR, + NV_PGRAPH_INTR_CONTEXT_SWITCH); + } + + if (status) { + DRM_INFO("Unhandled PGRAPH_INTR - 0x%8x\n", status); + NV_WRITE(NV03_PGRAPH_INTR, status); + } + + if ((NV_READ(NV04_PGRAPH_FIFO) & (1 << 0)) == 0) + NV_WRITE(NV04_PGRAPH_FIFO, 1); + } + + NV_WRITE(NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING); +} + +static void +nouveau_crtc_irq_handler(struct drm_device *dev, int crtc) { struct drm_nouveau_private *dev_priv = dev->dev_private; @@ -419,7 +414,8 @@ static void nouveau_crtc_irq_handler(struct drm_device *dev, int crtc) } } -irqreturn_t 
nouveau_irq_handler(DRM_IRQ_ARGS) +irqreturn_t +nouveau_irq_handler(DRM_IRQ_ARGS) { struct drm_device *dev = (struct drm_device*)arg; struct drm_nouveau_private *dev_priv = dev->dev_private; From 9fdab5b5c512f586012654917438327b3c67eaa4 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 16 Oct 2007 14:43:57 +1100 Subject: [PATCH 387/437] nouveau: revert unintended change. --- shared-core/nouveau_mem.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/shared-core/nouveau_mem.c b/shared-core/nouveau_mem.c index 97691780..e2f0b38d 100644 --- a/shared-core/nouveau_mem.c +++ b/shared-core/nouveau_mem.c @@ -399,7 +399,8 @@ int nouveau_mem_init(struct drm_device *dev) } /*Note: this is *not* just NV50 code, but only used on NV50 for now */ - if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) { + if (dev_priv->gart_info.type == NOUVEAU_GART_NONE && + dev_priv->card_type >= NV_50) { ret = nouveau_sgdma_init(dev); if (!ret) { ret = nouveau_sgdma_nottm_hack_init(dev); From 440fc5113ef1ffb1a22bff92cf34eaf23896db8d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=5Butf-8=5D=20Kristian=20H=C3=B8gsberg?= Date: Tue, 9 Oct 2007 21:09:29 -0400 Subject: [PATCH 388/437] Eliminate support for fake buffers. --- libdrm/xf86drm.c | 9 ++---- linux-core/drm_bo.c | 68 ++------------------------------------------- shared-core/drm.h | 1 - 3 files changed, 5 insertions(+), 73 deletions(-) diff --git a/libdrm/xf86drm.c b/libdrm/xf86drm.c index dc18d6f9..bb2b3abe 100644 --- a/libdrm/xf86drm.c +++ b/libdrm/xf86drm.c @@ -2726,9 +2726,6 @@ int drmBOCreate(int fd, unsigned long start, unsigned long size, req->buffer_start = (unsigned long) user_buffer; buf->virtual = user_buffer; break; - case drm_bo_type_fake: - req->buffer_start = start; - break; default: return -EINVAL; } @@ -2751,7 +2748,7 @@ int drmBODestroy(int fd, drmBO *buf) { struct drm_bo_handle_arg arg; - if (buf->mapVirtual && (buf->type != drm_bo_type_fake)) { + if (buf->mapVirtual) { (void) drmUnmap(buf->mapVirtual, buf->start + buf->size); buf->mapVirtual = NULL; buf->virtual = NULL; @@ -2792,7 +2789,7 @@ int drmBOUnReference(int fd, drmBO *buf) { struct drm_bo_handle_arg arg; - if (buf->mapVirtual && (buf->type != drm_bo_type_fake)) { + if (buf->mapVirtual) { (void) munmap(buf->mapVirtual, buf->start + buf->size); buf->mapVirtual = NULL; buf->virtual = NULL; @@ -2827,7 +2824,7 @@ int drmBOMap(int fd, drmBO *buf, unsigned mapFlags, unsigned mapHint, * Make sure we have a virtual address of the buffer. 
*/ - if (!buf->virtual && buf->type != drm_bo_type_fake) { + if (!buf->virtual) { drmAddress virtual; virtual = mmap(0, buf->size + buf->start, PROT_READ | PROT_WRITE, MAP_SHARED, diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 4e735770..7dd9856d 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -148,7 +148,6 @@ static int drm_bo_add_ttm(struct drm_buffer_object * bo) ret = -ENOMEM; break; case drm_bo_type_user: - case drm_bo_type_fake: break; default: DRM_ERROR("Illegal buffer object type\n"); @@ -695,12 +694,6 @@ static int drm_bo_evict(struct drm_buffer_object * bo, unsigned mem_type, evict_mem = bo->mem; evict_mem.mm_node = NULL; - if (bo->type == drm_bo_type_fake) { - bo->mem.mem_type = DRM_BO_MEM_LOCAL; - bo->mem.mm_node = NULL; - goto out1; - } - evict_mem = bo->mem; evict_mem.mask = dev->driver->bo_driver->evict_mask(bo); ret = drm_bo_mem_space(bo, &evict_mem, no_wait); @@ -720,7 +713,6 @@ static int drm_bo_evict(struct drm_buffer_object * bo, unsigned mem_type, goto out; } - out1: mutex_lock(&dev->struct_mutex); if (evict_mem.mm_node) { if (evict_mem.mm_node != bo->pinned_node) @@ -1355,44 +1347,6 @@ static int drm_bo_mem_compat(struct drm_bo_mem_reg * mem) return 1; } -static int drm_bo_check_fake(struct drm_device * dev, struct drm_bo_mem_reg * mem) -{ - struct drm_buffer_manager *bm = &dev->bm; - struct drm_mem_type_manager *man; - uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio; - const uint32_t *prios = dev->driver->bo_driver->mem_type_prio; - uint32_t i; - int type_ok = 0; - uint32_t mem_type = 0; - uint32_t cur_flags; - - if (drm_bo_mem_compat(mem)) - return 0; - - BUG_ON(mem->mm_node); - - for (i = 0; i < num_prios; ++i) { - mem_type = prios[i]; - man = &bm->man[mem_type]; - type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask, - &cur_flags); - if (type_ok) - break; - } - - if (type_ok) { - mem->mm_node = NULL; - mem->mem_type = mem_type; - mem->flags = cur_flags; - DRM_FLAG_MASKED(mem->flags, mem->mask, ~DRM_BO_MASK_MEMTYPE); - return 0; - } - - DRM_ERROR("Illegal fake buffer flags 0x%016llx\n", - (unsigned long long) mem->mask); - return -EINVAL; -} - /* * bo locked. */ @@ -1449,11 +1403,6 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo, DRM_ERROR("Timed out waiting for buffer unmap.\n"); return ret; } - if (bo->type == drm_bo_type_fake) { - ret = drm_bo_check_fake(dev, &bo->mem); - if (ret) - return ret; - } /* * Check whether we need to move buffer. 
@@ -1642,7 +1591,7 @@ int drm_buffer_object_create(struct drm_device *dev, int ret = 0; unsigned long num_pages; - if ((buffer_start & ~PAGE_MASK) && (type != drm_bo_type_fake)) { + if (buffer_start & ~PAGE_MASK) { DRM_ERROR("Invalid buffer object start.\n"); return -EINVAL; } @@ -1677,12 +1626,7 @@ int drm_buffer_object_create(struct drm_device *dev, bo->mem.num_pages = bo->num_pages; bo->mem.mm_node = NULL; bo->mem.page_alignment = page_alignment; - if (bo->type == drm_bo_type_fake) { - bo->offset = buffer_start; - bo->buffer_start = 0; - } else { - bo->buffer_start = buffer_start; - } + bo->buffer_start = buffer_start; bo->priv_flags = 0; bo->mem.flags = 0ULL; bo->mem.mask = 0ULL; @@ -1707,12 +1651,6 @@ int drm_buffer_object_create(struct drm_device *dev, goto out_err; } - if (bo->type == drm_bo_type_fake) { - ret = drm_bo_check_fake(dev, &bo->mem); - if (ret) - goto out_err; - } - ret = drm_bo_add_ttm(bo); if (ret) goto out_err; @@ -1852,8 +1790,6 @@ int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fil DRM_ERROR("Buffer object manager is not initialized.\n"); return -EINVAL; } - if (req->type == drm_bo_type_fake) - LOCK_TEST_WITH_RETURN(dev, file_priv); ret = drm_buffer_object_create(file_priv->head->dev, req->size, req->type, req->mask, diff --git a/shared-core/drm.h b/shared-core/drm.h index 19292344..279f858f 100644 --- a/shared-core/drm.h +++ b/shared-core/drm.h @@ -757,7 +757,6 @@ struct drm_fence_arg { enum drm_bo_type { drm_bo_type_dc, drm_bo_type_user, - drm_bo_type_fake, drm_bo_type_kernel, /* for initial kernel allocations */ }; From dccefba71a65566e7e1628b3be67621866000411 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kristian=20H=C3=B8gsberg?= Date: Tue, 9 Oct 2007 21:09:30 -0400 Subject: [PATCH 389/437] Take bo type argument out of the ioctl interface. The buffer object type is still tracked internally, but it is no longer part of the user space visible ioctl interface. If the bo create ioctl specifies a non-NULL buffer address we assume drm_bo_type_user, otherwise drm_bo_type_dc. Kernel side allocations call drm_buffer_object_create() directly and can still specify drm_bo_type_kernel. Not 100% this makes sense either, but with this patch, the buffer type is no longer exported and we can clean up the internals later on. 
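Condensed from the drm_bo.c hunk below, the create path now infers the type instead of trusting one from user space:

    /* in drm_buffer_object_create(); the create ioctl always passes drm_bo_type_dc */
    if (buffer_start != 0)
            bo->type = drm_bo_type_user;
    else
            bo->type = type;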
--- libdrm/xf86drm.c | 19 +++---------------- libdrm/xf86mm.h | 8 +++----- linux-core/drm_bo.c | 11 +++++++---- linux-core/drm_objects.h | 6 ++++++ shared-core/drm.h | 8 -------- 5 files changed, 19 insertions(+), 33 deletions(-) diff --git a/libdrm/xf86drm.c b/libdrm/xf86drm.c index bb2b3abe..c450a985 100644 --- a/libdrm/xf86drm.c +++ b/libdrm/xf86drm.c @@ -2698,8 +2698,8 @@ static void drmBOCopyReply(const struct drm_bo_info_rep *rep, drmBO *buf) -int drmBOCreate(int fd, unsigned long start, unsigned long size, - unsigned pageAlignment, void *user_buffer, drm_bo_type_t type, +int drmBOCreate(int fd, unsigned long size, + unsigned pageAlignment, void *user_buffer, uint64_t mask, unsigned hint, drmBO *buf) { @@ -2713,23 +2713,11 @@ int drmBOCreate(int fd, unsigned long start, unsigned long size, req->mask = mask; req->hint = hint; req->size = size; - req->type = type; req->page_alignment = pageAlignment; + req->buffer_start = (unsigned long) user_buffer; buf->virtual = NULL; - switch(type) { - case drm_bo_type_dc: - req->buffer_start = start; - break; - case drm_bo_type_user: - req->buffer_start = (unsigned long) user_buffer; - buf->virtual = user_buffer; - break; - default: - return -EINVAL; - } - do { ret = ioctl(fd, DRM_IOCTL_BO_CREATE, &arg); } while (ret != 0 && errno == EAGAIN); @@ -2777,7 +2765,6 @@ int drmBOReference(int fd, unsigned handle, drmBO *buf) return -errno; drmBOCopyReply(rep, buf); - buf->type = drm_bo_type_dc; buf->mapVirtual = NULL; buf->mapCount = 0; buf->virtual = NULL; diff --git a/libdrm/xf86mm.h b/libdrm/xf86mm.h index cacd13af..0dac7eff 100644 --- a/libdrm/xf86mm.h +++ b/libdrm/xf86mm.h @@ -106,7 +106,6 @@ typedef struct _drmFence typedef struct _drmBO { - drm_bo_type_t type; unsigned handle; uint64_t mapHandle; uint64_t flags; @@ -179,10 +178,9 @@ extern int drmBOCreateList(int numTarget, drmBOList *list); * Buffer object functions. 
*/ -extern int drmBOCreate(int fd, unsigned long start, unsigned long size, - unsigned pageAlignment,void *user_buffer, - drm_bo_type_t type, uint64_t mask, - unsigned hint, drmBO *buf); +extern int drmBOCreate(int fd, unsigned long size, + unsigned pageAlignment, void *user_buffer, + uint64_t mask, unsigned hint, drmBO *buf); extern int drmBODestroy(int fd, drmBO *buf); extern int drmBOReference(int fd, unsigned handle, drmBO *buf); extern int drmBOUnReference(int fd, drmBO *buf); diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 7dd9856d..e2f460ed 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1620,7 +1620,10 @@ int drm_buffer_object_create(struct drm_device *dev, INIT_LIST_HEAD(&bo->vma_list); #endif bo->dev = dev; - bo->type = type; + if (buffer_start != 0) + bo->type = drm_bo_type_user; + else + bo->type = type; bo->num_pages = num_pages; bo->mem.mem_type = DRM_BO_MEM_LOCAL; bo->mem.num_pages = bo->num_pages; @@ -1783,8 +1786,8 @@ int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fil struct drm_buffer_object *entry; int ret = 0; - DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align, %d type\n", - (int)(req->size / 1024), req->page_alignment * 4, req->type); + DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align\n", + (int)(req->size / 1024), req->page_alignment * 4); if (!dev->bm.initialized) { DRM_ERROR("Buffer object manager is not initialized.\n"); @@ -1792,7 +1795,7 @@ int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fil } ret = drm_buffer_object_create(file_priv->head->dev, - req->size, req->type, req->mask, + req->size, drm_bo_type_dc, req->mask, req->hint, req->page_alignment, req->buffer_start, &entry); if (ret) diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index 9748baae..b58db57f 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -350,6 +350,12 @@ struct drm_bo_mem_reg { uint32_t hw_tile_stride; }; +enum drm_bo_type { + drm_bo_type_dc, + drm_bo_type_user, + drm_bo_type_kernel, /* for initial kernel allocations */ +}; + struct drm_buffer_object { struct drm_device *dev; struct drm_user_object base; diff --git a/shared-core/drm.h b/shared-core/drm.h index 279f858f..568b1003 100644 --- a/shared-core/drm.h +++ b/shared-core/drm.h @@ -754,12 +754,6 @@ struct drm_fence_arg { #define DRM_BO_INIT_MINOR 1 -enum drm_bo_type { - drm_bo_type_dc, - drm_bo_type_user, - drm_bo_type_kernel, /* for initial kernel allocations */ -}; - struct drm_bo_info_req { uint64_t mask; uint64_t flags; @@ -775,8 +769,6 @@ struct drm_bo_create_req { uint64_t buffer_start; unsigned int hint; unsigned int page_alignment; - enum drm_bo_type type; - unsigned int pad64; }; struct drm_bo_op_req { From a69c85fec8ed323bffb1324ea08157b3897e97db Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kristian=20H=C3=B8gsberg?= Date: Tue, 9 Oct 2007 21:09:31 -0400 Subject: [PATCH 390/437] Drop destroy ioctls for fences and buffer objects. We now always create a drm_ref_object for user objects and this is then the only things that holds a reference to the user object. This way unreference on will destroy the user object when the last drm_ref_object goes way. 
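The refcounting change that makes this safe is in drm_object.c (condensed below): a freshly added user object now starts at refcount 0, and the ref object created alongside it bumps it to 1, so releasing the last drm_ref_object is what tears the user object down.

    atomic_set(&item->refcount, 0);  /* bumped to 1 by the ref object added below */
    ...
    ret = drm_add_ref_object(priv, item, _DRM_REF_USE);
    if (ret)
            ret = drm_ht_remove_item(&dev->object_hash, &item->hash);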
--- libdrm/xf86drm.c | 32 -------------------------------- libdrm/xf86mm.h | 2 -- linux-core/drmP.h | 1 - linux-core/drm_bo.c | 31 ++----------------------------- linux-core/drm_drv.c | 2 -- linux-core/drm_fence.c | 28 +--------------------------- linux-core/drm_fops.c | 13 ------------- linux-core/drm_object.c | 27 +++++++-------------------- linux-core/drm_objects.h | 12 ------------ shared-core/drm.h | 2 -- 10 files changed, 10 insertions(+), 140 deletions(-) diff --git a/libdrm/xf86drm.c b/libdrm/xf86drm.c index c450a985..7666d431 100644 --- a/libdrm/xf86drm.c +++ b/libdrm/xf86drm.c @@ -2389,18 +2389,6 @@ int drmFenceBuffers(int fd, unsigned flags, uint32_t fence_class, drmFence *fenc fence->signaled = 0; return 0; } - -int drmFenceDestroy(int fd, const drmFence *fence) -{ - drm_fence_arg_t arg; - - memset(&arg, 0, sizeof(arg)); - arg.handle = fence->handle; - - if (ioctl(fd, DRM_IOCTL_FENCE_DESTROY, &arg)) - return -errno; - return 0; -} int drmFenceReference(int fd, unsigned handle, drmFence *fence) { @@ -2732,26 +2720,6 @@ int drmBOCreate(int fd, unsigned long size, return 0; } -int drmBODestroy(int fd, drmBO *buf) -{ - struct drm_bo_handle_arg arg; - - if (buf->mapVirtual) { - (void) drmUnmap(buf->mapVirtual, buf->start + buf->size); - buf->mapVirtual = NULL; - buf->virtual = NULL; - } - - memset(&arg, 0, sizeof(arg)); - arg.handle = buf->handle; - - if (ioctl(fd, DRM_IOCTL_BO_DESTROY, &arg)) - return -errno; - - buf->handle = 0; - return 0; -} - int drmBOReference(int fd, unsigned handle, drmBO *buf) { struct drm_bo_reference_info_arg arg; diff --git a/libdrm/xf86mm.h b/libdrm/xf86mm.h index 0dac7eff..d99e61e7 100644 --- a/libdrm/xf86mm.h +++ b/libdrm/xf86mm.h @@ -150,7 +150,6 @@ typedef struct _drmBOList { extern int drmFenceCreate(int fd, unsigned flags, int fence_class, unsigned type, drmFence *fence); -extern int drmFenceDestroy(int fd, const drmFence *fence); extern int drmFenceReference(int fd, unsigned handle, drmFence *fence); extern int drmFenceUnreference(int fd, const drmFence *fence); extern int drmFenceFlush(int fd, drmFence *fence, unsigned flush_type); @@ -181,7 +180,6 @@ extern int drmBOCreateList(int numTarget, drmBOList *list); extern int drmBOCreate(int fd, unsigned long size, unsigned pageAlignment, void *user_buffer, uint64_t mask, unsigned hint, drmBO *buf); -extern int drmBODestroy(int fd, drmBO *buf); extern int drmBOReference(int fd, unsigned handle, drmBO *buf); extern int drmBOUnReference(int fd, drmBO *buf); extern int drmBOMap(int fd, drmBO *buf, unsigned mapFlags, unsigned mapHint, diff --git a/linux-core/drmP.h b/linux-core/drmP.h index f8ca3f4b..d0ab2c94 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -428,7 +428,6 @@ struct drm_file { */ struct list_head refd_objects; - struct list_head user_objects; struct drm_open_hash refd_object_hash[_DRM_NO_REF_TYPES]; struct file *filp; diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index e2f460ed..fb360e7f 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1674,8 +1674,8 @@ int drm_buffer_object_create(struct drm_device *dev, } EXPORT_SYMBOL(drm_buffer_object_create); -int drm_bo_add_user_object(struct drm_file *file_priv, - struct drm_buffer_object *bo, int shareable) +static int drm_bo_add_user_object(struct drm_file *file_priv, + struct drm_buffer_object *bo, int shareable) { struct drm_device *dev = file_priv->head->dev; int ret; @@ -1694,7 +1694,6 @@ int drm_bo_add_user_object(struct drm_file *file_priv, mutex_unlock(&dev->struct_mutex); return ret; } 
-EXPORT_SYMBOL(drm_bo_add_user_object); static int drm_bo_lock_test(struct drm_device * dev, struct drm_file *file_priv) { @@ -1816,32 +1815,6 @@ out: return ret; } - -int drm_bo_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) -{ - struct drm_bo_handle_arg *arg = data; - struct drm_user_object *uo; - int ret = 0; - - DRM_DEBUG("drm_bo_destroy_ioctl: buffer %d\n", arg->handle); - - if (!dev->bm.initialized) { - DRM_ERROR("Buffer object manager is not initialized.\n"); - return -EINVAL; - } - - mutex_lock(&dev->struct_mutex); - uo = drm_lookup_user_object(file_priv, arg->handle); - if (!uo || (uo->type != drm_buffer_type) || uo->owner != file_priv) { - mutex_unlock(&dev->struct_mutex); - return -EINVAL; - } - ret = drm_remove_user_object(file_priv, uo); - mutex_unlock(&dev->struct_mutex); - - return ret; -} - int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_bo_map_wait_idle_arg *arg = data; diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index a09fa96e..0fca3a27 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -129,7 +129,6 @@ static struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_MM_UNLOCK, drm_mm_unlock_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_FENCE_CREATE, drm_fence_create_ioctl, DRM_AUTH), - DRM_IOCTL_DEF(DRM_IOCTL_FENCE_DESTROY, drm_fence_destroy_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_FENCE_REFERENCE, drm_fence_reference_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_FENCE_UNREFERENCE, drm_fence_unreference_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_FENCE_SIGNALED, drm_fence_signaled_ioctl, DRM_AUTH), @@ -139,7 +138,6 @@ static struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_FENCE_BUFFERS, drm_fence_buffers_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_BO_CREATE, drm_bo_create_ioctl, DRM_AUTH), - DRM_IOCTL_DEF(DRM_IOCTL_BO_DESTROY, drm_bo_destroy_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_BO_MAP, drm_bo_map_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_BO_UNMAP, drm_bo_unmap_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_BO_REFERENCE, drm_bo_reference_ioctl, DRM_AUTH), diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c index 9a29356b..d1969f86 100644 --- a/linux-core/drm_fence.c +++ b/linux-core/drm_fence.c @@ -517,7 +517,7 @@ static int drm_fence_object_init(struct drm_device * dev, uint32_t fence_class, return ret; } -int drm_fence_add_user_object(struct drm_file * priv, struct drm_fence_object * fence, +static int drm_fence_add_user_object(struct drm_file * priv, struct drm_fence_object * fence, int shareable) { struct drm_device *dev = priv->head->dev; @@ -535,7 +535,6 @@ out: mutex_unlock(&dev->struct_mutex); return ret; } -EXPORT_SYMBOL(drm_fence_add_user_object); int drm_fence_object_create(struct drm_device * dev, uint32_t fence_class, uint32_t type, unsigned flags, struct drm_fence_object ** c_fence) @@ -670,31 +669,6 @@ int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file * return ret; } -int drm_fence_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) -{ - int ret; - struct drm_fence_manager *fm = &dev->fm; - struct drm_fence_arg *arg = data; - struct drm_user_object *uo; - ret = 0; - - if (!fm->initialized) { - DRM_ERROR("The DRM driver does not support fencing.\n"); - return -EINVAL; - } - - mutex_lock(&dev->struct_mutex); - uo = drm_lookup_user_object(file_priv, arg->handle); - if (!uo || (uo->type != drm_fence_type) || uo->owner != file_priv) { - mutex_unlock(&dev->struct_mutex); - return 
-EINVAL; - } - ret = drm_remove_user_object(file_priv, uo); - mutex_unlock(&dev->struct_mutex); - return ret; -} - - int drm_fence_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { int ret; diff --git a/linux-core/drm_fops.c b/linux-core/drm_fops.c index ab5f4ca5..0ccaed5b 100644 --- a/linux-core/drm_fops.c +++ b/linux-core/drm_fops.c @@ -263,7 +263,6 @@ static int drm_open_helper(struct inode *inode, struct file *filp, priv->lock_count = 0; INIT_LIST_HEAD(&priv->lhead); - INIT_LIST_HEAD(&priv->user_objects); INIT_LIST_HEAD(&priv->refd_objects); for (i=0; i<_DRM_NO_REF_TYPES; ++i) { @@ -338,7 +337,6 @@ static void drm_object_release(struct file *filp) { struct drm_file *priv = filp->private_data; struct list_head *head; - struct drm_user_object *user_object; struct drm_ref_object *ref_object; int i; @@ -357,17 +355,6 @@ static void drm_object_release(struct file *filp) { head = &priv->refd_objects; } - /* - * Free leftover user objects created by me. - */ - - head = &priv->user_objects; - while (head->next != head) { - user_object = list_entry(head->next, struct drm_user_object, list); - drm_remove_user_object(priv, user_object); - head = &priv->user_objects; - } - for(i=0; i<_DRM_NO_REF_TYPES; ++i) { drm_ht_remove(&priv->refd_object_hash[i]); } diff --git a/linux-core/drm_object.c b/linux-core/drm_object.c index 6bd89b1d..a6d6c0d7 100644 --- a/linux-core/drm_object.c +++ b/linux-core/drm_object.c @@ -38,7 +38,8 @@ int drm_add_user_object(struct drm_file * priv, struct drm_user_object * item, DRM_ASSERT_LOCKED(&dev->struct_mutex); - atomic_set(&item->refcount, 1); + /* The refcount will be bumped to 1 when we add the ref object below. */ + atomic_set(&item->refcount, 0); item->shareable = shareable; item->owner = priv; @@ -47,8 +48,11 @@ int drm_add_user_object(struct drm_file * priv, struct drm_user_object * item, if (ret) return ret; - list_add_tail(&item->list, &priv->user_objects); - return 0; + ret = drm_add_ref_object(priv, item, _DRM_REF_USE); + if (ret) + ret = drm_ht_remove_item(&dev->object_hash, &item->hash); + + return ret; } EXPORT_SYMBOL(drm_add_user_object); @@ -87,27 +91,10 @@ static void drm_deref_user_object(struct drm_file * priv, struct drm_user_object if (atomic_dec_and_test(&item->refcount)) { ret = drm_ht_remove_item(&dev->object_hash, &item->hash); BUG_ON(ret); - list_del_init(&item->list); item->remove(priv, item); } } -int drm_remove_user_object(struct drm_file * priv, struct drm_user_object * item) -{ - DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex); - - if (item->owner != priv) { - DRM_ERROR("Cannot destroy object not owned by you.\n"); - return -EINVAL; - } - item->owner = 0; - item->shareable = 0; - list_del_init(&item->list); - drm_deref_user_object(priv, item); - return 0; -} -EXPORT_SYMBOL(drm_remove_user_object); - static int drm_object_ref_action(struct drm_file * priv, struct drm_user_object * ro, enum drm_ref_type action) { diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index b58db57f..67c33745 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -102,15 +102,6 @@ extern int drm_add_user_object(struct drm_file * priv, struct drm_user_object * extern struct drm_user_object *drm_lookup_user_object(struct drm_file * priv, uint32_t key); -/* - * Must be called with the struct_mutex held. - * If "item" has been obtained by a call to drm_lookup_user_object. You may not - * release the struct_mutex before calling drm_remove_ref_object. 
- * This function may temporarily release the struct_mutex. - */ - -extern int drm_remove_user_object(struct drm_file * priv, struct drm_user_object * item); - /* * Must be called with the struct_mutex held. May temporarily release it. */ @@ -222,9 +213,6 @@ extern int drm_fence_object_emit(struct drm_fence_object * fence, extern void drm_fence_fill_arg(struct drm_fence_object *fence, struct drm_fence_arg *arg); -extern int drm_fence_add_user_object(struct drm_file * priv, - struct drm_fence_object * fence, int shareable); - extern int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_fence_destroy_ioctl(struct drm_device *dev, void *data, diff --git a/shared-core/drm.h b/shared-core/drm.h index 568b1003..30650bd9 100644 --- a/shared-core/drm.h +++ b/shared-core/drm.h @@ -962,7 +962,6 @@ struct drm_mm_init_arg { #define DRM_IOCTL_MM_UNLOCK DRM_IOWR(0xc3, struct drm_mm_type_arg) #define DRM_IOCTL_FENCE_CREATE DRM_IOWR(0xc4, struct drm_fence_arg) -#define DRM_IOCTL_FENCE_DESTROY DRM_IOWR(0xc5, struct drm_fence_arg) #define DRM_IOCTL_FENCE_REFERENCE DRM_IOWR(0xc6, struct drm_fence_arg) #define DRM_IOCTL_FENCE_UNREFERENCE DRM_IOWR(0xc7, struct drm_fence_arg) #define DRM_IOCTL_FENCE_SIGNALED DRM_IOWR(0xc8, struct drm_fence_arg) @@ -972,7 +971,6 @@ struct drm_mm_init_arg { #define DRM_IOCTL_FENCE_BUFFERS DRM_IOWR(0xcc, struct drm_fence_arg) #define DRM_IOCTL_BO_CREATE DRM_IOWR(0xcd, struct drm_bo_create_arg) -#define DRM_IOCTL_BO_DESTROY DRM_IOWR(0xce, struct drm_bo_handle_arg) #define DRM_IOCTL_BO_MAP DRM_IOWR(0xcf, struct drm_bo_map_wait_idle_arg) #define DRM_IOCTL_BO_UNMAP DRM_IOWR(0xd0, struct drm_bo_handle_arg) #define DRM_IOCTL_BO_REFERENCE DRM_IOWR(0xd1, struct drm_bo_reference_info_arg) From efc4fd7c4dabed384fa1ab67e744d38025aff47d Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 16 Oct 2007 22:08:55 +1100 Subject: [PATCH 391/437] drm: rename drmBOUnReference to drmBOUnreference for consistency --- libdrm/xf86drm.c | 2 +- libdrm/xf86mm.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/libdrm/xf86drm.c b/libdrm/xf86drm.c index 7666d431..54268f24 100644 --- a/libdrm/xf86drm.c +++ b/libdrm/xf86drm.c @@ -2740,7 +2740,7 @@ int drmBOReference(int fd, unsigned handle, drmBO *buf) return 0; } -int drmBOUnReference(int fd, drmBO *buf) +int drmBOUnreference(int fd, drmBO *buf) { struct drm_bo_handle_arg arg; diff --git a/libdrm/xf86mm.h b/libdrm/xf86mm.h index d99e61e7..5f5f6f9f 100644 --- a/libdrm/xf86mm.h +++ b/libdrm/xf86mm.h @@ -181,7 +181,7 @@ extern int drmBOCreate(int fd, unsigned long size, unsigned pageAlignment, void *user_buffer, uint64_t mask, unsigned hint, drmBO *buf); extern int drmBOReference(int fd, unsigned handle, drmBO *buf); -extern int drmBOUnReference(int fd, drmBO *buf); +extern int drmBOUnreference(int fd, drmBO *buf); extern int drmBOMap(int fd, drmBO *buf, unsigned mapFlags, unsigned mapHint, void **address); extern int drmBOUnmap(int fd, drmBO *buf); From 3f1aa1550353e828814169915c9a74c67d2e81cd Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 16 Oct 2007 22:28:00 +1100 Subject: [PATCH 392/437] drm: drop drm bo list handling code --- libdrm/xf86drm.c | 317 ----------------------------------------------- libdrm/xf86mm.h | 34 ----- 2 files changed, 351 deletions(-) diff --git a/libdrm/xf86drm.c b/libdrm/xf86drm.c index 54268f24..bd92ed2d 100644 --- a/libdrm/xf86drm.c +++ b/libdrm/xf86drm.c @@ -2529,144 +2529,6 @@ int drmFenceWait(int fd, unsigned flags, drmFence *fence, unsigned 
flush_type) return 0; } -static int drmAdjustListNodes(drmBOList *list) -{ - drmBONode *node; - drmMMListHead *l; - int ret = 0; - - while(list->numCurrent < list->numTarget) { - node = (drmBONode *) malloc(sizeof(*node)); - if (!node) { - ret = -ENOMEM; - break; - } - list->numCurrent++; - DRMLISTADD(&node->head, &list->free); - } - - while(list->numCurrent > list->numTarget) { - l = list->free.next; - if (l == &list->free) - break; - DRMLISTDEL(l); - node = DRMLISTENTRY(drmBONode, l, head); - free(node); - list->numCurrent--; - } - return ret; -} - -void drmBOFreeList(drmBOList *list) -{ - drmBONode *node; - drmMMListHead *l; - - l = list->list.next; - while(l != &list->list) { - DRMLISTDEL(l); - node = DRMLISTENTRY(drmBONode, l, head); - free(node); - l = list->list.next; - list->numCurrent--; - list->numOnList--; - } - - l = list->free.next; - while(l != &list->free) { - DRMLISTDEL(l); - node = DRMLISTENTRY(drmBONode, l, head); - free(node); - l = list->free.next; - list->numCurrent--; - } -} - -int drmBOResetList(drmBOList *list) -{ - drmMMListHead *l; - int ret; - - ret = drmAdjustListNodes(list); - if (ret) - return ret; - - l = list->list.next; - while (l != &list->list) { - DRMLISTDEL(l); - DRMLISTADD(l, &list->free); - list->numOnList--; - l = list->list.next; - } - return drmAdjustListNodes(list); -} - -static drmBONode *drmAddListItem(drmBOList *list, drmBO *item, - unsigned long arg0, - unsigned long arg1) -{ - drmBONode *node; - drmMMListHead *l; - - l = list->free.next; - if (l == &list->free) { - node = (drmBONode *) malloc(sizeof(*node)); - if (!node) { - return NULL; - } - list->numCurrent++; - } - else { - DRMLISTDEL(l); - node = DRMLISTENTRY(drmBONode, l, head); - } - node->buf = item; - node->arg0 = arg0; - node->arg1 = arg1; - DRMLISTADD(&node->head, &list->list); - list->numOnList++; - return node; -} - -void *drmBOListIterator(drmBOList *list) -{ - void *ret = list->list.next; - - if (ret == &list->list) - return NULL; - return ret; -} - -void *drmBOListNext(drmBOList *list, void *iterator) -{ - void *ret; - - drmMMListHead *l = (drmMMListHead *) iterator; - ret = l->next; - if (ret == &list->list) - return NULL; - return ret; -} - -drmBO *drmBOListBuf(void *iterator) -{ - drmBONode *node; - drmMMListHead *l = (drmMMListHead *) iterator; - node = DRMLISTENTRY(drmBONode, l, head); - return node->buf; -} - - -int drmBOCreateList(int numTarget, drmBOList *list) -{ - DRMINITLISTHEAD(&list->list); - DRMINITLISTHEAD(&list->free); - list->numTarget = numTarget; - list->numCurrent = 0; - list->numOnList = 0; - return drmAdjustListNodes(list); -} - static void drmBOCopyReply(const struct drm_bo_info_rep *rep, drmBO *buf) { buf->handle = rep->handle; @@ -2971,185 +2833,6 @@ int drmBOBusy(int fd, drmBO *buf, int *busy) } } -int drmAddValidateItem(drmBOList *list, drmBO *buf, unsigned flags, - unsigned mask, - int *newItem) -{ - drmBONode *node, *cur; - drmMMListHead *l; - - *newItem = 0; - cur = NULL; - - for (l = list->list.next; l != &list->list; l = l->next) { - node = DRMLISTENTRY(drmBONode, l, head); - if (node->buf == buf) { - cur = node; - break; - } - } - if (!cur) { - cur = drmAddListItem(list, buf, flags, mask); - if (!cur) { - drmMsg("Out of memory creating validate list node.\n"); - return -ENOMEM; - } - *newItem = 1; - cur->arg0 = flags; - cur->arg1 = mask; - } - else { - unsigned memMask = (cur->arg1 | mask) & DRM_BO_MASK_MEM; - unsigned memFlags = cur->arg0 & flags & memMask; - - if (!memFlags) { - drmMsg("Incompatible memory location requests " - "on validate 
list.\n"); - drmMsg("Previous flag: 0x%08lx, mask: 0x%08lx\n", - cur->arg0, cur->arg1); - drmMsg("Current flag: 0x%08lx, mask: 0x%08lx\n", - flags, mask); - return -EINVAL; - } - if (mask & cur->arg1 & ~DRM_BO_MASK_MEM & (cur->arg0 ^ flags)) { - drmMsg("Incompatible buffer flag requests " - "on validate list.\n"); - drmMsg("Previous flag: 0x%08lx, mask: 0x%08lx\n", - cur->arg0, cur->arg1); - drmMsg("Current flag: 0x%08lx, mask: 0x%08lx\n", - flags, mask); - return -EINVAL; - } - cur->arg1 |= mask; - cur->arg0 = memFlags | ((cur->arg0 | flags) & - cur->arg1 & ~DRM_BO_MASK_MEM); - } - return 0; -} - - -int drmBOValidateList(int fd, drmBOList *list) -{ - drmBONode *node; - drmMMListHead *l; - struct drm_bo_op_arg *arg, *first; - struct drm_bo_op_req *req; - struct drm_bo_arg_rep *rep; - uint64_t *prevNext = NULL; - drmBO *buf; - int ret; - - first = NULL; - - for (l = list->list.next; l != &list->list; l = l->next) { - node = DRMLISTENTRY(drmBONode, l, head); - - arg = &node->bo_arg; - req = &arg->d.req; - - if (!first) - first = arg; - - if (prevNext) - *prevNext = (unsigned long) arg; - - memset(arg, 0, sizeof(*arg)); - prevNext = &arg->next; - req->bo_req.handle = node->buf->handle; - req->op = drm_bo_validate; - req->bo_req.flags = node->arg0; - req->bo_req.hint = 0; - req->bo_req.mask = node->arg1; - req->bo_req.fence_class = 0; /* Backwards compat. */ - } - - if (!first) - return 0; - - do{ - ret = ioctl(fd, DRM_IOCTL_BO_OP, first); - } while (ret && errno == EAGAIN); - - if (ret) - return -errno; - - for (l = list->list.next; l != &list->list; l = l->next) { - node = DRMLISTENTRY(drmBONode, l, head); - arg = &node->bo_arg; - rep = &arg->d.rep; - - if (!arg->handled) { - drmMsg("Unhandled request\n"); - return -EFAULT; - } - if (rep->ret) - return rep->ret; - - buf = node->buf; - drmBOCopyReply(&rep->bo_info, buf); - } - - return 0; -} - -int drmBOFenceList(int fd, drmBOList *list, unsigned fenceHandle) -{ - drmBONode *node; - drmMMListHead *l; - struct drm_bo_op_arg *arg, *first; - struct drm_bo_op_req *req; - struct drm_bo_arg_rep *rep; - uint64_t *prevNext = NULL; - drmBO *buf; - unsigned fence_flags; - int ret; - - first = NULL; - - for (l = list->list.next; l != &list->list; l = l->next) { - node = DRMLISTENTRY(drmBONode, l, head); - - arg = &node->bo_arg; - req = &arg->d.req; - - if (!first) - first = arg; - - if (prevNext) - *prevNext = (unsigned long) arg; - - memset(arg, 0, sizeof(*arg)); - prevNext = &arg->next; - req->bo_req.handle = node->buf->handle; - req->op = drm_bo_fence; - req->bo_req.mask = node->arg0; - req->arg_handle = fenceHandle; - } - - if (!first) - return 0; - - ret = ioctl(fd, DRM_IOCTL_BO_OP, first); - - if (ret) - return -errno; - - for (l = list->list.next; l != &list->list; l = l->next) { - node = DRMLISTENTRY(drmBONode, l, head); - - arg = &node->bo_arg; - rep = &arg->d.rep; - - if (!arg->handled) - return -EFAULT; - if (rep->ret) - return rep->ret; - drmBOCopyReply(&rep->bo_info, node->buf); - } - - return 0; -} - int drmMMInit(int fd, unsigned long pOffset, unsigned long pSize, unsigned memType) { diff --git a/libdrm/xf86mm.h b/libdrm/xf86mm.h index 5f5f6f9f..f817d81a 100644 --- a/libdrm/xf86mm.h +++ b/libdrm/xf86mm.h @@ -126,24 +126,6 @@ typedef struct _drmBO unsigned pad[8]; /* for future expansion */ } drmBO; -typedef struct _drmBONode -{ - drmMMListHead head; - drmBO *buf; - struct drm_bo_op_arg bo_arg; - unsigned long arg0; - unsigned long arg1; -} drmBONode; - -typedef struct _drmBOList { - unsigned numTarget; - unsigned numCurrent; - unsigned 
numOnList; - drmMMListHead list; - drmMMListHead free; -} drmBOList; - - /* * Fence functions. */ @@ -162,17 +144,6 @@ extern int drmFenceEmit(int fd, unsigned flags, drmFence *fence, extern int drmFenceBuffers(int fd, unsigned flags, uint32_t fence_class, drmFence *fence); -/* - * Buffer object list functions. - */ - -extern void drmBOFreeList(drmBOList *list); -extern int drmBOResetList(drmBOList *list); -extern void *drmBOListIterator(drmBOList *list); -extern void *drmBOListNext(drmBOList *list, void *iterator); -extern drmBO *drmBOListBuf(void *iterator); -extern int drmBOCreateList(int numTarget, drmBOList *list); - /* * Buffer object functions. */ @@ -192,11 +163,6 @@ extern int drmBOFence(int fd, drmBO *buf, unsigned flags, unsigned fenceHandle); extern int drmBOInfo(int fd, drmBO *buf); extern int drmBOBusy(int fd, drmBO *buf, int *busy); -extern int drmAddValidateItem(drmBOList *list, drmBO *buf, unsigned flags, - unsigned mask, - int *newItem); -extern int drmBOValidateList(int fd, drmBOList *list); -extern int drmBOFenceList(int fd, drmBOList *list, unsigned fenceHandle); extern int drmBOWaitIdle(int fd, drmBO *buf, unsigned hint); int drmBOSetPin(int fd, drmBO *buf, int pin); From db1709f2f3f8cab2477fb149b58420de4db65654 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michel=20D=C3=A4nzer?= Date: Tue, 16 Oct 2007 15:10:08 +0200 Subject: [PATCH 393/437] Revert part of earlier commit that caused an unresolved symbol for i915. --- linux-core/drm_fence.c | 3 ++- linux-core/drm_objects.h | 3 +++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c index d1969f86..e696b42d 100644 --- a/linux-core/drm_fence.c +++ b/linux-core/drm_fence.c @@ -517,7 +517,7 @@ static int drm_fence_object_init(struct drm_device * dev, uint32_t fence_class, return ret; } -static int drm_fence_add_user_object(struct drm_file * priv, struct drm_fence_object * fence, +int drm_fence_add_user_object(struct drm_file * priv, struct drm_fence_object * fence, int shareable) { struct drm_device *dev = priv->head->dev; @@ -535,6 +535,7 @@ out: mutex_unlock(&dev->struct_mutex); return ret; } +EXPORT_SYMBOL(drm_fence_add_user_object); int drm_fence_object_create(struct drm_device * dev, uint32_t fence_class, uint32_t type, unsigned flags, struct drm_fence_object ** c_fence) diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index 67c33745..726ccbe2 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -213,6 +213,9 @@ extern int drm_fence_object_emit(struct drm_fence_object * fence, extern void drm_fence_fill_arg(struct drm_fence_object *fence, struct drm_fence_arg *arg); +extern int drm_fence_add_user_object(struct drm_file * priv, + struct drm_fence_object * fence, int shareable); + extern int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_fence_destroy_ioctl(struct drm_device *dev, void *data, From ec1162b212248042bf1317abcb3c47bb10db8aa3 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Wed, 17 Oct 2007 15:36:14 +1000 Subject: [PATCH 394/437] i915: lock struct mutex about buffer object lookups --- shared-core/i915_dma.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c index 7209a8de..5a51f6ef 100644 --- a/shared-core/i915_dma.c +++ b/shared-core/i915_dma.c @@ -805,7 +805,9 @@ int i915_process_relocs(struct drm_file *file_priv, memset(&reloc_kmap, 0, sizeof(reloc_kmap)); + mutex_lock(&dev->struct_mutex); reloc_list_object = 
drm_lookup_buffer_object(file_priv, cur_handle, 1); + mutex_unlock(&dev->struct_mutex); if (!reloc_list_object) return -EINVAL; @@ -905,7 +907,9 @@ int i915_validate_buffer_list(struct drm_file *file_priv, if (arg.handled) { data = arg.next; + mutex_lock(&dev->struct_mutex); buffers[buf_count] = drm_lookup_buffer_object(file_priv, req->arg_handle, 1); + mutex_unlock(&dev->struct_mutex); buf_count++; continue; } @@ -948,7 +952,9 @@ int i915_validate_buffer_list(struct drm_file *file_priv, if (buf_reloc_handle) { memset(&relocatee, 0, sizeof(relocatee)); + mutex_lock(&dev->struct_mutex); relocatee.buf = drm_lookup_buffer_object(file_priv, buf_handle, 1); + mutex_unlock(&dev->struct_mutex); if (!relocatee.buf) { DRM_DEBUG("relocatee buffer invalid %08x\n", buf_handle); ret = -EINVAL; From 646560d1d112b58899f9e4cab0c966cec7e0b8c3 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Thu, 4 Oct 2007 09:50:29 +0200 Subject: [PATCH 395/437] Revert "Add some more verbosity to drm_bo_set_pin_req comments." This reverts e7bfeb3031374653f7e55d67cc1b5c823849359f commit. --- shared-core/drm.h | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/shared-core/drm.h b/shared-core/drm.h index 30650bd9..56edaeef 100644 --- a/shared-core/drm.h +++ b/shared-core/drm.h @@ -786,9 +786,7 @@ struct drm_bo_set_pin_req { unsigned int handle; /** * - 0: Unpin the given buffer object. - * - 1: Pin the given buffer object, requiring that its offset and - * memory area stay constant until unpin. The intended use is for - * scanout buffers. + * - 1: Pin the given buffer object. */ unsigned int pin; }; From 12b989a7108a52f16b1b1bb6dd2ea818c235b52c Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Thu, 4 Oct 2007 09:51:01 +0200 Subject: [PATCH 396/437] Revert "Remove the pinned buffer from the LRU when pinning." This reverts 3a0bc518e35c62bb9c64c9105f836584d949653f commit. --- linux-core/Makefile | 1 - linux-core/drm_bo.c | 12 ++++-------- 2 files changed, 4 insertions(+), 9 deletions(-) diff --git a/linux-core/Makefile b/linux-core/Makefile index 6eb5bf5c..7f6b123e 100644 --- a/linux-core/Makefile +++ b/linux-core/Makefile @@ -269,7 +269,6 @@ PAGE_AGP := $(shell cat $(LINUXDIR)/include/asm/agp.h 2>/dev/null | \ ifneq ($(PAGE_AGP),0) EXTRA_CFLAGS += -DHAVE_PAGE_AGP endif -EXTRA_CFLAGS += -g # Start with all modules turned off. CONFIG_DRM_GAMMA := n diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index fb360e7f..099ebe07 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1965,8 +1965,8 @@ drm_bo_set_pin(struct drm_device *dev, struct drm_buffer_object *bo, return ret; } - /* Validate the buffer into its pinned location, with no - * pending fence. + /* Validate the buffer into its pinned location, with no pending + * fence. 
*/ ret = drm_buffer_object_validate(bo, bo->fence_class, 0, 0); if (ret) { @@ -1974,12 +1974,9 @@ drm_bo_set_pin(struct drm_device *dev, struct drm_buffer_object *bo, return ret; } - /* Pull the buffer off of the LRU and add it to the pinned - * list - */ + /* Add our buffer to the pinned list */ bo->pinned_mem_type = bo->mem.mem_type; mutex_lock(&dev->struct_mutex); - list_del_init(&bo->lru); list_del_init(&bo->pinned_lru); drm_bo_add_to_pinned_lru(bo); @@ -1989,7 +1986,6 @@ drm_bo_set_pin(struct drm_device *dev, struct drm_buffer_object *bo, bo->pinned_node = bo->mem.mm_node; } - bo->pinned = pin; mutex_unlock(&dev->struct_mutex); } else { @@ -2001,9 +1997,9 @@ drm_bo_set_pin(struct drm_device *dev, struct drm_buffer_object *bo, list_del_init(&bo->pinned_lru); bo->pinned_node = NULL; - bo->pinned = pin; mutex_unlock(&dev->struct_mutex); } + bo->pinned = pin; mutex_unlock(&bo->mutex); return 0; } From cd276d9cab0be8eff2d9450e5c95b6eb3cd639af Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Thu, 4 Oct 2007 10:01:30 +0200 Subject: [PATCH 397/437] Revert "Copy the important parts of object_validate into object_create()." This reverts f9c27aa50b715a7d21858f1ce9e4785120bd0c36 commit. --- linux-core/drm_bo.c | 44 +++++++++----------------------------------- 1 file changed, 9 insertions(+), 35 deletions(-) diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 099ebe07..6e1de80b 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1587,7 +1587,6 @@ int drm_buffer_object_create(struct drm_device *dev, { struct drm_buffer_manager *bm = &dev->bm; struct drm_buffer_object *bo; - struct drm_bo_driver *driver = dev->driver->bo_driver; int ret = 0; unsigned long num_pages; @@ -1646,7 +1645,7 @@ int drm_buffer_object_create(struct drm_device *dev, if (ret) goto out_err; } - +#if 0 bo->fence_class = 0; ret = driver->fence_type(bo, &bo->fence_class, &bo->fence_type); if (ret) { @@ -1655,13 +1654,12 @@ int drm_buffer_object_create(struct drm_device *dev, } ret = drm_bo_add_ttm(bo); +#else + ret = drm_buffer_object_validate(bo, 0, 0, hint & DRM_BO_HINT_DONT_BLOCK); +#endif if (ret) goto out_err; - mutex_lock(&dev->struct_mutex); - drm_bo_add_to_lru(bo); - mutex_unlock(&dev->struct_mutex); - mutex_unlock(&bo->mutex); *buf_obj = bo; return 0; @@ -1711,8 +1709,6 @@ int drm_bo_op_ioctl(struct drm_device *dev, void *data, struct drm_file *file_pr void __user *curuserarg = NULL; int ret; - DRM_DEBUG("drm_bo_op_ioctl\n"); - if (!dev->bm.initialized) { DRM_ERROR("Buffer object manager is not initialized.\n"); return -EINVAL; @@ -1792,6 +1788,11 @@ int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fil DRM_ERROR("Buffer object manager is not initialized.\n"); return -EINVAL; } +#if 0 + ret = drm_bo_lock_test(dev, file_priv); + if (ret) + goto out; +#endif ret = drm_buffer_object_create(file_priv->head->dev, req->size, drm_bo_type_dc, req->mask, @@ -1821,9 +1822,6 @@ int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_p struct drm_bo_info_req *req = &arg->d.req; struct drm_bo_info_rep *rep = &arg->d.rep; int ret; - - DRM_DEBUG("drm_bo_map_ioctl: buffer %d\n", req->handle); - if (!dev->bm.initialized) { DRM_ERROR("Buffer object manager is not initialized.\n"); return -EINVAL; @@ -1841,9 +1839,6 @@ int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file { struct drm_bo_handle_arg *arg = data; int ret; - - DRM_DEBUG("drm_bo_unmap_ioctl: buffer %d\n", arg->handle); - if (!dev->bm.initialized) { DRM_ERROR("Buffer object 
manager is not initialized.\n"); return -EINVAL; @@ -1862,8 +1857,6 @@ int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file * struct drm_user_object *uo; int ret; - DRM_DEBUG("drm_bo_reference_ioctl: buffer %d\n", req->handle); - if (!dev->bm.initialized) { DRM_ERROR("Buffer object manager is not initialized.\n"); return -EINVAL; @@ -1886,8 +1879,6 @@ int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file struct drm_bo_handle_arg *arg = data; int ret = 0; - DRM_DEBUG("drm_bo_unreference_ioctl: buffer %d\n", arg->handle); - if (!dev->bm.initialized) { DRM_ERROR("Buffer object manager is not initialized.\n"); return -EINVAL; @@ -1904,8 +1895,6 @@ int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_ struct drm_bo_info_rep *rep = &arg->d.rep; int ret; - DRM_DEBUG("drm_bo_info_ioctl: buffer %d\n", req->handle); - if (!dev->bm.initialized) { DRM_ERROR("Buffer object manager is not initialized.\n"); return -EINVAL; @@ -1924,9 +1913,6 @@ int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file * struct drm_bo_info_req *req = &arg->d.req; struct drm_bo_info_rep *rep = &arg->d.rep; int ret; - - DRM_DEBUG("drm_bo_wait_idle_ioctl: buffer %d\n", req->handle); - if (!dev->bm.initialized) { DRM_ERROR("Buffer object manager is not initialized.\n"); return -EINVAL; @@ -2013,9 +1999,6 @@ int drm_bo_set_pin_ioctl(struct drm_device *dev, void *data, struct drm_buffer_object *bo; int ret; - DRM_DEBUG("drm_bo_set_pin_ioctl: buffer %d, pin %d\n", - req->handle, req->pin); - if (!dev->bm.initialized) { DRM_ERROR("Buffer object manager is not initialized.\n"); return -EINVAL; @@ -2445,9 +2428,6 @@ int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_ struct drm_bo_driver *driver = dev->driver->bo_driver; int ret; - DRM_DEBUG("drm_mm_init_ioctl: type %d, 0x%08llx offset, %dkb\n", - arg->mem_type, arg->p_offset * PAGE_SIZE, (int)(arg->p_size * 4)); - if (!driver) { DRM_ERROR("Buffer objects are not supported by this driver\n"); return -EINVAL; @@ -2502,8 +2482,6 @@ int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *f struct drm_bo_driver *driver = dev->driver->bo_driver; int ret; - DRM_DEBUG("drm_mm_takedown_ioctl: %d type\n", arg->mem_type); - if (!driver) { DRM_ERROR("Buffer objects are not supported by this driver\n"); return -EINVAL; @@ -2541,8 +2519,6 @@ int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_ struct drm_bo_driver *driver = dev->driver->bo_driver; int ret; - DRM_DEBUG("drm_mm_lock_ioctl: %d type\n", arg->mem_type); - if (!driver) { DRM_ERROR("Buffer objects are not supported by this driver\n"); return -EINVAL; @@ -2565,8 +2541,6 @@ int drm_mm_unlock_ioctl(struct drm_device *dev, void *data, struct drm_file *fil struct drm_bo_driver *driver = dev->driver->bo_driver; int ret; - DRM_DEBUG("drm_mm_unlock_ioctl\n"); - if (!driver) { DRM_ERROR("Buffer objects are not supported by this driver\n"); return -EINVAL; From 0d1926d36e59ddfc34d8c9c0cdef10b71a49ecf1 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Thu, 4 Oct 2007 10:14:41 +0200 Subject: [PATCH 398/437] Revert "Replace NO_MOVE/NO_EVICT flags to buffer objects with an ioctl to set pinning." This reverts cf2d569daca6954d11a796f4d110148ae2e0c827 commit. 
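With this revert, pinning goes back to being expressed through the DRM_BO_FLAG_NO_EVICT / DRM_BO_FLAG_NO_MOVE bits in the validate mask rather than through a dedicated set_pin ioctl. As a rough illustrative sketch only (not taken from the patch itself), a client using the drmBOValidate() wrapper that still exists at this point in the series might pin a buffer in place as follows; the file descriptor and buffer are assumed to be set up elsewhere, and NO_EVICT stays root-only per the restored drm_bo_new_mask() check:

#include <stdio.h>
#include <stdint.h>
#include "xf86drm.h"
#include "xf86mm.h"

/* Sketch: pin an already-created buffer object in its current placement. */
static int pin_buffer(int fd, drmBO *buf)
{
	uint64_t flags = DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE;
	uint64_t mask  = DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE;
	int ret;

	/* fence_class 0, no hints; drmBOValidate returns -errno on failure. */
	ret = drmBOValidate(fd, buf, 0, flags, mask, 0);
	if (ret)
		fprintf(stderr, "pin failed: %d\n", ret);
	return ret;
}
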
--- libdrm/xf86drm.c | 25 +----- libdrm/xf86mm.h | 1 - linux-core/drm_bo.c | 174 ++++++++++++--------------------------- linux-core/drm_drv.c | 1 - linux-core/drm_objects.h | 3 +- shared-core/drm.h | 27 ++---- 6 files changed, 62 insertions(+), 169 deletions(-) diff --git a/libdrm/xf86drm.c b/libdrm/xf86drm.c index bd92ed2d..b8f3b986 100644 --- a/libdrm/xf86drm.c +++ b/libdrm/xf86drm.c @@ -2793,30 +2793,7 @@ int drmBOWaitIdle(int fd, drmBO *buf, unsigned hint) } return 0; } - -int drmBOSetPin(int fd, drmBO *buf, int pin) -{ - struct drm_bo_set_pin_arg arg; - struct drm_bo_set_pin_req *req = &arg.d.req; - struct drm_bo_info_rep *rep = &arg.d.rep; - int ret = 0; - - memset(&arg, 0, sizeof(arg)); - req->handle = buf->handle; - req->pin = pin; - - do { - ret = ioctl(fd, DRM_IOCTL_BO_SET_PIN, &arg); - } while (ret && errno == EAGAIN); - - if (ret) - return -errno; - - drmBOCopyReply(rep, buf); - - return 0; -} - + int drmBOBusy(int fd, drmBO *buf, int *busy) { if (!(buf->flags & DRM_BO_FLAG_SHAREABLE) && diff --git a/libdrm/xf86mm.h b/libdrm/xf86mm.h index f817d81a..f8ec1d75 100644 --- a/libdrm/xf86mm.h +++ b/libdrm/xf86mm.h @@ -164,7 +164,6 @@ extern int drmBOInfo(int fd, drmBO *buf); extern int drmBOBusy(int fd, drmBO *buf, int *busy); extern int drmBOWaitIdle(int fd, drmBO *buf, unsigned hint); -int drmBOSetPin(int fd, drmBO *buf, int pin); /* * Initialization functions. diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 6e1de80b..7335d258 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -80,7 +80,8 @@ void drm_bo_add_to_lru(struct drm_buffer_object * bo) DRM_ASSERT_LOCKED(&bo->dev->struct_mutex); - if (!bo->pinned || bo->mem.mem_type != bo->pinned_mem_type) { + if (!(bo->mem.mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT)) + || bo->mem.mem_type != bo->pinned_mem_type) { man = &bo->dev->bm.man[bo->mem.mem_type]; list_add_tail(&bo->lru, &man->lru); } else { @@ -638,8 +639,7 @@ int drm_fence_buffer_objects(struct drm_device *dev, mutex_lock(&entry->mutex); mutex_lock(&dev->struct_mutex); list_del_init(l); - if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED && - entry->fence_class == fence_class) { + if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) { count++; if (entry->fence) drm_fence_usage_deref_locked(&entry->fence); @@ -761,7 +761,7 @@ static int drm_bo_mem_force_space(struct drm_device * dev, atomic_inc(&entry->usage); mutex_unlock(&dev->struct_mutex); mutex_lock(&entry->mutex); - BUG_ON(entry->pinned); + BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT)); ret = drm_bo_evict(entry, mem_type, no_wait); mutex_unlock(&entry->mutex); @@ -929,6 +929,18 @@ static int drm_bo_new_mask(struct drm_buffer_object * bo, DRM_ERROR("User buffers are not supported yet\n"); return -EINVAL; } + if (bo->type == drm_bo_type_fake && + !(new_mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))) { + DRM_ERROR("Fake buffers must be pinned.\n"); + return -EINVAL; + } + + if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) { + DRM_ERROR + ("DRM_BO_FLAG_NO_EVICT is only available to priviliged " + "processes\n"); + return -EPERM; + } new_props = new_mask & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_READ); @@ -1372,12 +1384,6 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo, return ret; } - if (bo->pinned && bo->pinned_mem_type != bo->mem.mem_type) { - DRM_ERROR("Attempt to validate pinned buffer into different memory " - "type\n"); - return -EINVAL; - } - /* * We're switching command submission mechanism, * or cannot simply rely 
on the hardware serializing for us. @@ -1418,6 +1424,37 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo, } } + /* + * Pinned buffers. + */ + + if (bo->mem.mask & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) { + bo->pinned_mem_type = bo->mem.mem_type; + mutex_lock(&dev->struct_mutex); + list_del_init(&bo->pinned_lru); + drm_bo_add_to_pinned_lru(bo); + + if (bo->pinned_node != bo->mem.mm_node) { + if (bo->pinned_node != NULL) + drm_mm_put_block(bo->pinned_node); + bo->pinned_node = bo->mem.mm_node; + } + + mutex_unlock(&dev->struct_mutex); + + } else if (bo->pinned_node != NULL) { + + mutex_lock(&dev->struct_mutex); + + if (bo->pinned_node != bo->mem.mm_node) + drm_mm_put_block(bo->pinned_node); + + list_del_init(&bo->pinned_lru); + bo->pinned_node = NULL; + mutex_unlock(&dev->struct_mutex); + + } + /* * We might need to add a TTM. */ @@ -1517,10 +1554,6 @@ int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle, } EXPORT_SYMBOL(drm_bo_handle_validate); -/** - * Fills out the generic buffer object ioctl reply with the information for - * the BO with id of handle. - */ static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle, struct drm_bo_info_rep *rep) { @@ -1926,112 +1959,6 @@ int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file * return 0; } -/** - * Pins or unpins the given buffer object in the given memory area. - * - * Pinned buffers will not be evicted from or move within their memory area. - * Must be called with the hardware lock held for pinning. - */ -static int -drm_bo_set_pin(struct drm_device *dev, struct drm_buffer_object *bo, - int pin) -{ - int ret = 0; - - mutex_lock(&bo->mutex); - if (bo->pinned == pin) { - mutex_unlock(&bo->mutex); - return 0; - } - - if (pin) { - ret = drm_bo_wait_unfenced(bo, 0, 0); - if (ret) { - mutex_unlock(&bo->mutex); - return ret; - } - - /* Validate the buffer into its pinned location, with no pending - * fence. 
- */ - ret = drm_buffer_object_validate(bo, bo->fence_class, 0, 0); - if (ret) { - mutex_unlock(&bo->mutex); - return ret; - } - - /* Add our buffer to the pinned list */ - bo->pinned_mem_type = bo->mem.mem_type; - mutex_lock(&dev->struct_mutex); - list_del_init(&bo->pinned_lru); - drm_bo_add_to_pinned_lru(bo); - - if (bo->pinned_node != bo->mem.mm_node) { - if (bo->pinned_node != NULL) - drm_mm_put_block(bo->pinned_node); - bo->pinned_node = bo->mem.mm_node; - } - - mutex_unlock(&dev->struct_mutex); - - } else { - mutex_lock(&dev->struct_mutex); - - /* Remove our buffer from the pinned list */ - if (bo->pinned_node != bo->mem.mm_node) - drm_mm_put_block(bo->pinned_node); - - list_del_init(&bo->pinned_lru); - bo->pinned_node = NULL; - mutex_unlock(&dev->struct_mutex); - } - bo->pinned = pin; - mutex_unlock(&bo->mutex); - return 0; -} - -int drm_bo_set_pin_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_bo_set_pin_arg *arg = data; - struct drm_bo_set_pin_req *req = &arg->d.req; - struct drm_bo_info_rep *rep = &arg->d.rep; - struct drm_buffer_object *bo; - int ret; - - if (!dev->bm.initialized) { - DRM_ERROR("Buffer object manager is not initialized.\n"); - return -EINVAL; - } - - if (req->pin < 0 || req->pin > 1) { - DRM_ERROR("Bad arguments to set_pin\n"); - return -EINVAL; - } - - if (req->pin) - LOCK_TEST_WITH_RETURN(dev, file_priv); - - mutex_lock(&dev->struct_mutex); - bo = drm_lookup_buffer_object(file_priv, req->handle, 1); - mutex_unlock(&dev->struct_mutex); - if (!bo) { - return -EINVAL; - } - - ret = drm_bo_set_pin(dev, bo, req->pin); - if (ret) { - drm_bo_usage_deref_unlocked(&bo); - return ret; - } - - drm_bo_fill_rep_arg(bo, rep); - drm_bo_usage_deref_unlocked(&bo); - - return 0; -} - - /** *Clean the unfenced list and put on regular LRU. *This is part of the memory manager cleanup and should only be @@ -2112,10 +2039,11 @@ static int drm_bo_leave_list(struct drm_buffer_object * bo, mutex_unlock(&dev->struct_mutex); } - if (bo->pinned) { - DRM_ERROR("A pinned buffer was present at " + if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) { + DRM_ERROR("A DRM_BO_NO_EVICT buffer present at " "cleanup. 
Removing flag and evicting.\n"); - bo->pinned = 0; + bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT; + bo->mem.mask &= ~DRM_BO_FLAG_NO_EVICT; } if (bo->mem.mem_type == mem_type) diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index 0fca3a27..80e56938 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -145,7 +145,6 @@ static struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_BO_OP, drm_bo_op_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_BO_INFO, drm_bo_info_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_BO_WAIT_IDLE, drm_bo_wait_idle_ioctl, DRM_AUTH), - DRM_IOCTL_DEF(DRM_IOCTL_BO_SET_PIN, drm_bo_set_pin_ioctl, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY), }; #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index 726ccbe2..91378b8a 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -377,7 +377,6 @@ struct drm_buffer_object { unsigned long num_pages; /* For pinned buffers */ - int pinned; struct drm_mm_node *pinned_node; uint32_t pinned_mem_type; struct list_head pinned_lru; @@ -472,7 +471,7 @@ extern int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct d extern int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_bo_op_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -int drm_bo_set_pin_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); + extern int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); diff --git a/shared-core/drm.h b/shared-core/drm.h index 56edaeef..021a52e6 100644 --- a/shared-core/drm.h +++ b/shared-core/drm.h @@ -673,6 +673,14 @@ struct drm_fence_arg { * Can also be set in the buffer mask before validation. */ +/* + * Mask: Never evict this buffer. Not even with force. This type of buffer is only + * available to root and must be manually removed before buffer manager shutdown + * or lock. + * Flags: Acknowledge + */ +#define DRM_BO_FLAG_NO_EVICT (1ULL << 4) + /* * Mask: Require that the buffer is placed in mappable memory when validated. * If not set the buffer may or may not be in mappable memory when validated. @@ -781,16 +789,6 @@ struct drm_bo_op_req { struct drm_bo_info_req bo_req; }; -struct drm_bo_set_pin_req { - /** Buffer object ID */ - unsigned int handle; - /** - * - 0: Unpin the given buffer object. - * - 1: Pin the given buffer object. - */ - unsigned int pin; -}; - /* * Reply flags */ @@ -856,13 +854,6 @@ struct drm_bo_op_arg { unsigned int pad64; }; -struct drm_bo_set_pin_arg { - union { - struct drm_bo_set_pin_req req; - struct drm_bo_info_rep rep; - } d; -}; - #define DRM_BO_MEM_LOCAL 0 #define DRM_BO_MEM_TT 1 #define DRM_BO_MEM_VRAM 2 @@ -976,7 +967,7 @@ struct drm_mm_init_arg { #define DRM_IOCTL_BO_OP DRM_IOWR(0xd3, struct drm_bo_op_arg) #define DRM_IOCTL_BO_INFO DRM_IOWR(0xd4, struct drm_bo_reference_info_arg) #define DRM_IOCTL_BO_WAIT_IDLE DRM_IOWR(0xd5, struct drm_bo_map_wait_idle_arg) -#define DRM_IOCTL_BO_SET_PIN DRM_IOWR(0xd6, struct drm_bo_set_pin_arg) + /*@}*/ From 086c058a417317491320129d2cbeb68d1cfcfefe Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Wed, 17 Oct 2007 10:55:21 +0200 Subject: [PATCH 399/437] Remove the op ioctl, and replace it with a setuser ioctl. Remove need for lock for now. 
May create races when we clean memory areas or on takedown. Needs to be fixed. Really do a validate on buffer creation in order to avoid problems with fixed memory buffers. --- libdrm/xf86drm.c | 65 ++++++----------- linux-core/drm_bo.c | 146 +++++++++++---------------------------- linux-core/drm_drv.c | 2 +- linux-core/drm_objects.h | 4 +- shared-core/drm.h | 27 +++++--- 5 files changed, 77 insertions(+), 167 deletions(-) diff --git a/libdrm/xf86drm.c b/libdrm/xf86drm.c index b8f3b986..78cbb099 100644 --- a/libdrm/xf86drm.c +++ b/libdrm/xf86drm.c @@ -2695,62 +2695,37 @@ int drmBOUnmap(int fd, drmBO *buf) return 0; } -int drmBOValidate(int fd, drmBO *buf, uint32_t fence_class, - uint64_t flags, uint64_t mask, - unsigned hint) +int drmBOSetStatus(int fd, drmBO *buf, uint32_t fence_class, + uint64_t flags, uint64_t mask, + unsigned int hint, + unsigned int desired_tile_stride, + unsigned int tile_info) { - struct drm_bo_op_arg arg; - struct drm_bo_op_req *req = &arg.d.req; - struct drm_bo_arg_rep *rep = &arg.d.rep; + struct drm_bo_map_wait_idle_arg arg; + struct drm_bo_info_req *req = &arg.d.req; + struct drm_bo_info_rep *rep = &arg.d.rep; int ret = 0; memset(&arg, 0, sizeof(arg)); - req->bo_req.handle = buf->handle; - req->bo_req.flags = flags; - req->bo_req.mask = mask; - req->bo_req.hint = hint; - req->bo_req.fence_class = fence_class; - req->op = drm_bo_validate; - - do{ - ret = ioctl(fd, DRM_IOCTL_BO_OP, &arg); + req->mask = mask; + req->flags = flags; + req->handle = buf->handle; + req->hint = hint; + req->fence_class = fence_class; + req->desired_tile_stride = desired_tile_stride; + req->tile_info = tile_info; + + do { + ret = ioctl(fd, DRM_IOCTL_BO_SETSTATUS, &arg); } while (ret && errno == EAGAIN); if (ret) - return -errno; - if (!arg.handled) - return -EFAULT; - if (rep->ret) - return rep->ret; + return -errno; - drmBOCopyReply(&rep->bo_info, buf); - return 0; + drmBOCopyReply(rep, buf); } -int drmBOFence(int fd, drmBO *buf, unsigned flags, unsigned fenceHandle) -{ - struct drm_bo_op_arg arg; - struct drm_bo_op_req *req = &arg.d.req; - struct drm_bo_arg_rep *rep = &arg.d.rep; - int ret = 0; - - memset(&arg, 0, sizeof(arg)); - req->bo_req.handle = buf->handle; - req->bo_req.flags = flags; - req->arg_handle = fenceHandle; - req->op = drm_bo_fence; - - ret = ioctl(fd, DRM_IOCTL_BO_OP, &arg); - if (ret) - return -errno; - if (!arg.handled) - return -EFAULT; - if (rep->ret) - return rep->ret; - return 0; -} - int drmBOInfo(int fd, drmBO *buf) { struct drm_bo_reference_info_arg arg; diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 7335d258..bdeefec2 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -929,11 +929,6 @@ static int drm_bo_new_mask(struct drm_buffer_object * bo, DRM_ERROR("User buffers are not supported yet\n"); return -EINVAL; } - if (bo->type == drm_bo_type_fake && - !(new_mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))) { - DRM_ERROR("Fake buffers must be pinned.\n"); - return -EINVAL; - } if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) { DRM_ERROR @@ -942,6 +937,12 @@ static int drm_bo_new_mask(struct drm_buffer_object * bo, return -EPERM; } + if ((new_mask & DRM_BO_FLAG_NO_MOVE)) { + DRM_ERROR + ("DRM_BO_FLAG_NO_MOVE is not properly implemented yet.\n"); + return -EPERM; + } + new_props = new_mask & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_READ); @@ -1160,11 +1161,9 @@ static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle, return -EINVAL; mutex_lock(&bo->mutex); - if (!(hint & 
DRM_BO_HINT_ALLOW_UNFENCED_MAP)) { - ret = drm_bo_wait_unfenced(bo, no_wait, 0); - if (ret) - goto out; - } + ret = drm_bo_wait_unfenced(bo, no_wait, 0); + if (ret) + goto out; /* * If this returns true, we are currently unmapped. @@ -1542,6 +1541,7 @@ int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle, return -EINVAL; } + ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class, no_wait, rep); @@ -1663,8 +1663,10 @@ int drm_buffer_object_create(struct drm_device *dev, bo->mem.page_alignment = page_alignment; bo->buffer_start = buffer_start; bo->priv_flags = 0; - bo->mem.flags = 0ULL; - bo->mem.mask = 0ULL; + bo->mem.flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED | + DRM_BO_FLAG_MAPPABLE; + bo->mem.mask = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED | + DRM_BO_FLAG_MAPPABLE; atomic_inc(&bm->count); ret = drm_bo_new_mask(bo, mask, hint); @@ -1678,18 +1680,8 @@ int drm_buffer_object_create(struct drm_device *dev, if (ret) goto out_err; } -#if 0 - bo->fence_class = 0; - ret = driver->fence_type(bo, &bo->fence_class, &bo->fence_type); - if (ret) { - DRM_ERROR("Driver did not support given buffer permissions\n"); - goto out_err; - } - ret = drm_bo_add_ttm(bo); -#else ret = drm_buffer_object_validate(bo, 0, 0, hint & DRM_BO_HINT_DONT_BLOCK); -#endif if (ret) goto out_err; @@ -1705,6 +1697,7 @@ int drm_buffer_object_create(struct drm_device *dev, } EXPORT_SYMBOL(drm_buffer_object_create); + static int drm_bo_add_user_object(struct drm_file *file_priv, struct drm_buffer_object *bo, int shareable) { @@ -1726,86 +1719,6 @@ static int drm_bo_add_user_object(struct drm_file *file_priv, return ret; } -static int drm_bo_lock_test(struct drm_device * dev, struct drm_file *file_priv) -{ - LOCK_TEST_WITH_RETURN(dev, file_priv); - return 0; -} - -int drm_bo_op_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) -{ - struct drm_bo_op_arg curarg; - struct drm_bo_op_arg *arg = data; - struct drm_bo_op_req *req = &arg->d.req; - struct drm_bo_info_rep rep; - unsigned long next = 0; - void __user *curuserarg = NULL; - int ret; - - if (!dev->bm.initialized) { - DRM_ERROR("Buffer object manager is not initialized.\n"); - return -EINVAL; - } - - do { - if (next != 0) { - curuserarg = (void __user *)next; - if (copy_from_user(&curarg, curuserarg, - sizeof(curarg)) != 0) - return -EFAULT; - arg = &curarg; - } - - if (arg->handled) { - next = arg->next; - continue; - } - req = &arg->d.req; - ret = 0; - switch (req->op) { - case drm_bo_validate: - ret = drm_bo_lock_test(dev, file_priv); - if (ret) - break; - ret = drm_bo_handle_validate(file_priv, req->bo_req.handle, - req->bo_req.fence_class, - req->bo_req.flags, - req->bo_req.mask, - req->bo_req.hint, - &rep, NULL); - break; - case drm_bo_fence: - ret = -EINVAL; - DRM_ERROR("Function is not implemented yet.\n"); - break; - case drm_bo_ref_fence: - ret = -EINVAL; - DRM_ERROR("Function is not implemented yet.\n"); - break; - default: - ret = -EINVAL; - } - next = arg->next; - - /* - * A signal interrupted us. Make sure the ioctl is restartable. 
- */ - - if (ret == -EAGAIN) - return -EAGAIN; - - arg->handled = 1; - arg->d.rep.ret = ret; - arg->d.rep.bo_info = rep; - if (arg != data) { - if (copy_to_user(curuserarg, &curarg, - sizeof(curarg)) != 0) - return -EFAULT; - } - } while (next != 0); - return 0; -} - int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_bo_create_arg *arg = data; @@ -1821,11 +1734,6 @@ int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fil DRM_ERROR("Buffer object manager is not initialized.\n"); return -EINVAL; } -#if 0 - ret = drm_bo_lock_test(dev, file_priv); - if (ret) - goto out; -#endif ret = drm_buffer_object_create(file_priv->head->dev, req->size, drm_bo_type_dc, req->mask, @@ -1849,6 +1757,30 @@ out: return ret; } +int drm_bo_setstatus_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv) +{ + struct drm_bo_map_wait_idle_arg *arg = data; + struct drm_bo_info_req *req = &arg->d.req; + struct drm_bo_info_rep *rep = &arg->d.rep; + int ret; + if (!dev->bm.initialized) { + DRM_ERROR("Buffer object manager is not initialized.\n"); + return -EINVAL; + } + + ret = drm_bo_handle_validate(file_priv, req->handle, req->fence_class, + req->flags, + req->mask, + req->hint | DRM_BO_HINT_DONT_FENCE, + rep, NULL); + + if (ret) + return ret; + + return 0; +} + int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_bo_map_wait_idle_arg *arg = data; diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index 80e56938..9c867f1b 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -142,7 +142,7 @@ static struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_BO_UNMAP, drm_bo_unmap_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_BO_REFERENCE, drm_bo_reference_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_BO_UNREFERENCE, drm_bo_unreference_ioctl, DRM_AUTH), - DRM_IOCTL_DEF(DRM_IOCTL_BO_OP, drm_bo_op_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_BO_SETSTATUS, drm_bo_setstatus_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_BO_INFO, drm_bo_info_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_BO_WAIT_IDLE, drm_bo_wait_idle_ioctl, DRM_AUTH), }; diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index 91378b8a..4d1ec993 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -470,9 +470,7 @@ extern int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm extern int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -extern int drm_bo_op_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); - - +extern int drm_bo_setstatus_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); diff --git a/shared-core/drm.h b/shared-core/drm.h index 021a52e6..0ffd0ad5 100644 --- a/shared-core/drm.h +++ b/shared-core/drm.h @@ -723,6 +723,7 @@ struct drm_fence_arg { * Flags: Acknowledge. 
*/ #define DRM_BO_FLAG_FORCE_MAPPABLE (1ULL << 14) +#define DRM_BO_FLAG_TILE (1ULL << 15) /* * Memory type flags that can be or'ed together in the mask, but only @@ -755,7 +756,6 @@ struct drm_fence_arg { /* Don't place this buffer on the unfenced list.*/ #define DRM_BO_HINT_DONT_FENCE 0x00000004 #define DRM_BO_HINT_WAIT_LAZY 0x00000008 -#define DRM_BO_HINT_ALLOW_UNFENCED_MAP 0x00000010 #define DRM_BO_INIT_MAGIC 0xfe769812 #define DRM_BO_INIT_MAJOR 0 @@ -768,6 +768,8 @@ struct drm_bo_info_req { unsigned int handle; unsigned int hint; unsigned int fence_class; + unsigned int desired_tile_stride; + unsigned int tile_info; unsigned int pad64; }; @@ -779,15 +781,6 @@ struct drm_bo_create_req { unsigned int page_alignment; }; -struct drm_bo_op_req { - enum { - drm_bo_validate, - drm_bo_fence, - drm_bo_ref_fence, - } op; - unsigned int arg_handle; - struct drm_bo_info_req bo_req; -}; /* * Reply flags @@ -844,6 +837,17 @@ struct drm_bo_map_wait_idle_arg { } d; }; +struct drm_bo_op_req { + enum { + drm_bo_validate, + drm_bo_fence, + drm_bo_ref_fence, + } op; + unsigned int arg_handle; + struct drm_bo_info_req bo_req; +}; + + struct drm_bo_op_arg { uint64_t next; union { @@ -854,6 +858,7 @@ struct drm_bo_op_arg { unsigned int pad64; }; + #define DRM_BO_MEM_LOCAL 0 #define DRM_BO_MEM_TT 1 #define DRM_BO_MEM_VRAM 2 @@ -964,7 +969,7 @@ struct drm_mm_init_arg { #define DRM_IOCTL_BO_UNMAP DRM_IOWR(0xd0, struct drm_bo_handle_arg) #define DRM_IOCTL_BO_REFERENCE DRM_IOWR(0xd1, struct drm_bo_reference_info_arg) #define DRM_IOCTL_BO_UNREFERENCE DRM_IOWR(0xd2, struct drm_bo_handle_arg) -#define DRM_IOCTL_BO_OP DRM_IOWR(0xd3, struct drm_bo_op_arg) +#define DRM_IOCTL_BO_SETSTATUS DRM_IOWR(0xd3, struct drm_bo_map_wait_idle_arg) #define DRM_IOCTL_BO_INFO DRM_IOWR(0xd4, struct drm_bo_reference_info_arg) #define DRM_IOCTL_BO_WAIT_IDLE DRM_IOWR(0xd5, struct drm_bo_map_wait_idle_arg) From bb29ba7fa77659be284c365ebfb2f740491e8506 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Wed, 17 Oct 2007 10:57:12 +0200 Subject: [PATCH 400/437] Only allow creator to change shared buffer mask. --- linux-core/drm_bo.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index bdeefec2..d40be07f 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1540,8 +1540,16 @@ int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle, if (!bo) { return -EINVAL; } - + /* + * Only allow creator to change shared buffer mask. + */ + + if (bo->base.owner != file_priv) { + flags = 0x0; + mask = 0x0; + } + ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class, no_wait, rep); From 36120264ca8f43078f8748e022faeb9471edcb36 Mon Sep 17 00:00:00 2001 From: Jung-uk Kim Date: Wed, 17 Oct 2007 12:50:29 -0700 Subject: [PATCH 401/437] Bug #11870: FreeBSD hardware lock cleanup fix with multiple opens by a process. Previously, the lock would get released on the first close by the X Server (during AIGLX setup), and the Radeon driver would then hang in initialization due to unexpected failure in DRM calls that required the lock to be held. Based on a patch by Kostik Belousov. --- bsd-core/drm_drv.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/bsd-core/drm_drv.c b/bsd-core/drm_drv.c index c36b78aa..d6868b9c 100644 --- a/bsd-core/drm_drv.c +++ b/bsd-core/drm_drv.c @@ -538,6 +538,7 @@ static int drm_load(drm_device_t *dev) if (dev->driver.load != NULL) { DRM_LOCK(); + /* Shared code returns -errno. 
*/ retcode = -dev->driver.load(dev, dev->id_entry->driver_private); DRM_UNLOCK(); @@ -720,6 +721,9 @@ int drm_close(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p) return EINVAL; } + if (--file_priv->refs != 0) + goto done; + if (dev->driver.preclose != NULL) dev->driver.preclose(dev, file_priv); @@ -795,17 +799,16 @@ int drm_close(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p) dev->buf_pgid = 0; #endif /* __NetBSD__ || __OpenBSD__ */ - if (--file_priv->refs == 0) { - if (dev->driver.postclose != NULL) - dev->driver.postclose(dev, file_priv); - TAILQ_REMOVE(&dev->files, file_priv, link); - free(file_priv, M_DRM); - } + if (dev->driver.postclose != NULL) + dev->driver.postclose(dev, file_priv); + TAILQ_REMOVE(&dev->files, file_priv, link); + free(file_priv, M_DRM); /* ======================================================== * End inline drm_release */ +done: atomic_inc( &dev->counts[_DRM_STAT_CLOSES] ); #ifdef __FreeBSD__ device_unbusy(dev->device); From e7523d337997018a86530266a8f3f88dd061c138 Mon Sep 17 00:00:00 2001 From: Robert Noland Date: Wed, 17 Oct 2007 13:20:46 -0700 Subject: [PATCH 402/437] Fix a race in the auth test where client prevents server from being master. --- tests/auth.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/auth.c b/tests/auth.c index 4160d1de..9b6fca94 100644 --- a/tests/auth.c +++ b/tests/auth.c @@ -69,10 +69,10 @@ static void client() int drmfd, ret; /* XXX: Should make sure we open the same DRM as the master */ - drmfd = drm_open_any(); - wait_event(0, SERVER_READY); + drmfd = drm_open_any(); + /* Get a client magic number and pass it to the master for auth. */ auth.magic = 0; /* Quiet valgrind */ ret = ioctl(drmfd, DRM_IOCTL_GET_MAGIC, &auth); From 2c5c18fbd394f419a9cf650720a1187440c643cd Mon Sep 17 00:00:00 2001 From: Robert Noland Date: Wed, 17 Oct 2007 13:25:31 -0700 Subject: [PATCH 403/437] Bug #12838: Fix lock test client vs. server master race and misplaced closes. --- tests/lock.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/tests/lock.c b/tests/lock.c index 3f627558..86caa281 100644 --- a/tests/lock.c +++ b/tests/lock.c @@ -87,8 +87,6 @@ client_auth(int drmfd) struct drm_auth auth; int ret; - wait_event(0, SERVER_READY); - /* Get a client magic number and pass it to the master for auth. */ ret = ioctl(drmfd, DRM_IOCTL_GET_MAGIC, &auth); if (ret == -1) @@ -172,8 +170,6 @@ static void test_open_close_locked(drmfd) ret = drmUnlock(drmfd, lock1); if (ret != 0) errx(1, "lock lost during open/close by same pid"); - - close(drmfd); } static void client() @@ -181,6 +177,8 @@ static void client() int drmfd, ret; unsigned int time; + wait_event(0, SERVER_READY); + /* XXX: Should make sure we open the same DRM as the master */ drmfd = drm_open_any(); @@ -201,6 +199,7 @@ static void client() send_event(0, CLIENT_LOCKED); ret = write(commfd[0], &time, sizeof(time)); + close(drmfd); exit(0); } @@ -238,6 +237,8 @@ static void server() if (client_time < unlock_time) errx(1, "Client took lock before server released it"); + + close(drmfd); } int main(int argc, char **argv) From cf2d1bba5513ae38d8efbaf50251fc136ed1d414 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Fri, 19 Oct 2007 16:24:36 +0200 Subject: [PATCH 404/437] Remove the clean_unfenced function. Change the restriction that non-creators can't change the buffer flags to non-creators can't change EVICT and NO_MOVE flags. 
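From a client's point of view, the relaxed restriction means a process that merely references a shared buffer can still change ordinary flags, but any NO_EVICT/NO_MOVE bits in its mask are stripped by the kernel, so only the creator controls pinning. The following is a hypothetical sketch (not from the patch) using the drmBOSetStatus() wrapper as it exists at this point in the series, with the fd and shared handle assumed to come from elsewhere:

#include <stdint.h>
#include "xf86drm.h"
#include "xf86mm.h"

/* Sketch: a non-creating client adjusting a shared buffer's flags. */
static int touch_shared_bo(int fd, unsigned shared_handle)
{
	drmBO shared;
	int ret;

	ret = drmBOReference(fd, shared_handle, &shared);
	if (ret)
		return ret;

	/* Allowed for any client: an ordinary flag change. */
	ret = drmBOSetStatus(fd, &shared, 0 /* fence_class */,
			     DRM_BO_FLAG_FORCE_MAPPABLE,
			     DRM_BO_FLAG_FORCE_MAPPABLE, 0, 0, 0);

	/* Masked out for non-creators: this cannot pin or unpin the buffer. */
	if (!ret)
		ret = drmBOSetStatus(fd, &shared, 0,
				     DRM_BO_FLAG_NO_EVICT,
				     DRM_BO_FLAG_NO_EVICT, 0, 0, 0);

	drmBOUnreference(fd, &shared);
	return ret;
}
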
--- linux-core/drm_bo.c | 65 +++------------------------------------------ 1 file changed, 4 insertions(+), 61 deletions(-) diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index d40be07f..89c014e3 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1545,10 +1545,9 @@ int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle, * Only allow creator to change shared buffer mask. */ - if (bo->base.owner != file_priv) { - flags = 0x0; - mask = 0x0; - } + if (bo->base.owner != file_priv) + mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE); + ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class, no_wait, rep); @@ -1899,60 +1898,6 @@ int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file * return 0; } -/** - *Clean the unfenced list and put on regular LRU. - *This is part of the memory manager cleanup and should only be - *called with the DRI lock held. - *Call dev->struct_sem locked. - */ - -static void drm_bo_clean_unfenced(struct drm_device *dev) -{ - struct drm_buffer_manager *bm = &dev->bm; - struct list_head *head, *list; - struct drm_buffer_object *entry; - struct drm_fence_object *fence; - - head = &bm->unfenced; - - if (list_empty(head)) - return; - - DRM_ERROR("Clean unfenced\n"); - - if (drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence)) { - - /* - * Fixme: Should really wait here. - */ - } - - if (fence) - drm_fence_usage_deref_locked(&fence); - - if (list_empty(head)) - return; - - DRM_ERROR("Really clean unfenced\n"); - - list = head->next; - while(list != head) { - prefetch(list->next); - entry = list_entry(list, struct drm_buffer_object, lru); - - atomic_inc(&entry->usage); - mutex_unlock(&dev->struct_mutex); - mutex_lock(&entry->mutex); - mutex_lock(&dev->struct_mutex); - - list_del(&entry->lru); - DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); - drm_bo_add_to_lru(entry); - mutex_unlock(&entry->mutex); - list = head->next; - } -} - static int drm_bo_leave_list(struct drm_buffer_object * bo, uint32_t mem_type, int free_pinned, int allow_errors) @@ -2103,8 +2048,7 @@ int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type) ret = 0; if (mem_type > 0) { - - drm_bo_clean_unfenced(dev); + BUG_ON(!list_empty(&bm->unfenced)); drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0); drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1); @@ -2142,7 +2086,6 @@ static int drm_bo_lock_mm(struct drm_device * dev, unsigned mem_type) return 0; } - drm_bo_clean_unfenced(dev); ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0); if (ret) return ret; From 733ff568346e8fe40e9790f21f8b7efc659d5d12 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Fri, 19 Oct 2007 16:28:47 +0200 Subject: [PATCH 405/437] No fence_class argument on drmBOSetStatus since it's not associated with a particular command submission. 
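After this change the caller no longer picks a fence class; the kernel reuses the one already recorded in the buffer object (see the use_old_fence_class path in the diff below). A minimal, hypothetical call with the new wrapper signature might look like the sketch here; the DRM_BO_FLAG_MEM_TT placement flag is assumed from this series' drm.h, and the buffer is assumed to be idle and unmapped:

#include <stdint.h>
#include "xf86drm.h"
#include "xf86mm.h"

/* Sketch: request a TT (AGP/GART) placement without blocking or fencing. */
static int move_to_tt(int fd, drmBO *buf)
{
	return drmBOSetStatus(fd, buf,
			      DRM_BO_FLAG_MEM_TT,      /* flags */
			      DRM_BO_MASK_MEM,         /* mask */
			      DRM_BO_HINT_DONT_BLOCK,  /* hint */
			      0, 0);                   /* tile stride / info */
}
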
--- libdrm/xf86drm.c | 3 +-- linux-core/drm_bo.c | 13 +++++++++---- linux-core/drm_objects.h | 1 + 3 files changed, 11 insertions(+), 6 deletions(-) diff --git a/libdrm/xf86drm.c b/libdrm/xf86drm.c index 78cbb099..a8e054d9 100644 --- a/libdrm/xf86drm.c +++ b/libdrm/xf86drm.c @@ -2695,7 +2695,7 @@ int drmBOUnmap(int fd, drmBO *buf) return 0; } -int drmBOSetStatus(int fd, drmBO *buf, uint32_t fence_class, +int drmBOSetStatus(int fd, drmBO *buf, uint64_t flags, uint64_t mask, unsigned int hint, unsigned int desired_tile_stride, @@ -2711,7 +2711,6 @@ int drmBOSetStatus(int fd, drmBO *buf, uint32_t fence_class, req->flags = flags; req->handle = buf->handle; req->hint = hint; - req->fence_class = fence_class; req->desired_tile_stride = desired_tile_stride; req->tile_info = tile_info; diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 89c014e3..cc4743dc 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1524,7 +1524,9 @@ EXPORT_SYMBOL(drm_bo_do_validate); int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle, uint32_t fence_class, - uint64_t flags, uint64_t mask, uint32_t hint, + uint64_t flags, uint64_t mask, + uint32_t hint, + int use_old_fence_class, struct drm_bo_info_rep * rep, struct drm_buffer_object **bo_rep) { @@ -1537,10 +1539,12 @@ int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle, bo = drm_lookup_buffer_object(file_priv, handle, 1); mutex_unlock(&dev->struct_mutex); - if (!bo) { + if (!bo) return -EINVAL; - } - + + if (use_old_fence_class) + fence_class = bo->fence_class; + /* * Only allow creator to change shared buffer mask. */ @@ -1780,6 +1784,7 @@ int drm_bo_setstatus_ioctl(struct drm_device *dev, req->flags, req->mask, req->hint | DRM_BO_HINT_DONT_FENCE, + 1, rep, NULL); if (ret) diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index 4d1ec993..f153b84a 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -510,6 +510,7 @@ extern int drm_bo_init_mm(struct drm_device * dev, unsigned type, extern int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle, uint32_t fence_class, uint64_t flags, uint64_t mask, uint32_t hint, + int use_old_fence_class, struct drm_bo_info_rep * rep, struct drm_buffer_object **bo_rep); extern struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file * file_priv, From c0e3537e77f1765001f665f93e5349ccd0f1d092 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Fri, 19 Oct 2007 16:44:12 +0200 Subject: [PATCH 406/437] Some comment updates pending removal of the init mutex. --- linux-core/drm_bo.c | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index cc4743dc..35ac8a0a 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1072,13 +1072,6 @@ static int drm_bo_check_unfenced(struct drm_buffer_object * bo) /* * Wait until a buffer, scheduled to be fenced moves off the unfenced list. * Until then, we cannot really do anything with it except delete it. - * The unfenced list is a PITA, and the operations - * 1) validating - * 2) submitting commands - * 3) fencing - * Should really be an atomic operation. - * We now "solve" this problem by keeping - * the buffer "unfenced" after validating, but before fencing. 
*/ static int drm_bo_wait_unfenced(struct drm_buffer_object * bo, int no_wait, @@ -2144,8 +2137,10 @@ int drm_bo_init_mm(struct drm_device * dev, EXPORT_SYMBOL(drm_bo_init_mm); /* - * This is called from lastclose, so we don't need to bother about - * any clients still running when we set the initialized flag to zero. + * This function is intended to be called on drm driver unload. + * If you decide to call it from lastclose, you must protect the call + * from a potentially racing drm_bo_driver_init in firstopen. + * (This may happen on X server restart). */ int drm_bo_driver_finish(struct drm_device * dev) @@ -2199,6 +2194,13 @@ int drm_bo_driver_finish(struct drm_device * dev) return ret; } +/* + * This function is intended to be called on drm driver load. + * If you decide to call it from firstopen, you must protect the call + * from a potentially racing drm_bo_driver_finish in lastclose. + * (This may happen on X server restart). + */ + int drm_bo_driver_init(struct drm_device * dev) { struct drm_bo_driver *driver = dev->driver->bo_driver; From 48b5eaf303b60077faed09db77785d7a544ac335 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Sat, 20 Oct 2007 16:49:43 +0200 Subject: [PATCH 407/437] Simple replacement for hardware lock in some cases. Fix i915 since last commit. --- libdrm/xf86drm.c | 11 ++++- linux-core/Makefile.kernel | 2 +- linux-core/drm_bo.c | 65 +++++++++++++++---------- linux-core/drm_bo_lock2.c | 99 ++++++++++++++++++++++++++++++++++++++ linux-core/drm_objects.h | 27 ++++++++++- linux-core/drm_stub.c | 1 - shared-core/drm.h | 1 + shared-core/i915_dma.c | 1 + 8 files changed, 176 insertions(+), 31 deletions(-) create mode 100644 linux-core/drm_bo_lock2.c diff --git a/libdrm/xf86drm.c b/libdrm/xf86drm.c index a8e054d9..b61c2250 100644 --- a/libdrm/xf86drm.c +++ b/libdrm/xf86drm.c @@ -2815,13 +2815,19 @@ int drmMMTakedown(int fd, unsigned memType) return 0; } -int drmMMLock(int fd, unsigned memType) +/* + * If this function returns an error, and lockBM was set to 1, + * the buffer manager is NOT locked. + */ + +int drmMMLock(int fd, unsigned memType, int lockBM) { struct drm_mm_type_arg arg; int ret; memset(&arg, 0, sizeof(arg)); arg.mem_type = memType; + arg.lock_unlock_bm = lock_bm; do{ ret = ioctl(fd, DRM_IOCTL_MM_LOCK, &arg); @@ -2830,7 +2836,7 @@ int drmMMLock(int fd, unsigned memType) return (ret) ? 
-errno : 0; } -int drmMMUnlock(int fd, unsigned memType) +int drmMMUnlock(int fd, unsigned memType, int unlockBM) { struct drm_mm_type_arg arg; int ret; @@ -2838,6 +2844,7 @@ int drmMMUnlock(int fd, unsigned memType) memset(&arg, 0, sizeof(arg)); arg.mem_type = memType; + arg.lock_unlock_bm = unlockBM; do{ ret = ioctl(fd, DRM_IOCTL_MM_UNLOCK, &arg); diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel index 715454bc..86b225f3 100644 --- a/linux-core/Makefile.kernel +++ b/linux-core/Makefile.kernel @@ -13,7 +13,7 @@ drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \ drm_sysfs.o drm_pci.o drm_agpsupport.o drm_scatter.o \ drm_memory_debug.o ati_pcigart.o drm_sman.o \ drm_hashtab.o drm_mm.o drm_object.o drm_compat.o \ - drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o + drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o drm_bo_lock2.o tdfx-objs := tdfx_drv.o r128-objs := r128_drv.o r128_cce.o r128_state.o r128_irq.o mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 35ac8a0a..a2a0291d 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1768,11 +1768,16 @@ int drm_bo_setstatus_ioctl(struct drm_device *dev, struct drm_bo_info_req *req = &arg->d.req; struct drm_bo_info_rep *rep = &arg->d.rep; int ret; + if (!dev->bm.initialized) { DRM_ERROR("Buffer object manager is not initialized.\n"); return -EINVAL; } + ret = drm_bo_read_lock(&dev->bm.bm_lock); + if (ret) + return ret; + ret = drm_bo_handle_validate(file_priv, req->handle, req->fence_class, req->flags, req->mask, @@ -1780,6 +1785,7 @@ int drm_bo_setstatus_ioctl(struct drm_device *dev, 1, rep, NULL); + (void) drm_bo_read_unlock(&dev->bm.bm_lock); if (ret) return ret; @@ -1898,7 +1904,8 @@ int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file * static int drm_bo_leave_list(struct drm_buffer_object * bo, uint32_t mem_type, - int free_pinned, int allow_errors) + int free_pinned, + int allow_errors) { struct drm_device *dev = bo->dev; int ret = 0; @@ -2150,7 +2157,6 @@ int drm_bo_driver_finish(struct drm_device * dev) unsigned i = DRM_BO_MEM_TYPES; struct drm_mem_type_manager *man; - mutex_lock(&dev->bm.init_mutex); mutex_lock(&dev->struct_mutex); if (!bm->initialized) @@ -2190,7 +2196,6 @@ int drm_bo_driver_finish(struct drm_device * dev) } out: mutex_unlock(&dev->struct_mutex); - mutex_unlock(&dev->bm.init_mutex); return ret; } @@ -2207,7 +2212,7 @@ int drm_bo_driver_init(struct drm_device * dev) struct drm_buffer_manager *bm = &dev->bm; int ret = -EINVAL; - mutex_lock(&dev->bm.init_mutex); + drm_bo_init_lock(&bm->bm_lock); mutex_lock(&dev->struct_mutex); if (!driver) goto out_unlock; @@ -2233,7 +2238,6 @@ int drm_bo_driver_init(struct drm_device * dev) INIT_LIST_HEAD(&bm->ddestroy); out_unlock: mutex_unlock(&dev->struct_mutex); - mutex_unlock(&dev->bm.init_mutex); return ret; } @@ -2252,6 +2256,10 @@ int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_ } ret = -EINVAL; + ret = drm_bo_write_lock(&bm->bm_lock, file_priv); + if (ret) + return ret; + if (arg->magic != DRM_BO_INIT_MAGIC) { DRM_ERROR("You are using an old libdrm that is not compatible with\n" "\tthe kernel DRM module. 
Please upgrade your libdrm.\n"); @@ -2271,7 +2279,6 @@ int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_ return -EINVAL; } - mutex_lock(&dev->bm.init_mutex); mutex_lock(&dev->struct_mutex); if (!bm->initialized) { DRM_ERROR("DRM memory manager was not initialized.\n"); @@ -2286,7 +2293,8 @@ int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_ out: mutex_unlock(&dev->struct_mutex); - mutex_unlock(&dev->bm.init_mutex); + (void) drm_bo_write_unlock(&bm->bm_lock, file_priv); + if (ret) return ret; @@ -2305,8 +2313,10 @@ int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *f return -EINVAL; } - LOCK_TEST_WITH_RETURN(dev, file_priv); - mutex_lock(&dev->bm.init_mutex); + ret = drm_bo_write_lock(&bm->bm_lock, file_priv); + if (ret) + return ret; + mutex_lock(&dev->struct_mutex); ret = -EINVAL; if (!bm->initialized) { @@ -2324,7 +2334,8 @@ int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *f } out: mutex_unlock(&dev->struct_mutex); - mutex_unlock(&dev->bm.init_mutex); + (void) drm_bo_write_unlock(&bm->bm_lock, file_priv); + if (ret) return ret; @@ -2342,20 +2353,28 @@ int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_ return -EINVAL; } - LOCK_TEST_WITH_RETURN(dev, file_priv); - mutex_lock(&dev->bm.init_mutex); + if (arg->lock_unlock_bm) { + ret = drm_bo_write_lock(&dev->bm.bm_lock, file_priv); + if (ret) + return ret; + } + mutex_lock(&dev->struct_mutex); ret = drm_bo_lock_mm(dev, arg->mem_type); mutex_unlock(&dev->struct_mutex); - mutex_unlock(&dev->bm.init_mutex); - if (ret) + if (ret) { + (void) drm_bo_write_unlock(&dev->bm.bm_lock, file_priv); return ret; + } return 0; } -int drm_mm_unlock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) +int drm_mm_unlock_ioctl(struct drm_device *dev, + void *data, + struct drm_file *file_priv) { + struct drm_mm_type_arg *arg = data; struct drm_bo_driver *driver = dev->driver->bo_driver; int ret; @@ -2364,16 +2383,12 @@ int drm_mm_unlock_ioctl(struct drm_device *dev, void *data, struct drm_file *fil return -EINVAL; } - LOCK_TEST_WITH_RETURN(dev, file_priv); - mutex_lock(&dev->bm.init_mutex); - mutex_lock(&dev->struct_mutex); - ret = 0; - - mutex_unlock(&dev->struct_mutex); - mutex_unlock(&dev->bm.init_mutex); - if (ret) - return ret; - + if (arg->lock_unlock_bm) { + ret = drm_bo_write_unlock(&dev->bm.bm_lock, file_priv); + if (ret) + return ret; + } + return 0; } diff --git a/linux-core/drm_bo_lock2.c b/linux-core/drm_bo_lock2.c new file mode 100644 index 00000000..73e58bc0 --- /dev/null +++ b/linux-core/drm_bo_lock2.c @@ -0,0 +1,99 @@ +#include "drmP.h" + +void drm_bo_init_lock(struct drm_bo_lock *lock) +{ + DRM_INIT_WAITQUEUE(&lock->queue); + atomic_set(&lock->write_lock_pending, 0); + atomic_set(&lock->readers, 0); + +} + +void drm_bo_read_unlock(struct drm_bo_lock *lock) +{ + if (unlikely(atomic_add_negative(-1, &lock->readers) == 0)) + BUG(); + if (atomic_read(&lock->readers) == 0) + wake_up_interruptible(&lock->queue); +} + +int drm_bo_read_lock(struct drm_bo_lock *lock) +{ + while( unlikely(atomic_read(&lock->write_lock_pending) != 0)) { + int ret; + ret = wait_event_interruptible + (lock->queue, + atomic_read(&lock->write_lock_pending) == 0); + if (ret) + return -EAGAIN; + } + + while( unlikely (!atomic_add_unless(&lock->readers, 1, -1))) { + int ret; + ret = wait_event_interruptible + (lock->queue, + atomic_add_unless(&lock->readers, 1, -1)); + if (ret) + return -EAGAIN; + } + return 0; +} 
+ +static int __drm_bo_write_unlock(struct drm_bo_lock *lock) +{ + if (unlikely(atomic_cmpxchg(&lock->readers, -1, 0) != -1)) + return -EINVAL; + if (unlikely(atomic_cmpxchg(&lock->write_lock_pending, 1, 0) != 1)) + return -EINVAL; + wake_up_interruptible(&lock->queue); + return 0; +} + +static void drm_bo_write_lock_remove(struct drm_file *file_priv, + struct drm_user_object *item) +{ + struct drm_bo_lock *lock = + container_of(item, struct drm_bo_lock, base); + int ret; + + ret = __drm_bo_write_unlock(lock); + BUG_ON(ret); +} + +int drm_bo_write_lock(struct drm_bo_lock *lock, struct drm_file *file_priv) +{ + int ret = 0; + struct drm_device *dev; + + if (unlikely(atomic_cmpxchg(&lock->write_lock_pending, 0, 1) != 0)) + return -EINVAL; + + while(unlikely(atomic_cmpxchg(&lock->readers, 0, -1) != 0)) { + ret = wait_event_interruptible + (lock->queue, + atomic_cmpxchg(&lock->readers, 0, -1) == 0); + + if (ret) { + atomic_set(&lock->write_lock_pending, 0); + wake_up_interruptible(&lock->queue); + return -EAGAIN; + } + } + + dev = file_priv->head->dev; + mutex_lock(&dev->struct_mutex); + ret = drm_add_user_object(file_priv, &lock->base, 0); + lock->base.remove = &drm_bo_write_lock_remove; + lock->base.type = drm_lock_type; + if (ret) + (void) __drm_bo_write_unlock(lock); + mutex_unlock(&dev->struct_mutex); + + return ret; +} + + +int drm_bo_write_unlock(struct drm_bo_lock *lock, struct drm_file *file_priv) +{ + return drm_user_object_unref(file_priv, lock->base.hash.key, + drm_lock_type); +} diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index f153b84a..0b937dc0 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -43,6 +43,7 @@ struct drm_bo_mem_reg; enum drm_object_type { drm_fence_type, drm_buffer_type, + drm_lock_type, /* * Add other user space object types here. */ @@ -414,6 +415,13 @@ struct drm_mem_type_manager { void *io_addr; }; +struct drm_bo_lock { + struct drm_user_object base; + wait_queue_head_t queue; + atomic_t write_lock_pending; + atomic_t readers; +}; + #define _DRM_FLAG_MEMTYPE_FIXED 0x00000001 /* Fixed (on-card) PCI memory */ #define _DRM_FLAG_MEMTYPE_MAPPABLE 0x00000002 /* Memory mappable */ #define _DRM_FLAG_MEMTYPE_CACHED 0x00000004 /* Cached binding */ @@ -423,8 +431,8 @@ struct drm_mem_type_manager { #define _DRM_FLAG_MEMTYPE_CSELECT 0x00000020 /* Select caching */ struct drm_buffer_manager { - struct mutex init_mutex; - struct mutex evict_mutex; + struct drm_bo_lock bm_lock; + struct mutex evict_mutex; int nice_mode; int initialized; struct drm_file *last_to_validate; @@ -603,6 +611,21 @@ extern void drm_regs_init(struct drm_reg_manager *manager, const void *), void (*reg_destroy)(struct drm_reg *)); +/* + * drm_bo_lock.c + * Simple replacement for the hardware lock on buffer manager init and clean. 
+ */ + + +extern void drm_bo_init_lock(struct drm_bo_lock *lock); +extern void drm_bo_read_unlock(struct drm_bo_lock *lock); +extern int drm_bo_read_lock(struct drm_bo_lock *lock); +extern int drm_bo_write_lock(struct drm_bo_lock *lock, + struct drm_file *file_priv); + +extern int drm_bo_write_unlock(struct drm_bo_lock *lock, + struct drm_file *file_priv); + #ifdef CONFIG_DEBUG_MUTEXES #define DRM_ASSERT_LOCKED(_mutex) \ BUG_ON(!mutex_is_locked(_mutex) || \ diff --git a/linux-core/drm_stub.c b/linux-core/drm_stub.c index 07ea91e0..9e140ac2 100644 --- a/linux-core/drm_stub.c +++ b/linux-core/drm_stub.c @@ -72,7 +72,6 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev, init_timer(&dev->timer); mutex_init(&dev->struct_mutex); mutex_init(&dev->ctxlist_mutex); - mutex_init(&dev->bm.init_mutex); mutex_init(&dev->bm.evict_mutex); idr_init(&dev->drw_idr); diff --git a/shared-core/drm.h b/shared-core/drm.h index 0ffd0ad5..f88192ff 100644 --- a/shared-core/drm.h +++ b/shared-core/drm.h @@ -872,6 +872,7 @@ struct drm_bo_op_arg { struct drm_mm_type_arg { unsigned int mem_type; + int lock_unlock_bm; }; struct drm_mm_init_arg { diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c index 5a51f6ef..99d98cd3 100644 --- a/shared-core/i915_dma.c +++ b/shared-core/i915_dma.c @@ -930,6 +930,7 @@ int i915_validate_buffer_list(struct drm_file *file_priv, req->bo_req.flags, req->bo_req.mask, req->bo_req.hint, + 0, &rep.bo_info, &buffers[buf_count]); From 3b19b50cb5cd31e60eb03e99dd1109b6d0f5b8a3 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Sun, 21 Oct 2007 12:20:56 +0200 Subject: [PATCH 408/437] Remove the need for the hardware lock in the buffer manager. Add interface entry cleaning a memory type without touching NO_EVICT buffers. --- libdrm/xf86drm.c | 7 +- libdrm/xf86mm.h | 4 +- linux-core/Makefile.kernel | 2 +- linux-core/drm_bo.c | 11 ++- linux-core/drm_bo_lock.c | 178 +++++++++++++++++++++++++++++++++++++ linux-core/drm_compat.c | 3 + linux-core/drm_vm.c | 11 ++- shared-core/drm.h | 5 +- 8 files changed, 209 insertions(+), 12 deletions(-) create mode 100644 linux-core/drm_bo_lock.c diff --git a/libdrm/xf86drm.c b/libdrm/xf86drm.c index b61c2250..ee0043cb 100644 --- a/libdrm/xf86drm.c +++ b/libdrm/xf86drm.c @@ -2820,14 +2820,15 @@ int drmMMTakedown(int fd, unsigned memType) * the buffer manager is NOT locked. */ -int drmMMLock(int fd, unsigned memType, int lockBM) +int drmMMLock(int fd, unsigned memType, int lockBM, int ignoreNoEvict) { struct drm_mm_type_arg arg; int ret; memset(&arg, 0, sizeof(arg)); arg.mem_type = memType; - arg.lock_unlock_bm = lock_bm; + arg.lock_flags |= (lockBM) ? DRM_BO_LOCK_UNLOCK_BM : 0; + arg.lock_flags |= (ignoreNoEvict) = DRM_BO_LOCK_IGNORE_NO_EVICT; do{ ret = ioctl(fd, DRM_IOCTL_MM_LOCK, &arg); @@ -2844,7 +2845,7 @@ int drmMMUnlock(int fd, unsigned memType, int unlockBM) memset(&arg, 0, sizeof(arg)); arg.mem_type = memType; - arg.lock_unlock_bm = unlockBM; + arg.lock_flags |= (unlockBM) ? 
DRM_BO_LOCK_UNLOCK_BM : 0; do{ ret = ioctl(fd, DRM_IOCTL_MM_UNLOCK, &arg); diff --git a/libdrm/xf86mm.h b/libdrm/xf86mm.h index f8ec1d75..0516bd32 100644 --- a/libdrm/xf86mm.h +++ b/libdrm/xf86mm.h @@ -172,8 +172,8 @@ extern int drmBOWaitIdle(int fd, drmBO *buf, unsigned hint); extern int drmMMInit(int fd, unsigned long pOffset, unsigned long pSize, unsigned memType); extern int drmMMTakedown(int fd, unsigned memType); -extern int drmMMLock(int fd, unsigned memType); -extern int drmMMUnlock(int fd, unsigned memType); +extern int drmMMLock(int fd, unsigned memType, int lockBM, int ignoreNoEvict); +extern int drmMMUnlock(int fd, unsigned memType, int unlockBM); #endif diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel index 86b225f3..79136431 100644 --- a/linux-core/Makefile.kernel +++ b/linux-core/Makefile.kernel @@ -13,7 +13,7 @@ drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \ drm_sysfs.o drm_pci.o drm_agpsupport.o drm_scatter.o \ drm_memory_debug.o ati_pcigart.o drm_sman.o \ drm_hashtab.o drm_mm.o drm_object.o drm_compat.o \ - drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o drm_bo_lock2.o + drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o drm_bo_lock.o tdfx-objs := tdfx_drv.o r128-objs := r128_drv.o r128_cce.o r128_state.o r128_irq.o mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index a2a0291d..e6eb6320 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -2255,11 +2255,11 @@ int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_ return -EINVAL; } - ret = -EINVAL; ret = drm_bo_write_lock(&bm->bm_lock, file_priv); if (ret) return ret; + ret = -EINVAL; if (arg->magic != DRM_BO_INIT_MAGIC) { DRM_ERROR("You are using an old libdrm that is not compatible with\n" "\tthe kernel DRM module. Please upgrade your libdrm.\n"); @@ -2353,7 +2353,12 @@ int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_ return -EINVAL; } - if (arg->lock_unlock_bm) { + if (arg->lock_flags & DRM_BO_LOCK_IGNORE_NO_EVICT) { + DRM_ERROR("Lock flag DRM_BO_LOCK_IGNORE_NO_EVICT not supported yet.\n"); + return -EINVAL; + } + + if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) { ret = drm_bo_write_lock(&dev->bm.bm_lock, file_priv); if (ret) return ret; @@ -2383,7 +2388,7 @@ int drm_mm_unlock_ioctl(struct drm_device *dev, return -EINVAL; } - if (arg->lock_unlock_bm) { + if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) { ret = drm_bo_write_unlock(&dev->bm.bm_lock, file_priv); if (ret) return ret; diff --git a/linux-core/drm_bo_lock.c b/linux-core/drm_bo_lock.c new file mode 100644 index 00000000..e5a86826 --- /dev/null +++ b/linux-core/drm_bo_lock.c @@ -0,0 +1,178 @@ +/************************************************************************** + * + * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Thomas Hellström + */ + +/* + * This file implements a simple replacement for the buffer manager use + * of the heavyweight hardware lock. + * The lock is a read-write lock. Taking it in read mode is fast, and + * intended for in-kernel use only. + * Taking it in write mode is slow. + * + * The write mode is used only when there is a need to block all + * user-space processes from allocating a + * new memory area. + * Typical use in write mode is X server VT switching, and it's allowed + * to leave kernel space with the write lock held. If a user-space process + * dies while having the write-lock, it will be released during the file + * descriptor release. + * + * The read lock is typically placed at the start of an IOCTL- or + * user-space callable function that may end up allocating a memory area. + * This includes setstatus, super-ioctls and no_pfn; the latter may move + * unmappable regions to mappable. It's a bug to leave kernel space with the + * read lock held. + * + * Both read- and write lock taking is interruptible for low signal-delivery + * latency. The locking functions will return -EAGAIN if interrupted by a + * signal. + * + * Locking order: The lock should be taken BEFORE any kernel mutexes + * or spinlocks. 
+ */ + +#include "drmP.h" + +void drm_bo_init_lock(struct drm_bo_lock *lock) +{ + DRM_INIT_WAITQUEUE(&lock->queue); + atomic_set(&lock->write_lock_pending, 0); + atomic_set(&lock->readers, 0); +} + +void drm_bo_read_unlock(struct drm_bo_lock *lock) +{ + if (unlikely(atomic_add_negative(-1, &lock->readers))) + BUG(); + if (atomic_read(&lock->readers) == 0) + wake_up_interruptible(&lock->queue); +} + +EXPORT_SYMBOL(drm_bo_read_unlock); + +int drm_bo_read_lock(struct drm_bo_lock *lock) +{ + while (unlikely(atomic_read(&lock->write_lock_pending) != 0)) { + int ret; + ret = wait_event_interruptible + (lock->queue, atomic_read(&lock->write_lock_pending) == 0); + if (ret) + return -EAGAIN; + } + + while (unlikely(!atomic_add_unless(&lock->readers, 1, -1))) { + int ret; + ret = wait_event_interruptible + (lock->queue, atomic_add_unless(&lock->readers, 1, -1)); + if (ret) + return -EAGAIN; + } + return 0; +} + +EXPORT_SYMBOL(drm_bo_read_lock); + +static int __drm_bo_write_unlock(struct drm_bo_lock *lock) +{ + if (unlikely(atomic_cmpxchg(&lock->readers, -1, 0) != -1)) + return -EINVAL; + if (unlikely(atomic_cmpxchg(&lock->write_lock_pending, 1, 0) != 1)) + return -EINVAL; + wake_up_interruptible(&lock->queue); + return 0; +} + +static void drm_bo_write_lock_remove(struct drm_file *file_priv, + struct drm_user_object *item) +{ + struct drm_bo_lock *lock = container_of(item, struct drm_bo_lock, base); + int ret; + + ret = __drm_bo_write_unlock(lock); + BUG_ON(ret); +} + +int drm_bo_write_lock(struct drm_bo_lock *lock, struct drm_file *file_priv) +{ + int ret = 0; + struct drm_device *dev; + + if (unlikely(atomic_cmpxchg(&lock->write_lock_pending, 0, 1) != 0)) { + return -EINVAL; + } + + while (unlikely(atomic_cmpxchg(&lock->readers, 0, -1) != 0)) { + ret = wait_event_interruptible + (lock->queue, atomic_cmpxchg(&lock->readers, 0, -1) == 0); + + if (ret) { + atomic_set(&lock->write_lock_pending, 0); + wake_up_interruptible(&lock->queue); + return -EAGAIN; + } + } + + /* + * Add a dummy user-object, the destructor of which will + * make sure the lock is released if the client dies + * while holding it. 
+ */ + + dev = file_priv->head->dev; + mutex_lock(&dev->struct_mutex); + ret = drm_add_user_object(file_priv, &lock->base, 0); + lock->base.remove = &drm_bo_write_lock_remove; + lock->base.type = drm_lock_type; + if (ret) { + (void)__drm_bo_write_unlock(lock); + } + mutex_unlock(&dev->struct_mutex); + + return ret; +} + +int drm_bo_write_unlock(struct drm_bo_lock *lock, struct drm_file *file_priv) +{ + struct drm_device *dev = file_priv->head->dev; + struct drm_ref_object *ro; + + mutex_lock(&dev->struct_mutex); + + if (lock->base.owner != file_priv) { + mutex_unlock(&dev->struct_mutex); + return -EINVAL; + } + ro = drm_lookup_ref_object(file_priv, &lock->base, _DRM_REF_USE); + BUG_ON(!ro); + drm_remove_ref_object(file_priv, ro); + lock->base.owner = NULL; + + mutex_unlock(&dev->struct_mutex); + return 0; +} diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c index e51aedb7..ae44e500 100644 --- a/linux-core/drm_compat.c +++ b/linux-core/drm_compat.c @@ -212,6 +212,8 @@ static struct page *drm_bo_vm_fault(struct vm_area_struct *vma, unsigned long bus_offset; unsigned long bus_size; + dev = bo->dev; + while(drm_bo_read_lock(&dev->bm.bm_lock)); mutex_lock(&bo->mutex); @@ -289,6 +291,7 @@ static struct page *drm_bo_vm_fault(struct vm_area_struct *vma, data->type = VM_FAULT_OOM; out_unlock: mutex_unlock(&bo->mutex); + drm_bo_read_unlock(&dev->bm.bm_lock); return NULL; } diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c index c4e790ef..d2554f31 100644 --- a/linux-core/drm_vm.c +++ b/linux-core/drm_vm.c @@ -728,10 +728,17 @@ static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma, if (address > vma->vm_end) return NOPFN_SIGBUS; - err = mutex_lock_interruptible(&bo->mutex); + dev = bo->dev; + err = drm_bo_read_lock(&dev->bm.bm_lock); if (err) return NOPFN_REFAULT; + err = mutex_lock_interruptible(&bo->mutex); + if (err) { + drm_bo_read_unlock(&dev->bm.bm_lock); + return NOPFN_REFAULT; + } + err = drm_bo_wait(bo, 0, 0, 0); if (err) { ret = (err != -EAGAIN) ? NOPFN_SIGBUS : NOPFN_REFAULT; @@ -754,7 +761,6 @@ static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma, } } - dev = bo->dev; err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset, &bus_size); @@ -792,6 +798,7 @@ static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma, } out_unlock: mutex_unlock(&bo->mutex); + drm_bo_read_unlock(&dev->bm.bm_lock); return ret; } #endif diff --git a/shared-core/drm.h b/shared-core/drm.h index f88192ff..80c1a3e2 100644 --- a/shared-core/drm.h +++ b/shared-core/drm.h @@ -870,9 +870,12 @@ struct drm_bo_op_arg { #define DRM_BO_MEM_TYPES 8 /* For now. */ +#define DRM_BO_LOCK_UNLOCK_BM (1 << 0) +#define DRM_BO_LOCK_IGNORE_NO_EVICT (1 << 1) + struct drm_mm_type_arg { unsigned int mem_type; - int lock_unlock_bm; + unsigned int lock_flags; }; struct drm_mm_init_arg { From 9ddff6d15fdff571193aac10ef81e67798fd712d Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Sun, 21 Oct 2007 12:26:26 +0200 Subject: [PATCH 409/437] Adapt i915 super-ioctl for lock-free operation. 
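The super-ioctl now brackets validation and submission with the buffer-manager read lock
instead of LOCK_TEST_WITH_RETURN. A minimal sketch of the resulting pattern for a driver
ioctl that may allocate memory areas (the helper in the middle is hypothetical):

    int some_driver_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
    {
            int ret;

            ret = drm_bo_read_lock(&dev->bm.bm_lock);
            if (ret)
                    return ret;   /* -EAGAIN if interrupted by a signal */

            /* hypothetical body that validates buffers and submits work */
            ret = do_validate_and_submit(dev, data, file_priv);

            drm_bo_read_unlock(&dev->bm.bm_lock);
            return ret;
    }
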
--- shared-core/i915_dma.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c index 99d98cd3..14a91f36 100644 --- a/shared-core/i915_dma.c +++ b/shared-core/i915_dma.c @@ -1010,8 +1010,6 @@ static int i915_execbuffer(struct drm_device *dev, void *data, } - LOCK_TEST_WITH_RETURN(dev, file_priv); - if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects, batch->num_cliprects * sizeof(struct drm_clip_rect))) @@ -1020,11 +1018,18 @@ static int i915_execbuffer(struct drm_device *dev, void *data, if (exec_buf->num_buffers > dev_priv->max_validate_buffers) return -EINVAL; + + ret = drm_bo_read_lock(&dev->bm.bm_lock); + if (ret) + return ret; + num_buffers = exec_buf->num_buffers; buffers = drm_calloc(num_buffers, sizeof(struct drm_buffer_object *), DRM_MEM_DRIVER); - if (!buffers) + if (!buffers) { + drm_bo_read_unlock(&dev->bm.bm_lock); return -ENOMEM; + } /* validate buffer list + fixup relocations */ ret = i915_validate_buffer_list(file_priv, 0, exec_buf->ops_list, @@ -1068,7 +1073,7 @@ out_err0: out_free: drm_free(buffers, (exec_buf->num_buffers * sizeof(struct drm_buffer_object *)), DRM_MEM_DRIVER); - + drm_bo_read_unlock(&dev->bm.bm_lock); return ret; } #endif From 4ebe7471cbfdd6afa33485ea9ec55812da38445f Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Sun, 21 Oct 2007 12:31:00 +0200 Subject: [PATCH 410/437] Disable i915 accelerated blit copy moves for now until we can guarantee that it doesn't clash with the X server. --- linux-core/i915_buffer.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c index f3ba7ce5..f81def8f 100644 --- a/linux-core/i915_buffer.c +++ b/linux-core/i915_buffer.c @@ -121,6 +121,8 @@ uint32_t i915_evict_mask(struct drm_buffer_object *bo) } } +#if 0 /* See comment below */ + static void i915_emit_copy_blit(struct drm_device * dev, uint32_t src_offset, uint32_t dst_offset, @@ -221,6 +223,16 @@ out_cleanup: return ret; } +#endif + +/* + * Disable i915_move_flip for now, since we can't guarantee that the hardware lock + * is held here. To re-enable we need to make sure either + * a) The X server is using DRM to submit commands to the ring, or + * b) DRM can use the HP ring for these blits. This means i915 needs to implement + * a new ring submission mechanism and fence class. + */ + int i915_move(struct drm_buffer_object * bo, int evict, int no_wait, struct drm_bo_mem_reg * new_mem) { @@ -229,10 +241,10 @@ int i915_move(struct drm_buffer_object * bo, if (old_mem->mem_type == DRM_BO_MEM_LOCAL) { return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); } else if (new_mem->mem_type == DRM_BO_MEM_LOCAL) { - if (i915_move_flip(bo, evict, no_wait, new_mem)) + if (0 /*i915_move_flip(bo, evict, no_wait, new_mem)*/) return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); } else { - if (i915_move_blit(bo, evict, no_wait, new_mem)) + if (0 /*i915_move_blit(bo, evict, no_wait, new_mem)*/) return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); } return 0; From 6420d33b02db0da900140c238bb35f13abc182e7 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Sun, 21 Oct 2007 12:57:43 +0200 Subject: [PATCH 411/437] Get the lock flags right in libdrm. 
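For reference, a hedged sketch of how a client such as the X server might drive the
corrected entry points across a VT switch. The memory type and helpers are illustrative;
note that a non-zero ignoreNoEvict still fails with -EINVAL, since the kernel does not
implement DRM_BO_LOCK_IGNORE_NO_EVICT yet.

    static int leave_vt(int fd)
    {
            /* evict TT memory and take the buffer-manager write lock */
            return drmMMLock(fd, DRM_BO_MEM_TT, 1, 0);
    }

    static int enter_vt(int fd)
    {
            /* release the buffer manager again on VT re-acquire */
            return drmMMUnlock(fd, DRM_BO_MEM_TT, 1);
    }
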
--- libdrm/xf86drm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libdrm/xf86drm.c b/libdrm/xf86drm.c index ee0043cb..a50eff9b 100644 --- a/libdrm/xf86drm.c +++ b/libdrm/xf86drm.c @@ -2828,7 +2828,7 @@ int drmMMLock(int fd, unsigned memType, int lockBM, int ignoreNoEvict) memset(&arg, 0, sizeof(arg)); arg.mem_type = memType; arg.lock_flags |= (lockBM) ? DRM_BO_LOCK_UNLOCK_BM : 0; - arg.lock_flags |= (ignoreNoEvict) = DRM_BO_LOCK_IGNORE_NO_EVICT; + arg.lock_flags |= (ignoreNoEvict) ? DRM_BO_LOCK_IGNORE_NO_EVICT : 0; do{ ret = ioctl(fd, DRM_IOCTL_MM_LOCK, &arg); From 22883ff26b8a45ab2bec60accc4b822cf6b4f214 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Mon, 22 Oct 2007 11:54:41 +1100 Subject: [PATCH 412/437] i915: split reloc execution into separate function --- shared-core/i915_dma.c | 65 +++++++++++++++++++++++++----------------- 1 file changed, 39 insertions(+), 26 deletions(-) diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c index 5a51f6ef..d0d65f8c 100644 --- a/shared-core/i915_dma.c +++ b/shared-core/i915_dma.c @@ -873,6 +873,43 @@ out: return ret; } +static int i915_exec_reloc(struct drm_file *file_priv, drm_handle_t buf_handle, + drm_handle_t buf_reloc_handle, + struct drm_buffer_object **buffers, + uint32_t buf_count) +{ + struct drm_device *dev = file_priv->head->dev; + struct i915_relocatee_info relocatee; + int ret = 0; + + memset(&relocatee, 0, sizeof(relocatee)); + + mutex_lock(&dev->struct_mutex); + relocatee.buf = drm_lookup_buffer_object(file_priv, buf_handle, 1); + mutex_unlock(&dev->struct_mutex); + if (!relocatee.buf) { + DRM_DEBUG("relocatee buffer invalid %08x\n", buf_handle); + ret = -EINVAL; + goto out_err; + } + + while (buf_reloc_handle) { + ret = i915_process_relocs(file_priv, buf_handle, &buf_reloc_handle, &relocatee, buffers, buf_count); + if (ret) { + DRM_ERROR("process relocs failed\n"); + break; + } + } + + drm_bo_kunmap(&relocatee.kmap); + mutex_lock(&dev->struct_mutex); + drm_bo_usage_deref_locked(&relocatee.buf); + mutex_unlock(&dev->struct_mutex); + +out_err: + return ret; +} + /* * Validate, add fence and relocate a block of bos from a userspace list */ @@ -889,7 +926,7 @@ int i915_validate_buffer_list(struct drm_file *file_priv, unsigned buf_count = 0; struct drm_device *dev = file_priv->head->dev; uint32_t buf_reloc_handle, buf_handle; - struct i915_relocatee_info relocatee; + do { if (buf_count >= *num_buffers) { @@ -950,33 +987,9 @@ int i915_validate_buffer_list(struct drm_file *file_priv, buf_count++; if (buf_reloc_handle) { - memset(&relocatee, 0, sizeof(relocatee)); - - mutex_lock(&dev->struct_mutex); - relocatee.buf = drm_lookup_buffer_object(file_priv, buf_handle, 1); - mutex_unlock(&dev->struct_mutex); - if (!relocatee.buf) { - DRM_DEBUG("relocatee buffer invalid %08x\n", buf_handle); - ret = -EINVAL; - goto out_err; - } - - while (buf_reloc_handle) { - ret = i915_process_relocs(file_priv, buf_handle, &buf_reloc_handle, &relocatee, buffers, buf_count); - if (ret) { - DRM_ERROR("process relocs failed\n"); - break; - } - } - - drm_bo_kunmap(&relocatee.kmap); - mutex_lock(&dev->struct_mutex); - drm_bo_usage_deref_locked(&relocatee.buf); - mutex_unlock(&dev->struct_mutex); - + ret = i915_exec_reloc(file_priv, buf_handle, buf_reloc_handle, buffers, buf_count); if (ret) goto out_err; - } } while (next != 0); *num_buffers = buf_count; From d4ce4be0dad516caa43fddcd8a56c28f264c9c2a Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Mon, 22 Oct 2007 13:16:51 +0200 Subject: [PATCH 413/437] Setstatus header. 
--- libdrm/xf86drm.c | 1 + libdrm/xf86mm.h | 5 +++++ 2 files changed, 6 insertions(+) diff --git a/libdrm/xf86drm.c b/libdrm/xf86drm.c index a50eff9b..82b77d7f 100644 --- a/libdrm/xf86drm.c +++ b/libdrm/xf86drm.c @@ -2701,6 +2701,7 @@ int drmBOSetStatus(int fd, drmBO *buf, unsigned int desired_tile_stride, unsigned int tile_info) { + struct drm_bo_map_wait_idle_arg arg; struct drm_bo_info_req *req = &arg.d.req; struct drm_bo_info_rep *rep = &arg.d.rep; diff --git a/libdrm/xf86mm.h b/libdrm/xf86mm.h index 0516bd32..b0923440 100644 --- a/libdrm/xf86mm.h +++ b/libdrm/xf86mm.h @@ -174,6 +174,11 @@ extern int drmMMInit(int fd, unsigned long pOffset, unsigned long pSize, extern int drmMMTakedown(int fd, unsigned memType); extern int drmMMLock(int fd, unsigned memType, int lockBM, int ignoreNoEvict); extern int drmMMUnlock(int fd, unsigned memType, int unlockBM); +extern int drmBOSetStatus(int fd, drmBO *buf, + uint64_t flags, uint64_t mask, + unsigned int hint, + unsigned int desired_tile_stride, + unsigned int tile_info); #endif From 919c886b2b7728768720aac93e0f6fd1acb8b2df Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Mon, 22 Oct 2007 18:59:37 +0200 Subject: [PATCH 414/437] A cmdbuf mutex to implement validate-submit-fence atomicity in the absence of a hardware lock. --- shared-core/i915_dma.c | 15 +++++++++++++++ shared-core/i915_drv.h | 1 + 2 files changed, 16 insertions(+) diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c index 14a91f36..d07be6e0 100644 --- a/shared-core/i915_dma.c +++ b/shared-core/i915_dma.c @@ -213,6 +213,7 @@ static int i915_initialize(struct drm_device * dev, } DRM_DEBUG("Enabled hardware status page\n"); dev->dev_private = (void *)dev_priv; + mutex_init(&dev_priv->cmdbuf_mutex); return 0; } @@ -1023,11 +1024,23 @@ static int i915_execbuffer(struct drm_device *dev, void *data, if (ret) return ret; + /* + * The cmdbuf_mutex makes sure the validate-submit-fence + * operation is atomic. + */ + + ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex); + if (ret) { + drm_bo_read_unlock(&dev->bm.bm_lock); + return -EAGAIN; + } + num_buffers = exec_buf->num_buffers; buffers = drm_calloc(num_buffers, sizeof(struct drm_buffer_object *), DRM_MEM_DRIVER); if (!buffers) { drm_bo_read_unlock(&dev->bm.bm_lock); + mutex_unlock(&dev_priv->cmdbuf_mutex); return -ENOMEM; } @@ -1073,6 +1086,8 @@ out_err0: out_free: drm_free(buffers, (exec_buf->num_buffers * sizeof(struct drm_buffer_object *)), DRM_MEM_DRIVER); + + mutex_unlock(&dev_priv->cmdbuf_mutex); drm_bo_read_unlock(&dev->bm.bm_lock); return ret; } diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h index e8f18798..817288b6 100644 --- a/shared-core/i915_drv.h +++ b/shared-core/i915_drv.h @@ -139,6 +139,7 @@ typedef struct drm_i915_private { #ifdef I915_HAVE_BUFFER void *agp_iomap; unsigned int max_validate_buffers; + struct mutex cmdbuf_mutex; #endif DRM_SPINTYPE swaps_lock; From 824330d0e652e0bab1851437f120c7e76feee832 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Mon, 22 Oct 2007 19:09:36 +0200 Subject: [PATCH 415/437] Don't clobber the unfenced list with DONT_FENCE operations. 
--- linux-core/drm_bo.c | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index e6eb6320..9598e353 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1299,10 +1299,7 @@ int drm_bo_move_buffer(struct drm_buffer_object * bo, uint32_t new_mem_flags, mutex_lock(&bm->evict_mutex); mutex_lock(&dev->struct_mutex); - list_del(&bo->lru); - list_add_tail(&bo->lru, &bm->unfenced); - DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED, - _DRM_BO_FLAG_UNFENCED); + list_del_init(&bo->lru); mutex_unlock(&dev->struct_mutex); /* @@ -1322,10 +1319,6 @@ int drm_bo_move_buffer(struct drm_buffer_object * bo, uint32_t new_mem_flags, drm_mm_put_block(mem.mm_node); mem.mm_node = NULL; } - DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); - DRM_WAKEUP(&bo->event_queue); - list_del(&bo->lru); - drm_bo_add_to_lru(bo); mutex_unlock(&dev->struct_mutex); } From 3d4b32e91647f61712d54a46f0a173deff46e6b4 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Mon, 22 Oct 2007 19:16:39 +0200 Subject: [PATCH 416/437] Remove duplicate file. --- linux-core/drm_bo_lock2.c | 99 --------------------------------------- 1 file changed, 99 deletions(-) delete mode 100644 linux-core/drm_bo_lock2.c diff --git a/linux-core/drm_bo_lock2.c b/linux-core/drm_bo_lock2.c deleted file mode 100644 index 73e58bc0..00000000 --- a/linux-core/drm_bo_lock2.c +++ /dev/null @@ -1,99 +0,0 @@ -#include "drmP.h" - -void drm_bo_init_lock(struct drm_bo_lock *lock) -{ - DRM_INIT_WAITQUEUE(&lock->queue); - atomic_set(&lock->write_lock_pending, 0); - atomic_set(&lock->readers, 0); - -} - -void drm_bo_read_unlock(struct drm_bo_lock *lock) -{ - if (unlikely(atomic_add_negative(-1, &lock->readers) == 0)) - BUG(); - if (atomic_read(&lock->readers) == 0) - wake_up_interruptible(&lock->queue); -} - -int drm_bo_read_lock(struct drm_bo_lock *lock) -{ - while( unlikely(atomic_read(&lock->write_lock_pending) != 0)) { - int ret; - ret = wait_event_interruptible - (lock->queue, - atomic_read(&lock->write_lock_pending) == 0); - if (ret) - return -EAGAIN; - } - - while( unlikely (!atomic_add_unless(&lock->readers, 1, -1))) { - int ret; - ret = wait_event_interruptible - (lock->queue, - atomic_add_unless(&lock->readers, 1, -1)); - if (ret) - return -EAGAIN; - } - return 0; -} - -static int __drm_bo_write_unlock(struct drm_bo_lock *lock) -{ - if (unlikely(atomic_cmpxchg(&lock->readers, -1, 0) != -1)) - return -EINVAL; - if (unlikely(atomic_cmpxchg(&lock->write_lock_pending, 1, 0) != 1)) - return -EINVAL; - wake_up_interruptible(&lock->queue); - return 0; -} - -static void drm_bo_write_lock_remove(struct drm_file *file_priv, - struct drm_user_object *item) -{ - struct drm_bo_lock *lock = - container_of(item, struct drm_bo_lock, base); - int ret; - - ret = __drm_bo_write_unlock(lock); - BUG_ON(ret); -} - -int drm_bo_write_lock(struct drm_bo_lock *lock, struct drm_file *file_priv) -{ - int ret = 0; - struct drm_device *dev; - - if (unlikely(atomic_cmpxchg(&lock->write_lock_pending, 0, 1) != 0)) - return -EINVAL; - - while(unlikely(atomic_cmpxchg(&lock->readers, 0, -1) != 0)) { - ret = wait_event_interruptible - (lock->queue, - atomic_cmpxchg(&lock->readers, 0, -1) == 0); - - if (ret) { - atomic_set(&lock->write_lock_pending, 0); - wake_up_interruptible(&lock->queue); - return -EAGAIN; - } - } - - dev = file_priv->head->dev; - mutex_lock(&dev->struct_mutex); - ret = drm_add_user_object(file_priv, &lock->base, 0); - lock->base.remove = &drm_bo_write_lock_remove; - lock->base.type = 
drm_lock_type; - if (ret) - (void) __drm_bo_write_unlock(lock); - mutex_unlock(&dev->struct_mutex); - - return ret; -} - - -int drm_bo_write_unlock(struct drm_bo_lock *lock, struct drm_file *file_priv) -{ - return drm_user_object_unref(file_priv, lock->base.hash.key, - drm_lock_type); -} From 9a115080e870f8196adef4a19598343e63e61e45 Mon Sep 17 00:00:00 2001 From: Stephane Marchesin Date: Tue, 23 Oct 2007 02:18:56 +0200 Subject: [PATCH 417/437] nouveau: fix IGP --- shared-core/nouveau_mem.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/shared-core/nouveau_mem.c b/shared-core/nouveau_mem.c index e2f0b38d..448b69d3 100644 --- a/shared-core/nouveau_mem.c +++ b/shared-core/nouveau_mem.c @@ -223,7 +223,7 @@ void nouveau_mem_close(struct drm_device *dev) static uint32_t nouveau_mem_fb_amount_igp(struct drm_device *dev) { -#if defined(LINUX) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) +#if defined(__linux__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) struct drm_nouveau_private *dev_priv = dev->dev_private; struct pci_dev *bridge; uint32_t mem; From a294aa724a1e932fb6017383e08532bfcc914df0 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 23 Oct 2007 17:54:07 +1000 Subject: [PATCH 418/437] i915: require mfence before submitting batchbuffer --- shared-core/i915_dma.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c index d0d65f8c..f0fd6037 100644 --- a/shared-core/i915_dma.c +++ b/shared-core/i915_dma.c @@ -1044,6 +1044,9 @@ static int i915_execbuffer(struct drm_device *dev, void *data, if (ret) goto out_free; + /* make sure all previous memory operations have passed */ + asm volatile("mfence":::"memory"); + /* submit buffer */ batch->start = buffers[num_buffers-1]->offset; From fd7c24753c4020a0022aaa183cfe8fc04a307abd Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Wed, 24 Oct 2007 11:13:15 +1100 Subject: [PATCH 419/437] i915: use a drm memory barrier define --- shared-core/i915_dma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c index f0fd6037..9bc7431d 100644 --- a/shared-core/i915_dma.c +++ b/shared-core/i915_dma.c @@ -1045,7 +1045,7 @@ static int i915_execbuffer(struct drm_device *dev, void *data, goto out_free; /* make sure all previous memory operations have passed */ - asm volatile("mfence":::"memory"); + DRM_MEMORYBARRIER(); /* submit buffer */ batch->start = buffers[num_buffers-1]->offset; From 83199c257ea68a7cc0c6928109ff77bf25131819 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Wed, 24 Oct 2007 16:27:46 -0700 Subject: [PATCH 420/437] Fix missing \n on some DRM_ERROR in i915_dma.c --- shared-core/i915_dma.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c index 9bc7431d..eb8c9153 100644 --- a/shared-core/i915_dma.c +++ b/shared-core/i915_dma.c @@ -494,7 +494,7 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev, int i = 0, count, ret; if (cmd->sz & 0x3) { - DRM_ERROR("alignment"); + DRM_ERROR("alignment\n"); return -EINVAL; } @@ -532,7 +532,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev, RING_LOCALS; if ((batch->start | batch->used) & 0x7) { - DRM_ERROR("alignment"); + DRM_ERROR("alignment\n"); return -EINVAL; } From 07abc3384e24356d1302459e2e5c4699ed7b0072 Mon Sep 17 00:00:00 2001 From: Roel Kluin <12o3l@tiscali.nl> Date: Thu, 25 Oct 2007 10:24:55 +1000 Subject: [PATCH 421/437] missing mutex unlock bug --- linux-core/sis_mm.c | 1 + 1 
file changed, 1 insertion(+) diff --git a/linux-core/sis_mm.c b/linux-core/sis_mm.c index 7e162a8e..9222b08d 100644 --- a/linux-core/sis_mm.c +++ b/linux-core/sis_mm.c @@ -133,6 +133,7 @@ static int sis_drm_alloc(struct drm_device * dev, struct drm_file *file_priv, dev_priv->agp_initialized)) { DRM_ERROR ("Attempt to allocate from uninitialized memory manager.\n"); + mutex_unlock(&dev->struct_mutex); return -EINVAL; } From c5f158abbe97492f56eb60ac54679945e9d6ddae Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Thu, 25 Oct 2007 16:52:33 +1000 Subject: [PATCH 422/437] i915: remove relocatee kernel mapping sooner stops mutex taking during sleep --- shared-core/i915_dma.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c index eb8c9153..acbb41dc 100644 --- a/shared-core/i915_dma.c +++ b/shared-core/i915_dma.c @@ -864,6 +864,9 @@ int i915_process_relocs(struct drm_file *file_priv, } while (reloc_offset != reloc_end); out: + drm_bo_kunmap(&relocatee->kmap); + relocatee->data_page = NULL; + drm_bo_kunmap(&reloc_kmap); mutex_lock(&dev->struct_mutex); @@ -901,7 +904,6 @@ static int i915_exec_reloc(struct drm_file *file_priv, drm_handle_t buf_handle, } } - drm_bo_kunmap(&relocatee.kmap); mutex_lock(&dev->struct_mutex); drm_bo_usage_deref_locked(&relocatee.buf); mutex_unlock(&dev->struct_mutex); From a70fe82baf0ca2be98e02680cff489f90b0ea3de Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Thu, 25 Oct 2007 16:53:18 +1000 Subject: [PATCH 423/437] i915: relocate buffers before validation add memory barrier between two --- shared-core/i915_dma.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c index acbb41dc..1e15e7ce 100644 --- a/shared-core/i915_dma.c +++ b/shared-core/i915_dma.c @@ -964,6 +964,13 @@ int i915_validate_buffer_list(struct drm_file *file_priv, buf_handle = req->bo_req.handle; buf_reloc_handle = arg.reloc_handle; + if (buf_reloc_handle) { + ret = i915_exec_reloc(file_priv, buf_handle, buf_reloc_handle, buffers, buf_count); + if (ret) + goto out_err; + DRM_MEMORYBARRIER(); + } + rep.ret = drm_bo_handle_validate(file_priv, req->bo_req.handle, req->bo_req.fence_class, req->bo_req.flags, @@ -988,11 +995,6 @@ int i915_validate_buffer_list(struct drm_file *file_priv, data = next; buf_count++; - if (buf_reloc_handle) { - ret = i915_exec_reloc(file_priv, buf_handle, buf_reloc_handle, buffers, buf_count); - if (ret) - goto out_err; - } } while (next != 0); *num_buffers = buf_count; return 0; From b5cad27e05ad3666be8ccdf71e10d743efa5849e Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Thu, 25 Oct 2007 09:49:33 +0200 Subject: [PATCH 424/437] Fix buffer object flag / mask checking. 
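The point of passing the caller's mask rather than the hint is that the privilege check
should look at the bits the caller asked to change, not at bits inherited from the
buffer's current state. A small, self-contained worked example of the merge performed
before drm_bo_new_mask() (bit positions are made up for illustration, not the real
DRM_BO_FLAG layout):

    #include <stdint.h>
    #include <stdio.h>

    #define NO_EVICT  (1ull << 0)
    #define READ      (1ull << 1)
    #define MEM_TT    (1ull << 2)
    #define MEM_VRAM  (1ull << 3)

    /* Same semantics as DRM_FLAG_MASKED(x, val, mask): bits inside 'mask'
       come from 'val', bits outside it keep their old value in 'x'. */
    #define FLAG_MASKED(x, val, mask) ((x) = ((x) & ~(mask)) | ((val) & (mask)))

    int main(void)
    {
            uint64_t cur   = NO_EVICT | MEM_TT | READ;   /* bo->mem.mask   */
            uint64_t flags = MEM_VRAM | READ;            /* caller request */
            uint64_t mask  = MEM_VRAM | MEM_TT;          /* caller mask    */

            FLAG_MASKED(flags, cur, ~mask);
            /* flags is now NO_EVICT | READ | MEM_VRAM: NO_EVICT was merely
               inherited, so only 'mask' is checked for the root-only bit. */
            printf("%#llx\n", (unsigned long long)flags);
            return 0;
    }
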
--- linux-core/drm_bo.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 9598e353..039873ca 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -921,37 +921,37 @@ int drm_bo_mem_space(struct drm_buffer_object * bo, EXPORT_SYMBOL(drm_bo_mem_space); static int drm_bo_new_mask(struct drm_buffer_object * bo, - uint64_t new_mask, uint32_t hint) + uint64_t new_flags, uint64_t used_mask) { uint32_t new_props; if (bo->type == drm_bo_type_user) { - DRM_ERROR("User buffers are not supported yet\n"); + DRM_ERROR("User buffers are not supported yet.\n"); return -EINVAL; } - if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) { + if ((used_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) { DRM_ERROR ("DRM_BO_FLAG_NO_EVICT is only available to priviliged " - "processes\n"); + "processes.\n"); return -EPERM; } - if ((new_mask & DRM_BO_FLAG_NO_MOVE)) { + if ((new_flags & DRM_BO_FLAG_NO_MOVE)) { DRM_ERROR ("DRM_BO_FLAG_NO_MOVE is not properly implemented yet.\n"); return -EPERM; } - new_props = new_mask & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE | - DRM_BO_FLAG_READ); + new_props = new_flags & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE | + DRM_BO_FLAG_READ); if (!new_props) { DRM_ERROR("Invalid buffer object rwx properties\n"); return -EINVAL; } - bo->mem.mask = new_mask; + bo->mem.mask = new_flags; return 0; } @@ -1490,7 +1490,7 @@ int drm_bo_do_validate(struct drm_buffer_object *bo, DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask); - ret = drm_bo_new_mask(bo, flags, hint); + ret = drm_bo_new_mask(bo, flags, mask); if (ret) goto out; From 11f3e5e53f8fc4de90d1c289e0ba218ddfca23dc Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Thu, 25 Oct 2007 10:12:21 +0200 Subject: [PATCH 425/437] Buffer manager: Implement a version check IOCTL for drivers that don't use drmMMInit from user-space. Remove the minor check from the kernel code. That's really up to the driver. Bump major. --- libdrm/xf86drm.c | 24 ++++++++++++++++++++++++ linux-core/drm_bo.c | 21 +++++++++++++-------- linux-core/drm_drv.c | 1 + linux-core/drm_objects.h | 1 + shared-core/drm.h | 10 +++++++++- 5 files changed, 48 insertions(+), 9 deletions(-) diff --git a/libdrm/xf86drm.c b/libdrm/xf86drm.c index 82b77d7f..2f9d5c80 100644 --- a/libdrm/xf86drm.c +++ b/libdrm/xf86drm.c @@ -2855,6 +2855,30 @@ int drmMMUnlock(int fd, unsigned memType, int unlockBM) return (ret) ? -errno : 0; } +int drmBOVersion(int fd, unsigned int *major, + unsigned int *minor, + unsigned int *patchlevel) +{ + struct drm_bo_version_arg arg; + int ret; + + memset(&arg, 0, sizeof(arg)); + ret = ioctl(fd, DRM_IOCTL_BO_VERSION, &arg); + if (ret) + return ret; + + if (major) + *major = arg.major; + if (minor) + *minor = arg.minor; + if (patchlevel) + *patchlevel = arg.patchlevel; + + return (ret) ? -errno : 0; +} + + + #define DRM_MAX_FDS 16 static struct { char *BusID; diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 039873ca..8d1e2f56 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -2260,17 +2260,10 @@ int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_ } if (arg->major != DRM_BO_INIT_MAJOR) { DRM_ERROR("libdrm and kernel DRM buffer object interface major\n" - "\tversion don't match. Got %d, expected %d,\n", + "\tversion don't match. 
Got %d, expected %d.\n", arg->major, DRM_BO_INIT_MAJOR); return -EINVAL; } - if (arg->minor > DRM_BO_INIT_MINOR) { - DRM_ERROR("libdrm expects a newer DRM buffer object interface.\n" - "\tlibdrm buffer object interface version is %d.%d.\n" - "\tkernel DRM buffer object interface version is %d.%d\n", - arg->major, arg->minor, DRM_BO_INIT_MAJOR, DRM_BO_INIT_MINOR); - return -EINVAL; - } mutex_lock(&dev->struct_mutex); if (!bm->initialized) { @@ -2535,3 +2528,15 @@ static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo) return 0; } + +int drm_bo_version_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_bo_version_arg *arg = (struct drm_bo_version_arg *)data; + + arg->major = DRM_BO_INIT_MAJOR; + arg->minor = DRM_BO_INIT_MINOR; + arg->patchlevel = DRM_BO_INIT_PATCH; + + return 0; +} diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index 9c867f1b..330566bb 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -145,6 +145,7 @@ static struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_BO_SETSTATUS, drm_bo_setstatus_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_BO_INFO, drm_bo_info_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_BO_WAIT_IDLE, drm_bo_wait_idle_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_BO_VERSION, drm_bo_version_ioctl, 0), }; #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index 0b937dc0..702ece56 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -483,6 +483,7 @@ extern int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file extern int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mm_unlock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_bo_version_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_bo_driver_finish(struct drm_device *dev); extern int drm_bo_driver_init(struct drm_device *dev); extern int drm_bo_pci_offset(struct drm_device *dev, diff --git a/shared-core/drm.h b/shared-core/drm.h index 80c1a3e2..a48f347e 100644 --- a/shared-core/drm.h +++ b/shared-core/drm.h @@ -758,8 +758,9 @@ struct drm_fence_arg { #define DRM_BO_HINT_WAIT_LAZY 0x00000008 #define DRM_BO_INIT_MAGIC 0xfe769812 -#define DRM_BO_INIT_MAJOR 0 +#define DRM_BO_INIT_MAJOR 1 #define DRM_BO_INIT_MINOR 1 +#define DRM_BO_INIT_PATCH 0 struct drm_bo_info_req { @@ -873,6 +874,12 @@ struct drm_bo_op_arg { #define DRM_BO_LOCK_UNLOCK_BM (1 << 0) #define DRM_BO_LOCK_IGNORE_NO_EVICT (1 << 1) +struct drm_bo_version_arg { + uint32_t major; + uint32_t minor; + uint32_t patchlevel; +}; + struct drm_mm_type_arg { unsigned int mem_type; unsigned int lock_flags; @@ -976,6 +983,7 @@ struct drm_mm_init_arg { #define DRM_IOCTL_BO_SETSTATUS DRM_IOWR(0xd3, struct drm_bo_map_wait_idle_arg) #define DRM_IOCTL_BO_INFO DRM_IOWR(0xd4, struct drm_bo_reference_info_arg) #define DRM_IOCTL_BO_WAIT_IDLE DRM_IOWR(0xd5, struct drm_bo_map_wait_idle_arg) +#define DRM_IOCTL_BO_VERSION DRM_IOR(0xd6, struct drm_bo_version_arg) /*@}*/ From b9d9c30474238ac8ba4899a19fe4a97e9376f6c4 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Thu, 25 Oct 2007 10:29:15 +0200 Subject: [PATCH 426/437] Tighten permissions on some buffer manager ioctls. Set bo init minor to 0. Add the version function to header. 
--- libdrm/xf86mm.h | 3 +++ linux-core/drm_drv.c | 12 ++++++++---- shared-core/drm.h | 2 +- 3 files changed, 12 insertions(+), 5 deletions(-) diff --git a/libdrm/xf86mm.h b/libdrm/xf86mm.h index b0923440..49ae2c04 100644 --- a/libdrm/xf86mm.h +++ b/libdrm/xf86mm.h @@ -179,6 +179,9 @@ extern int drmBOSetStatus(int fd, drmBO *buf, unsigned int hint, unsigned int desired_tile_stride, unsigned int tile_info); +extern int drmBOVersion(int fd, unsigned int *major, + unsigned int *minor, + unsigned int *patchlevel); #endif diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index 330566bb..fe2b1200 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -123,10 +123,14 @@ static struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_IOCTL_MM_INIT, drm_mm_init_ioctl, DRM_AUTH), - DRM_IOCTL_DEF(DRM_IOCTL_MM_TAKEDOWN, drm_mm_takedown_ioctl, DRM_AUTH), - DRM_IOCTL_DEF(DRM_IOCTL_MM_LOCK, drm_mm_lock_ioctl, DRM_AUTH), - DRM_IOCTL_DEF(DRM_IOCTL_MM_UNLOCK, drm_mm_unlock_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_MM_INIT, drm_mm_init_ioctl, + DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_MM_TAKEDOWN, drm_mm_takedown_ioctl, + DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_MM_LOCK, drm_mm_lock_ioctl, + DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_MM_UNLOCK, drm_mm_unlock_ioctl, + DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_FENCE_CREATE, drm_fence_create_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_FENCE_REFERENCE, drm_fence_reference_ioctl, DRM_AUTH), diff --git a/shared-core/drm.h b/shared-core/drm.h index a48f347e..ae88ce61 100644 --- a/shared-core/drm.h +++ b/shared-core/drm.h @@ -759,7 +759,7 @@ struct drm_fence_arg { #define DRM_BO_INIT_MAGIC 0xfe769812 #define DRM_BO_INIT_MAJOR 1 -#define DRM_BO_INIT_MINOR 1 +#define DRM_BO_INIT_MINOR 0 #define DRM_BO_INIT_PATCH 0 From 1681189e11b5a00ae72a55de932146ea37f7afd9 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Fri, 26 Oct 2007 10:25:57 +0200 Subject: [PATCH 427/437] Buffer flags and masks are 64-bit. don't mask off the high dword. Signed-off-by: Thomas Hellstrom --- linux-core/drm_bo.c | 12 ++++++------ linux-core/drm_bo_move.c | 12 ++++++------ linux-core/drm_objects.h | 3 ++- 3 files changed, 14 insertions(+), 13 deletions(-) diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 8d1e2f56..16203c77 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -54,9 +54,9 @@ static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo); static void drm_bo_takedown_vm_locked(struct drm_buffer_object * bo); static void drm_bo_unmap_virtual(struct drm_buffer_object * bo); -static inline uint32_t drm_bo_type_flags(unsigned type) +static inline uint64_t drm_bo_type_flags(unsigned type) { - return (1 << (24 + type)); + return (1ULL << (24 + type)); } /* @@ -785,10 +785,10 @@ static int drm_bo_mem_force_space(struct drm_device * dev, static int drm_bo_mt_compatible(struct drm_mem_type_manager * man, uint32_t mem_type, - uint32_t mask, uint32_t * res_mask) + uint64_t mask, uint32_t * res_mask) { - uint32_t cur_flags = drm_bo_type_flags(mem_type); - uint32_t flag_diff; + uint64_t cur_flags = drm_bo_type_flags(mem_type); + uint64_t flag_diff; if (man->flags & _DRM_FLAG_MEMTYPE_CACHED) cur_flags |= DRM_BO_FLAG_CACHED; @@ -1271,7 +1271,7 @@ static void drm_buffer_user_object_unmap(struct drm_file *file_priv, * Note that new_mem_flags are NOT transferred to the bo->mem.mask. 
*/ -int drm_bo_move_buffer(struct drm_buffer_object * bo, uint32_t new_mem_flags, +int drm_bo_move_buffer(struct drm_buffer_object * bo, uint64_t new_mem_flags, int no_wait, int move_unfenced) { struct drm_device *dev = bo->dev; diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c index 2a35d45b..7c86c4aa 100644 --- a/linux-core/drm_bo_move.c +++ b/linux-core/drm_bo_move.c @@ -53,8 +53,8 @@ int drm_bo_move_ttm(struct drm_buffer_object * bo, { struct drm_ttm *ttm = bo->ttm; struct drm_bo_mem_reg *old_mem = &bo->mem; - uint32_t save_flags = old_mem->flags; - uint32_t save_mask = old_mem->mask; + uint64_t save_flags = old_mem->flags; + uint64_t save_mask = old_mem->mask; int ret; if (old_mem->mem_type == DRM_BO_MEM_TT) { @@ -210,8 +210,8 @@ int drm_bo_move_memcpy(struct drm_buffer_object * bo, void *old_iomap; void *new_iomap; int ret; - uint32_t save_flags = old_mem->flags; - uint32_t save_mask = old_mem->mask; + uint64_t save_flags = old_mem->flags; + uint64_t save_mask = old_mem->mask; unsigned long i; unsigned long page; unsigned long add = 0; @@ -333,8 +333,8 @@ int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo, struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type]; struct drm_bo_mem_reg *old_mem = &bo->mem; int ret; - uint32_t save_flags = old_mem->flags; - uint32_t save_mask = old_mem->mask; + uint64_t save_flags = old_mem->flags; + uint64_t save_mask = old_mem->mask; struct drm_buffer_object *old_obj; if (bo->fence) diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index 702ece56..8b14ac6f 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -511,7 +511,8 @@ extern int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signa int no_wait); extern int drm_bo_mem_space(struct drm_buffer_object * bo, struct drm_bo_mem_reg * mem, int no_wait); -extern int drm_bo_move_buffer(struct drm_buffer_object * bo, uint32_t new_mem_flags, +extern int drm_bo_move_buffer(struct drm_buffer_object * bo, + uint64_t new_mem_flags, int no_wait, int move_unfenced); extern int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type); extern int drm_bo_init_mm(struct drm_device * dev, unsigned type, From a4c87d3796cac374d25e01b26bdbb9028ce03107 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Fri, 26 Oct 2007 10:31:14 +0200 Subject: [PATCH 428/437] Minor libdrm fixes. --- libdrm/xf86drm.c | 4 ++-- libdrm/xf86mm.h | 6 ++---- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/libdrm/xf86drm.c b/libdrm/xf86drm.c index 2f9d5c80..7001a0ef 100644 --- a/libdrm/xf86drm.c +++ b/libdrm/xf86drm.c @@ -2865,7 +2865,7 @@ int drmBOVersion(int fd, unsigned int *major, memset(&arg, 0, sizeof(arg)); ret = ioctl(fd, DRM_IOCTL_BO_VERSION, &arg); if (ret) - return ret; + return -errno; if (major) *major = arg.major; @@ -2874,7 +2874,7 @@ int drmBOVersion(int fd, unsigned int *major, if (patchlevel) *patchlevel = arg.patchlevel; - return (ret) ? -errno : 0; + return 0; } diff --git a/libdrm/xf86mm.h b/libdrm/xf86mm.h index 49ae2c04..d3df8497 100644 --- a/libdrm/xf86mm.h +++ b/libdrm/xf86mm.h @@ -29,6 +29,7 @@ #ifndef _XF86MM_H_ #define _XF86MM_H_ #include +#include #include "drm.h" /* @@ -37,7 +38,7 @@ * be protected using an external mutex. * * Note: Don't protect the following functions, as it may lead to deadlocks: - * drmBOUnmap(), drmFenceBuffers(). + * drmBOUnmap(). * The kernel is synchronizing and refcounting buffer maps. * User space only needs to refcount object usage within the same application. 
*/ @@ -156,9 +157,6 @@ extern int drmBOUnreference(int fd, drmBO *buf); extern int drmBOMap(int fd, drmBO *buf, unsigned mapFlags, unsigned mapHint, void **address); extern int drmBOUnmap(int fd, drmBO *buf); -extern int drmBOValidate(int fd, drmBO *buf, uint32_t fence_class, uint64_t flags, - uint64_t mask, unsigned hint); - extern int drmBOFence(int fd, drmBO *buf, unsigned flags, unsigned fenceHandle); extern int drmBOInfo(int fd, drmBO *buf); extern int drmBOBusy(int fd, drmBO *buf, int *busy); From b9d8ddd3ca587e87474d37637096b9ebd0a927e9 Mon Sep 17 00:00:00 2001 From: Stephane Marchesin Date: Fri, 26 Oct 2007 15:11:38 +0200 Subject: [PATCH 429/437] nouveau: flip the CHECK_STATE bit off on nv30. This lets you do 8-bit surface destination. --- shared-core/nv20_graph.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/shared-core/nv20_graph.c b/shared-core/nv20_graph.c index ae0e0858..94ce32c1 100644 --- a/shared-core/nv20_graph.c +++ b/shared-core/nv20_graph.c @@ -28,7 +28,7 @@ #define NV25_GRCTX_SIZE (3529*4) #define NV2A_GRCTX_SIZE (3500*4) -#define NV30_31_GRCTX_SIZE (22392) +#define NV30_31_GRCTX_SIZE (24392) #define NV34_GRCTX_SIZE (18140) #define NV35_36_GRCTX_SIZE (22396) @@ -3312,7 +3312,7 @@ int nv30_graph_init(struct drm_device *dev) NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x00000000); NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x401287c0); NV_WRITE(0x400890, 0x01b463ff); - NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xf3de0475); + NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xf2de0475); NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x00008000); NV_WRITE(NV04_PGRAPH_LIMIT_VIOL_PIX, 0xf04bdff6); NV_WRITE(0x400B80, 0x1003d888); From 6707ab862656d766a4c78b85e5584a29d2434126 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Fri, 26 Oct 2007 16:08:54 -0700 Subject: [PATCH 430/437] update DRM sysfs support Make DRM devices use real Linux devices instead of class devices, which are going away. While we're at it, clean up some of the interfaces to take struct drm_device * or struct device * and use the global drm_class where needed instead of passing it around. --- linux-core/drmP.h | 10 +-- linux-core/drm_drv.c | 4 +- linux-core/drm_stub.c | 7 +- linux-core/drm_sysfs.c | 151 ++++++++++++++++++++++++++++------------- 4 files changed, 113 insertions(+), 59 deletions(-) diff --git a/linux-core/drmP.h b/linux-core/drmP.h index d0ab2c94..82a3a23c 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -619,6 +619,8 @@ struct drm_driver { void (*postclose) (struct drm_device *, struct drm_file *); void (*lastclose) (struct drm_device *); int (*unload) (struct drm_device *); + int (*suspend) (struct drm_device *); + int (*resume) (struct drm_device *); int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv); void (*dma_ready) (struct drm_device *); int (*dma_quiescent) (struct drm_device *); @@ -697,6 +699,7 @@ struct drm_head { * may contain multiple heads. 
*/ struct drm_device { + struct device dev; /**< Linux device */ char *unique; /**< Unique identifier: e.g., busid */ int unique_len; /**< Length of unique field */ char *devname; /**< For /proc/interrupts */ @@ -1163,10 +1166,9 @@ extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah); /* sysfs support (drm_sysfs.c) */ struct drm_sysfs_class; extern struct class *drm_sysfs_create(struct module *owner, char *name); -extern void drm_sysfs_destroy(struct class *cs); -extern struct class_device *drm_sysfs_device_add(struct class *cs, - struct drm_head * head); -extern void drm_sysfs_device_remove(struct class_device *class_dev); +extern void drm_sysfs_destroy(void); +extern int drm_sysfs_device_add(struct drm_device *dev, struct drm_head * head); +extern void drm_sysfs_device_remove(struct drm_device *dev); /* * Basic memory manager support (drm_mm.c) diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index fe2b1200..47d17651 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -519,7 +519,7 @@ static int __init drm_core_init(void) CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE); return 0; err_p3: - drm_sysfs_destroy(drm_class); + drm_sysfs_destroy(); err_p2: unregister_chrdev(DRM_MAJOR, "drm"); drm_free(drm_heads, sizeof(*drm_heads) * drm_cards_limit, DRM_MEM_STUB); @@ -530,7 +530,7 @@ err_p1: static void __exit drm_core_exit(void) { remove_proc_entry("dri", NULL); - drm_sysfs_destroy(drm_class); + drm_sysfs_destroy(); unregister_chrdev(DRM_MAJOR, "drm"); diff --git a/linux-core/drm_stub.c b/linux-core/drm_stub.c index 9e140ac2..1d88d375 100644 --- a/linux-core/drm_stub.c +++ b/linux-core/drm_stub.c @@ -183,11 +183,10 @@ static int drm_get_head(struct drm_device * dev, struct drm_head * head) goto err_g1; } - head->dev_class = drm_sysfs_device_add(drm_class, head); - if (IS_ERR(head->dev_class)) { + ret = drm_sysfs_device_add(dev, head); + if (ret) { printk(KERN_ERR "DRM: Error sysfs_device_add.\n"); - ret = PTR_ERR(head->dev_class); goto err_g2; } *heads = head; @@ -316,7 +315,7 @@ int drm_put_head(struct drm_head * head) DRM_DEBUG("release secondary minor %d\n", minor); drm_proc_cleanup(minor, drm_proc_root, head->dev_root); - drm_sysfs_device_remove(head->dev_class); + drm_sysfs_device_remove(head->dev); *head = (struct drm_head){.dev = NULL}; diff --git a/linux-core/drm_sysfs.c b/linux-core/drm_sysfs.c index cf4349b0..6f8623ce 100644 --- a/linux-core/drm_sysfs.c +++ b/linux-core/drm_sysfs.c @@ -19,6 +19,45 @@ #include "drm_core.h" #include "drmP.h" +#define to_drm_device(d) container_of(d, struct drm_device, dev) + +/** + * drm_sysfs_suspend - DRM class suspend hook + * @dev: Linux device to suspend + * @state: power state to enter + * + * Just figures out what the actual struct drm_device associated with + * @dev is and calls its suspend hook, if present. + */ +static int drm_sysfs_suspend(struct device *dev, pm_message_t state) +{ + struct drm_device *drm_dev = to_drm_device(dev); + + printk(KERN_ERR "%s\n", __FUNCTION__); + + if (drm_dev->driver->suspend) + return drm_dev->driver->suspend(drm_dev); + + return 0; +} + +/** + * drm_sysfs_resume - DRM class resume hook + * @dev: Linux device to resume + * + * Just figures out what the actual struct drm_device associated with + * @dev is and calls its resume hook, if present. 
+ */ +static int drm_sysfs_resume(struct device *dev) +{ + struct drm_device *drm_dev = to_drm_device(dev); + + if (drm_dev->driver->resume) + return drm_dev->driver->resume(drm_dev); + + return 0; +} + /* Display the version of drm_core. This doesn't work right in current design */ static ssize_t version_show(struct class *dev, char *buf) { @@ -33,7 +72,7 @@ static CLASS_ATTR(version, S_IRUGO, version_show, NULL); * @owner: pointer to the module that is to "own" this struct drm_sysfs_class * @name: pointer to a string for the name of this class. * - * This is used to create a struct drm_sysfs_class pointer that can then be used + * This is used to create DRM class pointer that can then be used * in calls to drm_sysfs_device_add(). * * Note, the pointer created here is to be destroyed when finished by making a @@ -50,6 +89,9 @@ struct class *drm_sysfs_create(struct module *owner, char *name) goto err_out; } + class->suspend = drm_sysfs_suspend; + class->resume = drm_sysfs_resume; + err = class_create_file(class, &class_attr_version); if (err) goto err_out_class; @@ -63,94 +105,105 @@ err_out: } /** - * drm_sysfs_destroy - destroys a struct drm_sysfs_class structure - * @cs: pointer to the struct drm_sysfs_class that is to be destroyed + * drm_sysfs_destroy - destroys DRM class * - * Note, the pointer to be destroyed must have been created with a call to - * drm_sysfs_create(). + * Destroy the DRM device class. */ -void drm_sysfs_destroy(struct class *class) +void drm_sysfs_destroy(void) { - if ((class == NULL) || (IS_ERR(class))) + if ((drm_class == NULL) || (IS_ERR(drm_class))) return; - - class_remove_file(class, &class_attr_version); - class_destroy(class); + class_remove_file(drm_class, &class_attr_version); + class_destroy(drm_class); } -static ssize_t show_dri(struct class_device *class_device, char *buf) +static ssize_t show_dri(struct device *device, struct device_attribute *attr, + char *buf) { - struct drm_device * dev = ((struct drm_head *)class_get_devdata(class_device))->dev; + struct drm_device *dev = to_drm_device(device); if (dev->driver->dri_library_name) return dev->driver->dri_library_name(dev, buf); return snprintf(buf, PAGE_SIZE, "%s\n", dev->driver->pci_driver.name); } -static struct class_device_attribute class_device_attrs[] = { +static struct device_attribute device_attrs[] = { __ATTR(dri_library_name, S_IRUGO, show_dri, NULL), }; /** - * drm_sysfs_device_add - adds a class device to sysfs for a character driver - * @cs: pointer to the struct class that this device should be registered to. - * @dev: the dev_t for the device to be added. - * @device: a pointer to a struct device that is assiociated with this class device. - * @fmt: string for the class device's name + * drm_sysfs_device_release - do nothing + * @dev: Linux device * - * A struct class_device will be created in sysfs, registered to the specified - * class. A "dev" file will be created, showing the dev_t for the device. The - * pointer to the struct class_device will be returned from the call. Any further - * sysfs files that might be required can be created using this pointer. - * Note: the struct class passed to this function must have previously been - * created with a call to drm_sysfs_create(). + * Normally, this would free the DRM device associated with @dev, along + * with cleaning up any other stuff. But we do that in the DRM core, so + * this function can just return and hope that the core does its job. 
*/ -struct class_device *drm_sysfs_device_add(struct class *cs, struct drm_head *head) +static void drm_sysfs_device_release(struct device *dev) { - struct class_device *class_dev; - int i, j, err; + return; +} - class_dev = class_device_create(cs, NULL, - MKDEV(DRM_MAJOR, head->minor), - &(head->dev->pdev)->dev, - "card%d", head->minor); - if (IS_ERR(class_dev)) { - err = PTR_ERR(class_dev); +/** + * drm_sysfs_device_add - adds a class device to sysfs for a character driver + * @dev: DRM device to be added + * @head: DRM head in question + * + * Add a DRM device to the DRM's device model class. We use @dev's PCI device + * as the parent for the Linux device, and make sure it has a file containing + * the driver we're using (for userspace compatibility). + */ +int drm_sysfs_device_add(struct drm_device *dev, struct drm_head *head) +{ + int err; + int i, j; + + dev->dev.parent = &dev->pdev->dev; + dev->dev.class = drm_class; + dev->dev.release = drm_sysfs_device_release; + /* + * This will actually add the major:minor file so that udev + * will create the device node. We don't want to do that just + * yet... + */ + /* dev->dev.devt = head->device; */ + snprintf(dev->dev.bus_id, BUS_ID_SIZE, "card%d", head->minor); + + err = device_register(&dev->dev); + if (err) { + DRM_ERROR("device add failed: %d\n", err); goto err_out; } - class_set_devdata(class_dev, head); - - for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++) { - err = class_device_create_file(class_dev, - &class_device_attrs[i]); + for (i = 0; i < ARRAY_SIZE(device_attrs); i++) { + err = device_create_file(&dev->dev, &device_attrs[i]); if (err) goto err_out_files; } - return class_dev; + return 0; err_out_files: if (i > 0) for (j = 0; j < i; j++) - class_device_remove_file(class_dev, - &class_device_attrs[i]); - class_device_unregister(class_dev); + device_remove_file(&dev->dev, &device_attrs[i]); + device_unregister(&dev->dev); err_out: - return ERR_PTR(err); + + return err; } /** - * drm_sysfs_device_remove - removes a class device that was created with drm_sysfs_device_add() - * @dev: the dev_t of the device that was previously registered. + * drm_sysfs_device_remove - remove DRM device + * @dev: DRM device to remove * * This call unregisters and cleans up a class device that was created with a * call to drm_sysfs_device_add() */ -void drm_sysfs_device_remove(struct class_device *class_dev) +void drm_sysfs_device_remove(struct drm_device *dev) { int i; - for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++) - class_device_remove_file(class_dev, &class_device_attrs[i]); - class_device_unregister(class_dev); + for (i = 0; i < ARRAY_SIZE(device_attrs); i++) + device_remove_file(&dev->dev, &device_attrs[i]); + device_unregister(&dev->dev); } From 1e2a2bababf3fbaa0a665983856761c2284dba30 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Fri, 26 Oct 2007 16:10:02 -0700 Subject: [PATCH 431/437] i915: suspend/resume support Add suspend/resume support to the i915 driver. Moves some of the initialization into the driver load routine, and fixes up places where we assumed no dev_private existed in some of the cleanup paths. This allows us to suspend/resume properly even if X isn't running. 
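As a rough sketch of how the new entry points are meant to be consumed (everything named "example_*" below is an illustrative placeholder, not code from this series): the PM core suspends the DRM class device, drm_sysfs_suspend() resolves the owning struct drm_device, and the driver callback does the hardware-specific save/restore.

	/* Illustrative only: a driver opts in by filling the .suspend/.resume
	 * fields that the sysfs rework added to struct drm_driver. */
	static int example_suspend(struct drm_device *dev)
	{
		/* save registers, then drop the device into D3hot */
		return 0;
	}

	static int example_resume(struct drm_device *dev)
	{
		/* return to D0, then restore the saved registers */
		return 0;
	}

	static struct drm_driver example_driver = {
		.suspend = example_suspend,
		.resume  = example_resume,
		/* remaining fields as usual */
	};
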
--- linux-core/i915_drv.c | 455 ++++++++++++++++++++++++++++ shared-core/i915_dma.c | 112 +++---- shared-core/i915_drv.h | 663 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 1176 insertions(+), 54 deletions(-) diff --git a/linux-core/i915_drv.c b/linux-core/i915_drv.c index e337e1d2..f34d218c 100644 --- a/linux-core/i915_drv.c +++ b/linux-core/i915_drv.c @@ -69,6 +69,458 @@ static struct drm_bo_driver i915_bo_driver = { }; #endif +enum pipe { + PIPE_A = 0, + PIPE_B, +}; + +static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (pipe == PIPE_A) + return (I915_READ(DPLL_A) & DPLL_VCO_ENABLE); + else + return (I915_READ(DPLL_B) & DPLL_VCO_ENABLE); +} + +static void i915_save_palette(struct drm_device *dev, enum pipe pipe) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B); + u32 *array; + int i; + + if (!i915_pipe_enabled(dev, pipe)) + return; + + if (pipe == PIPE_A) + array = dev_priv->save_palette_a; + else + array = dev_priv->save_palette_b; + + for(i = 0; i < 256; i++) + array[i] = I915_READ(reg + (i << 2)); +} + +static void i915_restore_palette(struct drm_device *dev, enum pipe pipe) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B); + u32 *array; + int i; + + if (!i915_pipe_enabled(dev, pipe)) + return; + + if (pipe == PIPE_A) + array = dev_priv->save_palette_a; + else + array = dev_priv->save_palette_b; + + for(i = 0; i < 256; i++) + I915_WRITE(reg + (i << 2), array[i]); +} + +static u8 i915_read_indexed(u16 index_port, u16 data_port, u8 reg) +{ + outb(reg, index_port); + return inb(data_port); +} + +static u8 i915_read_ar(u16 st01, u8 reg, u16 palette_enable) +{ + inb(st01); + outb(palette_enable | reg, VGA_AR_INDEX); + return inb(VGA_AR_DATA_READ); +} + +static void i915_write_ar(u8 st01, u8 reg, u8 val, u16 palette_enable) +{ + inb(st01); + outb(palette_enable | reg, VGA_AR_INDEX); + outb(val, VGA_AR_DATA_WRITE); +} + +static void i915_write_indexed(u16 index_port, u16 data_port, u8 reg, u8 val) +{ + outb(reg, index_port); + outb(val, data_port); +} + +static void i915_save_vga(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int i; + u16 cr_index, cr_data, st01; + + /* VGA color palette registers */ + dev_priv->saveDACMASK = inb(VGA_DACMASK); + /* DACCRX automatically increments during read */ + outb(0, VGA_DACRX); + /* Read 3 bytes of color data from each index */ + for (i = 0; i < 256 * 3; i++) + dev_priv->saveDACDATA[i] = inb(VGA_DACDATA); + + /* MSR bits */ + dev_priv->saveMSR = inb(VGA_MSR_READ); + if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) { + cr_index = VGA_CR_INDEX_CGA; + cr_data = VGA_CR_DATA_CGA; + st01 = VGA_ST01_CGA; + } else { + cr_index = VGA_CR_INDEX_MDA; + cr_data = VGA_CR_DATA_MDA; + st01 = VGA_ST01_MDA; + } + + /* CRT controller regs */ + i915_write_indexed(cr_index, cr_data, 0x11, + i915_read_indexed(cr_index, cr_data, 0x11) & + (~0x80)); + for (i = 0; i < 0x24; i++) + dev_priv->saveCR[i] = + i915_read_indexed(cr_index, cr_data, i); + /* Make sure we don't turn off CR group 0 writes */ + dev_priv->saveCR[0x11] &= ~0x80; + + /* Attribute controller registers */ + inb(st01); + dev_priv->saveAR_INDEX = inb(VGA_AR_INDEX); + for (i = 0; i < 20; i++) + dev_priv->saveAR[i] = i915_read_ar(st01, i, 0); + inb(st01); + outb(dev_priv->saveAR_INDEX, VGA_AR_INDEX); + + /* Graphics controller registers */ + for 
(i = 0; i < 9; i++) + dev_priv->saveGR[i] = + i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, i); + + dev_priv->saveGR[0x10] = + i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x10); + dev_priv->saveGR[0x11] = + i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x11); + dev_priv->saveGR[0x18] = + i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x18); + + /* Sequencer registers */ + for (i = 0; i < 8; i++) + dev_priv->saveSR[i] = + i915_read_indexed(VGA_SR_INDEX, VGA_SR_DATA, i); +} + +static void i915_restore_vga(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int i; + u16 cr_index, cr_data, st01; + + /* MSR bits */ + outb(dev_priv->saveMSR, VGA_MSR_WRITE); + if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) { + cr_index = VGA_CR_INDEX_CGA; + cr_data = VGA_CR_DATA_CGA; + st01 = VGA_ST01_CGA; + } else { + cr_index = VGA_CR_INDEX_MDA; + cr_data = VGA_CR_DATA_MDA; + st01 = VGA_ST01_MDA; + } + + /* Sequencer registers, don't write SR07 */ + for (i = 0; i < 7; i++) + i915_write_indexed(VGA_SR_INDEX, VGA_SR_DATA, i, + dev_priv->saveSR[i]); + + /* CRT controller regs */ + /* Enable CR group 0 writes */ + i915_write_indexed(cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]); + for (i = 0; i < 0x24; i++) + i915_write_indexed(cr_index, cr_data, i, dev_priv->saveCR[i]); + + /* Graphics controller regs */ + for (i = 0; i < 9; i++) + i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, i, + dev_priv->saveGR[i]); + + i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x10, + dev_priv->saveGR[0x10]); + i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x11, + dev_priv->saveGR[0x11]); + i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x18, + dev_priv->saveGR[0x18]); + + /* Attribute controller registers */ + for (i = 0; i < 20; i++) + i915_write_ar(st01, i, dev_priv->saveAR[i], 0); + inb(st01); /* switch back to index mode */ + outb(dev_priv->saveAR_INDEX | 0x20, VGA_AR_INDEX); + + /* VGA color palette registers */ + outb(dev_priv->saveDACMASK, VGA_DACMASK); + /* DACCRX automatically increments during read */ + outb(0, VGA_DACWX); + /* Read 3 bytes of color data from each index */ + for (i = 0; i < 256 * 3; i++) + outb(dev_priv->saveDACDATA[i], VGA_DACDATA); + +} + +static int i915_suspend(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int i; + + if (!dev || !dev_priv) { + printk(KERN_ERR "dev: %p, dev_priv: %p\n", dev, dev_priv); + printk(KERN_ERR "DRM not initialized, aborting suspend.\n"); + return -ENODEV; + } + + pci_save_state(dev->pdev); + pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); + + /* Pipe & plane A info */ + dev_priv->savePIPEACONF = I915_READ(PIPEACONF); + dev_priv->savePIPEASRC = I915_READ(PIPEASRC); + dev_priv->saveFPA0 = I915_READ(FPA0); + dev_priv->saveFPA1 = I915_READ(FPA1); + dev_priv->saveDPLL_A = I915_READ(DPLL_A); + if (IS_I965G(dev)) + dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD); + dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A); + dev_priv->saveHBLANK_A = I915_READ(HBLANK_A); + dev_priv->saveHSYNC_A = I915_READ(HSYNC_A); + dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A); + dev_priv->saveVBLANK_A = I915_READ(VBLANK_A); + dev_priv->saveVSYNC_A = I915_READ(VSYNC_A); + dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A); + + dev_priv->saveDSPACNTR = I915_READ(DSPACNTR); + dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE); + dev_priv->saveDSPASIZE = I915_READ(DSPASIZE); + dev_priv->saveDSPAPOS = I915_READ(DSPAPOS); + dev_priv->saveDSPABASE = I915_READ(DSPABASE); + if (IS_I965G(dev)) { + dev_priv->saveDSPASURF = I915_READ(DSPASURF); + 
dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF); + } + i915_save_palette(dev, PIPE_A); + + /* Pipe & plane B info */ + dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF); + dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC); + dev_priv->saveFPB0 = I915_READ(FPB0); + dev_priv->saveFPB1 = I915_READ(FPB1); + dev_priv->saveDPLL_B = I915_READ(DPLL_B); + if (IS_I965G(dev)) + dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD); + dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B); + dev_priv->saveHBLANK_B = I915_READ(HBLANK_B); + dev_priv->saveHSYNC_B = I915_READ(HSYNC_B); + dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B); + dev_priv->saveVBLANK_B = I915_READ(VBLANK_B); + dev_priv->saveVSYNC_B = I915_READ(VSYNC_B); + dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A); + + dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR); + dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE); + dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE); + dev_priv->saveDSPBPOS = I915_READ(DSPBPOS); + dev_priv->saveDSPBBASE = I915_READ(DSPBBASE); + if (IS_I965GM(dev)) { + dev_priv->saveDSPBSURF = I915_READ(DSPBSURF); + dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF); + } + i915_save_palette(dev, PIPE_B); + + /* CRT state */ + dev_priv->saveADPA = I915_READ(ADPA); + + /* LVDS state */ + dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL); + dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS); + dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL); + if (IS_I965G(dev)) + dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2); + if (IS_MOBILE(dev) && !IS_I830(dev)) + dev_priv->saveLVDS = I915_READ(LVDS); + if (!IS_I830(dev) && !IS_845G(dev)) + dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL); + dev_priv->saveLVDSPP_ON = I915_READ(LVDSPP_ON); + dev_priv->saveLVDSPP_OFF = I915_READ(LVDSPP_OFF); + dev_priv->savePP_CYCLE = I915_READ(PP_CYCLE); + + /* FIXME: save TV & SDVO state */ + + /* FBC state */ + dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE); + dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE); + dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2); + dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL); + + /* VGA state */ + dev_priv->saveVCLK_DIVISOR_VGA0 = I915_READ(VCLK_DIVISOR_VGA0); + dev_priv->saveVCLK_DIVISOR_VGA1 = I915_READ(VCLK_DIVISOR_VGA1); + dev_priv->saveVCLK_POST_DIV = I915_READ(VCLK_POST_DIV); + dev_priv->saveVGACNTRL = I915_READ(VGACNTRL); + + /* Scratch space */ + for (i = 0; i < 16; i++) { + dev_priv->saveSWF0[i] = I915_READ(SWF0 + (i << 2)); + dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2)); + } + for (i = 0; i < 3; i++) + dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2)); + + i915_save_vga(dev); + + /* Shut down the device */ + pci_disable_device(dev->pdev); + pci_set_power_state(dev->pdev, PCI_D3hot); + + return 0; +} + +static int i915_resume(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int i; + + pci_set_power_state(dev->pdev, PCI_D0); + pci_restore_state(dev->pdev); + if (pci_enable_device(dev->pdev)) + return -1; + + pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB); + + /* Pipe & plane A info */ + /* Prime the clock */ + if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) { + I915_WRITE(DPLL_A, dev_priv->saveDPLL_A & + ~DPLL_VCO_ENABLE); + udelay(150); + } + I915_WRITE(FPA0, dev_priv->saveFPA0); + I915_WRITE(FPA1, dev_priv->saveFPA1); + /* Actually enable it */ + I915_WRITE(DPLL_A, dev_priv->saveDPLL_A); + udelay(150); + if (IS_I965G(dev)) + I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD); + udelay(150); + + /* Restore mode */ + I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A); + 
I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A); + I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A); + I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A); + I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A); + I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A); + I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A); + + /* Restore plane info */ + I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE); + I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS); + I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC); + I915_WRITE(DSPABASE, dev_priv->saveDSPABASE); + I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE); + if (IS_I965G(dev)) { + I915_WRITE(DSPASURF, dev_priv->saveDSPASURF); + I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF); + } + I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF); + i915_restore_palette(dev, PIPE_A); + /* Enable the plane */ + I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR); + I915_WRITE(DSPABASE, I915_READ(DSPABASE)); + + /* Pipe & plane B info */ + if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) { + I915_WRITE(DPLL_B, dev_priv->saveDPLL_B & + ~DPLL_VCO_ENABLE); + udelay(150); + } + I915_WRITE(FPB0, dev_priv->saveFPB0); + I915_WRITE(FPB1, dev_priv->saveFPB1); + /* Actually enable it */ + I915_WRITE(DPLL_B, dev_priv->saveDPLL_B); + udelay(150); + if (IS_I965G(dev)) + I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD); + udelay(150); + + /* Restore mode */ + I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B); + I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B); + I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B); + I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B); + I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B); + I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B); + I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B); + + /* Restore plane info */ + I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE); + I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS); + I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC); + I915_WRITE(DSPBBASE, dev_priv->saveDSPBBASE); + I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE); + if (IS_I965G(dev)) { + I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF); + I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF); + } + I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF); + i915_restore_palette(dev, PIPE_A); + /* Enable the plane */ + I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR); + I915_WRITE(DSPBBASE, I915_READ(DSPBBASE)); + + /* CRT state */ + I915_WRITE(ADPA, dev_priv->saveADPA); + + /* LVDS state */ + if (IS_I965G(dev)) + I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2); + if (IS_MOBILE(dev) && !IS_I830(dev)) + I915_WRITE(LVDS, dev_priv->saveLVDS); + if (!IS_I830(dev) && !IS_845G(dev)) + I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL); + + I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS); + I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL); + I915_WRITE(LVDSPP_ON, dev_priv->saveLVDSPP_ON); + I915_WRITE(LVDSPP_OFF, dev_priv->saveLVDSPP_OFF); + I915_WRITE(PP_CYCLE, dev_priv->savePP_CYCLE); + I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL); + + /* FIXME: restore TV & SDVO state */ + + /* FBC info */ + I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE); + I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE); + I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2); + I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL); + + /* VGA state */ + I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL); + I915_WRITE(VCLK_DIVISOR_VGA0, dev_priv->saveVCLK_DIVISOR_VGA0); + I915_WRITE(VCLK_DIVISOR_VGA1, dev_priv->saveVCLK_DIVISOR_VGA1); + I915_WRITE(VCLK_POST_DIV, dev_priv->saveVCLK_POST_DIV); + udelay(150); + + for (i = 0; i < 16; i++) { + I915_WRITE(SWF0 + (i << 2), dev_priv->saveSWF0[i]); 
+ I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i+7]); + } + for (i = 0; i < 3; i++) + I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]); + + i915_restore_vga(dev); + + return 0; +} + static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); static struct drm_driver driver = { /* don't use mtrr's here, the Xserver or user space app should @@ -79,9 +531,12 @@ static struct drm_driver driver = { DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL | DRIVER_IRQ_VBL2, .load = i915_driver_load, + .unload = i915_driver_unload, .firstopen = i915_driver_firstopen, .lastclose = i915_driver_lastclose, .preclose = i915_driver_preclose, + .suspend = i915_suspend, + .resume = i915_resume, .device_is_agp = i915_driver_device_is_agp, .vblank_wait = i915_driver_vblank_wait, .vblank_wait2 = i915_driver_vblank_wait2, diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c index b1168635..24a4ec4a 100644 --- a/shared-core/i915_dma.c +++ b/shared-core/i915_dma.c @@ -31,17 +31,6 @@ #include "i915_drm.h" #include "i915_drv.h" -#define IS_I965G(dev) (dev->pci_device == 0x2972 || \ - dev->pci_device == 0x2982 || \ - dev->pci_device == 0x2992 || \ - dev->pci_device == 0x29A2 || \ - dev->pci_device == 0x2A02 || \ - dev->pci_device == 0x2A12) - -#define IS_G33(dev) (dev->pci_device == 0x29C2 || \ - dev->pci_device == 0x29B2 || \ - dev->pci_device == 0x29D2) - /* Really want an OS-independent resettable timer. Would like to have * this loop run for (eg) 3 sec, but have the timer reset every time * the head pointer changes, so that EBUSY only happens if the ring @@ -91,6 +80,7 @@ void i915_kernel_lost_context(struct drm_device * dev) static int i915_dma_cleanup(struct drm_device * dev) { + drm_i915_private_t *dev_priv = dev->dev_private; /* Make sure interrupts are disabled here because the uninstall ioctl * may not have been called from userspace and after dev_private * is freed, it's too late. 
@@ -98,50 +88,42 @@ static int i915_dma_cleanup(struct drm_device * dev) if (dev->irq) drm_irq_uninstall(dev); - if (dev->dev_private) { - drm_i915_private_t *dev_priv = - (drm_i915_private_t *) dev->dev_private; + if (dev_priv->ring.virtual_start) { + drm_core_ioremapfree(&dev_priv->ring.map, dev); + dev_priv->ring.virtual_start = 0; + dev_priv->ring.map.handle = 0; + dev_priv->ring.map.size = 0; + } - if (dev_priv->ring.virtual_start) { - drm_core_ioremapfree(&dev_priv->ring.map, dev); - } + if (dev_priv->status_page_dmah) { + drm_pci_free(dev, dev_priv->status_page_dmah); + dev_priv->status_page_dmah = NULL; + /* Need to rewrite hardware status page */ + I915_WRITE(0x02080, 0x1ffff000); + } - if (dev_priv->status_page_dmah) { - drm_pci_free(dev, dev_priv->status_page_dmah); - /* Need to rewrite hardware status page */ - I915_WRITE(0x02080, 0x1ffff000); - } - if (dev_priv->status_gfx_addr) { - dev_priv->status_gfx_addr = 0; - drm_core_ioremapfree(&dev_priv->hws_map, dev); - I915_WRITE(0x02080, 0x1ffff000); - } - drm_free(dev->dev_private, sizeof(drm_i915_private_t), - DRM_MEM_DRIVER); - - dev->dev_private = NULL; + if (dev_priv->status_gfx_addr) { + dev_priv->status_gfx_addr = 0; + drm_core_ioremapfree(&dev_priv->hws_map, dev); + I915_WRITE(0x02080, 0x1ffff000); } return 0; } -static int i915_initialize(struct drm_device * dev, - drm_i915_private_t * dev_priv, - drm_i915_init_t * init) +static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) { - memset(dev_priv, 0, sizeof(drm_i915_private_t)); + drm_i915_private_t *dev_priv = dev->dev_private; dev_priv->sarea = drm_getsarea(dev); if (!dev_priv->sarea) { DRM_ERROR("can not find sarea!\n"); - dev->dev_private = (void *)dev_priv; i915_dma_cleanup(dev); return -EINVAL; } dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset); if (!dev_priv->mmio_map) { - dev->dev_private = (void *)dev_priv; i915_dma_cleanup(dev); DRM_ERROR("can not find mmio map!\n"); return -EINVAL; @@ -168,7 +150,6 @@ static int i915_initialize(struct drm_device * dev, drm_core_ioremap(&dev_priv->ring.map, dev); if (dev_priv->ring.map.handle == NULL) { - dev->dev_private = (void *)dev_priv; i915_dma_cleanup(dev); DRM_ERROR("can not ioremap virtual address for" " ring buffer\n"); @@ -199,7 +180,6 @@ static int i915_initialize(struct drm_device * dev, drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff); if (!dev_priv->status_page_dmah) { - dev->dev_private = (void *)dev_priv; i915_dma_cleanup(dev); DRM_ERROR("Can not allocate hardware status page\n"); return -ENOMEM; @@ -212,7 +192,6 @@ static int i915_initialize(struct drm_device * dev, I915_WRITE(0x02080, dev_priv->dma_status_page); } DRM_DEBUG("Enabled hardware status page\n"); - dev->dev_private = (void *)dev_priv; mutex_init(&dev_priv->cmdbuf_mutex); return 0; } @@ -258,17 +237,12 @@ static int i915_dma_resume(struct drm_device * dev) static int i915_dma_init(struct drm_device *dev, void *data, struct drm_file *file_priv) { - drm_i915_private_t *dev_priv; drm_i915_init_t *init = data; int retcode = 0; switch (init->func) { case I915_INIT_DMA: - dev_priv = drm_alloc(sizeof(drm_i915_private_t), - DRM_MEM_DRIVER); - if (dev_priv == NULL) - return -ENOMEM; - retcode = i915_initialize(dev, dev_priv, init); + retcode = i915_initialize(dev, init); break; case I915_CLEANUP_DMA: retcode = i915_dma_cleanup(dev); @@ -1299,7 +1273,6 @@ static int i915_set_status_page(struct drm_device *dev, void *data, drm_core_ioremap(&dev_priv->hws_map, dev); if (dev_priv->hws_map.handle == NULL) { - dev->dev_private = 
(void *)dev_priv; i915_dma_cleanup(dev); dev_priv->status_gfx_addr = 0; DRM_ERROR("can not ioremap virtual address for" @@ -1318,6 +1291,10 @@ static int i915_set_status_page(struct drm_device *dev, void *data, int i915_driver_load(struct drm_device *dev, unsigned long flags) { + struct drm_i915_private *dev_priv = dev->dev_private; + unsigned long base, size; + int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1; + /* i915 has 4 more counters */ dev->counters += 4; dev->types[6] = _DRM_STAT_IRQ; @@ -1325,25 +1302,52 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) dev->types[8] = _DRM_STAT_SECONDARY; dev->types[9] = _DRM_STAT_DMA; + dev_priv = drm_alloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER); + if (dev_priv == NULL) + return -ENOMEM; + + memset(dev_priv, 0, sizeof(drm_i915_private_t)); + + dev->dev_private = (void *)dev_priv; + + /* Add register map (needed for suspend/resume) */ + base = drm_get_resource_start(dev, mmio_bar); + size = drm_get_resource_len(dev, mmio_bar); + + ret = drm_addmap(dev, base, size, _DRM_REGISTERS, _DRM_KERNEL, + &dev_priv->mmio_map); + + return ret; +} + +int i915_driver_unload(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (dev_priv->mmio_map) + drm_rmmap(dev, dev_priv->mmio_map); + + drm_free(dev->dev_private, sizeof(drm_i915_private_t), + DRM_MEM_DRIVER); return 0; } void i915_driver_lastclose(struct drm_device * dev) { - if (dev->dev_private) { - drm_i915_private_t *dev_priv = dev->dev_private; + drm_i915_private_t *dev_priv = dev->dev_private; + + if (drm_getsarea(dev) && dev_priv->sarea_priv) i915_do_cleanup_pageflip(dev); + if (dev_priv->agp_heap) i915_mem_takedown(&(dev_priv->agp_heap)); - } + i915_dma_cleanup(dev); } void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) { - if (dev->dev_private) { - drm_i915_private_t *dev_priv = dev->dev_private; - i915_mem_release(dev, file_priv, dev_priv->agp_heap); - } + drm_i915_private_t *dev_priv = dev->dev_private; + i915_mem_release(dev, file_priv, dev_priv->agp_heap); } struct drm_ioctl_desc i915_ioctls[] = { diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h index 817288b6..07a173ac 100644 --- a/shared-core/i915_drv.h +++ b/shared-core/i915_drv.h @@ -146,6 +146,88 @@ typedef struct drm_i915_private { drm_i915_vbl_swap_t vbl_swaps; unsigned int swaps_pending; + /* Register state */ + u8 saveLBB; + u32 saveDSPACNTR; + u32 saveDSPBCNTR; + u32 savePIPEACONF; + u32 savePIPEBCONF; + u32 savePIPEASRC; + u32 savePIPEBSRC; + u32 saveFPA0; + u32 saveFPA1; + u32 saveDPLL_A; + u32 saveDPLL_A_MD; + u32 saveHTOTAL_A; + u32 saveHBLANK_A; + u32 saveHSYNC_A; + u32 saveVTOTAL_A; + u32 saveVBLANK_A; + u32 saveVSYNC_A; + u32 saveBCLRPAT_A; + u32 saveDSPASTRIDE; + u32 saveDSPASIZE; + u32 saveDSPAPOS; + u32 saveDSPABASE; + u32 saveDSPASURF; + u32 saveDSPATILEOFF; + u32 savePFIT_PGM_RATIOS; + u32 saveBLC_PWM_CTL; + u32 saveBLC_PWM_CTL2; + u32 saveFPB0; + u32 saveFPB1; + u32 saveDPLL_B; + u32 saveDPLL_B_MD; + u32 saveHTOTAL_B; + u32 saveHBLANK_B; + u32 saveHSYNC_B; + u32 saveVTOTAL_B; + u32 saveVBLANK_B; + u32 saveVSYNC_B; + u32 saveBCLRPAT_B; + u32 saveDSPBSTRIDE; + u32 saveDSPBSIZE; + u32 saveDSPBPOS; + u32 saveDSPBBASE; + u32 saveDSPBSURF; + u32 saveDSPBTILEOFF; + u32 saveVCLK_DIVISOR_VGA0; + u32 saveVCLK_DIVISOR_VGA1; + u32 saveVCLK_POST_DIV; + u32 saveVGACNTRL; + u32 saveADPA; + u32 saveLVDS; + u32 saveLVDSPP_ON; + u32 saveLVDSPP_OFF; + u32 saveDVOA; + u32 saveDVOB; + u32 saveDVOC; + u32 savePP_ON; + u32 savePP_OFF; + u32 
savePP_CONTROL; + u32 savePP_CYCLE; + u32 savePFIT_CONTROL; + u32 save_palette_a[256]; + u32 save_palette_b[256]; + u32 saveFBC_CFB_BASE; + u32 saveFBC_LL_BASE; + u32 saveFBC_CONTROL; + u32 saveFBC_CONTROL2; + u32 saveSWF0[16]; + u32 saveSWF1[16]; + u32 saveSWF2[3]; + u8 saveMSR; + u8 saveSR[8]; + u8 saveGR[24]; + u8 saveAR_INDEX; + u8 saveAR[20]; + u8 saveDACMASK; + u8 saveDACDATA[256*3]; /* 256 3-byte colors */ + u8 saveCR[36]; + u8 savePLANE0[64*1024]; + u8 savePLANE1[64*1024]; + u8 savePLANE2[64*1024]; + u8 savePLANE3[64*1024]; } drm_i915_private_t; enum intel_chip_family { @@ -161,6 +243,7 @@ extern int i915_max_ioctl; /* i915_dma.c */ extern void i915_kernel_lost_context(struct drm_device * dev); extern int i915_driver_load(struct drm_device *, unsigned long flags); +extern int i915_driver_unload(struct drm_device *); extern void i915_driver_lastclose(struct drm_device * dev); extern void i915_driver_preclose(struct drm_device *dev, struct drm_file *file_priv); @@ -273,6 +356,50 @@ extern int i915_move(struct drm_buffer_object *bo, int evict, extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); +/* Extended config space */ +#define LBB 0xf4 + +/* VGA stuff */ + +#define VGA_ST01_MDA 0x3ba +#define VGA_ST01_CGA 0x3da + +#define VGA_MSR_WRITE 0x3c2 +#define VGA_MSR_READ 0x3cc +#define VGA_MSR_MEM_EN (1<<1) +#define VGA_MSR_CGA_MODE (1<<0) + +#define VGA_SR_INDEX 0x3c4 +#define VGA_SR_DATA 0x3c5 + +#define VGA_AR_INDEX 0x3c0 +#define VGA_AR_VID_EN (1<<5) +#define VGA_AR_DATA_WRITE 0x3c0 +#define VGA_AR_DATA_READ 0x3c1 + +#define VGA_GR_INDEX 0x3ce +#define VGA_GR_DATA 0x3cf +/* GR05 */ +#define VGA_GR_MEM_READ_MODE_SHIFT 3 +#define VGA_GR_MEM_READ_MODE_PLANE 1 +/* GR06 */ +#define VGA_GR_MEM_MODE_MASK 0xc +#define VGA_GR_MEM_MODE_SHIFT 2 +#define VGA_GR_MEM_A0000_AFFFF 0 +#define VGA_GR_MEM_A0000_BFFFF 1 +#define VGA_GR_MEM_B0000_B7FFF 2 +#define VGA_GR_MEM_B0000_BFFFF 3 + +#define VGA_DACMASK 0x3c6 +#define VGA_DACRX 0x3c7 +#define VGA_DACWX 0x3c8 +#define VGA_DACDATA 0x3c9 + +#define VGA_CR_INDEX_MDA 0x3b4 +#define VGA_CR_DATA_MDA 0x3b5 +#define VGA_CR_INDEX_CGA 0x3d4 +#define VGA_CR_DATA_CGA 0x3d5 + #define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23)) #define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23)) #define CMD_REPORT_HEAD (7<<23) @@ -295,6 +422,37 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); #define BB1_UNPROTECTED (0<<0) #define BB2_END_ADDR_MASK (~0x7) +/* Framebuffer compression */ +#define FBC_CFB_BASE 0x03200 /* 4k page aligned */ +#define FBC_LL_BASE 0x03204 /* 4k page aligned */ +#define FBC_CONTROL 0x03208 +#define FBC_CTL_EN (1<<31) +#define FBC_CTL_PERIODIC (1<<30) +#define FBC_CTL_INTERVAL_SHIFT (16) +#define FBC_CTL_UNCOMPRESSIBLE (1<<14) +#define FBC_CTL_STRIDE_SHIFT (5) +#define FBC_CTL_FENCENO (1<<0) +#define FBC_COMMAND 0x0320c +#define FBC_CMD_COMPRESS (1<<0) +#define FBC_STATUS 0x03210 +#define FBC_STAT_COMPRESSING (1<<31) +#define FBC_STAT_COMPRESSED (1<<30) +#define FBC_STAT_MODIFIED (1<<29) +#define FBC_STAT_CURRENT_LINE (1<<0) +#define FBC_CONTROL2 0x03214 +#define FBC_CTL_FENCE_DBL (0<<4) +#define FBC_CTL_IDLE_IMM (0<<2) +#define FBC_CTL_IDLE_FULL (1<<2) +#define FBC_CTL_IDLE_LINE (2<<2) +#define FBC_CTL_IDLE_DEBUG (3<<2) +#define FBC_CTL_CPU_FENCE (1<<1) +#define FBC_CTL_PLANEA (0<<0) +#define FBC_CTL_PLANEB (1<<0) +#define FBC_FENCE_OFF 0x0321b + +#define FBC_LL_SIZE (1536) +#define FBC_LL_PAD (32) + /* Interrupt bits: */ #define USER_INT_FLAG (1<<1) @@ -516,4 +674,509 @@ extern int 
i915_wait_ring(struct drm_device * dev, int n, const char *caller); #define READ_BREADCRUMB(dev_priv) (((volatile u32*)(dev_priv->hw_status_page))[5]) #define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg]) + +#define BLC_PWM_CTL 0x61254 +#define BACKLIGHT_MODULATION_FREQ_SHIFT (17) + +#define BLC_PWM_CTL2 0x61250 +/** + * This is the most significant 15 bits of the number of backlight cycles in a + * complete cycle of the modulated backlight control. + * + * The actual value is this field multiplied by two. + */ +#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17) +#define BLM_LEGACY_MODE (1 << 16) +/** + * This is the number of cycles out of the backlight modulation cycle for which + * the backlight is on. + * + * This field must be no greater than the number of cycles in the complete + * backlight modulation cycle. + */ +#define BACKLIGHT_DUTY_CYCLE_SHIFT (0) +#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff) + +#define I915_GCFGC 0xf0 +#define I915_LOW_FREQUENCY_ENABLE (1 << 7) +#define I915_DISPLAY_CLOCK_190_200_MHZ (0 << 4) +#define I915_DISPLAY_CLOCK_333_MHZ (4 << 4) +#define I915_DISPLAY_CLOCK_MASK (7 << 4) + +#define I855_HPLLCC 0xc0 +#define I855_CLOCK_CONTROL_MASK (3 << 0) +#define I855_CLOCK_133_200 (0 << 0) +#define I855_CLOCK_100_200 (1 << 0) +#define I855_CLOCK_100_133 (2 << 0) +#define I855_CLOCK_166_250 (3 << 0) + +/* p317, 319 + */ +#define VCLK2_VCO_M 0x6008 /* treat as 16 bit? (includes msbs) */ +#define VCLK2_VCO_N 0x600a +#define VCLK2_VCO_DIV_SEL 0x6012 + +#define VCLK_DIVISOR_VGA0 0x6000 +#define VCLK_DIVISOR_VGA1 0x6004 +#define VCLK_POST_DIV 0x6010 +/** Selects a post divisor of 4 instead of 2. */ +# define VGA1_PD_P2_DIV_4 (1 << 15) +/** Overrides the p2 post divisor field */ +# define VGA1_PD_P1_DIV_2 (1 << 13) +# define VGA1_PD_P1_SHIFT 8 +/** P1 value is 2 greater than this field */ +# define VGA1_PD_P1_MASK (0x1f << 8) +/** Selects a post divisor of 4 instead of 2. 
*/ +# define VGA0_PD_P2_DIV_4 (1 << 7) +/** Overrides the p2 post divisor field */ +# define VGA0_PD_P1_DIV_2 (1 << 5) +# define VGA0_PD_P1_SHIFT 0 +/** P1 value is 2 greater than this field */ +# define VGA0_PD_P1_MASK (0x1f << 0) + +/* I830 CRTC registers */ +#define HTOTAL_A 0x60000 +#define HBLANK_A 0x60004 +#define HSYNC_A 0x60008 +#define VTOTAL_A 0x6000c +#define VBLANK_A 0x60010 +#define VSYNC_A 0x60014 +#define PIPEASRC 0x6001c +#define BCLRPAT_A 0x60020 +#define VSYNCSHIFT_A 0x60028 + +#define HTOTAL_B 0x61000 +#define HBLANK_B 0x61004 +#define HSYNC_B 0x61008 +#define VTOTAL_B 0x6100c +#define VBLANK_B 0x61010 +#define VSYNC_B 0x61014 +#define PIPEBSRC 0x6101c +#define BCLRPAT_B 0x61020 +#define VSYNCSHIFT_B 0x61028 + +#define PP_STATUS 0x61200 +# define PP_ON (1 << 31) +/** + * Indicates that all dependencies of the panel are on: + * + * - PLL enabled + * - pipe enabled + * - LVDS/DVOB/DVOC on + */ +# define PP_READY (1 << 30) +# define PP_SEQUENCE_NONE (0 << 28) +# define PP_SEQUENCE_ON (1 << 28) +# define PP_SEQUENCE_OFF (2 << 28) +# define PP_SEQUENCE_MASK 0x30000000 +#define PP_CONTROL 0x61204 +# define POWER_TARGET_ON (1 << 0) + +#define LVDSPP_ON 0x61208 +#define LVDSPP_OFF 0x6120c +#define PP_CYCLE 0x61210 + +#define PFIT_CONTROL 0x61230 +# define PFIT_ENABLE (1 << 31) +# define PFIT_PIPE_MASK (3 << 29) +# define PFIT_PIPE_SHIFT 29 +# define VERT_INTERP_DISABLE (0 << 10) +# define VERT_INTERP_BILINEAR (1 << 10) +# define VERT_INTERP_MASK (3 << 10) +# define VERT_AUTO_SCALE (1 << 9) +# define HORIZ_INTERP_DISABLE (0 << 6) +# define HORIZ_INTERP_BILINEAR (1 << 6) +# define HORIZ_INTERP_MASK (3 << 6) +# define HORIZ_AUTO_SCALE (1 << 5) +# define PANEL_8TO6_DITHER_ENABLE (1 << 3) + +#define PFIT_PGM_RATIOS 0x61234 +# define PFIT_VERT_SCALE_MASK 0xfff00000 +# define PFIT_HORIZ_SCALE_MASK 0x0000fff0 + +#define PFIT_AUTO_RATIOS 0x61238 + + +#define DPLL_A 0x06014 +#define DPLL_B 0x06018 +# define DPLL_VCO_ENABLE (1 << 31) +# define DPLL_DVO_HIGH_SPEED (1 << 30) +# define DPLL_SYNCLOCK_ENABLE (1 << 29) +# define DPLL_VGA_MODE_DIS (1 << 28) +# define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */ +# define DPLLB_MODE_LVDS (2 << 26) /* i915 */ +# define DPLL_MODE_MASK (3 << 26) +# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */ +# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */ +# define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */ +# define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */ +# define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */ +# define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */ +/** + * The i830 generation, in DAC/serial mode, defines p1 as two plus this + * bitfield, or just 2 if PLL_P1_DIVIDE_BY_TWO is set. + */ +# define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000 +/** + * The i830 generation, in LVDS mode, defines P1 as the bit number set within + * this field (only one bit may be set). + */ +# define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000 +# define DPLL_FPA01_P1_POST_DIV_SHIFT 16 +# define PLL_P2_DIVIDE_BY_4 (1 << 23) /* i830, required in DVO non-gang */ +# define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */ +# define PLL_REF_INPUT_DREFCLK (0 << 13) +# define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */ +# define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO TVCLKIN */ +# define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13) +# define PLL_REF_INPUT_MASK (3 << 13) +# define PLL_LOAD_PULSE_PHASE_SHIFT 9 +/* + * Parallel to Serial Load Pulse phase selection. 
+ * Selects the phase for the 10X DPLL clock for the PCIe + * digital display port. The range is 4 to 13; 10 or more + * is just a flip delay. The default is 6 + */ +# define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT) +# define DISPLAY_RATE_SELECT_FPA1 (1 << 8) + +/** + * SDVO multiplier for 945G/GM. Not used on 965. + * + * \sa DPLL_MD_UDI_MULTIPLIER_MASK + */ +# define SDVO_MULTIPLIER_MASK 0x000000ff +# define SDVO_MULTIPLIER_SHIFT_HIRES 4 +# define SDVO_MULTIPLIER_SHIFT_VGA 0 + +/** @defgroup DPLL_MD + * @{ + */ +/** Pipe A SDVO/UDI clock multiplier/divider register for G965. */ +#define DPLL_A_MD 0x0601c +/** Pipe B SDVO/UDI clock multiplier/divider register for G965. */ +#define DPLL_B_MD 0x06020 +/** + * UDI pixel divider, controlling how many pixels are stuffed into a packet. + * + * Value is pixels minus 1. Must be set to 1 pixel for SDVO. + */ +# define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000 +# define DPLL_MD_UDI_DIVIDER_SHIFT 24 +/** UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */ +# define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000 +# define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16 +/** + * SDVO/UDI pixel multiplier. + * + * SDVO requires that the bus clock rate be between 1 and 2 Ghz, and the bus + * clock rate is 10 times the DPLL clock. At low resolution/refresh rate + * modes, the bus rate would be below the limits, so SDVO allows for stuffing + * dummy bytes in the datastream at an increased clock rate, with both sides of + * the link knowing how many bytes are fill. + * + * So, for a mode with a dotclock of 65Mhz, we would want to double the clock + * rate to 130Mhz to get a bus rate of 1.30Ghz. The DPLL clock rate would be + * set to 130Mhz, and the SDVO multiplier set to 2x in this register and + * through an SDVO command. + * + * This register field has values of multiplication factor minus 1, with + * a maximum multiplier of 5 for SDVO. + */ +# define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00 +# define DPLL_MD_UDI_MULTIPLIER_SHIFT 8 +/** SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK. + * This best be set to the default value (3) or the CRT won't work. No, + * I don't entirely understand what this does... 
+ */ +# define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f +# define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0 +/** @} */ + +#define DPLL_TEST 0x606c +# define DPLLB_TEST_SDVO_DIV_1 (0 << 22) +# define DPLLB_TEST_SDVO_DIV_2 (1 << 22) +# define DPLLB_TEST_SDVO_DIV_4 (2 << 22) +# define DPLLB_TEST_SDVO_DIV_MASK (3 << 22) +# define DPLLB_TEST_N_BYPASS (1 << 19) +# define DPLLB_TEST_M_BYPASS (1 << 18) +# define DPLLB_INPUT_BUFFER_ENABLE (1 << 16) +# define DPLLA_TEST_N_BYPASS (1 << 3) +# define DPLLA_TEST_M_BYPASS (1 << 2) +# define DPLLA_INPUT_BUFFER_ENABLE (1 << 0) + +#define ADPA 0x61100 +#define ADPA_DAC_ENABLE (1<<31) +#define ADPA_DAC_DISABLE 0 +#define ADPA_PIPE_SELECT_MASK (1<<30) +#define ADPA_PIPE_A_SELECT 0 +#define ADPA_PIPE_B_SELECT (1<<30) +#define ADPA_USE_VGA_HVPOLARITY (1<<15) +#define ADPA_SETS_HVPOLARITY 0 +#define ADPA_VSYNC_CNTL_DISABLE (1<<11) +#define ADPA_VSYNC_CNTL_ENABLE 0 +#define ADPA_HSYNC_CNTL_DISABLE (1<<10) +#define ADPA_HSYNC_CNTL_ENABLE 0 +#define ADPA_VSYNC_ACTIVE_HIGH (1<<4) +#define ADPA_VSYNC_ACTIVE_LOW 0 +#define ADPA_HSYNC_ACTIVE_HIGH (1<<3) +#define ADPA_HSYNC_ACTIVE_LOW 0 + +#define FPA0 0x06040 +#define FPA1 0x06044 +#define FPB0 0x06048 +#define FPB1 0x0604c +# define FP_N_DIV_MASK 0x003f0000 +# define FP_N_DIV_SHIFT 16 +# define FP_M1_DIV_MASK 0x00003f00 +# define FP_M1_DIV_SHIFT 8 +# define FP_M2_DIV_MASK 0x0000003f +# define FP_M2_DIV_SHIFT 0 + + +#define PORT_HOTPLUG_EN 0x61110 +# define SDVOB_HOTPLUG_INT_EN (1 << 26) +# define SDVOC_HOTPLUG_INT_EN (1 << 25) +# define TV_HOTPLUG_INT_EN (1 << 18) +# define CRT_HOTPLUG_INT_EN (1 << 9) +# define CRT_HOTPLUG_FORCE_DETECT (1 << 3) + +#define PORT_HOTPLUG_STAT 0x61114 +# define CRT_HOTPLUG_INT_STATUS (1 << 11) +# define TV_HOTPLUG_INT_STATUS (1 << 10) +# define CRT_HOTPLUG_MONITOR_MASK (3 << 8) +# define CRT_HOTPLUG_MONITOR_COLOR (3 << 8) +# define CRT_HOTPLUG_MONITOR_MONO (2 << 8) +# define CRT_HOTPLUG_MONITOR_NONE (0 << 8) +# define SDVOC_HOTPLUG_INT_STATUS (1 << 7) +# define SDVOB_HOTPLUG_INT_STATUS (1 << 6) + +#define SDVOB 0x61140 +#define SDVOC 0x61160 +#define SDVO_ENABLE (1 << 31) +#define SDVO_PIPE_B_SELECT (1 << 30) +#define SDVO_STALL_SELECT (1 << 29) +#define SDVO_INTERRUPT_ENABLE (1 << 26) +/** + * 915G/GM SDVO pixel multiplier. + * + * Programmed value is multiplier - 1, up to 5x. + * + * \sa DPLL_MD_UDI_MULTIPLIER_MASK + */ +#define SDVO_PORT_MULTIPLY_MASK (7 << 23) +#define SDVO_PORT_MULTIPLY_SHIFT 23 +#define SDVO_PHASE_SELECT_MASK (15 << 19) +#define SDVO_PHASE_SELECT_DEFAULT (6 << 19) +#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18) +#define SDVOC_GANG_MODE (1 << 16) +#define SDVO_BORDER_ENABLE (1 << 7) +#define SDVOB_PCIE_CONCURRENCY (1 << 3) +#define SDVO_DETECTED (1 << 2) +/* Bits to be preserved when writing */ +#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14)) +#define SDVOC_PRESERVE_MASK (1 << 17) + +/** @defgroup LVDS + * @{ + */ +/** + * This register controls the LVDS output enable, pipe selection, and data + * format selection. + * + * All of the clock/data pairs are force powered down by power sequencing. + */ +#define LVDS 0x61180 +/** + * Enables the LVDS port. This bit must be set before DPLLs are enabled, as + * the DPLL semantics change when the LVDS is assigned to that pipe. + */ +# define LVDS_PORT_EN (1 << 31) +/** Selects pipe B for LVDS data. Must be set on pre-965. */ +# define LVDS_PIPEB_SELECT (1 << 30) + +/** + * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per + * pixel. 
+ */ +# define LVDS_A0A2_CLKA_POWER_MASK (3 << 8) +# define LVDS_A0A2_CLKA_POWER_DOWN (0 << 8) +# define LVDS_A0A2_CLKA_POWER_UP (3 << 8) +/** + * Controls the A3 data pair, which contains the additional LSBs for 24 bit + * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be + * on. + */ +# define LVDS_A3_POWER_MASK (3 << 6) +# define LVDS_A3_POWER_DOWN (0 << 6) +# define LVDS_A3_POWER_UP (3 << 6) +/** + * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP + * is set. + */ +# define LVDS_CLKB_POWER_MASK (3 << 4) +# define LVDS_CLKB_POWER_DOWN (0 << 4) +# define LVDS_CLKB_POWER_UP (3 << 4) + +/** + * Controls the B0-B3 data pairs. This must be set to match the DPLL p2 + * setting for whether we are in dual-channel mode. The B3 pair will + * additionally only be powered up when LVDS_A3_POWER_UP is set. + */ +# define LVDS_B0B3_POWER_MASK (3 << 2) +# define LVDS_B0B3_POWER_DOWN (0 << 2) +# define LVDS_B0B3_POWER_UP (3 << 2) + +#define PIPEACONF 0x70008 +#define PIPEACONF_ENABLE (1<<31) +#define PIPEACONF_DISABLE 0 +#define PIPEACONF_DOUBLE_WIDE (1<<30) +#define I965_PIPECONF_ACTIVE (1<<30) +#define PIPEACONF_SINGLE_WIDE 0 +#define PIPEACONF_PIPE_UNLOCKED 0 +#define PIPEACONF_PIPE_LOCKED (1<<25) +#define PIPEACONF_PALETTE 0 +#define PIPEACONF_GAMMA (1<<24) +#define PIPECONF_FORCE_BORDER (1<<25) +#define PIPECONF_PROGRESSIVE (0 << 21) +#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21) +#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21) + +#define PIPEBCONF 0x71008 +#define PIPEBCONF_ENABLE (1<<31) +#define PIPEBCONF_DISABLE 0 +#define PIPEBCONF_DOUBLE_WIDE (1<<30) +#define PIPEBCONF_DISABLE 0 +#define PIPEBCONF_GAMMA (1<<24) +#define PIPEBCONF_PALETTE 0 + +#define PIPEBGCMAXRED 0x71010 +#define PIPEBGCMAXGREEN 0x71014 +#define PIPEBGCMAXBLUE 0x71018 +#define PIPEBSTAT 0x71024 +#define PIPEBFRAMEHIGH 0x71040 +#define PIPEBFRAMEPIXEL 0x71044 + +#define DSPACNTR 0x70180 +#define DSPBCNTR 0x71180 +#define DISPLAY_PLANE_ENABLE (1<<31) +#define DISPLAY_PLANE_DISABLE 0 +#define DISPPLANE_GAMMA_ENABLE (1<<30) +#define DISPPLANE_GAMMA_DISABLE 0 +#define DISPPLANE_PIXFORMAT_MASK (0xf<<26) +#define DISPPLANE_8BPP (0x2<<26) +#define DISPPLANE_15_16BPP (0x4<<26) +#define DISPPLANE_16BPP (0x5<<26) +#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26) +#define DISPPLANE_32BPP (0x7<<26) +#define DISPPLANE_STEREO_ENABLE (1<<25) +#define DISPPLANE_STEREO_DISABLE 0 +#define DISPPLANE_SEL_PIPE_MASK (1<<24) +#define DISPPLANE_SEL_PIPE_A 0 +#define DISPPLANE_SEL_PIPE_B (1<<24) +#define DISPPLANE_SRC_KEY_ENABLE (1<<22) +#define DISPPLANE_SRC_KEY_DISABLE 0 +#define DISPPLANE_LINE_DOUBLE (1<<20) +#define DISPPLANE_NO_LINE_DOUBLE 0 +#define DISPPLANE_STEREO_POLARITY_FIRST 0 +#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18) +/* plane B only */ +#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15) +#define DISPPLANE_ALPHA_TRANS_DISABLE 0 +#define DISPPLANE_SPRITE_ABOVE_DISPLAYA 0 +#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1) + +#define DSPABASE 0x70184 +#define DSPASTRIDE 0x70188 + +#define DSPBBASE 0x71184 +#define DSPBADDR DSPBBASE +#define DSPBSTRIDE 0x71188 + +#define DSPAKEYVAL 0x70194 +#define DSPAKEYMASK 0x70198 + +#define DSPAPOS 0x7018C /* reserved */ +#define DSPASIZE 0x70190 +#define DSPBPOS 0x7118C +#define DSPBSIZE 0x71190 + +#define DSPASURF 0x7019C +#define DSPATILEOFF 0x701A4 + +#define DSPBSURF 0x7119C +#define DSPBTILEOFF 0x711A4 + +#define VGACNTRL 0x71400 +# define VGA_DISP_DISABLE (1 << 31) +# define VGA_2X_MODE (1 << 30) +# define VGA_PIPE_B_SELECT (1 << 29) + +/* + * Some 
BIOS scratch area registers. The 845 (and 830?) store the amount + * of video memory available to the BIOS in SWF1. + */ + +#define SWF0 0x71410 + +/* + * 855 scratch registers. + */ +#define SWF10 0x70410 + +#define SWF30 0x72414 + +/* + * Overlay registers. These are overlay registers accessed via MMIO. + * Those loaded via the overlay register page are defined in i830_video.c. + */ +#define OVADD 0x30000 + +#define DOVSTA 0x30008 +#define OC_BUF (0x3<<20) + +#define OGAMC5 0x30010 +#define OGAMC4 0x30014 +#define OGAMC3 0x30018 +#define OGAMC2 0x3001c +#define OGAMC1 0x30020 +#define OGAMC0 0x30024 +/* + * Palette registers + */ +#define PALETTE_A 0x0a000 +#define PALETTE_B 0x0a800 + +#define IS_I830(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82830_CGC) +#define IS_845G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82845G_IG) +#define IS_I85X(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82855GM_IG) +#define IS_I855(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82855GM_IG) +#define IS_I865G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82865_IG) + +#define IS_I915G(dev) (dev->pci_device == PCI_DEVICE_ID_INTEL_82915G_IG)/* || dev->pci_device == PCI_DEVICE_ID_INTELPCI_CHIP_E7221_G)*/ +#define IS_I915GM(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82915GM_IG) +#define IS_I945G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82945G_IG) +#define IS_I945GM(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82945GM_IG) + +#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \ + (dev)->pci_device == 0x2982 || \ + (dev)->pci_device == 0x2992 || \ + (dev)->pci_device == 0x29A2 || \ + (dev)->pci_device == 0x2A02 || \ + (dev)->pci_device == 0x2A12) + +#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02) + +#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \ + (dev)->pci_device == 0x29B2 || \ + (dev)->pci_device == 0x29D2) + +#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \ + IS_I945GM(dev) || IS_I965G(dev)) + +#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \ + IS_I945GM(dev) || IS_I965GM(dev)) + +#define PRIMARY_RINGBUFFER_SIZE (128*1024) + #endif From cc745fcc3a16cb1ffc2ab578155dc880b862f95a Mon Sep 17 00:00:00 2001 From: Stephane Marchesin Date: Sun, 28 Oct 2007 01:59:11 +0200 Subject: [PATCH 432/437] nouveau: don't touch PMC_BOOT_1 on x86, it seems to be undefined on some early cards. --- shared-core/nouveau_state.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c index add2d598..c617bfd3 100644 --- a/shared-core/nouveau_state.c +++ b/shared-core/nouveau_state.c @@ -283,11 +283,13 @@ nouveau_card_init(struct drm_device *dev) ret = nouveau_init_card_mappings(dev); if (ret) return ret; +#if defined(__powerpc__) /* Put the card in BE mode if it's not */ if (NV_READ(NV03_PMC_BOOT_1)) NV_WRITE(NV03_PMC_BOOT_1,0x00000001); DRM_MEMORYBARRIER(); +#endif /* Determine exact chipset we're running on */ if (dev_priv->card_type < NV_10) @@ -431,8 +433,10 @@ int nouveau_load(struct drm_device *dev, unsigned long flags) reg0 = readl(regs+NV03_PMC_BOOT_0); reg1 = readl(regs+NV03_PMC_BOOT_1); +#if defined(__powerpc__) if (reg1) reg0=___swab32(reg0); +#endif /* We're dealing with >=NV10 */ if ((reg0 & 0x0f000000) > 0 ) { From 6342e0507be177be309774aff0c31746beab73f6 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Mon, 29 Oct 2007 10:51:11 -0700 Subject: [PATCH 433/437] Remove unused memory save areas These need to be kmalloc'd separately anyway or we may hit kmalloc size limits. 
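The rationale is size: drm_i915_private_t comes from a single drm_alloc()/kmalloc(), and four inline 64 KiB arrays push it well past what one kmalloc() should be asked for. A hypothetical sketch of the alternative, should plane contents ever need saving (nothing below exists in the tree):

	/* Hypothetical, not in the tree: each 64 KiB plane buffer would get
	 * its own allocation instead of living inside drm_i915_private_t. */
	static int example_save_plane(void)
	{
		u8 *save_plane = kmalloc(64 * 1024, GFP_KERNEL);

		if (!save_plane)
			return -ENOMEM;
		/* ... fill save_plane from the hardware, keep the pointer ... */
		kfree(save_plane);
		return 0;
	}
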
--- shared-core/i915_drv.h | 4 ---- 1 file changed, 4 deletions(-) diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h index 07a173ac..6716f28f 100644 --- a/shared-core/i915_drv.h +++ b/shared-core/i915_drv.h @@ -224,10 +224,6 @@ typedef struct drm_i915_private { u8 saveDACMASK; u8 saveDACDATA[256*3]; /* 256 3-byte colors */ u8 saveCR[36]; - u8 savePLANE0[64*1024]; - u8 savePLANE1[64*1024]; - u8 savePLANE2[64*1024]; - u8 savePLANE3[64*1024]; } drm_i915_private_t; enum intel_chip_family { From ff5889f8316e0c16112f114c1c8f57645b8dc54f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kristian=20H=C3=B8gsberg?= Date: Mon, 29 Oct 2007 19:32:32 -0400 Subject: [PATCH 434/437] Move struct drm_drawable_info out of public header file. --- linux-core/drmP.h | 9 +++++++++ shared-core/drm.h | 9 --------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/linux-core/drmP.h b/linux-core/drmP.h index 82a3a23c..ac3ca4d2 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -586,6 +586,15 @@ struct drm_vbl_sig { struct task_struct *task; }; +/** + * Drawable information. + */ +struct drm_drawable_info { + unsigned int num_rects; + struct drm_clip_rect *rects; +}; + + /* location of GART table */ #define DRM_ATI_GART_MAIN 1 #define DRM_ATI_GART_FB 2 diff --git a/shared-core/drm.h b/shared-core/drm.h index ae88ce61..3a102735 100644 --- a/shared-core/drm.h +++ b/shared-core/drm.h @@ -134,14 +134,6 @@ struct drm_clip_rect { unsigned short y2; }; -/** - * Drawable information. - */ -struct drm_drawable_info { - unsigned int num_rects; - struct drm_clip_rect *rects; -}; - /** * Texture region, */ @@ -1002,7 +994,6 @@ struct drm_mm_init_arg { /* typedef area */ #if !defined(__KERNEL__) || defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__) typedef struct drm_clip_rect drm_clip_rect_t; -typedef struct drm_drawable_info drm_drawable_info_t; typedef struct drm_tex_region drm_tex_region_t; typedef struct drm_hw_lock drm_hw_lock_t; typedef struct drm_version drm_version_t; From 50dec29c800a6e980a01be38190e44a0ba7916b5 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 30 Oct 2007 17:51:59 +1000 Subject: [PATCH 435/437] drm/i915: add driver cache flush entry point Use clflush on Intel hardware to flush cached objects. 
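Before the hunks, a standalone illustration of the flush pattern this patch introduces in i915_flush_ttm()/drm_cache_flush_addr(): walk the buffer one cache line at a time, clflush each line, and fence around the loop. The 64-byte line size below is an assumption for the sketch; the patch itself reads boot_cpu_data.x86_clflush_size and uses DRM_MEMORYBARRIER():

#include <stddef.h>

#define CACHELINE_SIZE 64	/* assumed here; the kernel queries the CPU */

static inline void clflush_line(volatile void *p)
{
	/* clflush writes back and invalidates the cache line holding *p */
	__asm__ __volatile__("clflush %0" : "+m" (*(volatile char *)p));
}

static void flush_range(void *addr, size_t len)
{
	char *p = addr;
	size_t i;

	/* Fence, flush every cache line covering [addr, addr+len), fence. */
	__asm__ __volatile__("mfence" ::: "memory");
	for (i = 0; i < len; i += CACHELINE_SIZE)
		clflush_line(p + i);
	__asm__ __volatile__("mfence" ::: "memory");
}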
--- linux-core/drm_objects.h | 1 + linux-core/drm_ttm.c | 7 +++++-- linux-core/i915_buffer.c | 33 +++++++++++++++++++++++++++++++++ linux-core/i915_drv.c | 1 + shared-core/i915_drv.h | 2 +- 5 files changed, 41 insertions(+), 3 deletions(-) diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index 8b14ac6f..cea811eb 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -464,6 +464,7 @@ struct drm_bo_driver { uint32_t(*evict_mask) (struct drm_buffer_object *bo); int (*move) (struct drm_buffer_object * bo, int evict, int no_wait, struct drm_bo_mem_reg * new_mem); + void (*ttm_cache_flush)(struct drm_ttm *ttm); }; /* diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c index 33bbe1d4..df9e7e44 100644 --- a/linux-core/drm_ttm.c +++ b/linux-core/drm_ttm.c @@ -207,6 +207,7 @@ struct page *drm_ttm_get_page(struct drm_ttm * ttm, int index) } return p; } +EXPORT_SYMBOL(drm_ttm_get_page); int drm_ttm_populate(struct drm_ttm * ttm) { @@ -311,7 +312,7 @@ void drm_ttm_unbind(struct drm_ttm * ttm) int drm_bind_ttm(struct drm_ttm * ttm, struct drm_bo_mem_reg *bo_mem) { - + struct drm_bo_driver *bo_driver = ttm->dev->driver->bo_driver; int ret = 0; struct drm_ttm_backend *be; @@ -328,7 +329,9 @@ int drm_bind_ttm(struct drm_ttm * ttm, struct drm_bo_mem_reg *bo_mem) if (ttm->state == ttm_unbound && !(bo_mem->flags & DRM_BO_FLAG_CACHED)) { drm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED); - } + } else if ((bo_mem->flags & DRM_BO_FLAG_CACHED) && + bo_driver->ttm_cache_flush) + bo_driver->ttm_cache_flush(ttm); if ((ret = be->func->bind(be, bo_mem))) { ttm->state = ttm_evicted; diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c index f81def8f..bbc7e1db 100644 --- a/linux-core/i915_buffer.c +++ b/linux-core/i915_buffer.c @@ -249,3 +249,36 @@ int i915_move(struct drm_buffer_object * bo, } return 0; } + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)) +static inline void clflush(volatile void *__p) +{ + asm volatile("clflush %0" : "+m" (*(char __force *)__p)); +} +#endif + +static inline void drm_cache_flush_addr(void *virt) +{ + int i; + + for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size) + clflush(virt+i); +} + +static inline void drm_cache_flush_page(struct page *p) +{ + drm_cache_flush_addr(page_address(p)); +} + +void i915_flush_ttm(struct drm_ttm *ttm) +{ + int i; + + if (!ttm) + return; + + DRM_MEMORYBARRIER(); + for (i = ttm->num_pages-1; i >= 0; i--) + drm_cache_flush_page(drm_ttm_get_page(ttm, i)); + DRM_MEMORYBARRIER(); +} diff --git a/linux-core/i915_drv.c b/linux-core/i915_drv.c index f34d218c..124db68f 100644 --- a/linux-core/i915_drv.c +++ b/linux-core/i915_drv.c @@ -66,6 +66,7 @@ static struct drm_bo_driver i915_bo_driver = { .init_mem_type = i915_init_mem_type, .evict_mask = i915_evict_mask, .move = i915_move, + .ttm_cache_flush = i915_flush_ttm, }; #endif diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h index 6716f28f..9f69d841 100644 --- a/shared-core/i915_drv.h +++ b/shared-core/i915_drv.h @@ -310,7 +310,7 @@ extern int i915_init_mem_type(struct drm_device *dev, uint32_t type, extern uint32_t i915_evict_mask(struct drm_buffer_object *bo); extern int i915_move(struct drm_buffer_object *bo, int evict, int no_wait, struct drm_bo_mem_reg *new_mem); - +void i915_flush_ttm(struct drm_ttm *ttm); #endif #define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, (reg)) From 0cebcd43dd7b950c07625601b87c72329857d831 Mon Sep 17 00:00:00 2001 From: Stephane Marchesin Date: Tue, 30 Oct 2007 16:54:57 +0100 Subject: [PATCH 436/437] Nouveau: fold 
some loops. --- shared-core/nv20_graph.c | 2535 +------------------------------------- 1 file changed, 15 insertions(+), 2520 deletions(-) diff --git a/shared-core/nv20_graph.c b/shared-core/nv20_graph.c index 94ce32c1..e6ccf672 100644 --- a/shared-core/nv20_graph.c +++ b/shared-core/nv20_graph.c @@ -415,846 +415,11 @@ static void nv30_31_graph_context_init(struct drm_device *dev, INSTANCE_WR(ctx, 0x860/4, 0x00010000); for(i = 0x864; i< 0x874; i += 4) INSTANCE_WR(ctx, i/4, 0x00040004); - INSTANCE_WR(ctx, 0x1f18/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1f1c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1f20/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1f28/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1f2c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1f30/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1f38/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1f3c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1f40/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1f48/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1f4c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1f50/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1f58/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1f5c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1f60/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1f68/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1f6c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1f70/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1f78/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1f7c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1f80/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1f88/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1f8c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1f90/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1f98/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1f9c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1fa0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1fa8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1fac/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1fb0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1fb8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1fbc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1fc0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1fc8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1fcc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1fd0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1fd8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1fdc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1fe0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1fe8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1fec/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1ff0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1ff8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1ffc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2000/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2008/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x200c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2010/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2018/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x201c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2020/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2028/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x202c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2030/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2038/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x203c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2040/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2048/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x204c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2050/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2058/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x205c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2060/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2068/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x206c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2070/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2078/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x207c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2080/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2088/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x208c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2090/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2098/4, 
0x10700ff9); - INSTANCE_WR(ctx, 0x209c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x20a0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x20a8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x20ac/4, 0x0436086c); - INSTANCE_WR(ctx, 0x20b0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x20b8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x20bc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x20c0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x20c8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x20cc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x20d0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x20d8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x20dc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x20e0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x20e8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x20ec/4, 0x0436086c); - INSTANCE_WR(ctx, 0x20f0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x20f8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x20fc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2100/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2108/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x210c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2110/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2118/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x211c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2120/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2128/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x212c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2130/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2138/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x213c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2140/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2148/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x214c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2150/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2158/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x215c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2160/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2168/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x216c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2170/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2178/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x217c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2180/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2188/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x218c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2190/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2198/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x219c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x21a0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x21a8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x21ac/4, 0x0436086c); - INSTANCE_WR(ctx, 0x21b0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x21b8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x21bc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x21c0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x21c8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x21cc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x21d0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x21d8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x21dc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x21e0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x21e8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x21ec/4, 0x0436086c); - INSTANCE_WR(ctx, 0x21f0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x21f8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x21fc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2200/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2208/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x220c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2210/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2218/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x221c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2220/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2228/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x222c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2230/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2238/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x223c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2240/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2248/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x224c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2250/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2258/4, 0x10700ff9); - 
INSTANCE_WR(ctx, 0x225c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2260/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2268/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x226c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2270/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2278/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x227c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2280/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2288/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x228c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2290/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2298/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x229c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x22a0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x22a8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x22ac/4, 0x0436086c); - INSTANCE_WR(ctx, 0x22b0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x22b8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x22bc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x22c0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x22c8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x22cc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x22d0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x22d8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x22dc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x22e0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x22e8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x22ec/4, 0x0436086c); - INSTANCE_WR(ctx, 0x22f0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x22f8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x22fc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2300/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2308/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x230c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2310/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2318/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x231c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2320/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2328/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x232c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2330/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2338/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x233c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2340/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2348/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x234c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2350/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2358/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x235c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2360/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2368/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x236c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2370/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2378/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x237c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2380/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2388/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x238c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2390/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2398/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x239c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x23a0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x23a8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x23ac/4, 0x0436086c); - INSTANCE_WR(ctx, 0x23b0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x23b8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x23bc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x23c0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x23c8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x23cc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x23d0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x23d8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x23dc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x23e0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x23e8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x23ec/4, 0x0436086c); - INSTANCE_WR(ctx, 0x23f0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x23f8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x23fc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2400/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2408/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x240c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2410/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2418/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x241c/4, 
0x0436086c); - INSTANCE_WR(ctx, 0x2420/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2428/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x242c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2430/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2438/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x243c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2440/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2448/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x244c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2450/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2458/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x245c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2460/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2468/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x246c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2470/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2478/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x247c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2480/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2488/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x248c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2490/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2498/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x249c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x24a0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x24a8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x24ac/4, 0x0436086c); - INSTANCE_WR(ctx, 0x24b0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x24b8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x24bc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x24c0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x24c8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x24cc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x24d0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x24d8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x24dc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x24e0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x24e8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x24ec/4, 0x0436086c); - INSTANCE_WR(ctx, 0x24f0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x24f8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x24fc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2500/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2508/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x250c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2510/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2518/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x251c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2520/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2528/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x252c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2530/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2538/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x253c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2540/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2548/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x254c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2550/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2558/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x255c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2560/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2568/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x256c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2570/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2578/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x257c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2580/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2588/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x258c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2590/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2598/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x259c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x25a0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x25a8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x25ac/4, 0x0436086c); - INSTANCE_WR(ctx, 0x25b0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x25b8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x25bc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x25c0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x25c8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x25cc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x25d0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x25d8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x25dc/4, 0x0436086c); - 
INSTANCE_WR(ctx, 0x25e0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x25e8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x25ec/4, 0x0436086c); - INSTANCE_WR(ctx, 0x25f0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x25f8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x25fc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2600/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2608/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x260c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2610/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2618/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x261c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2620/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2628/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x262c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2630/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2638/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x263c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2640/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2648/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x264c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2650/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2658/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x265c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2660/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2668/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x266c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2670/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2678/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x267c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2680/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2688/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x268c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2690/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2698/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x269c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x26a0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x26a8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x26ac/4, 0x0436086c); - INSTANCE_WR(ctx, 0x26b0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x26b8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x26bc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x26c0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x26c8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x26cc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x26d0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x26d8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x26dc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x26e0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x26e8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x26ec/4, 0x0436086c); - INSTANCE_WR(ctx, 0x26f0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x26f8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x26fc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2700/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2708/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x270c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2710/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2718/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x271c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2720/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2728/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x272c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2730/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2738/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x273c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2740/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2748/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x274c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2750/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2758/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x275c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2760/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2768/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x276c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2770/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2778/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x277c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2780/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2788/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x278c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2790/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2798/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x279c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x27a0/4, 
0x000c001b); - INSTANCE_WR(ctx, 0x27a8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x27ac/4, 0x0436086c); - INSTANCE_WR(ctx, 0x27b0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x27b8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x27bc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x27c0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x27c8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x27cc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x27d0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x27d8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x27dc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x27e0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x27e8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x27ec/4, 0x0436086c); - INSTANCE_WR(ctx, 0x27f0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x27f8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x27fc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2800/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2808/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x280c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2810/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2818/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x281c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2820/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2828/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x282c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2830/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2838/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x283c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2840/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2848/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x284c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2850/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2858/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x285c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2860/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2868/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x286c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2870/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2878/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x287c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2880/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2888/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x288c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2890/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2898/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x289c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x28a0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x28a8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x28ac/4, 0x0436086c); - INSTANCE_WR(ctx, 0x28b0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x28b8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x28bc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x28c0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x28c8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x28cc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x28d0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x28d8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x28dc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x28e0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x28e8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x28ec/4, 0x0436086c); - INSTANCE_WR(ctx, 0x28f0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x28f8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x28fc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2900/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2908/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x290c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2910/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2918/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x291c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2920/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2928/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x292c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2930/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2938/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x293c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2940/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2948/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x294c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2950/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2958/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x295c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2960/4, 0x000c001b); - 
INSTANCE_WR(ctx, 0x2968/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x296c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2970/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2978/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x297c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2980/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2988/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x298c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2990/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2998/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x299c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x29a0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x29a8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x29ac/4, 0x0436086c); - INSTANCE_WR(ctx, 0x29b0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x29b8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x29bc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x29c0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x29c8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x29cc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x29d0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x29d8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x29dc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x29e0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x29e8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x29ec/4, 0x0436086c); - INSTANCE_WR(ctx, 0x29f0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x29f8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x29fc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2a00/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2a08/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2a0c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2a10/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2a18/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2a1c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2a20/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2a28/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2a2c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2a30/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2a38/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2a3c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2a40/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2a48/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2a4c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2a50/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2a58/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2a5c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2a60/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2a68/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2a6c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2a70/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2a78/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2a7c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2a80/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2a88/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2a8c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2a90/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2a98/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2a9c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2aa0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2aa8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2aac/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2ab0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2ab8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2abc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2ac0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2ac8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2acc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2ad0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2ad8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2adc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2ae0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2ae8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2aec/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2af0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2af8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2afc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2b00/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2b08/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2b0c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2b10/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2b18/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2b1c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2b20/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2b28/4, 
0x10700ff9); - INSTANCE_WR(ctx, 0x2b2c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2b30/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2b38/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2b3c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2b40/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2b48/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2b4c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2b50/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2b58/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2b5c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2b60/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2b68/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2b6c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2b70/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2b78/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2b7c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2b80/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2b88/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2b8c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2b90/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2b98/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2b9c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2ba0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2ba8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2bac/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2bb0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2bb8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2bbc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2bc0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2bc8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2bcc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2bd0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2bd8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2bdc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2be0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2be8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2bec/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2bf0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2bf8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2bfc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2c00/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2c08/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2c0c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2c10/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2c18/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2c1c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2c20/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2c28/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2c2c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2c30/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2c38/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2c3c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2c40/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2c48/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2c4c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2c50/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2c58/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2c5c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2c60/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2c68/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2c6c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2c70/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2c78/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2c7c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2c80/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2c88/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2c8c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2c90/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2c98/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2c9c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2ca0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2ca8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2cac/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2cb0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2cb8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2cbc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2cc0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2cc8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2ccc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2cd0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2cd8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2cdc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2ce0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2ce8/4, 0x10700ff9); - 
INSTANCE_WR(ctx, 0x2cec/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2cf0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2cf8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2cfc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2d00/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2d08/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2d0c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2d10/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2d18/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2d1c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2d20/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2d28/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2d2c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2d30/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2d38/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2d3c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2d40/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2d48/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2d4c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2d50/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2d58/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2d5c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2d60/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2d68/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2d6c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2d70/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2d78/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2d7c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2d80/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2d88/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2d8c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2d90/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2d98/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2d9c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2da0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2da8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2dac/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2db0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2db8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2dbc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2dc0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2dc8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2dcc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2dd0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2dd8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2ddc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2de0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2de8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2dec/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2df0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2df8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2dfc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2e00/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2e08/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2e0c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2e10/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2e18/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2e1c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2e20/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2e28/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2e2c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2e30/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2e38/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2e3c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2e40/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2e48/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2e4c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2e50/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2e58/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2e5c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2e60/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2e68/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2e6c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2e70/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2e78/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2e7c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2e80/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2e88/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2e8c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2e90/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2e98/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2e9c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2ea0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2ea8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2eac/4, 
0x0436086c); - INSTANCE_WR(ctx, 0x2eb0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2eb8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2ebc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2ec0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2ec8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2ecc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2ed0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2ed8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2edc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2ee0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2ee8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2eec/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2ef0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2ef8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2efc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2f00/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2f08/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2f0c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2f10/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2f18/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2f1c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2f20/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2f28/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2f2c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2f30/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2f38/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2f3c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2f40/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2f48/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2f4c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2f50/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2f58/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2f5c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2f60/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2f68/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2f6c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2f70/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2f78/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2f7c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2f80/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2f88/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2f8c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2f90/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2f98/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2f9c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2fa0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2fa8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2fac/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2fb0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2fb8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2fbc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2fc0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2fc8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2fcc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2fd0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2fd8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2fdc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2fe0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2fe8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2fec/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2ff0/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2ff8/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2ffc/4, 0x0436086c); - INSTANCE_WR(ctx, 0x3000/4, 0x000c001b); - INSTANCE_WR(ctx, 0x3008/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x300c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x3010/4, 0x000c001b); - INSTANCE_WR(ctx, 0x3018/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x301c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x3020/4, 0x000c001b); - INSTANCE_WR(ctx, 0x3028/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x302c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x3030/4, 0x000c001b); - INSTANCE_WR(ctx, 0x3038/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x303c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x3040/4, 0x000c001b); - INSTANCE_WR(ctx, 0x3048/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x304c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x3050/4, 0x000c001b); - INSTANCE_WR(ctx, 0x3058/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x305c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x3060/4, 0x000c001b); - INSTANCE_WR(ctx, 0x3068/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x306c/4, 0x0436086c); - 
INSTANCE_WR(ctx, 0x3070/4, 0x000c001b); - INSTANCE_WR(ctx, 0x3078/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x307c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x3080/4, 0x000c001b); - INSTANCE_WR(ctx, 0x3088/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x308c/4, 0x0436086c); - INSTANCE_WR(ctx, 0x3090/4, 0x000c001b); + for(i = 0x1f18; i<= 0x3088 ; i+= 16) { + INSTANCE_WR(ctx, i/4 + 0, 0x10700ff9); + INSTANCE_WR(ctx, i/4 + 1, 0x0436086c); + INSTANCE_WR(ctx, i/4 + 2, 0x000c001b); + } for(i = 0x30b8; i< 0x30c8; i += 4) INSTANCE_WR(ctx, i/4, 0x0000ffff); INSTANCE_WR(ctx, 0x344c/4, 0x3f800000); @@ -1309,846 +474,11 @@ static void nv34_graph_context_init(struct drm_device *dev, INSTANCE_WR(ctx, 0x854/4, 0x00010000); for(i = 0x858; i< 0x868; i += 4) INSTANCE_WR(ctx, i/4, 0x00040004); - INSTANCE_WR(ctx, 0x15ac/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x15b0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x15b4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x15bc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x15c0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x15c4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x15cc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x15d0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x15d4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x15dc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x15e0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x15e4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x15ec/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x15f0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x15f4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x15fc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1600/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1604/4, 0x000c001b); - INSTANCE_WR(ctx, 0x160c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1610/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1614/4, 0x000c001b); - INSTANCE_WR(ctx, 0x161c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1620/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1624/4, 0x000c001b); - INSTANCE_WR(ctx, 0x162c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1630/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1634/4, 0x000c001b); - INSTANCE_WR(ctx, 0x163c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1640/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1644/4, 0x000c001b); - INSTANCE_WR(ctx, 0x164c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1650/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1654/4, 0x000c001b); - INSTANCE_WR(ctx, 0x165c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1660/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1664/4, 0x000c001b); - INSTANCE_WR(ctx, 0x166c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1670/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1674/4, 0x000c001b); - INSTANCE_WR(ctx, 0x167c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1680/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1684/4, 0x000c001b); - INSTANCE_WR(ctx, 0x168c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1690/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1694/4, 0x000c001b); - INSTANCE_WR(ctx, 0x169c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x16a0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x16a4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x16ac/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x16b0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x16b4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x16bc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x16c0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x16c4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x16cc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x16d0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x16d4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x16dc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x16e0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x16e4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x16ec/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x16f0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x16f4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x16fc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1700/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1704/4, 0x000c001b); - INSTANCE_WR(ctx, 
0x170c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1710/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1714/4, 0x000c001b); - INSTANCE_WR(ctx, 0x171c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1720/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1724/4, 0x000c001b); - INSTANCE_WR(ctx, 0x172c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1730/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1734/4, 0x000c001b); - INSTANCE_WR(ctx, 0x173c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1740/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1744/4, 0x000c001b); - INSTANCE_WR(ctx, 0x174c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1750/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1754/4, 0x000c001b); - INSTANCE_WR(ctx, 0x175c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1760/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1764/4, 0x000c001b); - INSTANCE_WR(ctx, 0x176c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1770/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1774/4, 0x000c001b); - INSTANCE_WR(ctx, 0x177c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1780/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1784/4, 0x000c001b); - INSTANCE_WR(ctx, 0x178c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1790/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1794/4, 0x000c001b); - INSTANCE_WR(ctx, 0x179c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x17a0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x17a4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x17ac/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x17b0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x17b4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x17bc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x17c0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x17c4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x17cc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x17d0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x17d4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x17dc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x17e0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x17e4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x17ec/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x17f0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x17f4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x17fc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1800/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1804/4, 0x000c001b); - INSTANCE_WR(ctx, 0x180c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1810/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1814/4, 0x000c001b); - INSTANCE_WR(ctx, 0x181c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1820/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1824/4, 0x000c001b); - INSTANCE_WR(ctx, 0x182c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1830/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1834/4, 0x000c001b); - INSTANCE_WR(ctx, 0x183c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1840/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1844/4, 0x000c001b); - INSTANCE_WR(ctx, 0x184c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1850/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1854/4, 0x000c001b); - INSTANCE_WR(ctx, 0x185c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1860/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1864/4, 0x000c001b); - INSTANCE_WR(ctx, 0x186c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1870/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1874/4, 0x000c001b); - INSTANCE_WR(ctx, 0x187c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1880/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1884/4, 0x000c001b); - INSTANCE_WR(ctx, 0x188c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1890/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1894/4, 0x000c001b); - INSTANCE_WR(ctx, 0x189c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x18a0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x18a4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x18ac/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x18b0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x18b4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x18bc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x18c0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x18c4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x18cc/4, 0x10700ff9); - 
INSTANCE_WR(ctx, 0x18d0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x18d4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x18dc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x18e0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x18e4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x18ec/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x18f0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x18f4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x18fc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1900/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1904/4, 0x000c001b); - INSTANCE_WR(ctx, 0x190c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1910/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1914/4, 0x000c001b); - INSTANCE_WR(ctx, 0x191c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1920/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1924/4, 0x000c001b); - INSTANCE_WR(ctx, 0x192c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1930/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1934/4, 0x000c001b); - INSTANCE_WR(ctx, 0x193c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1940/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1944/4, 0x000c001b); - INSTANCE_WR(ctx, 0x194c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1950/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1954/4, 0x000c001b); - INSTANCE_WR(ctx, 0x195c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1960/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1964/4, 0x000c001b); - INSTANCE_WR(ctx, 0x196c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1970/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1974/4, 0x000c001b); - INSTANCE_WR(ctx, 0x197c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1980/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1984/4, 0x000c001b); - INSTANCE_WR(ctx, 0x198c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1990/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1994/4, 0x000c001b); - INSTANCE_WR(ctx, 0x199c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x19a0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x19a4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x19ac/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x19b0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x19b4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x19bc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x19c0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x19c4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x19cc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x19d0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x19d4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x19dc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x19e0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x19e4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x19ec/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x19f0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x19f4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x19fc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1a00/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1a04/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1a0c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1a10/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1a14/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1a1c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1a20/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1a24/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1a2c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1a30/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1a34/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1a3c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1a40/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1a44/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1a4c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1a50/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1a54/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1a5c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1a60/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1a64/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1a6c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1a70/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1a74/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1a7c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1a80/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1a84/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1a8c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1a90/4, 
0x0436086c); - INSTANCE_WR(ctx, 0x1a94/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1a9c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1aa0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1aa4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1aac/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1ab0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1ab4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1abc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1ac0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1ac4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1acc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1ad0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1ad4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1adc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1ae0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1ae4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1aec/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1af0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1af4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1afc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1b00/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1b04/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1b0c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1b10/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1b14/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1b1c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1b20/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1b24/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1b2c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1b30/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1b34/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1b3c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1b40/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1b44/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1b4c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1b50/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1b54/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1b5c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1b60/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1b64/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1b6c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1b70/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1b74/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1b7c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1b80/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1b84/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1b8c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1b90/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1b94/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1b9c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1ba0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1ba4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1bac/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1bb0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1bb4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1bbc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1bc0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1bc4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1bcc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1bd0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1bd4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1bdc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1be0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1be4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1bec/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1bf0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1bf4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1bfc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1c00/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1c04/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1c0c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1c10/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1c14/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1c1c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1c20/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1c24/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1c2c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1c30/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1c34/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1c3c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1c40/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1c44/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1c4c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1c50/4, 0x0436086c); - 
INSTANCE_WR(ctx, 0x1c54/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1c5c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1c60/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1c64/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1c6c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1c70/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1c74/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1c7c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1c80/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1c84/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1c8c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1c90/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1c94/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1c9c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1ca0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1ca4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1cac/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1cb0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1cb4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1cbc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1cc0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1cc4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1ccc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1cd0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1cd4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1cdc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1ce0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1ce4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1cec/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1cf0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1cf4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1cfc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1d00/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1d04/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1d0c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1d10/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1d14/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1d1c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1d20/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1d24/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1d2c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1d30/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1d34/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1d3c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1d40/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1d44/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1d4c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1d50/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1d54/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1d5c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1d60/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1d64/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1d6c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1d70/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1d74/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1d7c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1d80/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1d84/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1d8c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1d90/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1d94/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1d9c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1da0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1da4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1dac/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1db0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1db4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1dbc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1dc0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1dc4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1dcc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1dd0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1dd4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1ddc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1de0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1de4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1dec/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1df0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1df4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1dfc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1e00/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1e04/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1e0c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1e10/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1e14/4, 
0x000c001b); - INSTANCE_WR(ctx, 0x1e1c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1e20/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1e24/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1e2c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1e30/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1e34/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1e3c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1e40/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1e44/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1e4c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1e50/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1e54/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1e5c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1e60/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1e64/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1e6c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1e70/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1e74/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1e7c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1e80/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1e84/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1e8c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1e90/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1e94/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1e9c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1ea0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1ea4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1eac/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1eb0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1eb4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1ebc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1ec0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1ec4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1ecc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1ed0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1ed4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1edc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1ee0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1ee4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1eec/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1ef0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1ef4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1efc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1f00/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1f04/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1f0c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1f10/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1f14/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1f1c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1f20/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1f24/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1f2c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1f30/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1f34/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1f3c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1f40/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1f44/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1f4c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1f50/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1f54/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1f5c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1f60/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1f64/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1f6c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1f70/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1f74/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1f7c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1f80/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1f84/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1f8c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1f90/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1f94/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1f9c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1fa0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1fa4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1fac/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1fb0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1fb4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1fbc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1fc0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1fc4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1fcc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1fd0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1fd4/4, 0x000c001b); - 
INSTANCE_WR(ctx, 0x1fdc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1fe0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1fe4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1fec/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x1ff0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x1ff4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x1ffc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2000/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2004/4, 0x000c001b); - INSTANCE_WR(ctx, 0x200c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2010/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2014/4, 0x000c001b); - INSTANCE_WR(ctx, 0x201c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2020/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2024/4, 0x000c001b); - INSTANCE_WR(ctx, 0x202c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2030/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2034/4, 0x000c001b); - INSTANCE_WR(ctx, 0x203c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2040/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2044/4, 0x000c001b); - INSTANCE_WR(ctx, 0x204c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2050/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2054/4, 0x000c001b); - INSTANCE_WR(ctx, 0x205c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2060/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2064/4, 0x000c001b); - INSTANCE_WR(ctx, 0x206c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2070/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2074/4, 0x000c001b); - INSTANCE_WR(ctx, 0x207c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2080/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2084/4, 0x000c001b); - INSTANCE_WR(ctx, 0x208c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2090/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2094/4, 0x000c001b); - INSTANCE_WR(ctx, 0x209c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x20a0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x20a4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x20ac/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x20b0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x20b4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x20bc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x20c0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x20c4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x20cc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x20d0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x20d4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x20dc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x20e0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x20e4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x20ec/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x20f0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x20f4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x20fc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2100/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2104/4, 0x000c001b); - INSTANCE_WR(ctx, 0x210c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2110/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2114/4, 0x000c001b); - INSTANCE_WR(ctx, 0x211c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2120/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2124/4, 0x000c001b); - INSTANCE_WR(ctx, 0x212c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2130/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2134/4, 0x000c001b); - INSTANCE_WR(ctx, 0x213c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2140/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2144/4, 0x000c001b); - INSTANCE_WR(ctx, 0x214c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2150/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2154/4, 0x000c001b); - INSTANCE_WR(ctx, 0x215c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2160/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2164/4, 0x000c001b); - INSTANCE_WR(ctx, 0x216c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2170/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2174/4, 0x000c001b); - INSTANCE_WR(ctx, 0x217c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2180/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2184/4, 0x000c001b); - INSTANCE_WR(ctx, 0x218c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2190/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2194/4, 0x000c001b); - INSTANCE_WR(ctx, 0x219c/4, 
0x10700ff9); - INSTANCE_WR(ctx, 0x21a0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x21a4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x21ac/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x21b0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x21b4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x21bc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x21c0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x21c4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x21cc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x21d0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x21d4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x21dc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x21e0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x21e4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x21ec/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x21f0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x21f4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x21fc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2200/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2204/4, 0x000c001b); - INSTANCE_WR(ctx, 0x220c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2210/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2214/4, 0x000c001b); - INSTANCE_WR(ctx, 0x221c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2220/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2224/4, 0x000c001b); - INSTANCE_WR(ctx, 0x222c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2230/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2234/4, 0x000c001b); - INSTANCE_WR(ctx, 0x223c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2240/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2244/4, 0x000c001b); - INSTANCE_WR(ctx, 0x224c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2250/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2254/4, 0x000c001b); - INSTANCE_WR(ctx, 0x225c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2260/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2264/4, 0x000c001b); - INSTANCE_WR(ctx, 0x226c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2270/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2274/4, 0x000c001b); - INSTANCE_WR(ctx, 0x227c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2280/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2284/4, 0x000c001b); - INSTANCE_WR(ctx, 0x228c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2290/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2294/4, 0x000c001b); - INSTANCE_WR(ctx, 0x229c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x22a0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x22a4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x22ac/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x22b0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x22b4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x22bc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x22c0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x22c4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x22cc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x22d0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x22d4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x22dc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x22e0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x22e4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x22ec/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x22f0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x22f4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x22fc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2300/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2304/4, 0x000c001b); - INSTANCE_WR(ctx, 0x230c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2310/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2314/4, 0x000c001b); - INSTANCE_WR(ctx, 0x231c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2320/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2324/4, 0x000c001b); - INSTANCE_WR(ctx, 0x232c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2330/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2334/4, 0x000c001b); - INSTANCE_WR(ctx, 0x233c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2340/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2344/4, 0x000c001b); - INSTANCE_WR(ctx, 0x234c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2350/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2354/4, 0x000c001b); - INSTANCE_WR(ctx, 0x235c/4, 0x10700ff9); - 
INSTANCE_WR(ctx, 0x2360/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2364/4, 0x000c001b); - INSTANCE_WR(ctx, 0x236c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2370/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2374/4, 0x000c001b); - INSTANCE_WR(ctx, 0x237c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2380/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2384/4, 0x000c001b); - INSTANCE_WR(ctx, 0x238c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2390/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2394/4, 0x000c001b); - INSTANCE_WR(ctx, 0x239c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x23a0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x23a4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x23ac/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x23b0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x23b4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x23bc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x23c0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x23c4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x23cc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x23d0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x23d4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x23dc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x23e0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x23e4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x23ec/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x23f0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x23f4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x23fc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2400/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2404/4, 0x000c001b); - INSTANCE_WR(ctx, 0x240c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2410/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2414/4, 0x000c001b); - INSTANCE_WR(ctx, 0x241c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2420/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2424/4, 0x000c001b); - INSTANCE_WR(ctx, 0x242c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2430/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2434/4, 0x000c001b); - INSTANCE_WR(ctx, 0x243c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2440/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2444/4, 0x000c001b); - INSTANCE_WR(ctx, 0x244c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2450/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2454/4, 0x000c001b); - INSTANCE_WR(ctx, 0x245c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2460/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2464/4, 0x000c001b); - INSTANCE_WR(ctx, 0x246c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2470/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2474/4, 0x000c001b); - INSTANCE_WR(ctx, 0x247c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2480/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2484/4, 0x000c001b); - INSTANCE_WR(ctx, 0x248c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2490/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2494/4, 0x000c001b); - INSTANCE_WR(ctx, 0x249c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x24a0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x24a4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x24ac/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x24b0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x24b4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x24bc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x24c0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x24c4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x24cc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x24d0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x24d4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x24dc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x24e0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x24e4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x24ec/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x24f0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x24f4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x24fc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2500/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2504/4, 0x000c001b); - INSTANCE_WR(ctx, 0x250c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2510/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2514/4, 0x000c001b); - INSTANCE_WR(ctx, 0x251c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2520/4, 
0x0436086c); - INSTANCE_WR(ctx, 0x2524/4, 0x000c001b); - INSTANCE_WR(ctx, 0x252c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2530/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2534/4, 0x000c001b); - INSTANCE_WR(ctx, 0x253c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2540/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2544/4, 0x000c001b); - INSTANCE_WR(ctx, 0x254c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2550/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2554/4, 0x000c001b); - INSTANCE_WR(ctx, 0x255c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2560/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2564/4, 0x000c001b); - INSTANCE_WR(ctx, 0x256c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2570/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2574/4, 0x000c001b); - INSTANCE_WR(ctx, 0x257c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2580/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2584/4, 0x000c001b); - INSTANCE_WR(ctx, 0x258c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2590/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2594/4, 0x000c001b); - INSTANCE_WR(ctx, 0x259c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x25a0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x25a4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x25ac/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x25b0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x25b4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x25bc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x25c0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x25c4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x25cc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x25d0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x25d4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x25dc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x25e0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x25e4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x25ec/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x25f0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x25f4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x25fc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2600/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2604/4, 0x000c001b); - INSTANCE_WR(ctx, 0x260c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2610/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2614/4, 0x000c001b); - INSTANCE_WR(ctx, 0x261c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2620/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2624/4, 0x000c001b); - INSTANCE_WR(ctx, 0x262c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2630/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2634/4, 0x000c001b); - INSTANCE_WR(ctx, 0x263c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2640/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2644/4, 0x000c001b); - INSTANCE_WR(ctx, 0x264c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2650/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2654/4, 0x000c001b); - INSTANCE_WR(ctx, 0x265c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2660/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2664/4, 0x000c001b); - INSTANCE_WR(ctx, 0x266c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2670/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2674/4, 0x000c001b); - INSTANCE_WR(ctx, 0x267c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2680/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2684/4, 0x000c001b); - INSTANCE_WR(ctx, 0x268c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2690/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2694/4, 0x000c001b); - INSTANCE_WR(ctx, 0x269c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x26a0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x26a4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x26ac/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x26b0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x26b4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x26bc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x26c0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x26c4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x26cc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x26d0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x26d4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x26dc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x26e0/4, 0x0436086c); - 
-	INSTANCE_WR(ctx, 0x26e4/4, 0x000c001b);
-	INSTANCE_WR(ctx, 0x26ec/4, 0x10700ff9);
-	INSTANCE_WR(ctx, 0x26f0/4, 0x0436086c);
-	INSTANCE_WR(ctx, 0x26f4/4, 0x000c001b);
-	INSTANCE_WR(ctx, 0x26fc/4, 0x10700ff9);
-	INSTANCE_WR(ctx, 0x2700/4, 0x0436086c);
-	INSTANCE_WR(ctx, 0x2704/4, 0x000c001b);
-	INSTANCE_WR(ctx, 0x270c/4, 0x10700ff9);
-	INSTANCE_WR(ctx, 0x2710/4, 0x0436086c);
-	INSTANCE_WR(ctx, 0x2714/4, 0x000c001b);
-	INSTANCE_WR(ctx, 0x271c/4, 0x10700ff9);
-	INSTANCE_WR(ctx, 0x2720/4, 0x0436086c);
-	INSTANCE_WR(ctx, 0x2724/4, 0x000c001b);
+	for(i = 0x15ac; i<= 0x271c ; i+= 16) {
+		INSTANCE_WR(ctx, i/4 + 0, 0x10700ff9);
+		INSTANCE_WR(ctx, i/4 + 1, 0x0436086c);
+		INSTANCE_WR(ctx, i/4 + 2, 0x000c001b);
+	}
 	for(i = 0x274c; i< 0x275c; i += 4)
 		INSTANCE_WR(ctx, i/4, 0x0000ffff);
 	INSTANCE_WR(ctx, 0x2ae0/4, 0x3f800000);
@@ -2203,846 +533,11 @@ static void nv35_36_graph_context_init(struct drm_device *dev,
 	INSTANCE_WR(ctx, 0x864/4, 0x00010000);
 	for(i = 0x868; i< 0x878; i += 4)
 		INSTANCE_WR(ctx, i/4, 0x00040004);
-	INSTANCE_WR(ctx, 0x1f1c/4, 0x10700ff9);
-	INSTANCE_WR(ctx, 0x1f20/4, 0x0436086c);
-	INSTANCE_WR(ctx, 0x1f24/4, 0x000c001b);
-	INSTANCE_WR(ctx, 0x1f2c/4, 0x10700ff9);
-	INSTANCE_WR(ctx, 0x1f30/4, 0x0436086c);
-	INSTANCE_WR(ctx, 0x1f34/4, 0x000c001b);
-	INSTANCE_WR(ctx, 0x1f3c/4, 0x10700ff9);
-	INSTANCE_WR(ctx, 0x1f40/4, 0x0436086c);
-	INSTANCE_WR(ctx, 0x1f44/4, 0x000c001b);
-	INSTANCE_WR(ctx, 0x1f4c/4, 0x10700ff9);
-	INSTANCE_WR(ctx, 0x1f50/4, 0x0436086c);
-	INSTANCE_WR(ctx, 0x1f54/4, 0x000c001b);
-	INSTANCE_WR(ctx, 0x1f5c/4, 0x10700ff9);
-	INSTANCE_WR(ctx, 0x1f60/4, 0x0436086c);
-	INSTANCE_WR(ctx, 0x1f64/4, 0x000c001b);
-	INSTANCE_WR(ctx, 0x1f6c/4, 0x10700ff9);
-	INSTANCE_WR(ctx, 0x1f70/4, 0x0436086c);
-	INSTANCE_WR(ctx, 0x1f74/4, 0x000c001b);
-	INSTANCE_WR(ctx, 0x1f7c/4, 0x10700ff9);
-	INSTANCE_WR(ctx, 0x1f80/4, 0x0436086c);
-	INSTANCE_WR(ctx, 0x1f84/4, 0x000c001b);
-	INSTANCE_WR(ctx, 0x1f8c/4, 0x10700ff9);
-	INSTANCE_WR(ctx, 0x1f90/4, 0x0436086c);
-	INSTANCE_WR(ctx, 0x1f94/4, 0x000c001b);
-	INSTANCE_WR(ctx, 0x1f9c/4, 0x10700ff9);
-	INSTANCE_WR(ctx, 0x1fa0/4, 0x0436086c);
-	INSTANCE_WR(ctx, 0x1fa4/4, 0x000c001b);
-	INSTANCE_WR(ctx, 0x1fac/4, 0x10700ff9);
-	INSTANCE_WR(ctx, 0x1fb0/4, 0x0436086c);
-	INSTANCE_WR(ctx, 0x1fb4/4, 0x000c001b);
-	INSTANCE_WR(ctx, 0x1fbc/4, 0x10700ff9);
-	INSTANCE_WR(ctx, 0x1fc0/4, 0x0436086c);
-	INSTANCE_WR(ctx, 0x1fc4/4, 0x000c001b);
-	INSTANCE_WR(ctx, 0x1fcc/4, 0x10700ff9);
-	INSTANCE_WR(ctx, 0x1fd0/4, 0x0436086c);
-	INSTANCE_WR(ctx, 0x1fd4/4, 0x000c001b);
-	INSTANCE_WR(ctx, 0x1fdc/4, 0x10700ff9);
-	INSTANCE_WR(ctx, 0x1fe0/4, 0x0436086c);
-	INSTANCE_WR(ctx, 0x1fe4/4, 0x000c001b);
-	INSTANCE_WR(ctx, 0x1fec/4, 0x10700ff9);
-	INSTANCE_WR(ctx, 0x1ff0/4, 0x0436086c);
-	INSTANCE_WR(ctx, 0x1ff4/4, 0x000c001b);
-	INSTANCE_WR(ctx, 0x1ffc/4, 0x10700ff9);
-	INSTANCE_WR(ctx, 0x2000/4, 0x0436086c);
-	INSTANCE_WR(ctx, 0x2004/4, 0x000c001b);
-	INSTANCE_WR(ctx, 0x200c/4, 0x10700ff9);
-	INSTANCE_WR(ctx, 0x2010/4, 0x0436086c);
-	INSTANCE_WR(ctx, 0x2014/4, 0x000c001b);
-	INSTANCE_WR(ctx, 0x201c/4, 0x10700ff9);
-	INSTANCE_WR(ctx, 0x2020/4, 0x0436086c);
-	INSTANCE_WR(ctx, 0x2024/4, 0x000c001b);
-	INSTANCE_WR(ctx, 0x202c/4, 0x10700ff9);
-	INSTANCE_WR(ctx, 0x2030/4, 0x0436086c);
-	INSTANCE_WR(ctx, 0x2034/4, 0x000c001b);
-	INSTANCE_WR(ctx, 0x203c/4, 0x10700ff9);
-	INSTANCE_WR(ctx, 0x2040/4, 0x0436086c);
-	INSTANCE_WR(ctx, 0x2044/4, 0x000c001b);
-	INSTANCE_WR(ctx, 0x204c/4, 0x10700ff9);
-	INSTANCE_WR(ctx, 0x2050/4, 0x0436086c);
-	INSTANCE_WR(ctx, 0x2054/4, 0x000c001b);
-	INSTANCE_WR(ctx,
0x205c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2060/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2064/4, 0x000c001b); - INSTANCE_WR(ctx, 0x206c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2070/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2074/4, 0x000c001b); - INSTANCE_WR(ctx, 0x207c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2080/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2084/4, 0x000c001b); - INSTANCE_WR(ctx, 0x208c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2090/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2094/4, 0x000c001b); - INSTANCE_WR(ctx, 0x209c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x20a0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x20a4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x20ac/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x20b0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x20b4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x20bc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x20c0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x20c4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x20cc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x20d0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x20d4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x20dc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x20e0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x20e4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x20ec/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x20f0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x20f4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x20fc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2100/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2104/4, 0x000c001b); - INSTANCE_WR(ctx, 0x210c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2110/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2114/4, 0x000c001b); - INSTANCE_WR(ctx, 0x211c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2120/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2124/4, 0x000c001b); - INSTANCE_WR(ctx, 0x212c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2130/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2134/4, 0x000c001b); - INSTANCE_WR(ctx, 0x213c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2140/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2144/4, 0x000c001b); - INSTANCE_WR(ctx, 0x214c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2150/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2154/4, 0x000c001b); - INSTANCE_WR(ctx, 0x215c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2160/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2164/4, 0x000c001b); - INSTANCE_WR(ctx, 0x216c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2170/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2174/4, 0x000c001b); - INSTANCE_WR(ctx, 0x217c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2180/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2184/4, 0x000c001b); - INSTANCE_WR(ctx, 0x218c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2190/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2194/4, 0x000c001b); - INSTANCE_WR(ctx, 0x219c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x21a0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x21a4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x21ac/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x21b0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x21b4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x21bc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x21c0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x21c4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x21cc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x21d0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x21d4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x21dc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x21e0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x21e4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x21ec/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x21f0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x21f4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x21fc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2200/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2204/4, 0x000c001b); - INSTANCE_WR(ctx, 0x220c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2210/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2214/4, 0x000c001b); - INSTANCE_WR(ctx, 0x221c/4, 0x10700ff9); - 
INSTANCE_WR(ctx, 0x2220/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2224/4, 0x000c001b); - INSTANCE_WR(ctx, 0x222c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2230/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2234/4, 0x000c001b); - INSTANCE_WR(ctx, 0x223c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2240/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2244/4, 0x000c001b); - INSTANCE_WR(ctx, 0x224c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2250/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2254/4, 0x000c001b); - INSTANCE_WR(ctx, 0x225c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2260/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2264/4, 0x000c001b); - INSTANCE_WR(ctx, 0x226c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2270/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2274/4, 0x000c001b); - INSTANCE_WR(ctx, 0x227c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2280/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2284/4, 0x000c001b); - INSTANCE_WR(ctx, 0x228c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2290/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2294/4, 0x000c001b); - INSTANCE_WR(ctx, 0x229c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x22a0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x22a4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x22ac/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x22b0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x22b4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x22bc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x22c0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x22c4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x22cc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x22d0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x22d4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x22dc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x22e0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x22e4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x22ec/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x22f0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x22f4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x22fc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2300/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2304/4, 0x000c001b); - INSTANCE_WR(ctx, 0x230c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2310/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2314/4, 0x000c001b); - INSTANCE_WR(ctx, 0x231c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2320/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2324/4, 0x000c001b); - INSTANCE_WR(ctx, 0x232c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2330/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2334/4, 0x000c001b); - INSTANCE_WR(ctx, 0x233c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2340/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2344/4, 0x000c001b); - INSTANCE_WR(ctx, 0x234c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2350/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2354/4, 0x000c001b); - INSTANCE_WR(ctx, 0x235c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2360/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2364/4, 0x000c001b); - INSTANCE_WR(ctx, 0x236c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2370/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2374/4, 0x000c001b); - INSTANCE_WR(ctx, 0x237c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2380/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2384/4, 0x000c001b); - INSTANCE_WR(ctx, 0x238c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2390/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2394/4, 0x000c001b); - INSTANCE_WR(ctx, 0x239c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x23a0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x23a4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x23ac/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x23b0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x23b4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x23bc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x23c0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x23c4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x23cc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x23d0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x23d4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x23dc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x23e0/4, 
0x0436086c); - INSTANCE_WR(ctx, 0x23e4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x23ec/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x23f0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x23f4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x23fc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2400/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2404/4, 0x000c001b); - INSTANCE_WR(ctx, 0x240c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2410/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2414/4, 0x000c001b); - INSTANCE_WR(ctx, 0x241c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2420/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2424/4, 0x000c001b); - INSTANCE_WR(ctx, 0x242c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2430/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2434/4, 0x000c001b); - INSTANCE_WR(ctx, 0x243c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2440/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2444/4, 0x000c001b); - INSTANCE_WR(ctx, 0x244c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2450/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2454/4, 0x000c001b); - INSTANCE_WR(ctx, 0x245c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2460/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2464/4, 0x000c001b); - INSTANCE_WR(ctx, 0x246c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2470/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2474/4, 0x000c001b); - INSTANCE_WR(ctx, 0x247c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2480/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2484/4, 0x000c001b); - INSTANCE_WR(ctx, 0x248c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2490/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2494/4, 0x000c001b); - INSTANCE_WR(ctx, 0x249c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x24a0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x24a4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x24ac/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x24b0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x24b4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x24bc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x24c0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x24c4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x24cc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x24d0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x24d4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x24dc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x24e0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x24e4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x24ec/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x24f0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x24f4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x24fc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2500/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2504/4, 0x000c001b); - INSTANCE_WR(ctx, 0x250c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2510/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2514/4, 0x000c001b); - INSTANCE_WR(ctx, 0x251c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2520/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2524/4, 0x000c001b); - INSTANCE_WR(ctx, 0x252c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2530/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2534/4, 0x000c001b); - INSTANCE_WR(ctx, 0x253c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2540/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2544/4, 0x000c001b); - INSTANCE_WR(ctx, 0x254c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2550/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2554/4, 0x000c001b); - INSTANCE_WR(ctx, 0x255c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2560/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2564/4, 0x000c001b); - INSTANCE_WR(ctx, 0x256c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2570/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2574/4, 0x000c001b); - INSTANCE_WR(ctx, 0x257c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2580/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2584/4, 0x000c001b); - INSTANCE_WR(ctx, 0x258c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2590/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2594/4, 0x000c001b); - INSTANCE_WR(ctx, 0x259c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x25a0/4, 0x0436086c); - 
INSTANCE_WR(ctx, 0x25a4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x25ac/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x25b0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x25b4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x25bc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x25c0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x25c4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x25cc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x25d0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x25d4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x25dc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x25e0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x25e4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x25ec/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x25f0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x25f4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x25fc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2600/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2604/4, 0x000c001b); - INSTANCE_WR(ctx, 0x260c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2610/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2614/4, 0x000c001b); - INSTANCE_WR(ctx, 0x261c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2620/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2624/4, 0x000c001b); - INSTANCE_WR(ctx, 0x262c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2630/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2634/4, 0x000c001b); - INSTANCE_WR(ctx, 0x263c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2640/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2644/4, 0x000c001b); - INSTANCE_WR(ctx, 0x264c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2650/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2654/4, 0x000c001b); - INSTANCE_WR(ctx, 0x265c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2660/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2664/4, 0x000c001b); - INSTANCE_WR(ctx, 0x266c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2670/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2674/4, 0x000c001b); - INSTANCE_WR(ctx, 0x267c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2680/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2684/4, 0x000c001b); - INSTANCE_WR(ctx, 0x268c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2690/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2694/4, 0x000c001b); - INSTANCE_WR(ctx, 0x269c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x26a0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x26a4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x26ac/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x26b0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x26b4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x26bc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x26c0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x26c4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x26cc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x26d0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x26d4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x26dc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x26e0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x26e4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x26ec/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x26f0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x26f4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x26fc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2700/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2704/4, 0x000c001b); - INSTANCE_WR(ctx, 0x270c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2710/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2714/4, 0x000c001b); - INSTANCE_WR(ctx, 0x271c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2720/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2724/4, 0x000c001b); - INSTANCE_WR(ctx, 0x272c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2730/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2734/4, 0x000c001b); - INSTANCE_WR(ctx, 0x273c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2740/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2744/4, 0x000c001b); - INSTANCE_WR(ctx, 0x274c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2750/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2754/4, 0x000c001b); - INSTANCE_WR(ctx, 0x275c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2760/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2764/4, 
0x000c001b); - INSTANCE_WR(ctx, 0x276c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2770/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2774/4, 0x000c001b); - INSTANCE_WR(ctx, 0x277c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2780/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2784/4, 0x000c001b); - INSTANCE_WR(ctx, 0x278c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2790/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2794/4, 0x000c001b); - INSTANCE_WR(ctx, 0x279c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x27a0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x27a4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x27ac/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x27b0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x27b4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x27bc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x27c0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x27c4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x27cc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x27d0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x27d4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x27dc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x27e0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x27e4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x27ec/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x27f0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x27f4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x27fc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2800/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2804/4, 0x000c001b); - INSTANCE_WR(ctx, 0x280c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2810/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2814/4, 0x000c001b); - INSTANCE_WR(ctx, 0x281c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2820/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2824/4, 0x000c001b); - INSTANCE_WR(ctx, 0x282c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2830/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2834/4, 0x000c001b); - INSTANCE_WR(ctx, 0x283c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2840/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2844/4, 0x000c001b); - INSTANCE_WR(ctx, 0x284c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2850/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2854/4, 0x000c001b); - INSTANCE_WR(ctx, 0x285c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2860/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2864/4, 0x000c001b); - INSTANCE_WR(ctx, 0x286c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2870/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2874/4, 0x000c001b); - INSTANCE_WR(ctx, 0x287c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2880/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2884/4, 0x000c001b); - INSTANCE_WR(ctx, 0x288c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2890/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2894/4, 0x000c001b); - INSTANCE_WR(ctx, 0x289c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x28a0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x28a4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x28ac/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x28b0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x28b4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x28bc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x28c0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x28c4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x28cc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x28d0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x28d4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x28dc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x28e0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x28e4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x28ec/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x28f0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x28f4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x28fc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2900/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2904/4, 0x000c001b); - INSTANCE_WR(ctx, 0x290c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2910/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2914/4, 0x000c001b); - INSTANCE_WR(ctx, 0x291c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2920/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2924/4, 0x000c001b); - 
INSTANCE_WR(ctx, 0x292c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2930/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2934/4, 0x000c001b); - INSTANCE_WR(ctx, 0x293c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2940/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2944/4, 0x000c001b); - INSTANCE_WR(ctx, 0x294c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2950/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2954/4, 0x000c001b); - INSTANCE_WR(ctx, 0x295c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2960/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2964/4, 0x000c001b); - INSTANCE_WR(ctx, 0x296c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2970/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2974/4, 0x000c001b); - INSTANCE_WR(ctx, 0x297c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2980/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2984/4, 0x000c001b); - INSTANCE_WR(ctx, 0x298c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2990/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2994/4, 0x000c001b); - INSTANCE_WR(ctx, 0x299c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x29a0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x29a4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x29ac/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x29b0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x29b4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x29bc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x29c0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x29c4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x29cc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x29d0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x29d4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x29dc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x29e0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x29e4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x29ec/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x29f0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x29f4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x29fc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2a00/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2a04/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2a0c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2a10/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2a14/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2a1c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2a20/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2a24/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2a2c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2a30/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2a34/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2a3c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2a40/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2a44/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2a4c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2a50/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2a54/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2a5c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2a60/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2a64/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2a6c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2a70/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2a74/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2a7c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2a80/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2a84/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2a8c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2a90/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2a94/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2a9c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2aa0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2aa4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2aac/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2ab0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2ab4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2abc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2ac0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2ac4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2acc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2ad0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2ad4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2adc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2ae0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2ae4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2aec/4, 
0x10700ff9); - INSTANCE_WR(ctx, 0x2af0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2af4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2afc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2b00/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2b04/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2b0c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2b10/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2b14/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2b1c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2b20/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2b24/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2b2c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2b30/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2b34/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2b3c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2b40/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2b44/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2b4c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2b50/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2b54/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2b5c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2b60/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2b64/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2b6c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2b70/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2b74/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2b7c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2b80/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2b84/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2b8c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2b90/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2b94/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2b9c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2ba0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2ba4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2bac/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2bb0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2bb4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2bbc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2bc0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2bc4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2bcc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2bd0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2bd4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2bdc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2be0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2be4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2bec/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2bf0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2bf4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2bfc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2c00/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2c04/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2c0c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2c10/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2c14/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2c1c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2c20/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2c24/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2c2c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2c30/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2c34/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2c3c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2c40/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2c44/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2c4c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2c50/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2c54/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2c5c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2c60/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2c64/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2c6c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2c70/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2c74/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2c7c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2c80/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2c84/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2c8c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2c90/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2c94/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2c9c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2ca0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2ca4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2cac/4, 0x10700ff9); - 
INSTANCE_WR(ctx, 0x2cb0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2cb4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2cbc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2cc0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2cc4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2ccc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2cd0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2cd4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2cdc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2ce0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2ce4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2cec/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2cf0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2cf4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2cfc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2d00/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2d04/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2d0c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2d10/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2d14/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2d1c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2d20/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2d24/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2d2c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2d30/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2d34/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2d3c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2d40/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2d44/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2d4c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2d50/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2d54/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2d5c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2d60/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2d64/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2d6c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2d70/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2d74/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2d7c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2d80/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2d84/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2d8c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2d90/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2d94/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2d9c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2da0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2da4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2dac/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2db0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2db4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2dbc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2dc0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2dc4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2dcc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2dd0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2dd4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2ddc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2de0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2de4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2dec/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2df0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2df4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2dfc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2e00/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2e04/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2e0c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2e10/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2e14/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2e1c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2e20/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2e24/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2e2c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2e30/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2e34/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2e3c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2e40/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2e44/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2e4c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2e50/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2e54/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2e5c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2e60/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2e64/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2e6c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2e70/4, 
0x0436086c); - INSTANCE_WR(ctx, 0x2e74/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2e7c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2e80/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2e84/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2e8c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2e90/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2e94/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2e9c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2ea0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2ea4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2eac/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2eb0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2eb4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2ebc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2ec0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2ec4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2ecc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2ed0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2ed4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2edc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2ee0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2ee4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2eec/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2ef0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2ef4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2efc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2f00/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2f04/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2f0c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2f10/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2f14/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2f1c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2f20/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2f24/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2f2c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2f30/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2f34/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2f3c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2f40/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2f44/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2f4c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2f50/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2f54/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2f5c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2f60/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2f64/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2f6c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2f70/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2f74/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2f7c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2f80/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2f84/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2f8c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2f90/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2f94/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2f9c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2fa0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2fa4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2fac/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2fb0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2fb4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2fbc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2fc0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2fc4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2fcc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2fd0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2fd4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2fdc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2fe0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2fe4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2fec/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x2ff0/4, 0x0436086c); - INSTANCE_WR(ctx, 0x2ff4/4, 0x000c001b); - INSTANCE_WR(ctx, 0x2ffc/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x3000/4, 0x0436086c); - INSTANCE_WR(ctx, 0x3004/4, 0x000c001b); - INSTANCE_WR(ctx, 0x300c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x3010/4, 0x0436086c); - INSTANCE_WR(ctx, 0x3014/4, 0x000c001b); - INSTANCE_WR(ctx, 0x301c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x3020/4, 0x0436086c); - INSTANCE_WR(ctx, 0x3024/4, 0x000c001b); - INSTANCE_WR(ctx, 0x302c/4, 0x10700ff9); - INSTANCE_WR(ctx, 0x3030/4, 0x0436086c); - 
-	INSTANCE_WR(ctx, 0x3034/4, 0x000c001b);
-	INSTANCE_WR(ctx, 0x303c/4, 0x10700ff9);
-	INSTANCE_WR(ctx, 0x3040/4, 0x0436086c);
-	INSTANCE_WR(ctx, 0x3044/4, 0x000c001b);
-	INSTANCE_WR(ctx, 0x304c/4, 0x10700ff9);
-	INSTANCE_WR(ctx, 0x3050/4, 0x0436086c);
-	INSTANCE_WR(ctx, 0x3054/4, 0x000c001b);
-	INSTANCE_WR(ctx, 0x305c/4, 0x10700ff9);
-	INSTANCE_WR(ctx, 0x3060/4, 0x0436086c);
-	INSTANCE_WR(ctx, 0x3064/4, 0x000c001b);
-	INSTANCE_WR(ctx, 0x306c/4, 0x10700ff9);
-	INSTANCE_WR(ctx, 0x3070/4, 0x0436086c);
-	INSTANCE_WR(ctx, 0x3074/4, 0x000c001b);
-	INSTANCE_WR(ctx, 0x307c/4, 0x10700ff9);
-	INSTANCE_WR(ctx, 0x3080/4, 0x0436086c);
-	INSTANCE_WR(ctx, 0x3084/4, 0x000c001b);
-	INSTANCE_WR(ctx, 0x308c/4, 0x10700ff9);
-	INSTANCE_WR(ctx, 0x3090/4, 0x0436086c);
-	INSTANCE_WR(ctx, 0x3094/4, 0x000c001b);
+	for(i = 0x1f1c; i<= 0x308c ; i+= 16) {
+		INSTANCE_WR(ctx, i/4 + 0, 0x10700ff9);
+		INSTANCE_WR(ctx, i/4 + 1, 0x0436086c);
+		INSTANCE_WR(ctx, i/4 + 2, 0x000c001b);
+	}
 	for(i = 0x30bc; i< 0x30cc; i += 4)
 		INSTANCE_WR(ctx, i/4, 0x0000ffff);
 	INSTANCE_WR(ctx, 0x3450/4, 0x3f800000);

From 79744d730c90019edd367eee4a8ec1fa22d53402 Mon Sep 17 00:00:00 2001
From: Stephane Marchesin
Date: Tue, 30 Oct 2007 16:55:17 +0100
Subject: [PATCH 437/437] Nouveau: add a comment about SKIPS for next API breakage.

---
 shared-core/nouveau_dma.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/shared-core/nouveau_dma.c b/shared-core/nouveau_dma.c
index ab502e6a..b33df588 100644
--- a/shared-core/nouveau_dma.c
+++ b/shared-core/nouveau_dma.c
@@ -29,6 +29,9 @@
 #include "nouveau_drv.h"
 #include "nouveau_dma.h"
 
+/* FIXME : should go into a nouveau_drm.h define ?
+ * (it's shared between DRI & DDX & DRM)
+ */
 #define SKIPS 8
 
 int