First pass merge of XFree86 4.2.0 import.

main
David Dawes 2002-01-27 20:05:42 +00:00
parent 14945ada16
commit 44aa4d6297
66 changed files with 4774 additions and 10691 deletions


@ -1,5 +1,6 @@
 # $FreeBSD$
-SUBDIR = drm tdfx mga gamma # i810, i830 & sis are not complete
+SUBDIR = tdfx mga r128 radeon gamma # i810 sis i830
 .include <bsd.subdir.mk>


@ -1,8 +1,8 @@
-/* drmP.h -- Private header for Direct Rendering Manager -*- c -*-
+/* drmP.h -- Private header for Direct Rendering Manager -*- linux-c -*-
  * Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com
- * Revised: Tue Oct 12 08:51:07 1999 by faith@precisioninsight.com
  *
  * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
  * All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@ -19,137 +19,67 @@
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* DEALINGS IN THE SOFTWARE. * OTHER DEALINGS IN THE SOFTWARE.
*
* $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drmP.h,v 1.58 1999/08/30 13:05:00 faith Exp $
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/bsd/drm/kernel/drmP.h,v 1.3 2001/03/06 16:45:26 dawes Exp $
* *
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Gareth Hughes <gareth@valinux.com>
*/ */
#ifndef _DRM_P_H_ #ifndef _DRM_P_H_
#define _DRM_P_H_ #define _DRM_P_H_
#ifdef _KERNEL #if defined(_KERNEL) || defined(__KERNEL__)
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/fcntl.h>
#include <sys/uio.h>
#include <sys/filio.h>
#include <sys/sysctl.h>
#include <sys/select.h>
#include <sys/bus.h>
#if __FreeBSD_version >= 400005
#include <sys/taskqueue.h>
#endif
#if __FreeBSD_version >= 400006 /* DRM template customization defaults
#define DRM_AGP
#endif
#ifdef DRM_AGP
#include <pci/agpvar.h>
#endif
#include "drm.h"
typedef u_int32_t atomic_t;
typedef u_int32_t cycles_t;
typedef u_int32_t spinlock_t;
#define atomic_set(p, v) (*(p) = (v))
#define atomic_read(p) (*(p))
#define atomic_inc(p) atomic_add_int(p, 1)
#define atomic_dec(p) atomic_subtract_int(p, 1)
#define atomic_add(n, p) atomic_add_int(p, n)
#define atomic_sub(n, p) atomic_subtract_int(p, n)
/* The version number here is a guess */
#if __FreeBSD_version >= 500010
#define callout_init(a) callout_init(a, 0)
#endif
/* Fake this */
static __inline u_int32_t
test_and_set_bit(int b, volatile u_int32_t *p)
{
int s = splhigh();
u_int32_t m = 1<<b;
u_int32_t r = *p & m;
*p |= m;
splx(s);
return r;
}
static __inline void
clear_bit(int b, volatile u_int32_t *p)
{
atomic_clear_int(p + (b >> 5), 1 << (b & 0x1f));
}
static __inline void
set_bit(int b, volatile u_int32_t *p)
{
atomic_set_int(p + (b >> 5), 1 << (b & 0x1f));
}
static __inline int
test_bit(int b, volatile u_int32_t *p)
{
return p[b >> 5] & (1 << (b & 0x1f));
}
static __inline int
find_first_zero_bit(volatile u_int32_t *p, int max)
{
int b;
for (b = 0; b < max; b += 32) {
if (p[b >> 5] != ~0) {
for (;;) {
if ((p[b >> 5] & (1 << (b & 0x1f))) == 0)
return b;
b++;
}
}
}
return max;
}
#define spldrm() spltty()
#define memset(p, v, s) bzero(p, s)
/*
* Fake out the module macros for versions of FreeBSD where they don't
* exist.
*/ */
#if __FreeBSD_version < 400002 #ifndef __HAVE_AGP
#define __HAVE_AGP 0
#define MODULE_VERSION(a,b) struct __hack #endif
#define MODULE_DEPEND(a,b,c,d,e) struct __hack #ifndef __HAVE_MTRR
#define __HAVE_MTRR 0
#endif
#ifndef __HAVE_CTX_BITMAP
#define __HAVE_CTX_BITMAP 0
#endif
#ifndef __HAVE_DMA
#define __HAVE_DMA 0
#endif
#ifndef __HAVE_DMA_IRQ
#define __HAVE_DMA_IRQ 0
#endif
#ifndef __HAVE_DMA_WAITLIST
#define __HAVE_DMA_WAITLIST 0
#endif
#ifndef __HAVE_DMA_FREELIST
#define __HAVE_DMA_FREELIST 0
#endif
#ifndef __HAVE_DMA_HISTOGRAM
#define __HAVE_DMA_HISTOGRAM 0
#endif #endif
#define DRM_DEBUG_CODE 0 /* Include debugging code (if > 1, then #define DRM_DEBUG_CODE 0 /* Include debugging code (if > 1, then
also include looping detection. */ also include looping detection. */
#define DRM_DMA_HISTOGRAM 1 /* Make histogram of DMA latency. */
typedef struct drm_device drm_device_t;
typedef struct drm_file drm_file_t;
/* There's undoubtably more of this file to go into these OS dependent ones. */
#include "drm_os_freebsd.h"
#include "drm.h"
/* Begin the DRM... */
#define DRM_HASH_SIZE 16 /* Size of key hash table */ #define DRM_HASH_SIZE 16 /* Size of key hash table */
#define DRM_KERNEL_CONTEXT 0 /* Change drm_resctx if changed */ #define DRM_KERNEL_CONTEXT 0 /* Change drm_resctx if changed */
#define DRM_RESERVED_CONTEXTS 1 /* Change drm_resctx if changed */ #define DRM_RESERVED_CONTEXTS 1 /* Change drm_resctx if changed */
#define DRM_LOOPING_LIMIT 5000000 #define DRM_LOOPING_LIMIT 5000000
#define DRM_BSZ 1024 /* Buffer size for /dev/drm? output */ #define DRM_BSZ 1024 /* Buffer size for /dev/drm? output */
#define DRM_TIME_SLICE (hz/20) /* Time slice for GLXContexts */
#define DRM_LOCK_SLICE 1 /* Time slice for lock, in jiffies */ #define DRM_LOCK_SLICE 1 /* Time slice for lock, in jiffies */
#define DRM_FLAG_DEBUG 0x01 #define DRM_FLAG_DEBUG 0x01
@ -174,63 +104,27 @@ find_first_zero_bit(volatile u_int32_t *p, int max)
 #define DRM_MEM_TOTALAGP  16
 #define DRM_MEM_BOUNDAGP  17
 #define DRM_MEM_CTXBITMAP 18
+#define DRM_MEM_STUB      19
+#define DRM_MEM_SGLISTS   20
 #define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
 				/* Backward compatibility section */
+/* _PAGE_WT changed to _PAGE_PWT in 2.2.6 */
 #ifndef _PAGE_PWT
-				/* The name of _PAGE_WT was changed to
-				   _PAGE_PWT in Linux 2.2.6 */
 #define _PAGE_PWT _PAGE_WT
 #endif
-#define __drm_dummy_lock(lock) (*(__volatile__ unsigned int *)lock)
-#define _DRM_CAS(lock,old,new,__ret)					\
-	do {								\
-		int __dummy;	/* Can't mark eax as clobbered */	\
-		__asm__ __volatile__(					\
-			"lock ; cmpxchg %4,%1\n\t"			\
-			"setnz %0"					\
-			: "=d" (__ret),					\
-			  "=m" (__drm_dummy_lock(lock)),		\
-			  "=a" (__dummy)				\
-			: "2" (old),					\
-			  "r" (new));					\
-	} while (0)
+/* Mapping helper macros */
+#define DRM_IOREMAP(map)						\
+	(map)->handle = DRM(ioremap)( (map)->offset, (map)->size )
+#define DRM_IOREMAPFREE(map)						\
+	do {								\
+		if ( (map)->handle && (map)->size )			\
+			DRM(ioremapfree)( (map)->handle, (map)->size );	\
+	} while (0)
-				/* Macros to make printk easier */
-#define DRM_ERROR(fmt, arg...) \
-	printf("error: " "[" DRM_NAME ":" __FUNCTION__ "] *ERROR* " fmt , ##arg)
-#define DRM_MEM_ERROR(area, fmt, arg...) \
-	printf("error: " "[" DRM_NAME ":" __FUNCTION__ ":%s] *ERROR* " fmt , \
-		drm_mem_stats[area].name , ##arg)
-#define DRM_INFO(fmt, arg...) printf("info: " "[" DRM_NAME "] " fmt , ##arg)
-#if DRM_DEBUG_CODE
-#define DRM_DEBUG(fmt, arg...)						\
-	do {								\
-		if (drm_flags&DRM_FLAG_DEBUG)				\
-			printf("[" DRM_NAME ":" __FUNCTION__ "] " fmt ,	\
-				##arg);					\
-	} while (0)
-#else
-#define DRM_DEBUG(fmt, arg...) do { } while (0)
-#endif
-#define DRM_PROC_LIMIT (PAGE_SIZE-80)
-#define DRM_SYSCTL_PRINT(fmt, arg...)			\
-	snprintf(buf, sizeof(buf), fmt, ##arg);		\
-	error = SYSCTL_OUT(req, buf, strlen(buf));	\
-	if (error) return error;
-#define DRM_SYSCTL_PRINT_RET(ret, fmt, arg...)		\
-	snprintf(buf, sizeof(buf), fmt, ##arg);		\
-	error = SYSCTL_OUT(req, buf, strlen(buf));	\
-	if (error) { ret; return error; }
 				/* Internal types and structures */
 #define DRM_ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0]))
 #define DRM_MIN(a,b) ((a)<(b)?(a):(b))
@ -240,6 +134,16 @@ find_first_zero_bit(volatile u_int32_t *p, int max)
 #define DRM_BUFCOUNT(x) ((x)->count - DRM_LEFTCOUNT(x))
 #define DRM_WAITCOUNT(dev,idx) DRM_BUFCOUNT(&dev->queuelist[idx]->waitlist)
+#define DRM_GET_PRIV_SAREA(_dev, _ctx, _map) do {	\
+	(_map) = (_dev)->context_sareas[_ctx];		\
+} while(0)
+typedef struct drm_pci_list {
+	u16 vendor;
+	u16 device;
+} drm_pci_list_t;
 typedef struct drm_ioctl_desc {
 	d_ioctl_t	  *func;
 	int		  auth_needed;
@ -279,7 +183,7 @@ typedef struct drm_buf {
 	struct drm_buf	  *next;       /* Kernel-only: used for free list    */
 	__volatile__ int  waiting;     /* On kernel DMA queue		      */
 	__volatile__ int  pending;     /* On hardware DMA queue	      */
-	int		  dma_wait;    /* Processes waiting		      */
+	wait_queue_head_t dma_wait;    /* Processes waiting		      */
 	pid_t		  pid;	       /* PID of holding process	      */
 	int		  context;     /* Kernel queue for this buffer	      */
 	int		  while_locked;/* Dispatch this buffer while locked   */
@ -292,15 +196,15 @@ typedef struct drm_buf {
 		DRM_LIST_RECLAIM = 5
 	}		  list;	       /* Which list we're on		      */
-	void		  *dev_private;
-	int		  dev_priv_size;
 #if DRM_DMA_HISTOGRAM
-	struct timespec	  time_queued;	   /* Queued to kernel DMA queue      */
-	struct timespec	  time_dispatched; /* Dispatched to hardware	      */
-	struct timespec	  time_completed;  /* Completed by hardware	      */
-	struct timespec	  time_freed;	   /* Back on freelist		      */
+	cycles_t	  time_queued;	   /* Queued to kernel DMA queue      */
+	cycles_t	  time_dispatched; /* Dispatched to hardware	      */
+	cycles_t	  time_completed;  /* Completed by hardware	      */
+	cycles_t	  time_freed;	   /* Back on freelist		      */
 #endif
+	int		  dev_priv_size;   /* Size of buffer private stoarge */
+	void		  *dev_private;	   /* Per-buffer private storage     */
 } drm_buf_t;
 #if DRM_DMA_HISTOGRAM
@ -332,8 +236,8 @@ typedef struct drm_waitlist {
 	drm_buf_t	  **rp;		/* Read pointer			    */
 	drm_buf_t	  **wp;		/* Write pointer		    */
 	drm_buf_t	  **end;	/* End pointer			    */
-	spinlock_t	  read_lock;
-	spinlock_t	  write_lock;
+	DRM_OS_SPINTYPE	  read_lock;
+	DRM_OS_SPINTYPE	  write_lock;
 } drm_waitlist_t;
 typedef struct drm_freelist {
@ -341,11 +245,11 @@ typedef struct drm_freelist {
 	atomic_t	  count;	/* Number of free buffers	    */
 	drm_buf_t	  *next;	/* End pointer			    */
-	int		  waiting;	/* Processes waiting on free bufs   */
+	wait_queue_head_t waiting;	/* Processes waiting on free bufs   */
 	int		  low_mark;	/* Low water mark		    */
 	int		  high_mark;	/* High water mark		    */
 	atomic_t	  wfh;		/* If waiting for high mark	    */
-	struct simplelock lock;	/* hope this doesn't need to be linux compatible */
+	DRM_OS_SPINTYPE	  lock;
 } drm_freelist_t;
 typedef struct drm_buf_entry {
@ -365,7 +269,7 @@ typedef struct drm_hw_lock {
 } drm_hw_lock_t;
 typedef TAILQ_HEAD(drm_file_list, drm_file) drm_file_list_t;
-typedef struct drm_file {
+struct drm_file {
 	TAILQ_ENTRY(drm_file) link;
 	int		  authenticated;
 	int		  minor;
@ -375,33 +279,35 @@ typedef struct drm_file {
 	drm_magic_t	  magic;
 	unsigned long	  ioctl_count;
 	struct drm_device *devXX;
-} drm_file_t;
+};
 typedef struct drm_queue {
 	atomic_t	  use_count;	/* Outstanding uses (+1)	    */
 	atomic_t	  finalization;	/* Finalization in progress	    */
 	atomic_t	  block_count;	/* Count of processes waiting	    */
 	atomic_t	  block_read;	/* Queue blocked for reads	    */
-	int		  read_queue;	/* Processes waiting on block_read  */
+	wait_queue_head_t read_queue;	/* Processes waiting on block_read  */
 	atomic_t	  block_write;	/* Queue blocked for writes	    */
-	int		  write_queue;	/* Processes waiting on block_write */
+	wait_queue_head_t write_queue;	/* Processes waiting on block_write */
-#if 1
 	atomic_t	  total_queued;	/* Total queued statistic	    */
 	atomic_t	  total_flushed;/* Total flushes statistic	    */
 	atomic_t	  total_locks;	/* Total locks statistics	    */
-#endif
 	drm_ctx_flags_t	  flags;	/* Context preserving and 2D-only   */
 	drm_waitlist_t	  waitlist;	/* Pending buffers		    */
-	int		  flush_queue;	/* Processes waiting until flush    */
+	wait_queue_head_t flush_queue;	/* Processes waiting until flush    */
 } drm_queue_t;
 typedef struct drm_lock_data {
 	drm_hw_lock_t	  *hw_lock;	/* Hardware lock		    */
 	pid_t		  pid;		/* PID of lock holder (0=kernel)    */
-	int		  lock_queue;	/* Queue of blocked processes	    */
+	wait_queue_head_t lock_queue;	/* Queue of blocked processes	    */
 	unsigned long	  lock_time;	/* Time of last lock in jiffies	    */
 } drm_lock_data_t;
 typedef struct drm_device_dma {
+#if 0
 	/* Performance Counters */
 	atomic_t	  total_prio;	/* Total DRM_DMA_PRIORITY	    */
 	atomic_t	  total_bytes;	/* Total bytes DMA'd		    */
@ -415,27 +321,28 @@ typedef struct drm_device_dma {
 	atomic_t	  total_tried;	/* Tried next_buffer		    */
 	atomic_t	  total_hit;	/* Sent next_buffer		    */
 	atomic_t	  total_lost;	/* Lost interrupt		    */
+#endif
 	drm_buf_entry_t	  bufs[DRM_MAX_ORDER+1];
 	int		  buf_count;
 	drm_buf_t	  **buflist;	/* Vector of pointers info bufs	    */
 	int		  seg_count;
 	int		  page_count;
-	vm_offset_t	  *pagelist;
+	unsigned long	  *pagelist;
 	unsigned long	  byte_count;
 	enum {
-		_DRM_DMA_USE_AGP = 0x01
+		_DRM_DMA_USE_AGP = 0x01,
+		_DRM_DMA_USE_SG  = 0x02
 	} flags;
 	/* DMA support */
 	drm_buf_t	  *this_buffer;	/* Buffer being sent		    */
 	drm_buf_t	  *next_buffer;	/* Selected buffer to send	    */
 	drm_queue_t	  *next_queue;	/* Queue from which buffer selected*/
-	int		  waiting;	/* Processes waiting on free bufs   */
+	wait_queue_head_t waiting;	/* Processes waiting on free bufs   */
 } drm_device_dma_t;
-#ifdef DRM_AGP
+#if __REALLY_HAVE_AGP
 typedef struct drm_agp_mem {
 	void		  *handle;
 	unsigned long	  bound;	/* address */
@ -454,11 +361,30 @@ typedef struct drm_agp_head {
 	int		  acquired;
 	unsigned long	  base;
 	int		  agp_mtrr;
+	int		  cant_use_aperture;
+	unsigned long	  page_mask;
 } drm_agp_head_t;
 #endif
-typedef struct drm_device {
+typedef struct drm_sg_mem {
+	unsigned long	handle;
+	void		*virtual;
+	int		pages;
+	struct page	**pagelist;
+} drm_sg_mem_t;
+typedef struct drm_sigdata {
+	int		context;
+	drm_hw_lock_t	*lock;
+} drm_sigdata_t;
+typedef TAILQ_HEAD(drm_map_list, drm_map_list_entry) drm_map_list_t;
+typedef struct drm_map_list_entry {
+	TAILQ_ENTRY(drm_map_list_entry) link;
+	drm_map_t	*map;
+} drm_map_list_entry_t;
+struct drm_device {
 	const char	  *name;	/* Simple driver name		   */
 	char		  *unique;	/* Unique identifier: e.g., busid  */
 	int		  unique_len;	/* Length of unique field	   */
@ -472,9 +398,8 @@ typedef struct drm_device {
 	struct proc_dir_entry *root;	/* Root for this device's entries  */
 /* Locks */
-	struct simplelock count_lock;	/* For inuse, open_count, buf_use  */
+	DRM_OS_SPINTYPE	  count_lock;	/* For inuse, open_count, buf_use  */
 	struct lock	  dev_lock;	/* For others			   */
 /* Usage Counters */
 	int		  open_count;	/* Outstanding files open	   */
 	atomic_t	  ioctl_count;	/* Outstanding IOCTLs pending	   */
@ -482,26 +407,22 @@ typedef struct drm_device {
 	int		  buf_use;	/* Buffers in use -- cannot alloc  */
 	atomic_t	  buf_alloc;	/* Buffer allocation in progress   */
-/* Performance Counters */
-	atomic_t	  total_open;
-	atomic_t	  total_close;
-	atomic_t	  total_ioctl;
-	atomic_t	  total_irq;	/* Total interruptions		   */
-	atomic_t	  total_ctx;	/* Total context switches	   */
-	atomic_t	  total_locks;
-	atomic_t	  total_unlocks;
-	atomic_t	  total_contends;
-	atomic_t	  total_sleeps;
+/* Performance counters */
+	unsigned long	  counters;
+	drm_stat_type_t	  types[15];
+	atomic_t	  counts[15];
 /* Authentication */
 	drm_file_list_t	  files;
 	drm_magic_head_t  magiclist[DRM_HASH_SIZE];
 /* Memory management */
-	drm_map_t	  **maplist;	/* Vector of pointers to regions   */
+	drm_map_list_t	  *maplist;	/* Linked list of regions	   */
 	int		  map_count;	/* Number of mappable regions	   */
+	drm_map_t	  **context_sareas;
+	int		  max_context;
 	drm_vma_entry_t	  *vmalist;	/* List of vmas (for debugging)	   */
 	drm_lock_data_t	  lock;		/* Information on hardware lock	   */
@ -513,22 +434,23 @@ typedef struct drm_device {
 	drm_device_dma_t  *dma;		/* Optional pointer for DMA support */
 /* Context support */
-	struct resource	  *irq;		/* Interrupt used by board	    */
+	int		  irq;		/* Interrupt used by board	    */
+	struct resource	  *irqr;	/* Resource for interrupt used by board */
 	void		  *irqh;	/* Handle from bus_setup_intr	    */
 	__volatile__ long context_flag;	/* Context swapping flag	    */
 	__volatile__ long interrupt_flag; /* Interruption handler flag	    */
 	__volatile__ long dma_flag;	/* DMA dispatch flag		    */
 	struct callout	  timer;	/* Timer for delaying ctx switch    */
-	int		  context_wait;	/* Processes waiting on ctx switch  */
+	wait_queue_head_t context_wait;	/* Processes waiting on ctx switch  */
 	int		  last_checked;	/* Last context checked for DMA	    */
 	int		  last_context;	/* Last current context		    */
-	int		  last_switch;	/* Time at last context switch	    */
+	unsigned long	  last_switch;	/* jiffies at last context switch   */
 #if __FreeBSD_version >= 400005
 	struct task	  task;
 #endif
-	struct timespec	  ctx_start;
-	struct timespec	  lck_start;
-#if DRM_DMA_HISTOGRAM
+	cycles_t	  ctx_start;
+	cycles_t	  lck_start;
+#if __HAVE_DMA_HISTOGRAM
 	drm_histogram_t	  histo;
 #endif
@ -540,196 +462,171 @@ typedef struct drm_device {
char *buf_end; /* End pointer */ char *buf_end; /* End pointer */
struct sigio *buf_sigio; /* Processes waiting for SIGIO */ struct sigio *buf_sigio; /* Processes waiting for SIGIO */
struct selinfo buf_sel; /* Workspace for select/poll */ struct selinfo buf_sel; /* Workspace for select/poll */
int buf_readers; /* Processes waiting to read */ int buf_selecting;/* True if poll sleeper */
int buf_writers; /* Processes waiting to ctx switch */ wait_queue_head_t buf_readers; /* Processes waiting to read */
int buf_selecting; /* True if poll sleeper */ wait_queue_head_t buf_writers; /* Processes waiting to ctx switch */
/* Sysctl support */ /* Sysctl support */
struct drm_sysctl_info *sysctl; struct drm_sysctl_info *sysctl;
#ifdef DRM_AGP #if __REALLY_HAVE_AGP
drm_agp_head_t *agp; drm_agp_head_t *agp;
#endif #endif
u_int32_t *ctx_bitmap; struct pci_dev *pdev;
void *dev_private; #ifdef __alpha__
} drm_device_t; #if LINUX_VERSION_CODE < 0x020403
struct pci_controler *hose;
/* Internal function definitions */
/* Misc. support (init.c) */
extern int drm_flags;
extern void drm_parse_options(char *s);
/* Device support (fops.c) */
extern drm_file_t *drm_find_file_by_proc(drm_device_t *dev, struct proc *p);
extern int drm_open_helper(dev_t kdev, int flags, int fmt, struct proc *p,
drm_device_t *dev);
extern d_close_t drm_close;
extern d_read_t drm_read;
extern d_write_t drm_write;
extern d_poll_t drm_poll;
extern int drm_fsetown(dev_t kdev, u_long cmd, caddr_t data,
int flags, struct proc *p);
extern int drm_fgetown(dev_t kdev, u_long cmd, caddr_t data,
int flags, struct proc *p);
extern int drm_write_string(drm_device_t *dev, const char *s);
#if 0
/* Mapping support (vm.c) */
extern unsigned long drm_vm_nopage(struct vm_area_struct *vma,
unsigned long address,
int write_access);
extern unsigned long drm_vm_shm_nopage(struct vm_area_struct *vma,
unsigned long address,
int write_access);
extern unsigned long drm_vm_dma_nopage(struct vm_area_struct *vma,
unsigned long address,
int write_access);
extern void drm_vm_open(struct vm_area_struct *vma);
extern void drm_vm_close(struct vm_area_struct *vma);
extern int drm_mmap_dma(struct file *filp,
struct vm_area_struct *vma);
#endif
extern d_mmap_t drm_mmap;
/* Proc support (proc.c) */
extern int drm_sysctl_init(drm_device_t *dev);
extern int drm_sysctl_cleanup(drm_device_t *dev);
/* Memory management support (memory.c) */
extern void drm_mem_init(void);
#if __FreeBSD_version < 411000
#define DRM_SYSCTL_HANDLER_ARGS SYSCTL_HANDLER_ARGS
#else #else
#define DRM_SYSCTL_HANDLER_ARGS (SYSCTL_HANDLER_ARGS) struct pci_controller *hose;
#endif #endif
extern int drm_mem_info DRM_SYSCTL_HANDLER_ARGS;
extern void *drm_alloc(size_t size, int area);
extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size,
int area);
extern char *drm_strdup(const char *s, int area);
extern void drm_strfree(char *s, int area);
extern void drm_free(void *pt, size_t size, int area);
extern unsigned long drm_alloc_pages(int order, int area);
extern void drm_free_pages(unsigned long address, int order,
int area);
extern void *drm_ioremap(unsigned long offset, unsigned long size);
extern void drm_ioremapfree(void *pt, unsigned long size);
#ifdef DRM_AGP
extern void *drm_alloc_agp(int pages, u_int32_t type);
extern int drm_free_agp(void *handle, int pages);
extern int drm_bind_agp(void *handle, unsigned int start);
extern int drm_unbind_agp(void *handle);
#endif #endif
drm_sg_mem_t *sg; /* Scatter gather memory */
unsigned long *ctx_bitmap;
void *dev_private;
drm_sigdata_t sigdata; /* For block_all_signals */
sigset_t sigmask;
};
/* Buffer management support (bufs.c) */ extern int DRM(flags);
extern int drm_order(unsigned long size); extern void DRM(parse_options)( char *s );
extern d_ioctl_t drm_addmap; extern int DRM(cpu_valid)( void );
extern d_ioctl_t drm_addbufs;
extern d_ioctl_t drm_infobufs;
extern d_ioctl_t drm_markbufs;
extern d_ioctl_t drm_freebufs;
extern d_ioctl_t drm_mapbufs;
/* Authentication (drm_auth.h) */
/* Buffer list management support (lists.c) */ extern int DRM(add_magic)(drm_device_t *dev, drm_file_t *priv,
extern int drm_waitlist_create(drm_waitlist_t *bl, int count);
extern int drm_waitlist_destroy(drm_waitlist_t *bl);
extern int drm_waitlist_put(drm_waitlist_t *bl, drm_buf_t *buf);
extern drm_buf_t *drm_waitlist_get(drm_waitlist_t *bl);
extern int drm_freelist_create(drm_freelist_t *bl, int count);
extern int drm_freelist_destroy(drm_freelist_t *bl);
extern int drm_freelist_put(drm_device_t *dev, drm_freelist_t *bl,
drm_buf_t *buf);
extern drm_buf_t *drm_freelist_get(drm_freelist_t *bl, int block);
/* DMA support (gen_dma.c) */
extern void drm_dma_setup(drm_device_t *dev);
extern void drm_dma_takedown(drm_device_t *dev);
extern void drm_free_buffer(drm_device_t *dev, drm_buf_t *buf);
extern void drm_reclaim_buffers(drm_device_t *dev, pid_t pid);
extern int drm_context_switch(drm_device_t *dev, int old, int new);
extern int drm_context_switch_complete(drm_device_t *dev, int new);
extern void drm_wakeup(drm_device_t *dev, drm_buf_t *buf);
extern void drm_clear_next_buffer(drm_device_t *dev);
extern int drm_select_queue(drm_device_t *dev,
void (*wrapper)(void *));
extern int drm_dma_enqueue(drm_device_t *dev, drm_dma_t *dma);
extern int drm_dma_get_buffers(drm_device_t *dev, drm_dma_t *dma);
#if DRM_DMA_HISTOGRAM
extern int drm_histogram_slot(struct timespec *ts);
extern void drm_histogram_compute(drm_device_t *dev, drm_buf_t *buf);
#endif
/* Misc. IOCTL support (ioctl.c) */
extern d_ioctl_t drm_irq_busid;
extern d_ioctl_t drm_getunique;
extern d_ioctl_t drm_setunique;
/* Context IOCTL support (context.c) */
extern d_ioctl_t drm_resctx;
extern d_ioctl_t drm_addctx;
extern d_ioctl_t drm_modctx;
extern d_ioctl_t drm_getctx;
extern d_ioctl_t drm_switchctx;
extern d_ioctl_t drm_newctx;
extern d_ioctl_t drm_rmctx;
/* Drawable IOCTL support (drawable.c) */
extern d_ioctl_t drm_adddraw;
extern d_ioctl_t drm_rmdraw;
/* Authentication IOCTL support (auth.c) */
extern int drm_add_magic(drm_device_t *dev, drm_file_t *priv,
drm_magic_t magic); drm_magic_t magic);
extern int drm_remove_magic(drm_device_t *dev, drm_magic_t magic); extern int DRM(remove_magic)(drm_device_t *dev, drm_magic_t magic);
extern d_ioctl_t drm_getmagic;
extern d_ioctl_t drm_authmagic;
/* Driver support (drm_drv.h) */
extern int DRM(version)( DRM_OS_IOCTL );
extern int DRM(write_string)(drm_device_t *dev, const char *s);
/* Locking IOCTL support (lock.c) */ /* Memory management support (drm_memory.h) */
extern d_ioctl_t drm_block; extern void DRM(mem_init)(void);
extern d_ioctl_t drm_unblock; extern void *DRM(alloc)(size_t size, int area);
extern int drm_lock_take(__volatile__ unsigned int *lock, extern void *DRM(realloc)(void *oldpt, size_t oldsize, size_t size,
int area);
extern char *DRM(strdup)(const char *s, int area);
extern void DRM(strfree)(char *s, int area);
extern void DRM(free)(void *pt, size_t size, int area);
extern unsigned long DRM(alloc_pages)(int order, int area);
extern void DRM(free_pages)(unsigned long address, int order,
int area);
extern void *DRM(ioremap)(unsigned long offset, unsigned long size);
extern void DRM(ioremapfree)(void *pt, unsigned long size);
#if __REALLY_HAVE_AGP
extern agp_memory *DRM(alloc_agp)(int pages, u32 type);
extern int DRM(free_agp)(agp_memory *handle, int pages);
extern int DRM(bind_agp)(agp_memory *handle, unsigned int start);
extern int DRM(unbind_agp)(agp_memory *handle);
#endif
extern int DRM(context_switch)(drm_device_t *dev, int old, int new);
extern int DRM(context_switch_complete)(drm_device_t *dev, int new);
#if __HAVE_CTX_BITMAP
extern int DRM(ctxbitmap_init)( drm_device_t *dev );
extern void DRM(ctxbitmap_cleanup)( drm_device_t *dev );
extern void DRM(ctxbitmap_free)( drm_device_t *dev, int ctx_handle );
extern int DRM(ctxbitmap_next)( drm_device_t *dev );
#endif
/* Locking IOCTL support (drm_lock.h) */
extern int DRM(lock_take)(__volatile__ unsigned int *lock,
unsigned int context); unsigned int context);
extern int drm_lock_transfer(drm_device_t *dev, extern int DRM(lock_transfer)(drm_device_t *dev,
__volatile__ unsigned int *lock, __volatile__ unsigned int *lock,
unsigned int context); unsigned int context);
extern int drm_lock_free(drm_device_t *dev, extern int DRM(lock_free)(drm_device_t *dev,
__volatile__ unsigned int *lock, __volatile__ unsigned int *lock,
unsigned int context); unsigned int context);
extern d_ioctl_t drm_finish; extern int DRM(flush_unblock)(drm_device_t *dev, int context,
extern int drm_flush_unblock(drm_device_t *dev, int context,
drm_lock_flags_t flags); drm_lock_flags_t flags);
extern int drm_flush_block_and_flush(drm_device_t *dev, int context, extern int DRM(flush_block_and_flush)(drm_device_t *dev, int context,
drm_lock_flags_t flags); drm_lock_flags_t flags);
extern int DRM(notifier)(void *priv);
/* Context Bitmap support (ctxbitmap.c) */ /* Buffer management support (drm_bufs.h) */
extern int drm_ctxbitmap_init(drm_device_t *dev); extern int DRM(order)( unsigned long size );
extern void drm_ctxbitmap_cleanup(drm_device_t *dev);
extern int drm_ctxbitmap_next(drm_device_t *dev);
extern void drm_ctxbitmap_free(drm_device_t *dev, int ctx_handle);
#ifdef DRM_AGP #if __HAVE_DMA
/* AGP/GART support (agpsupport.c) */ /* DMA support (drm_dma.h) */
extern drm_agp_head_t *drm_agp_init(void); extern int DRM(dma_setup)(drm_device_t *dev);
extern d_ioctl_t drm_agp_acquire; extern void DRM(dma_takedown)(drm_device_t *dev);
extern d_ioctl_t drm_agp_release; extern void DRM(free_buffer)(drm_device_t *dev, drm_buf_t *buf);
extern d_ioctl_t drm_agp_enable; extern void DRM(reclaim_buffers)(drm_device_t *dev, pid_t pid);
extern d_ioctl_t drm_agp_info; #if __HAVE_OLD_DMA
extern d_ioctl_t drm_agp_alloc; /* GH: This is a dirty hack for now...
extern d_ioctl_t drm_agp_free; */
extern d_ioctl_t drm_agp_unbind; extern void DRM(clear_next_buffer)(drm_device_t *dev);
extern d_ioctl_t drm_agp_bind; extern int DRM(select_queue)(drm_device_t *dev,
void (*wrapper)(unsigned long));
extern int DRM(dma_enqueue)(drm_device_t *dev, drm_dma_t *dma);
extern int DRM(dma_get_buffers)(drm_device_t *dev, drm_dma_t *dma);
#endif
#if __HAVE_DMA_IRQ
extern int DRM(irq_install)( drm_device_t *dev, int irq );
extern int DRM(irq_uninstall)( drm_device_t *dev );
extern void DRM(dma_service)( DRM_OS_IRQ_ARGS );
#if __HAVE_DMA_IRQ_BH
extern void DRM(dma_immediate_bh)( DRM_OS_TASKQUEUE_ARGS );
#endif #endif
#endif #endif
#if DRM_DMA_HISTOGRAM
extern int DRM(histogram_slot)(unsigned long count);
extern void DRM(histogram_compute)(drm_device_t *dev, drm_buf_t *buf);
#endif
/* Buffer list support (drm_lists.h) */
#if __HAVE_DMA_WAITLIST
extern int DRM(waitlist_create)(drm_waitlist_t *bl, int count);
extern int DRM(waitlist_destroy)(drm_waitlist_t *bl);
extern int DRM(waitlist_put)(drm_waitlist_t *bl, drm_buf_t *buf);
extern drm_buf_t *DRM(waitlist_get)(drm_waitlist_t *bl);
#endif
#if __HAVE_DMA_FREELIST
extern int DRM(freelist_create)(drm_freelist_t *bl, int count);
extern int DRM(freelist_destroy)(drm_freelist_t *bl);
extern int DRM(freelist_put)(drm_device_t *dev, drm_freelist_t *bl,
drm_buf_t *buf);
extern drm_buf_t *DRM(freelist_get)(drm_freelist_t *bl, int block);
#endif
#endif /* __HAVE_DMA */
#if __REALLY_HAVE_AGP
/* AGP/GART support (drm_agpsupport.h) */
extern drm_agp_head_t *DRM(agp_init)(void);
extern void DRM(agp_uninit)(void);
extern void DRM(agp_do_release)(void);
extern agp_memory *DRM(agp_allocate_memory)(size_t pages, u32 type);
extern int DRM(agp_free_memory)(agp_memory *handle);
extern int DRM(agp_bind_memory)(agp_memory *handle, off_t start);
extern int DRM(agp_unbind_memory)(agp_memory *handle);
#endif
/* Proc support (drm_proc.h) */
extern struct proc_dir_entry *DRM(proc_init)(drm_device_t *dev,
int minor,
struct proc_dir_entry *root,
struct proc_dir_entry **dev_root);
extern int DRM(proc_cleanup)(int minor,
struct proc_dir_entry *root,
struct proc_dir_entry *dev_root);
#if __HAVE_SG
/* Scatter Gather Support (drm_scatter.h) */
extern void DRM(sg_cleanup)(drm_sg_mem_t *entry);
#endif
#if __REALLY_HAVE_SG
/* ATI PCIGART support (ati_pcigart.h) */
extern int DRM(ati_pcigart_init)(drm_device_t *dev,
unsigned long *addr,
dma_addr_t *bus_addr);
extern int DRM(ati_pcigart_cleanup)(drm_device_t *dev,
unsigned long addr,
dma_addr_t bus_addr);
#endif
#endif /* __KERNEL__ */
#endif #endif


@ -1,10 +1,10 @@
 # $FreeBSD$
-KMOD = tdfx
-SRCS = tdfx_drv.c tdfx_context.c
-SRCS += device_if.h bus_if.h pci_if.h
-CFLAGS += ${DEBUG_FLAGS} -I. -I..
-KMODDEPS = drm
+KMOD= tdfx
+NOMAN= YES
+SRCS= tdfx_drv.c
+SRCS+= device_if.h bus_if.h pci_if.h opt_drm_linux.h
+CFLAGS+= ${DEBUG_FLAGS} -I. -I..
 @:
 	ln -sf /sys @
@ -12,4 +12,14 @@ KMODDEPS = drm
 machine:
 	ln -sf /sys/i386/include machine
+.if ${MACHINE_ARCH} == "i386"
+# This line enables linux ioctl handling
+# If you want support for this uncomment this line
+#TDFX_OPTS= "\#define DRM_LINUX" 1
+.endif
+opt_drm_linux.h:
+	touch opt_drm_linux.h
+	echo $(TDFX_OPTS) >> opt_drm_linux.h
 .include <bsd.kmod.mk>


@ -1,28 +1,4 @@
-XCOMM $XFree86: xc/programs/Xserver/hw/xfree86/os-support/bsd/drm/kernel/Imakefile,v 1.6 2001/04/18 14:52:43 dawes Exp $
+XCOMM $XFree86: xc/programs/Xserver/hw/xfree86/os-support/bsd/drm/kernel/Imakefile,v 1.8 2001/12/13 00:24:45 alanh Exp $
-#include <Server.tmpl>
-#if 0
-LinkSourceFile(xf86drm.c,..)
-LinkSourceFile(xf86drmHash.c,..)
-LinkSourceFile(xf86drmRandom.c,..)
-LinkSourceFile(xf86drmSL.c,..)
-LinkSourceFile(xf86drm.h,$(XF86OSSRC))
-LinkSourceFile(xf86_OSproc.h,$(XF86OSSRC))
-LinkSourceFile(sigio.c,$(XF86OSSRC)/shared)
-#endif
-XCOMM Try to use the Linux version of the DRM headers. This avoids skew
-XCOMM and missing headers. If there's a need to break them out, they
-XCOMM can be re-added later. If not, they can be moved to somewhere more
-XCOMM OS-independent and referenced from there.
-LinkSourceFile(drm.h,$(XF86OSSRC)/linux/drm/kernel)
-LinkSourceFile(i810_drm.h,$(XF86OSSRC)/linux/drm/kernel)
-LinkSourceFile(mga_drm.h,$(XF86OSSRC)/linux/drm/kernel)
-LinkSourceFile(r128_drm.h,$(XF86OSSRC)/linux/drm/kernel)
-LinkSourceFile(radeon_drm.h,$(XF86OSSRC)/linux/drm/kernel)
-LinkSourceFile(sis_drm.h,$(XF86OSSRC)/linux/drm/kernel)
 XCOMM This is a kludge until we determine how best to build the
 XCOMM kernel-specific device driver. This allows us to continue
@ -37,7 +13,7 @@ install::
 	$(MAKE) -f Makefile.bsd install
 #else
 all::
-	@echo 'Use "make -f Makefile.bsd" to manually build drm.o'
+	@echo 'Use "make -f Makefile.bsd" to manually build the modules'
 #endif
 clean::


@ -1,5 +1,6 @@
 # $FreeBSD$
-SUBDIR = drm tdfx mga gamma # i810, i830 & sis are not complete
+SUBDIR = tdfx mga r128 radeon gamma # i810 sis i830
 .include <bsd.subdir.mk>


bsd/drm.h (new file, 501 lines)

@ -0,0 +1,501 @@
/* drm.h -- Header for Direct Rendering Manager -*- linux-c -*-
* Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
*
* Acknowledgements:
* Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic cmpxchg.
*
*/
#ifndef _DRM_H_
#define _DRM_H_
#include <sys/ioccom.h>
#define DRM_IOCTL_NR(n) ((n) & 0xff)
#define XFREE86_VERSION(major,minor,patch,snap) \
((major << 16) | (minor << 8) | patch)
#ifndef CONFIG_XFREE86_VERSION
#define CONFIG_XFREE86_VERSION XFREE86_VERSION(4,1,0,0)
#endif
#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0)
#define DRM_PROC_DEVICES "/proc/devices"
#define DRM_PROC_MISC "/proc/misc"
#define DRM_PROC_DRM "/proc/drm"
#define DRM_DEV_DRM "/dev/drm"
#define DRM_DEV_MODE (S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP)
#define DRM_DEV_UID 0
#define DRM_DEV_GID 0
#endif
#if CONFIG_XFREE86_VERSION >= XFREE86_VERSION(4,1,0,0)
#define DRM_MAJOR 226
#define DRM_MAX_MINOR 15
#endif
#define DRM_NAME "drm" /* Name in kernel, /dev, and /proc */
#define DRM_MIN_ORDER 5 /* At least 2^5 bytes = 32 bytes */
#define DRM_MAX_ORDER 22 /* Up to 2^22 bytes = 4MB */
#define DRM_RAM_PERCENT 50 /* How much system ram can we lock? */
#define _DRM_LOCK_HELD 0x80000000 /* Hardware lock is held */
#define _DRM_LOCK_CONT 0x40000000 /* Hardware lock is contended */
#define _DRM_LOCK_IS_HELD(lock) ((lock) & _DRM_LOCK_HELD)
#define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT)
#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
typedef unsigned long drm_handle_t;
typedef unsigned int drm_context_t;
typedef unsigned int drm_drawable_t;
typedef unsigned int drm_magic_t;
/* Warning: If you change this structure, make sure you change
* XF86DRIClipRectRec in the server as well */
typedef struct drm_clip_rect {
unsigned short x1;
unsigned short y1;
unsigned short x2;
unsigned short y2;
} drm_clip_rect_t;
typedef struct drm_tex_region {
unsigned char next;
unsigned char prev;
unsigned char in_use;
unsigned char padding;
unsigned int age;
} drm_tex_region_t;
/* Seperate include files for the driver specific structures */
#include "mga_drm.h"
#include "i810_drm.h"
#include "i830_drm.h"
#include "r128_drm.h"
#include "radeon_drm.h"
#include "sis_drm.h"
typedef struct drm_version {
int version_major; /* Major version */
int version_minor; /* Minor version */
int version_patchlevel;/* Patch level */
size_t name_len; /* Length of name buffer */
char *name; /* Name of driver */
size_t date_len; /* Length of date buffer */
char *date; /* User-space buffer to hold date */
size_t desc_len; /* Length of desc buffer */
char *desc; /* User-space buffer to hold desc */
} drm_version_t;
typedef struct drm_unique {
size_t unique_len; /* Length of unique */
char *unique; /* Unique name for driver instantiation */
} drm_unique_t;
typedef struct drm_list {
int count; /* Length of user-space structures */
drm_version_t *version;
} drm_list_t;
typedef struct drm_block {
int unused;
} drm_block_t;
typedef struct drm_control {
enum {
DRM_ADD_COMMAND,
DRM_RM_COMMAND,
DRM_INST_HANDLER,
DRM_UNINST_HANDLER
} func;
int irq;
} drm_control_t;
typedef enum drm_map_type {
_DRM_FRAME_BUFFER = 0, /* WC (no caching), no core dump */
_DRM_REGISTERS = 1, /* no caching, no core dump */
_DRM_SHM = 2, /* shared, cached */
_DRM_AGP = 3, /* AGP/GART */
_DRM_SCATTER_GATHER = 4 /* Scatter/gather memory for PCI DMA */
} drm_map_type_t;
typedef enum drm_map_flags {
_DRM_RESTRICTED = 0x01, /* Cannot be mapped to user-virtual */
_DRM_READ_ONLY = 0x02,
_DRM_LOCKED = 0x04, /* shared, cached, locked */
_DRM_KERNEL = 0x08, /* kernel requires access */
_DRM_WRITE_COMBINING = 0x10, /* use write-combining if available */
_DRM_CONTAINS_LOCK = 0x20, /* SHM page that contains lock */
_DRM_REMOVABLE = 0x40 /* Removable mapping */
} drm_map_flags_t;
typedef struct drm_ctx_priv_map {
unsigned int ctx_id; /* Context requesting private mapping */
void *handle; /* Handle of map */
} drm_ctx_priv_map_t;
typedef struct drm_map {
unsigned long offset; /* Requested physical address (0 for SAREA)*/
unsigned long size; /* Requested physical size (bytes) */
drm_map_type_t type; /* Type of memory to map */
drm_map_flags_t flags; /* Flags */
void *handle; /* User-space: "Handle" to pass to mmap */
/* Kernel-space: kernel-virtual address */
int mtrr; /* MTRR slot used */
/* Private data */
} drm_map_t;
typedef struct drm_client {
int idx; /* Which client desired? */
int auth; /* Is client authenticated? */
unsigned long pid; /* Process id */
unsigned long uid; /* User id */
unsigned long magic; /* Magic */
unsigned long iocs; /* Ioctl count */
} drm_client_t;
typedef enum {
_DRM_STAT_LOCK,
_DRM_STAT_OPENS,
_DRM_STAT_CLOSES,
_DRM_STAT_IOCTLS,
_DRM_STAT_LOCKS,
_DRM_STAT_UNLOCKS,
_DRM_STAT_VALUE, /* Generic value */
_DRM_STAT_BYTE, /* Generic byte counter (1024bytes/K) */
_DRM_STAT_COUNT, /* Generic non-byte counter (1000/k) */
_DRM_STAT_IRQ, /* IRQ */
_DRM_STAT_PRIMARY, /* Primary DMA bytes */
_DRM_STAT_SECONDARY, /* Secondary DMA bytes */
_DRM_STAT_DMA, /* DMA */
_DRM_STAT_SPECIAL, /* Special DMA (e.g., priority or polled) */
_DRM_STAT_MISSED /* Missed DMA opportunity */
/* Add to the *END* of the list */
} drm_stat_type_t;
typedef struct drm_stats {
unsigned long count;
struct {
unsigned long value;
drm_stat_type_t type;
} data[15];
} drm_stats_t;
typedef enum drm_lock_flags {
_DRM_LOCK_READY = 0x01, /* Wait until hardware is ready for DMA */
_DRM_LOCK_QUIESCENT = 0x02, /* Wait until hardware quiescent */
_DRM_LOCK_FLUSH = 0x04, /* Flush this context's DMA queue first */
_DRM_LOCK_FLUSH_ALL = 0x08, /* Flush all DMA queues first */
/* These *HALT* flags aren't supported yet
-- they will be used to support the
full-screen DGA-like mode. */
_DRM_HALT_ALL_QUEUES = 0x10, /* Halt all current and future queues */
_DRM_HALT_CUR_QUEUES = 0x20 /* Halt all current queues */
} drm_lock_flags_t;
typedef struct drm_lock {
int context;
drm_lock_flags_t flags;
} drm_lock_t;
typedef enum drm_dma_flags { /* These values *MUST* match xf86drm.h */
/* Flags for DMA buffer dispatch */
_DRM_DMA_BLOCK = 0x01, /* Block until buffer dispatched.
Note, the buffer may not yet have
been processed by the hardware --
getting a hardware lock with the
hardware quiescent will ensure
that the buffer has been
processed. */
_DRM_DMA_WHILE_LOCKED = 0x02, /* Dispatch while lock held */
_DRM_DMA_PRIORITY = 0x04, /* High priority dispatch */
/* Flags for DMA buffer request */
_DRM_DMA_WAIT = 0x10, /* Wait for free buffers */
_DRM_DMA_SMALLER_OK = 0x20, /* Smaller-than-requested buffers ok */
_DRM_DMA_LARGER_OK = 0x40 /* Larger-than-requested buffers ok */
} drm_dma_flags_t;
typedef struct drm_buf_desc {
int count; /* Number of buffers of this size */
int size; /* Size in bytes */
int low_mark; /* Low water mark */
int high_mark; /* High water mark */
enum {
_DRM_PAGE_ALIGN = 0x01, /* Align on page boundaries for DMA */
_DRM_AGP_BUFFER = 0x02, /* Buffer is in agp space */
_DRM_SG_BUFFER = 0x04 /* Scatter/gather memory buffer */
} flags;
unsigned long agp_start; /* Start address of where the agp buffers
* are in the agp aperture */
} drm_buf_desc_t;
typedef struct drm_buf_info {
int count; /* Entries in list */
drm_buf_desc_t *list;
} drm_buf_info_t;
typedef struct drm_buf_free {
int count;
int *list;
} drm_buf_free_t;
typedef struct drm_buf_pub {
int idx; /* Index into master buflist */
int total; /* Buffer size */
int used; /* Amount of buffer in use (for DMA) */
void *address; /* Address of buffer */
} drm_buf_pub_t;
typedef struct drm_buf_map {
int count; /* Length of buflist */
void *virtual; /* Mmaped area in user-virtual */
drm_buf_pub_t *list; /* Buffer information */
} drm_buf_map_t;
typedef struct drm_dma {
/* Indices here refer to the offset into
buflist in drm_buf_get_t. */
int context; /* Context handle */
int send_count; /* Number of buffers to send */
int *send_indices; /* List of handles to buffers */
int *send_sizes; /* Lengths of data to send */
drm_dma_flags_t flags; /* Flags */
int request_count; /* Number of buffers requested */
int request_size; /* Desired size for buffers */
int *request_indices; /* Buffer information */
int *request_sizes;
int granted_count; /* Number of buffers granted */
} drm_dma_t;
typedef enum {
_DRM_CONTEXT_PRESERVED = 0x01,
_DRM_CONTEXT_2DONLY = 0x02
} drm_ctx_flags_t;
typedef struct drm_ctx {
drm_context_t handle;
drm_ctx_flags_t flags;
} drm_ctx_t;
typedef struct drm_ctx_res {
int count;
drm_ctx_t *contexts;
} drm_ctx_res_t;
typedef struct drm_draw {
drm_drawable_t handle;
} drm_draw_t;
typedef struct drm_auth {
drm_magic_t magic;
} drm_auth_t;
typedef struct drm_irq_busid {
int irq;
int busnum;
int devnum;
int funcnum;
} drm_irq_busid_t;
typedef struct drm_agp_mode {
unsigned long mode;
} drm_agp_mode_t;
/* For drm_agp_alloc -- allocated a buffer */
typedef struct drm_agp_buffer {
unsigned long size; /* In bytes -- will round to page boundary */
unsigned long handle; /* Used for BIND/UNBIND ioctls */
unsigned long type; /* Type of memory to allocate */
unsigned long physical; /* Physical used by i810 */
} drm_agp_buffer_t;
/* For drm_agp_bind */
typedef struct drm_agp_binding {
unsigned long handle; /* From drm_agp_buffer */
unsigned long offset; /* In bytes -- will round to page boundary */
} drm_agp_binding_t;
typedef struct drm_agp_info {
int agp_version_major;
int agp_version_minor;
unsigned long mode;
unsigned long aperture_base; /* physical address */
unsigned long aperture_size; /* bytes */
unsigned long memory_allowed; /* bytes */
unsigned long memory_used;
/* PCI information */
unsigned short id_vendor;
unsigned short id_device;
} drm_agp_info_t;
typedef struct drm_scatter_gather {
unsigned long size; /* In bytes -- will round to page boundary */
unsigned long handle; /* Used for mapping / unmapping */
} drm_scatter_gather_t;
#define DRM_IOCTL_BASE 'd'
#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
#define DRM_IOR(nr,size) _IOR(DRM_IOCTL_BASE,nr,size)
#define DRM_IOW(nr,size) _IOW(DRM_IOCTL_BASE,nr,size)
#define DRM_IOWR(nr,size) _IOWR(DRM_IOCTL_BASE,nr,size)
#define DRM_IOCTL_VERSION DRM_IOWR(0x00, drm_version_t)
#define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, drm_unique_t)
#define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, drm_auth_t)
#define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, drm_irq_busid_t)
#define DRM_IOCTL_GET_MAP DRM_IOWR(0x04, drm_map_t)
#define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, drm_client_t)
#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, drm_stats_t)
#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, drm_unique_t)
#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, drm_auth_t)
#define DRM_IOCTL_BLOCK DRM_IOWR(0x12, drm_block_t)
#define DRM_IOCTL_UNBLOCK DRM_IOWR(0x13, drm_block_t)
#define DRM_IOCTL_CONTROL DRM_IOW( 0x14, drm_control_t)
#define DRM_IOCTL_ADD_MAP DRM_IOWR(0x15, drm_map_t)
#define DRM_IOCTL_ADD_BUFS DRM_IOWR(0x16, drm_buf_desc_t)
#define DRM_IOCTL_MARK_BUFS DRM_IOW( 0x17, drm_buf_desc_t)
#define DRM_IOCTL_INFO_BUFS DRM_IOWR(0x18, drm_buf_info_t)
#define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, drm_buf_map_t)
#define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, drm_buf_free_t)
#define DRM_IOCTL_RM_MAP DRM_IOW( 0x1b, drm_map_t)
#define DRM_IOCTL_SET_SAREA_CTX DRM_IOW( 0x1c, drm_ctx_priv_map_t)
#define DRM_IOCTL_GET_SAREA_CTX DRM_IOWR(0x1d, drm_ctx_priv_map_t)
#define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, drm_ctx_t)
#define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, drm_ctx_t)
#define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, drm_ctx_t)
#define DRM_IOCTL_GET_CTX DRM_IOWR(0x23, drm_ctx_t)
#define DRM_IOCTL_SWITCH_CTX DRM_IOW( 0x24, drm_ctx_t)
#define DRM_IOCTL_NEW_CTX DRM_IOW( 0x25, drm_ctx_t)
#define DRM_IOCTL_RES_CTX DRM_IOWR(0x26, drm_ctx_res_t)
#define DRM_IOCTL_ADD_DRAW DRM_IOWR(0x27, drm_draw_t)
#define DRM_IOCTL_RM_DRAW DRM_IOWR(0x28, drm_draw_t)
#define DRM_IOCTL_DMA DRM_IOWR(0x29, drm_dma_t)
#define DRM_IOCTL_LOCK DRM_IOW( 0x2a, drm_lock_t)
#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, drm_lock_t)
#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, drm_lock_t)
#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30)
#define DRM_IOCTL_AGP_RELEASE DRM_IO( 0x31)
#define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, drm_agp_mode_t)
#define DRM_IOCTL_AGP_INFO DRM_IOR( 0x33, drm_agp_info_t)
#define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, drm_agp_buffer_t)
#define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, drm_agp_buffer_t)
#define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, drm_agp_binding_t)
#define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, drm_agp_binding_t)
#define DRM_IOCTL_SG_ALLOC DRM_IOW( 0x38, drm_scatter_gather_t)
#define DRM_IOCTL_SG_FREE DRM_IOW( 0x39, drm_scatter_gather_t)
/* MGA specific ioctls */
#define DRM_IOCTL_MGA_INIT DRM_IOW( 0x40, drm_mga_init_t)
#define DRM_IOCTL_MGA_FLUSH DRM_IOW( 0x41, drm_lock_t)
#define DRM_IOCTL_MGA_RESET DRM_IO( 0x42)
#define DRM_IOCTL_MGA_SWAP DRM_IO( 0x43)
#define DRM_IOCTL_MGA_CLEAR DRM_IOW( 0x44, drm_mga_clear_t)
#define DRM_IOCTL_MGA_VERTEX DRM_IOW( 0x45, drm_mga_vertex_t)
#define DRM_IOCTL_MGA_INDICES DRM_IOW( 0x46, drm_mga_indices_t)
#define DRM_IOCTL_MGA_ILOAD DRM_IOW( 0x47, drm_mga_iload_t)
#define DRM_IOCTL_MGA_BLIT DRM_IOW( 0x48, drm_mga_blit_t)
/* i810 specific ioctls */
#define DRM_IOCTL_I810_INIT DRM_IOW( 0x40, drm_i810_init_t)
#define DRM_IOCTL_I810_VERTEX DRM_IOW( 0x41, drm_i810_vertex_t)
#define DRM_IOCTL_I810_CLEAR DRM_IOW( 0x42, drm_i810_clear_t)
#define DRM_IOCTL_I810_FLUSH DRM_IO( 0x43)
#define DRM_IOCTL_I810_GETAGE DRM_IO( 0x44)
#define DRM_IOCTL_I810_GETBUF DRM_IOWR(0x45, drm_i810_dma_t)
#define DRM_IOCTL_I810_SWAP DRM_IO( 0x46)
#define DRM_IOCTL_I810_COPY DRM_IOW( 0x47, drm_i810_copy_t)
#define DRM_IOCTL_I810_DOCOPY DRM_IO( 0x48)
/* Rage 128 specific ioctls */
#define DRM_IOCTL_R128_INIT DRM_IOW( 0x40, drm_r128_init_t)
#define DRM_IOCTL_R128_CCE_START DRM_IO( 0x41)
#define DRM_IOCTL_R128_CCE_STOP DRM_IOW( 0x42, drm_r128_cce_stop_t)
#define DRM_IOCTL_R128_CCE_RESET DRM_IO( 0x43)
#define DRM_IOCTL_R128_CCE_IDLE DRM_IO( 0x44)
#define DRM_IOCTL_R128_RESET DRM_IO( 0x46)
#define DRM_IOCTL_R128_SWAP DRM_IO( 0x47)
#define DRM_IOCTL_R128_CLEAR DRM_IOW( 0x48, drm_r128_clear_t)
#define DRM_IOCTL_R128_VERTEX DRM_IOW( 0x49, drm_r128_vertex_t)
#define DRM_IOCTL_R128_INDICES DRM_IOW( 0x4a, drm_r128_indices_t)
#define DRM_IOCTL_R128_BLIT DRM_IOW( 0x4b, drm_r128_blit_t)
#define DRM_IOCTL_R128_DEPTH DRM_IOW( 0x4c, drm_r128_depth_t)
#define DRM_IOCTL_R128_STIPPLE DRM_IOW( 0x4d, drm_r128_stipple_t)
#define DRM_IOCTL_R128_INDIRECT DRM_IOWR(0x4f, drm_r128_indirect_t)
#define DRM_IOCTL_R128_FULLSCREEN DRM_IOW( 0x50, drm_r128_fullscreen_t)
/* Radeon specific ioctls */
#define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( 0x40, drm_radeon_init_t)
#define DRM_IOCTL_RADEON_CP_START DRM_IO( 0x41)
#define DRM_IOCTL_RADEON_CP_STOP DRM_IOW( 0x42, drm_radeon_cp_stop_t)
#define DRM_IOCTL_RADEON_CP_RESET DRM_IO( 0x43)
#define DRM_IOCTL_RADEON_CP_IDLE DRM_IO( 0x44)
#define DRM_IOCTL_RADEON_RESET DRM_IO( 0x45)
#define DRM_IOCTL_RADEON_FULLSCREEN DRM_IOW( 0x46, drm_radeon_fullscreen_t)
#define DRM_IOCTL_RADEON_SWAP DRM_IO( 0x47)
#define DRM_IOCTL_RADEON_CLEAR DRM_IOW( 0x48, drm_radeon_clear_t)
#define DRM_IOCTL_RADEON_VERTEX DRM_IOW( 0x49, drm_radeon_vertex_t)
#define DRM_IOCTL_RADEON_INDICES DRM_IOW( 0x4a, drm_radeon_indices_t)
#define DRM_IOCTL_RADEON_STIPPLE DRM_IOW( 0x4c, drm_radeon_stipple_t)
#define DRM_IOCTL_RADEON_INDIRECT DRM_IOWR(0x4d, drm_radeon_indirect_t)
#define DRM_IOCTL_RADEON_TEXTURE DRM_IOWR(0x4e, drm_radeon_texture_t)
/* SiS specific ioctls */
#define SIS_IOCTL_FB_ALLOC DRM_IOWR(0x44, drm_sis_mem_t)
#define SIS_IOCTL_FB_FREE DRM_IOW( 0x45, drm_sis_mem_t)
#define SIS_IOCTL_AGP_INIT DRM_IOWR(0x53, drm_sis_agp_t)
#define SIS_IOCTL_AGP_ALLOC DRM_IOWR(0x54, drm_sis_mem_t)
#define SIS_IOCTL_AGP_FREE DRM_IOW( 0x55, drm_sis_mem_t)
#define SIS_IOCTL_FLIP DRM_IOW( 0x48, drm_sis_flip_t)
#define SIS_IOCTL_FLIP_INIT DRM_IO( 0x49)
#define SIS_IOCTL_FLIP_FINAL DRM_IO( 0x50)
/* I830 specific ioctls */
#define DRM_IOCTL_I830_INIT DRM_IOW( 0x40, drm_i830_init_t)
#define DRM_IOCTL_I830_VERTEX DRM_IOW( 0x41, drm_i830_vertex_t)
#define DRM_IOCTL_I830_CLEAR DRM_IOW( 0x42, drm_i830_clear_t)
#define DRM_IOCTL_I830_FLUSH DRM_IO ( 0x43)
#define DRM_IOCTL_I830_GETAGE DRM_IO ( 0x44)
#define DRM_IOCTL_I830_GETBUF DRM_IOWR(0x45, drm_i830_dma_t)
#define DRM_IOCTL_I830_SWAP DRM_IO ( 0x46)
#define DRM_IOCTL_I830_COPY DRM_IOW( 0x47, drm_i830_copy_t)
#define DRM_IOCTL_I830_DOCOPY DRM_IO ( 0x48)
#endif


@ -1,16 +0,0 @@
# $FreeBSD$
KMOD = drm
SRCS = init.c memory.c auth.c context.c drawable.c bufs.c \
lists.c lock.c ioctl.c fops.c vm.c dma.c sysctl.c \
agpsupport.c ctxbitmap.c
SRCS += device_if.h bus_if.h pci_if.h
CFLAGS += ${DEBUG_FLAGS} -I..
@:
ln -sf /sys @
machine:
ln -sf /sys/i386/include machine
.include <bsd.kmod.mk>


@ -1,270 +0,0 @@
/* agpsupport.c -- DRM support for AGP/GART backend
* Created: Mon Dec 13 09:56:45 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Author: Rickard E. (Rik) Faith <faith@valinux.com>
*
*/
#define __NO_VERSION__
#include "drmP.h"
#ifdef DRM_AGP
#include <pci/agpvar.h>
MODULE_DEPEND(drm, agp, 1, 1, 1);
int
drm_agp_info(dev_t kdev, u_long cmd, caddr_t data,
int flags, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
struct agp_info *kern;
drm_agp_info_t info;
if (!dev->agp->acquired) return EINVAL;
kern = &dev->agp->info;
agp_get_info(dev->agp->agpdev, kern);
info.agp_version_major = 1;
info.agp_version_minor = 0;
info.mode = kern->ai_mode;
info.aperture_base = kern->ai_aperture_base;
info.aperture_size = kern->ai_aperture_size;
info.memory_allowed = kern->ai_memory_allowed;
info.memory_used = kern->ai_memory_used;
info.id_vendor = kern->ai_devid & 0xffff;
info.id_device = kern->ai_devid >> 16;
*(drm_agp_info_t *) data = info;
return 0;
}
int
drm_agp_acquire(dev_t kdev, u_long cmd, caddr_t data,
int flags, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
int retcode;
if (dev->agp->acquired) return EINVAL;
retcode = agp_acquire(dev->agp->agpdev);
if (retcode) return retcode;
dev->agp->acquired = 1;
return 0;
}
int
drm_agp_release(dev_t kdev, u_long cmd, caddr_t data,
int flags, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
if (!dev->agp->acquired) return EINVAL;
agp_release(dev->agp->agpdev);
dev->agp->acquired = 0;
return 0;
}
int
drm_agp_enable(dev_t kdev, u_long cmd, caddr_t data,
int flags, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
drm_agp_mode_t mode;
if (!dev->agp->acquired) return EINVAL;
mode = *(drm_agp_mode_t *) data;
dev->agp->mode = mode.mode;
agp_enable(dev->agp->agpdev, mode.mode);
dev->agp->base = dev->agp->info.ai_aperture_base;
dev->agp->enabled = 1;
return 0;
}
int drm_agp_alloc(dev_t kdev, u_long cmd, caddr_t data,
int flags, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
drm_agp_buffer_t request;
drm_agp_mem_t *entry;
void *handle;
unsigned long pages;
u_int32_t type;
struct agp_memory_info info;
if (!dev->agp->acquired) return EINVAL;
request = *(drm_agp_buffer_t *) data;
if (!(entry = drm_alloc(sizeof(*entry), DRM_MEM_AGPLISTS)))
return ENOMEM;
memset(entry, 0, sizeof(*entry));
pages = (request.size + PAGE_SIZE - 1) / PAGE_SIZE;
type = (u_int32_t) request.type;
if (!(handle = drm_alloc_agp(pages, type))) {
drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
return ENOMEM;
}
entry->handle = handle;
entry->bound = 0;
entry->pages = pages;
entry->prev = NULL;
entry->next = dev->agp->memory;
if (dev->agp->memory) dev->agp->memory->prev = entry;
dev->agp->memory = entry;
agp_memory_info(dev->agp->agpdev, entry->handle, &info);
request.handle = (unsigned long) entry->handle;
request.physical = info.ami_physical;
*(drm_agp_buffer_t *) data = request;
return 0;
}
static drm_agp_mem_t *
drm_agp_lookup_entry(drm_device_t *dev, void *handle)
{
drm_agp_mem_t *entry;
for (entry = dev->agp->memory; entry; entry = entry->next) {
if (entry->handle == handle) return entry;
}
return NULL;
}
int
drm_agp_unbind(dev_t kdev, u_long cmd, caddr_t data,
int flags, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
drm_agp_binding_t request;
drm_agp_mem_t *entry;
if (!dev->agp->acquired) return EINVAL;
request = *(drm_agp_binding_t *) data;
if (!(entry = drm_agp_lookup_entry(dev, (void *) request.handle)))
return EINVAL;
if (!entry->bound) return EINVAL;
return drm_unbind_agp(entry->handle);
}
int drm_agp_bind(dev_t kdev, u_long cmd, caddr_t data,
int flags, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
drm_agp_binding_t request;
drm_agp_mem_t *entry;
int retcode;
int page;
if (!dev->agp->acquired) return EINVAL;
request = *(drm_agp_binding_t *) data;
if (!(entry = drm_agp_lookup_entry(dev, (void *) request.handle)))
return EINVAL;
if (entry->bound) return EINVAL;
page = (request.offset + PAGE_SIZE - 1) / PAGE_SIZE;
if ((retcode = drm_bind_agp(entry->handle, page))) return retcode;
entry->bound = dev->agp->base + (page << PAGE_SHIFT);
return 0;
}
int drm_agp_free(dev_t kdev, u_long cmd, caddr_t data,
int flags, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
drm_agp_buffer_t request;
drm_agp_mem_t *entry;
if (!dev->agp->acquired) return EINVAL;
request = *(drm_agp_buffer_t *) data;
if (!(entry = drm_agp_lookup_entry(dev, (void*) request.handle)))
return EINVAL;
if (entry->bound) drm_unbind_agp(entry->handle);
if (entry->prev) entry->prev->next = entry->next;
else dev->agp->memory = entry->next;
if (entry->next) entry->next->prev = entry->prev;
drm_free_agp(entry->handle, entry->pages);
drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
return 0;
}
drm_agp_head_t *drm_agp_init(void)
{
device_t agpdev;
drm_agp_head_t *head = NULL;
int agp_available = 1;
agpdev = agp_find_device();
if (!agpdev)
agp_available = 0;
DRM_DEBUG("agp_available = %d\n", agp_available);
if (agp_available) {
if (!(head = drm_alloc(sizeof(*head), DRM_MEM_AGPLISTS)))
return NULL;
memset((void *)head, 0, sizeof(*head));
head->agpdev = agpdev;
agp_get_info(agpdev, &head->info);
head->memory = NULL;
#if 0 /* bogus */
switch (head->agp_info.chipset) {
case INTEL_GENERIC: head->chipset = "Intel"; break;
case INTEL_LX: head->chipset = "Intel 440LX"; break;
case INTEL_BX: head->chipset = "Intel 440BX"; break;
case INTEL_GX: head->chipset = "Intel 440GX"; break;
case INTEL_I810: head->chipset = "Intel i810"; break;
case VIA_GENERIC: head->chipset = "VIA"; break;
case VIA_VP3: head->chipset = "VIA VP3"; break;
case VIA_MVP3: head->chipset = "VIA MVP3"; break;
case VIA_APOLLO_PRO: head->chipset = "VIA Apollo Pro"; break;
case SIS_GENERIC: head->chipset = "SiS"; break;
case AMD_GENERIC: head->chipset = "AMD"; break;
case AMD_IRONGATE: head->chipset = "AMD Irongate"; break;
case ALI_GENERIC: head->chipset = "ALi"; break;
case ALI_M1541: head->chipset = "ALi M1541"; break;
default:
}
#endif
DRM_INFO("AGP at 0x%08x %dMB\n",
head->info.ai_aperture_base,
head->info.ai_aperture_size >> 20);
}
return head;
}
#endif /* DRM_AGP */

@ -1,168 +0,0 @@
/* auth.c -- IOCTLs for authentication -*- c -*-
* Created: Tue Feb 2 08:37:54 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
*
*/
#define __NO_VERSION__
#include "drmP.h"
static int drm_hash_magic(drm_magic_t magic)
{
return magic & (DRM_HASH_SIZE-1);
}
static drm_file_t *drm_find_file(drm_device_t *dev, drm_magic_t magic)
{
drm_file_t *retval = NULL;
drm_magic_entry_t *pt;
int hash = drm_hash_magic(magic);
lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curproc);
for (pt = dev->magiclist[hash].head; pt; pt = pt->next) {
if (pt->priv->authenticated) continue;
if (pt->magic == magic) {
retval = pt->priv;
break;
}
}
lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
return retval;
}
int drm_add_magic(drm_device_t *dev, drm_file_t *priv, drm_magic_t magic)
{
int hash;
drm_magic_entry_t *entry;
DRM_DEBUG("%d\n", magic);
hash = drm_hash_magic(magic);
entry = drm_alloc(sizeof(*entry), DRM_MEM_MAGIC);
if (!entry) return ENOMEM;
entry->magic = magic;
entry->priv = priv;
entry->next = NULL;
lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curproc);
if (dev->magiclist[hash].tail) {
dev->magiclist[hash].tail->next = entry;
dev->magiclist[hash].tail = entry;
} else {
dev->magiclist[hash].head = entry;
dev->magiclist[hash].tail = entry;
}
lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
return 0;
}
int drm_remove_magic(drm_device_t *dev, drm_magic_t magic)
{
drm_magic_entry_t *prev = NULL;
drm_magic_entry_t *pt;
int hash;
DRM_DEBUG("%d\n", magic);
hash = drm_hash_magic(magic);
lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curproc);
for (pt = dev->magiclist[hash].head; pt; prev = pt, pt = pt->next) {
if (pt->magic == magic) {
if (dev->magiclist[hash].head == pt) {
dev->magiclist[hash].head = pt->next;
}
if (dev->magiclist[hash].tail == pt) {
dev->magiclist[hash].tail = prev;
}
if (prev) {
prev->next = pt->next;
}
lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
return 0;
}
}
lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
return EINVAL;
}
int drm_getmagic(dev_t kdev, u_long cmd, caddr_t data,
int flags, struct proc *p)
{
static drm_magic_t sequence = 0;
#if 0
static struct simplelock lock; /* XXX */
#endif
drm_device_t *dev = kdev->si_drv1;
drm_file_t *priv;
drm_auth_t auth;
/* Find unique magic */
priv = drm_find_file_by_proc(dev, p);
if (!priv) {
DRM_DEBUG("can't find file structure\n");
return EINVAL;
}
if (priv->magic) {
auth.magic = priv->magic;
} else {
do {
simple_lock(&lock);
if (!sequence) ++sequence; /* reserve 0 */
auth.magic = sequence++;
simple_unlock(&lock);
} while (drm_find_file(dev, auth.magic));
priv->magic = auth.magic;
drm_add_magic(dev, priv, auth.magic);
}
DRM_DEBUG("%u\n", auth.magic);
*(drm_auth_t *) data = auth;
return 0;
}
int drm_authmagic(dev_t kdev, u_long cmd, caddr_t data,
int flags, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
drm_auth_t auth;
drm_file_t *file;
auth = *(drm_auth_t *) data;
DRM_DEBUG("%u\n", auth.magic);
if ((file = drm_find_file(dev, auth.magic))) {
file->authenticated = 1;
drm_remove_magic(dev, auth.magic);
return 0;
}
return EINVAL;
}

@ -1,500 +0,0 @@
/* bufs.c -- IOCTLs to manage buffers -*- c -*-
* Created: Tue Feb 2 08:37:54 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
*
*/
#define __NO_VERSION__
#include "drmP.h"
#include <sys/mman.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
/* Compute order. Can be made faster. */
int drm_order(unsigned long size)
{
int order;
unsigned long tmp;
for (order = 0, tmp = size; tmp >>= 1; ++order);
if (size & ~(1 << order)) ++order;
return order;
}
int drm_addmap(dev_t kdev, u_long cmd, caddr_t data,
int flags, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
drm_map_t *map;
if (!(dev->flags & (FREAD|FWRITE)))
return EACCES; /* Require read/write */
map = drm_alloc(sizeof(*map), DRM_MEM_MAPS);
if (!map) return ENOMEM;
*map = *(drm_map_t *) data;
DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n",
map->offset, map->size, map->type);
if ((map->offset & (PAGE_SIZE-1)) || (map->size & (PAGE_SIZE-1))) {
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
DRM_DEBUG("offset or size not page aligned\n");
return EINVAL;
}
map->mtrr = -1;
map->handle = 0;
switch (map->type) {
case _DRM_REGISTERS:
case _DRM_FRAME_BUFFER:
if (map->offset + map->size < map->offset
/* || map->offset < virt_to_phys(high_memory) */) {
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
DRM_DEBUG("bad frame buffer size\n");
return EINVAL;
}
#ifdef CONFIG_MTRR
if (map->type == _DRM_FRAME_BUFFER
|| (map->flags & _DRM_WRITE_COMBINING)) {
map->mtrr = mtrr_add(map->offset, map->size,
MTRR_TYPE_WRCOMB, 1);
}
#endif
map->handle = drm_ioremap(map->offset, map->size);
break;
case _DRM_SHM:
DRM_DEBUG("%ld %d\n", map->size, drm_order(map->size));
map->handle = (void *)drm_alloc_pages(drm_order(map->size)
- PAGE_SHIFT,
DRM_MEM_SAREA);
if (!map->handle) {
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
return ENOMEM;
}
map->offset = (unsigned long)map->handle;
if (map->flags & _DRM_CONTAINS_LOCK) {
dev->lock.hw_lock = map->handle; /* Pointer to lock */
}
break;
#ifdef DRM_AGP
case _DRM_AGP:
map->offset = map->offset + dev->agp->base;
break;
#endif
default:
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
DRM_DEBUG("bad type\n");
return EINVAL;
}
lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curproc);
if (dev->maplist) {
++dev->map_count;
dev->maplist = drm_realloc(dev->maplist,
(dev->map_count-1)
* sizeof(*dev->maplist),
dev->map_count
* sizeof(*dev->maplist),
DRM_MEM_MAPS);
} else {
dev->map_count = 1;
dev->maplist = drm_alloc(dev->map_count*sizeof(*dev->maplist),
DRM_MEM_MAPS);
}
dev->maplist[dev->map_count-1] = map;
lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
*(drm_map_t *) data = *map;
if (map->type != _DRM_SHM)
((drm_map_t *)data)->handle = (void *) map->offset;
return 0;
}
int drm_addbufs(dev_t kdev, u_long cmd, caddr_t data,
int flags, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
drm_device_dma_t *dma = dev->dma;
drm_buf_desc_t request;
int count;
int order;
int size;
int total;
int page_order;
drm_buf_entry_t *entry;
unsigned long page;
drm_buf_t *buf;
int alignment;
unsigned long offset;
int i;
int byte_count;
int page_count;
if (!dma) return EINVAL;
request = *(drm_buf_desc_t *) data;
count = request.count;
order = drm_order(request.size);
size = 1 << order;
DRM_DEBUG("count = %d, size = %d (%d), order = %d, queue_count = %d\n",
request.count, request.size, size, order, dev->queue_count);
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return EINVAL;
if (dev->queue_count) return EBUSY; /* Not while in use */
alignment = (request.flags & _DRM_PAGE_ALIGN) ? round_page(size) : size;
page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
total = PAGE_SIZE << page_order;
simple_lock(&dev->count_lock);
if (dev->buf_use) {
simple_unlock(&dev->count_lock);
return EBUSY;
}
atomic_inc(&dev->buf_alloc);
simple_unlock(&dev->count_lock);
lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curproc);
entry = &dma->bufs[order];
if (entry->buf_count) {
lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
atomic_dec(&dev->buf_alloc);
return ENOMEM; /* May only call once for each order */
}
entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
DRM_MEM_BUFS);
if (!entry->buflist) {
lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
atomic_dec(&dev->buf_alloc);
return ENOMEM;
}
memset(entry->buflist, 0, count * sizeof(*entry->buflist));
entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
DRM_MEM_SEGS);
if (!entry->seglist) {
drm_free(entry->buflist,
count * sizeof(*entry->buflist),
DRM_MEM_BUFS);
lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
atomic_dec(&dev->buf_alloc);
return ENOMEM;
}
memset(entry->seglist, 0, count * sizeof(*entry->seglist));
dma->pagelist = drm_realloc(dma->pagelist,
dma->page_count * sizeof(*dma->pagelist),
(dma->page_count + (count << page_order))
* sizeof(*dma->pagelist),
DRM_MEM_PAGES);
DRM_DEBUG("pagelist: %d entries\n",
dma->page_count + (count << page_order));
entry->buf_size = size;
entry->page_order = page_order;
byte_count = 0;
page_count = 0;
while (entry->buf_count < count) {
if (!(page = drm_alloc_pages(page_order, DRM_MEM_DMA))) break;
entry->seglist[entry->seg_count++] = page;
for (i = 0; i < (1 << page_order); i++) {
DRM_DEBUG("page %d @ 0x%08lx\n",
dma->page_count + page_count,
page + PAGE_SIZE * i);
dma->pagelist[dma->page_count + page_count++]
= page + PAGE_SIZE * i;
}
for (offset = 0;
offset + size <= total && entry->buf_count < count;
offset += alignment, ++entry->buf_count) {
buf = &entry->buflist[entry->buf_count];
buf->idx = dma->buf_count + entry->buf_count;
buf->total = alignment;
buf->order = order;
buf->used = 0;
buf->offset = (dma->byte_count + byte_count + offset);
buf->address = (void *)(page + offset);
buf->next = NULL;
buf->waiting = 0;
buf->pending = 0;
buf->dma_wait = 0;
buf->pid = 0;
#if DRM_DMA_HISTOGRAM
timespecclear(&buf->time_queued);
timespecclear(&buf->time_dispatched);
timespecclear(&buf->time_completed);
timespecclear(&buf->time_freed);
#endif
DRM_DEBUG("buffer %d @ %p\n",
entry->buf_count, buf->address);
}
byte_count += PAGE_SIZE << page_order;
}
dma->buflist = drm_realloc(dma->buflist,
dma->buf_count * sizeof(*dma->buflist),
(dma->buf_count + entry->buf_count)
* sizeof(*dma->buflist),
DRM_MEM_BUFS);
for (i = dma->buf_count; i < dma->buf_count + entry->buf_count; i++)
dma->buflist[i] = &entry->buflist[i - dma->buf_count];
dma->buf_count += entry->buf_count;
dma->seg_count += entry->seg_count;
dma->page_count += entry->seg_count << page_order;
dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
drm_freelist_create(&entry->freelist, entry->buf_count);
for (i = 0; i < entry->buf_count; i++) {
drm_freelist_put(dev, &entry->freelist, &entry->buflist[i]);
}
lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
request.count = entry->buf_count;
request.size = size;
*(drm_buf_desc_t *) data = request;
atomic_dec(&dev->buf_alloc);
return 0;
}
int drm_infobufs(dev_t kdev, u_long cmd, caddr_t data,
int flags, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
drm_device_dma_t *dma = dev->dma;
drm_buf_info_t request;
int i;
int count;
if (!dma) return EINVAL;
simple_lock(&dev->count_lock);
if (atomic_read(&dev->buf_alloc)) {
simple_unlock(&dev->count_lock);
return EBUSY;
}
++dev->buf_use; /* Can't allocate more after this call */
simple_unlock(&dev->count_lock);
request = *(drm_buf_info_t *) data;
for (i = 0, count = 0; i < DRM_MAX_ORDER+1; i++) {
if (dma->bufs[i].buf_count) ++count;
}
DRM_DEBUG("count = %d\n", count);
if (request.count >= count) {
for (i = 0, count = 0; i < DRM_MAX_ORDER+1; i++) {
if (dma->bufs[i].buf_count) {
int error;
error = copyout(&dma->bufs[i].buf_count,
&request.list[count].count,
sizeof(dma->bufs[0]
.buf_count));
if (error) return error;
error = copyout(&dma->bufs[i].buf_size,
&request.list[count].size,
sizeof(dma->bufs[0].buf_size));
if (error) return error;
error = copyout(&dma->bufs[i]
.freelist.low_mark,
&request.list[count].low_mark,
sizeof(dma->bufs[0]
.freelist.low_mark));
if (error) return error;
error = copyout(&dma->bufs[i]
.freelist.high_mark,
&request.list[count].high_mark,
sizeof(dma->bufs[0]
.freelist.high_mark));
if (error) return error;
DRM_DEBUG("%d %d %d %d %d\n",
i,
dma->bufs[i].buf_count,
dma->bufs[i].buf_size,
dma->bufs[i].freelist.low_mark,
dma->bufs[i].freelist.high_mark);
++count;
}
}
}
request.count = count;
*(drm_buf_info_t *) data = request;
return 0;
}
int drm_markbufs(dev_t kdev, u_long cmd, caddr_t data,
int flags, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
drm_device_dma_t *dma = dev->dma;
drm_buf_desc_t request;
int order;
drm_buf_entry_t *entry;
if (!dma) return EINVAL;
request = *(drm_buf_desc_t *) data;
DRM_DEBUG("%d, %d, %d\n",
request.size, request.low_mark, request.high_mark);
order = drm_order(request.size);
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return EINVAL;
entry = &dma->bufs[order];
if (request.low_mark < 0 || request.low_mark > entry->buf_count)
return EINVAL;
if (request.high_mark < 0 || request.high_mark > entry->buf_count)
return EINVAL;
entry->freelist.low_mark = request.low_mark;
entry->freelist.high_mark = request.high_mark;
return 0;
}
int drm_freebufs(dev_t kdev, u_long cmd, caddr_t data,
int flags, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
drm_device_dma_t *dma = dev->dma;
drm_buf_free_t request;
int i;
int idx;
int error;
drm_buf_t *buf;
if (!dma) return EINVAL;
request = *(drm_buf_free_t *) data;
DRM_DEBUG("%d\n", request.count);
for (i = 0; i < request.count; i++) {
error = copyin(&request.list[i], &idx, sizeof(idx));
if (error)
return error;
if (idx < 0 || idx >= dma->buf_count) {
DRM_ERROR("Index %d (of %d max)\n",
idx, dma->buf_count - 1);
return EINVAL;
}
buf = dma->buflist[idx];
if (buf->pid != p->p_pid) {
DRM_ERROR("Process %d freeing buffer owned by %d\n",
p->p_pid, buf->pid);
return EINVAL;
}
drm_free_buffer(dev, buf);
}
return 0;
}
int drm_mapbufs(dev_t kdev, u_long cmd, caddr_t data,
int flags, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
drm_device_dma_t *dma = dev->dma;
int retcode = 0;
const int zero = 0;
vm_offset_t virtual;
vm_offset_t address;
drm_buf_map_t request;
int i;
if (!dma) return EINVAL;
DRM_DEBUG("\n");
simple_lock(&dev->count_lock);
if (atomic_read(&dev->buf_alloc)) {
simple_unlock(&dev->count_lock);
return EBUSY;
}
++dev->buf_use; /* Can't allocate more after this call */
simple_unlock(&dev->count_lock);
request = *(drm_buf_map_t *) data;
if (request.count >= dma->buf_count) {
virtual = 0;
retcode = vm_mmap(&p->p_vmspace->vm_map,
&virtual,
round_page(dma->byte_count),
PROT_READ|PROT_WRITE, VM_PROT_ALL,
MAP_SHARED,
SLIST_FIRST(&kdev->si_hlist),
0);
if (retcode)
goto done;
request.virtual = (void *)virtual;
for (i = 0; i < dma->buf_count; i++) {
retcode = copyout(&dma->buflist[i]->idx,
&request.list[i].idx,
sizeof(request.list[0].idx));
if (retcode) goto done;
retcode = copyout(&dma->buflist[i]->total,
&request.list[i].total,
sizeof(request.list[0].total));
if (retcode) goto done;
retcode = copyout(&zero,
&request.list[i].used,
sizeof(request.list[0].used));
if (retcode) goto done;
address = virtual + dma->buflist[i]->offset;
retcode = copyout(&address,
&request.list[i].address,
sizeof(address));
if (retcode) goto done;
}
}
done:
request.count = dma->buf_count;
DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode);
*(drm_buf_map_t *) data = request;
return retcode;
}

@ -1,297 +0,0 @@
/* context.c -- IOCTLs for contexts and DMA queues -*- c -*-
* Created: Tue Feb 2 08:37:54 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
*
*/
#define __NO_VERSION__
#include "drmP.h"
static int drm_init_queue(drm_device_t *dev, drm_queue_t *q, drm_ctx_t *ctx)
{
DRM_DEBUG("\n");
if (atomic_read(&q->use_count) != 1
|| atomic_read(&q->finalization)
|| atomic_read(&q->block_count)) {
DRM_ERROR("New queue is already in use: u%d f%d b%d\n",
atomic_read(&q->use_count),
atomic_read(&q->finalization),
atomic_read(&q->block_count));
}
atomic_set(&q->finalization, 0);
atomic_set(&q->block_count, 0);
atomic_set(&q->block_read, 0);
atomic_set(&q->block_write, 0);
atomic_set(&q->total_queued, 0);
atomic_set(&q->total_flushed, 0);
atomic_set(&q->total_locks, 0);
q->write_queue = 0;
q->read_queue = 0;
q->flush_queue = 0;
q->flags = ctx->flags;
drm_waitlist_create(&q->waitlist, dev->dma->buf_count);
return 0;
}
/* drm_alloc_queue:
PRE: 1) dev->queuelist[0..dev->queue_count] is allocated and will not
disappear (so all deallocation must be done after IOCTLs are off)
2) dev->queue_count < dev->queue_slots
3) dev->queuelist[i].use_count == 0 and
dev->queuelist[i].finalization == 0 if i not in use
POST: 1) dev->queuelist[i].use_count == 1
2) dev->queue_count < dev->queue_slots */
static int drm_alloc_queue(drm_device_t *dev)
{
int i;
drm_queue_t *queue;
int oldslots;
int newslots;
/* Check for a free queue */
for (i = 0; i < dev->queue_count; i++) {
atomic_inc(&dev->queuelist[i]->use_count);
if (atomic_read(&dev->queuelist[i]->use_count) == 1
&& !atomic_read(&dev->queuelist[i]->finalization)) {
DRM_DEBUG("%d (free)\n", i);
return i;
}
atomic_dec(&dev->queuelist[i]->use_count);
}
/* Allocate a new queue */
lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curproc);
queue = drm_alloc(sizeof(*queue), DRM_MEM_QUEUES);
memset(queue, 0, sizeof(*queue));
atomic_set(&queue->use_count, 1);
++dev->queue_count;
if (dev->queue_count >= dev->queue_slots) {
oldslots = dev->queue_slots * sizeof(*dev->queuelist);
if (!dev->queue_slots) dev->queue_slots = 1;
dev->queue_slots *= 2;
newslots = dev->queue_slots * sizeof(*dev->queuelist);
dev->queuelist = drm_realloc(dev->queuelist,
oldslots,
newslots,
DRM_MEM_QUEUES);
if (!dev->queuelist) {
lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
DRM_DEBUG("out of memory\n");
return -ENOMEM;
}
}
dev->queuelist[dev->queue_count-1] = queue;
lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
DRM_DEBUG("%d (new)\n", dev->queue_count - 1);
return dev->queue_count - 1;
}
int drm_resctx(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_ctx_res_t res;
drm_ctx_t ctx;
int i;
int error;
DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS);
res = *(drm_ctx_res_t *) data;
if (res.count >= DRM_RESERVED_CONTEXTS) {
memset(&ctx, 0, sizeof(ctx));
for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
ctx.handle = i;
error = copyout(&i, &res.contexts[i],
sizeof(i));
if (error) return error;
}
}
res.count = DRM_RESERVED_CONTEXTS;
*(drm_ctx_res_t *) data = res;
return 0;
}
int drm_addctx(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
drm_ctx_t ctx;
ctx = *(drm_ctx_t *) data;
if ((ctx.handle = drm_alloc_queue(dev)) == DRM_KERNEL_CONTEXT) {
/* Init kernel's context and get a new one. */
drm_init_queue(dev, dev->queuelist[ctx.handle], &ctx);
ctx.handle = drm_alloc_queue(dev);
}
drm_init_queue(dev, dev->queuelist[ctx.handle], &ctx);
DRM_DEBUG("%d\n", ctx.handle);
*(drm_ctx_t *) data = ctx;
return 0;
}
int drm_modctx(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
drm_ctx_t ctx;
drm_queue_t *q;
ctx = *(drm_ctx_t *) data;
DRM_DEBUG("%d\n", ctx.handle);
if (ctx.handle < 0 || ctx.handle >= dev->queue_count) return -EINVAL;
q = dev->queuelist[ctx.handle];
atomic_inc(&q->use_count);
if (atomic_read(&q->use_count) == 1) {
/* No longer in use */
atomic_dec(&q->use_count);
return -EINVAL;
}
if (DRM_BUFCOUNT(&q->waitlist)) {
atomic_dec(&q->use_count);
return -EBUSY;
}
q->flags = ctx.flags;
atomic_dec(&q->use_count);
return 0;
}
int drm_getctx(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
drm_ctx_t ctx;
drm_queue_t *q;
ctx = *(drm_ctx_t *) data;
DRM_DEBUG("%d\n", ctx.handle);
if (ctx.handle >= dev->queue_count) return -EINVAL;
q = dev->queuelist[ctx.handle];
atomic_inc(&q->use_count);
if (atomic_read(&q->use_count) == 1) {
/* No longer in use */
atomic_dec(&q->use_count);
return -EINVAL;
}
ctx.flags = q->flags;
atomic_dec(&q->use_count);
*(drm_ctx_t *) data = ctx;
return 0;
}
int drm_switchctx(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
drm_ctx_t ctx;
ctx = *(drm_ctx_t *) data;
DRM_DEBUG("%d\n", ctx.handle);
return drm_context_switch(dev, dev->last_context, ctx.handle);
}
int drm_newctx(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
drm_ctx_t ctx;
ctx = *(drm_ctx_t *) data;
DRM_DEBUG("%d\n", ctx.handle);
drm_context_switch_complete(dev, ctx.handle);
return 0;
}
int drm_rmctx(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
drm_ctx_t ctx;
drm_queue_t *q;
drm_buf_t *buf;
ctx = *(drm_ctx_t *) data;
DRM_DEBUG("%d\n", ctx.handle);
if (ctx.handle >= dev->queue_count) return -EINVAL;
q = dev->queuelist[ctx.handle];
atomic_inc(&q->use_count);
if (atomic_read(&q->use_count) == 1) {
/* No longer in use */
atomic_dec(&q->use_count);
return -EINVAL;
}
atomic_inc(&q->finalization); /* Mark queue in finalization state */
atomic_sub(2, &q->use_count); /* Mark queue as unused (pending
finalization) */
/* Wait while interrupt servicing is in progress */
while (test_and_set_bit(0, &dev->interrupt_flag)) {
int never;
int error = tsleep(&never, PZERO|PCATCH, "drmrc", 1);
if (error) {
clear_bit(0, &dev->interrupt_flag);
return error;
}
}
/* Remove queued buffers */
while ((buf = drm_waitlist_get(&q->waitlist))) {
drm_free_buffer(dev, buf);
}
clear_bit(0, &dev->interrupt_flag);
/* Wakeup blocked processes */
wakeup(&q->read_queue);
wakeup(&q->write_queue);
wakeup(&q->flush_queue);
/* Finalization over. Queue is made
available when both use_count and
finalization become 0, which won't
happen until all the waiting processes
stop waiting. */
atomic_dec(&q->finalization);
return 0;
}

@ -1,85 +0,0 @@
/* ctxbitmap.c -- Context bitmap management
* Created: Thu Jan 6 03:56:42 2000 by jhartmann@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Author: Jeff Hartmann <jhartmann@valinux.com>
*
*/
#define __NO_VERSION__
#include "drmP.h"
void drm_ctxbitmap_free(drm_device_t *dev, int ctx_handle)
{
if (ctx_handle < 0) goto failed;
if (ctx_handle < DRM_MAX_CTXBITMAP) {
clear_bit(ctx_handle, dev->ctx_bitmap);
return;
}
failed:
DRM_ERROR("Attempt to free invalid context handle: %d\n",
ctx_handle);
return;
}
int drm_ctxbitmap_next(drm_device_t *dev)
{
int bit;
bit = find_first_zero_bit(dev->ctx_bitmap, DRM_MAX_CTXBITMAP);
if (bit < DRM_MAX_CTXBITMAP) {
set_bit(bit, dev->ctx_bitmap);
DRM_DEBUG("drm_ctxbitmap_next bit : %d\n", bit);
return bit;
}
return -1;
}
int drm_ctxbitmap_init(drm_device_t *dev)
{
int i;
int temp;
dev->ctx_bitmap = (u_int32_t *) drm_alloc(PAGE_SIZE,
DRM_MEM_CTXBITMAP);
if(dev->ctx_bitmap == NULL) {
return -ENOMEM;
}
memset((void *) dev->ctx_bitmap, 0, PAGE_SIZE);
for(i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
temp = drm_ctxbitmap_next(dev);
DRM_DEBUG("drm_ctxbitmap_init : %d\n", temp);
}
return 0;
}
void drm_ctxbitmap_cleanup(drm_device_t *dev)
{
drm_free((void *)dev->ctx_bitmap, PAGE_SIZE,
DRM_MEM_CTXBITMAP);
}

@ -1,543 +0,0 @@
/* dma.c -- DMA IOCTL and function support -*- c -*-
* Created: Fri Mar 19 14:30:16 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
*
*/
#define __NO_VERSION__
#include "drmP.h"
void drm_dma_setup(drm_device_t *dev)
{
int i;
dev->dma = drm_alloc(sizeof(*dev->dma), DRM_MEM_DRIVER);
memset(dev->dma, 0, sizeof(*dev->dma));
for (i = 0; i <= DRM_MAX_ORDER; i++)
memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0]));
}
void drm_dma_takedown(drm_device_t *dev)
{
drm_device_dma_t *dma = dev->dma;
int i, j;
if (!dma) return;
/* Clear dma buffers */
for (i = 0; i <= DRM_MAX_ORDER; i++) {
if (dma->bufs[i].seg_count) {
DRM_DEBUG("order %d: buf_count = %d,"
" seg_count = %d\n",
i,
dma->bufs[i].buf_count,
dma->bufs[i].seg_count);
for (j = 0; j < dma->bufs[i].seg_count; j++) {
drm_free_pages(dma->bufs[i].seglist[j],
dma->bufs[i].page_order,
DRM_MEM_DMA);
}
drm_free(dma->bufs[i].seglist,
dma->bufs[i].seg_count
* sizeof(*dma->bufs[0].seglist),
DRM_MEM_SEGS);
}
if(dma->bufs[i].buf_count) {
for(j = 0; j < dma->bufs[i].buf_count; j++) {
if(dma->bufs[i].buflist[j].dev_private) {
drm_free(dma->bufs[i].buflist[j].dev_private,
dma->bufs[i].buflist[j].dev_priv_size,
DRM_MEM_BUFS);
}
}
drm_free(dma->bufs[i].buflist,
dma->bufs[i].buf_count *
sizeof(*dma->bufs[0].buflist),
DRM_MEM_BUFS);
drm_freelist_destroy(&dma->bufs[i].freelist);
}
}
if (dma->buflist) {
drm_free(dma->buflist,
dma->buf_count * sizeof(*dma->buflist),
DRM_MEM_BUFS);
}
if (dma->pagelist) {
drm_free(dma->pagelist,
dma->page_count * sizeof(*dma->pagelist),
DRM_MEM_PAGES);
}
drm_free(dev->dma, sizeof(*dev->dma), DRM_MEM_DRIVER);
dev->dma = NULL;
}
#if DRM_DMA_HISTOGRAM
/* This is slow, but is useful for debugging. */
int drm_histogram_slot(struct timespec *ts)
{
long count = ts->tv_sec * 1000 + ts->tv_nsec / 1000000;
int value = DRM_DMA_HISTOGRAM_INITIAL;
int slot;
for (slot = 0;
slot < DRM_DMA_HISTOGRAM_SLOTS;
++slot, value = DRM_DMA_HISTOGRAM_NEXT(value)) {
if (count < value) return slot;
}
return DRM_DMA_HISTOGRAM_SLOTS - 1;
}
void drm_histogram_compute(drm_device_t *dev, drm_buf_t *buf)
{
struct timespec queued_to_dispatched;
struct timespec dispatched_to_completed;
struct timespec completed_to_freed;
int q2d, d2c, c2f, q2c, q2f;
if (timespecisset(&buf->time_queued)) {
queued_to_dispatched = buf->time_dispatched;
timespecsub(&queued_to_dispatched, &buf->time_queued);
dispatched_to_completed = buf->time_completed;
timespecsub(&dispatched_to_completed, &buf->time_dispatched);
completed_to_freed = buf->time_freed;
timespecsub(&completed_to_freed, &buf->time_completed);
q2d = drm_histogram_slot(&queued_to_dispatched);
d2c = drm_histogram_slot(&dispatched_to_completed);
c2f = drm_histogram_slot(&completed_to_freed);
timespecadd(&queued_to_dispatched, &dispatched_to_completed);
q2c = drm_histogram_slot(&queued_to_dispatched);
timespecadd(&queued_to_dispatched, &completed_to_freed);
q2f = drm_histogram_slot(&queued_to_dispatched);
atomic_inc(&dev->histo.total);
atomic_inc(&dev->histo.queued_to_dispatched[q2d]);
atomic_inc(&dev->histo.dispatched_to_completed[d2c]);
atomic_inc(&dev->histo.completed_to_freed[c2f]);
atomic_inc(&dev->histo.queued_to_completed[q2c]);
atomic_inc(&dev->histo.queued_to_freed[q2f]);
}
timespecclear(&buf->time_queued);
timespecclear(&buf->time_dispatched);
timespecclear(&buf->time_completed);
timespecclear(&buf->time_freed);
}
#endif
void drm_free_buffer(drm_device_t *dev, drm_buf_t *buf)
{
drm_device_dma_t *dma = dev->dma;
if (!buf) return;
buf->waiting = 0;
buf->pending = 0;
buf->pid = 0;
buf->used = 0;
#if DRM_DMA_HISTOGRAMxx
buf->time_completed = get_cycles();
#endif
if (buf->dma_wait) {
buf->dma_wait = 0;
wakeup(&buf->dma_wait);
} else {
/* If processes are waiting, the last one
to wake will put the buffer on the free
list. If no processes are waiting, we
put the buffer on the freelist here. */
drm_freelist_put(dev, &dma->bufs[buf->order].freelist, buf);
}
}
void drm_reclaim_buffers(drm_device_t *dev, pid_t pid)
{
drm_device_dma_t *dma = dev->dma;
int i;
if (!dma) return;
for (i = 0; i < dma->buf_count; i++) {
if (dma->buflist[i]->pid == pid) {
switch (dma->buflist[i]->list) {
case DRM_LIST_NONE:
drm_free_buffer(dev, dma->buflist[i]);
break;
case DRM_LIST_WAIT:
dma->buflist[i]->list = DRM_LIST_RECLAIM;
break;
default:
/* Buffer already on hardware. */
break;
}
}
}
}
int drm_context_switch(drm_device_t *dev, int old, int new)
{
char buf[64];
drm_queue_t *q;
atomic_inc(&dev->total_ctx);
if (test_and_set_bit(0, &dev->context_flag)) {
DRM_ERROR("Reentering -- FIXME\n");
return EBUSY;
}
#if DRM_DMA_HISTOGRAM
getnanotime(&dev->ctx_start);
#endif
DRM_DEBUG("Context switch from %d to %d\n", old, new);
if (new >= dev->queue_count) {
clear_bit(0, &dev->context_flag);
return EINVAL;
}
if (new == dev->last_context) {
clear_bit(0, &dev->context_flag);
return 0;
}
q = dev->queuelist[new];
atomic_inc(&q->use_count);
if (atomic_read(&q->use_count) == 1) {
atomic_dec(&q->use_count);
clear_bit(0, &dev->context_flag);
return EINVAL;
}
if (drm_flags & DRM_FLAG_NOCTX) {
drm_context_switch_complete(dev, new);
} else {
sprintf(buf, "C %d %d\n", old, new);
drm_write_string(dev, buf);
}
atomic_dec(&q->use_count);
return 0;
}
int drm_context_switch_complete(drm_device_t *dev, int new)
{
drm_device_dma_t *dma = dev->dma;
dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
dev->last_switch = ticks;
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
DRM_ERROR("Lock isn't held after context switch\n");
}
if (!dma || !(dma->next_buffer && dma->next_buffer->while_locked)) {
if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT)) {
DRM_ERROR("Cannot free lock\n");
}
}
#if DRM_DMA_HISTOGRAM
{
struct timespec ts;
getnanotime(&ts);
timespecsub(&ts, &dev->ctx_start);
atomic_inc(&dev->histo.ctx[drm_histogram_slot(&ts)]);
}
#endif
clear_bit(0, &dev->context_flag);
wakeup(&dev->context_wait);
return 0;
}
void drm_clear_next_buffer(drm_device_t *dev)
{
drm_device_dma_t *dma = dev->dma;
dma->next_buffer = NULL;
if (dma->next_queue && !DRM_BUFCOUNT(&dma->next_queue->waitlist)) {
wakeup(&dma->next_queue->flush_queue);
}
dma->next_queue = NULL;
}
int drm_select_queue(drm_device_t *dev, void (*wrapper)(void *))
{
int i;
int candidate = -1;
int j = ticks;
if (!dev) {
DRM_ERROR("No device\n");
return -1;
}
if (!dev->queuelist || !dev->queuelist[DRM_KERNEL_CONTEXT]) {
/* This only happens between the time the
interrupt is initialized and the time
the queues are initialized. */
return -1;
}
/* Doing "while locked" DMA? */
if (DRM_WAITCOUNT(dev, DRM_KERNEL_CONTEXT)) {
return DRM_KERNEL_CONTEXT;
}
/* If there are buffers on the last_context
queue, and we have not been executing
this context very long, continue to
execute this context. */
if (dev->last_switch <= j
&& dev->last_switch + DRM_TIME_SLICE > j
&& DRM_WAITCOUNT(dev, dev->last_context)) {
return dev->last_context;
}
/* Otherwise, find a candidate */
for (i = dev->last_checked + 1; i < dev->queue_count; i++) {
if (DRM_WAITCOUNT(dev, i)) {
candidate = dev->last_checked = i;
break;
}
}
if (candidate < 0) {
for (i = 0; i < dev->queue_count; i++) {
if (DRM_WAITCOUNT(dev, i)) {
candidate = dev->last_checked = i;
break;
}
}
}
if (wrapper
&& candidate >= 0
&& candidate != dev->last_context
&& dev->last_switch <= j
&& dev->last_switch + DRM_TIME_SLICE > j) {
int s = splclock();
if (dev->timer.c_time != dev->last_switch + DRM_TIME_SLICE) {
callout_reset(&dev->timer,
dev->last_switch + DRM_TIME_SLICE - j,
wrapper,
dev);
}
splx(s);
return -1;
}
return candidate;
}
int drm_dma_enqueue(drm_device_t *dev, drm_dma_t *d)
{
int i;
drm_queue_t *q;
drm_buf_t *buf;
int idx;
int while_locked = 0;
drm_device_dma_t *dma = dev->dma;
int error;
DRM_DEBUG("%d\n", d->send_count);
if (d->flags & _DRM_DMA_WHILE_LOCKED) {
int context = dev->lock.hw_lock->lock;
if (!_DRM_LOCK_IS_HELD(context)) {
DRM_ERROR("No lock held during \"while locked\""
" request\n");
return EINVAL;
}
if (d->context != _DRM_LOCKING_CONTEXT(context)
&& _DRM_LOCKING_CONTEXT(context) != DRM_KERNEL_CONTEXT) {
DRM_ERROR("Lock held by %d while %d makes"
" \"while locked\" request\n",
_DRM_LOCKING_CONTEXT(context),
d->context);
return EINVAL;
}
q = dev->queuelist[DRM_KERNEL_CONTEXT];
while_locked = 1;
} else {
q = dev->queuelist[d->context];
}
atomic_inc(&q->use_count);
if (atomic_read(&q->block_write)) {
atomic_inc(&q->block_count);
for (;;) {
if (!atomic_read(&q->block_write)) break;
error = tsleep(&q->block_write, PZERO|PCATCH,
"dmawr", 0);
if (error) {
atomic_dec(&q->use_count);
return error;
}
}
atomic_dec(&q->block_count);
}
for (i = 0; i < d->send_count; i++) {
idx = d->send_indices[i];
if (idx < 0 || idx >= dma->buf_count) {
atomic_dec(&q->use_count);
DRM_ERROR("Index %d (of %d max)\n",
d->send_indices[i], dma->buf_count - 1);
return EINVAL;
}
buf = dma->buflist[ idx ];
if (buf->pid != curproc->p_pid) {
atomic_dec(&q->use_count);
DRM_ERROR("Process %d using buffer owned by %d\n",
curproc->p_pid, buf->pid);
return EINVAL;
}
if (buf->list != DRM_LIST_NONE) {
atomic_dec(&q->use_count);
DRM_ERROR("Process %d using buffer %d on list %d\n",
curproc->p_pid, buf->idx, buf->list);
}
buf->used = d->send_sizes[i];
buf->while_locked = while_locked;
buf->context = d->context;
if (!buf->used) {
DRM_ERROR("Queueing 0 length buffer\n");
}
if (buf->pending) {
atomic_dec(&q->use_count);
DRM_ERROR("Queueing pending buffer:"
" buffer %d, offset %d\n",
d->send_indices[i], i);
return EINVAL;
}
if (buf->waiting) {
atomic_dec(&q->use_count);
DRM_ERROR("Queueing waiting buffer:"
" buffer %d, offset %d\n",
d->send_indices[i], i);
return EINVAL;
}
buf->waiting = 1;
if (atomic_read(&q->use_count) == 1
|| atomic_read(&q->finalization)) {
drm_free_buffer(dev, buf);
} else {
drm_waitlist_put(&q->waitlist, buf);
atomic_inc(&q->total_queued);
}
}
atomic_dec(&q->use_count);
return 0;
}
static int drm_dma_get_buffers_of_order(drm_device_t *dev, drm_dma_t *d,
int order)
{
int i;
int error;
drm_buf_t *buf;
drm_device_dma_t *dma = dev->dma;
for (i = d->granted_count; i < d->request_count; i++) {
buf = drm_freelist_get(&dma->bufs[order].freelist,
d->flags & _DRM_DMA_WAIT);
if (!buf) break;
if (buf->pending || buf->waiting) {
DRM_ERROR("Free buffer %d in use by %d (w%d, p%d)\n",
buf->idx,
buf->pid,
buf->waiting,
buf->pending);
}
buf->pid = curproc->p_pid;
error = copyout(&buf->idx,
&d->request_indices[i],
sizeof(buf->idx));
if (error)
return error;
error = copyout(&buf->total,
&d->request_sizes[i],
sizeof(buf->total));
if (error)
return error;
++d->granted_count;
}
return 0;
}
int drm_dma_get_buffers(drm_device_t *dev, drm_dma_t *dma)
{
int order;
int retcode = 0;
int tmp_order;
order = drm_order(dma->request_size);
dma->granted_count = 0;
retcode = drm_dma_get_buffers_of_order(dev, dma, order);
if (dma->granted_count < dma->request_count
&& (dma->flags & _DRM_DMA_SMALLER_OK)) {
for (tmp_order = order - 1;
!retcode
&& dma->granted_count < dma->request_count
&& tmp_order >= DRM_MIN_ORDER;
--tmp_order) {
retcode = drm_dma_get_buffers_of_order(dev, dma,
tmp_order);
}
}
if (dma->granted_count < dma->request_count
&& (dma->flags & _DRM_DMA_LARGER_OK)) {
for (tmp_order = order + 1;
!retcode
&& dma->granted_count < dma->request_count
&& tmp_order <= DRM_MAX_ORDER;
++tmp_order) {
retcode = drm_dma_get_buffers_of_order(dev, dma,
tmp_order);
}
}
return 0;
}

@ -1,50 +0,0 @@
/* drawable.c -- IOCTLs for drawables -*- c -*-
* Created: Tue Feb 2 08:37:54 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
*
*/
#define __NO_VERSION__
#include "drmP.h"
int drm_adddraw(dev_t kdev, u_long cmd, caddr_t data,
int flags, struct proc *p)
{
drm_draw_t draw;
draw.handle = 0; /* NOOP */
DRM_DEBUG("%d\n", draw.handle);
*(drm_draw_t *) data = draw;
return 0;
}
int drm_rmdraw(dev_t kdev, u_long cmd, caddr_t data,
int flags, struct proc *p)
{
return 0; /* NOOP */
}

@ -1,418 +0,0 @@
/* drmstat.c -- DRM device status and testing program
* Created: Tue Jan 5 08:19:24 1999 by faith@precisioninsight.com
* Revised: Sun Aug 1 11:02:00 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drmstat.c,v 1.28 1999/08/04 18:12:11 faith Exp $
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/bsd/drm/kernel/drm/drmstat.c,v 1.1 2000/06/17 00:03:30 martin Exp $
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/mman.h>
#include <strings.h>
#include <errno.h>
#include <signal.h>
#include <fcntl.h>
#include "xf86drm.h"
int sigio_fd;
static double usec(struct timeval *end, struct timeval *start)
{
double e = end->tv_sec * 1000000 + end->tv_usec;
double s = start->tv_sec * 1000000 + start->tv_usec;
return e - s;
}
static void getversion(int fd)
{
drmVersionPtr version;
version = drmGetVersion(fd);
if (version) {
printf( "Name: %s\n", version->name ? version->name : "?" );
printf( " Version: %d.%d.%d\n",
version->version_major,
version->version_minor,
version->version_patchlevel );
printf( " Date: %s\n", version->date ? version->date : "?" );
printf( " Desc: %s\n", version->desc ? version->desc : "?" );
drmFreeVersion(version);
} else {
printf( "No driver available\n" );
}
}
void handler(int fd, void *oldctx, void *newctx)
{
printf("Got fd %d\n", fd);
}
void process_sigio(char *device)
{
int fd;
printf("%s\n", device);
if ((fd = open(device, 0)) < 0) {
printf("%d\n", errno);
drmError(-errno, __FUNCTION__);
exit(1);
}
sigio_fd = fd;
drmInstallSIGIOHandler(fd, handler);
for (;;) sleep(60);
}
int main(int argc, char **argv)
{
int c;
int r = 0;
int fd = -1;
drmHandle handle;
void *address;
char *pt;
unsigned long count;
unsigned long offset;
unsigned long size;
drmContext context;
int loops;
char buf[1024];
int i;
drmBufInfoPtr info;
drmBufMapPtr bufs;
drmLockPtr lock;
int secs;
while ((c = getopt(argc, argv,
"lc:vo:O:f:s:w:W:b:r:R:P:L:C:XS:B:F:")) != EOF)
switch (c) {
case 'F':
count = strtoul(optarg, NULL, 0);
if (!fork()) {
dup(fd);
sleep(count);
}
close(fd);
break;
case 'v': getversion(fd); break;
case 'X':
if ((r = drmCreateContext(fd, &context))) {
drmError(r, argv[0]);
return 1;
}
printf( "Got %d\n", context);
break;
case 'S':
process_sigio(optarg);
break;
case 'C':
if ((r = drmSwitchToContext(fd, strtoul(optarg, NULL, 0)))) {
drmError(r, argv[0]);
return 1;
}
break;
case 'c':
if ((r = drmSetBusid(fd,optarg))) {
drmError(r, argv[0]);
return 1;
}
break;
case 'o':
if ((fd = drmOpen(optarg, NULL)) < 0) {
drmError(fd, argv[0]);
return 1;
}
break;
case 'O':
if ((fd = drmOpen(NULL, optarg)) < 0) {
drmError(fd, argv[0]);
return 1;
}
break;
case 'B': /* Test buffer allocation */
count = strtoul(optarg, &pt, 0);
size = strtoul(pt+1, &pt, 0);
secs = strtoul(pt+1, NULL, 0);
{
drmDMAReq dma;
int *indices, *sizes;
indices = alloca(sizeof(*indices) * count);
sizes = alloca(sizeof(*sizes) * count);
dma.context = context;
dma.send_count = 0;
dma.request_count = count;
dma.request_size = size;
dma.request_list = indices;
dma.request_sizes = sizes;
dma.flags = DRM_DMA_WAIT;
if ((r = drmDMA(fd, &dma))) {
drmError(r, argv[0]);
return 1;
}
for (i = 0; i < dma.granted_count; i++) {
printf("%5d: index = %d, size = %d\n",
i, dma.request_list[i], dma.request_sizes[i]);
}
sleep(secs);
drmFreeBufs(fd, dma.granted_count, indices);
}
break;
case 'b':
count = strtoul(optarg, &pt, 0);
size = strtoul(pt+1, NULL, 0);
if ((r = drmAddBufs(fd, count, size, 0, 0)) < 0) {
drmError(r, argv[0]);
return 1;
}
if (!(info = drmGetBufInfo(fd))) {
drmError(0, argv[0]);
return 1;
}
for (i = 0; i < info->count; i++) {
printf("%5d buffers of size %6d (low = %d, high = %d)\n",
info->list[i].count,
info->list[i].size,
info->list[i].low_mark,
info->list[i].high_mark);
}
if ((r = drmMarkBufs(fd, 0.50, 0.80))) {
drmError(r, argv[0]);
return 1;
}
if (!(info = drmGetBufInfo(fd))) {
drmError(0, argv[0]);
return 1;
}
for (i = 0; i < info->count; i++) {
printf("%5d buffers of size %6d (low = %d, high = %d)\n",
info->list[i].count,
info->list[i].size,
info->list[i].low_mark,
info->list[i].high_mark);
}
printf("===== /proc/drm/1/meminfo =====\n");
sprintf(buf, "cat /proc/drm/1/meminfo");
system(buf);
#if 1
if (!(bufs = drmMapBufs(fd))) {
drmError(0, argv[0]);
return 1;
}
printf("===============================\n");
printf( "%d bufs\n", bufs->count);
for (i = 0; i < bufs->count; i++) {
printf( " %4d: %8d bytes at %p\n",
i,
bufs->list[i].total,
bufs->list[i].address);
}
printf("===== /proc/drm/1/vmainfo =====\n");
sprintf(buf, "cat /proc/drm/1/vmainfo");
system(buf);
#endif
break;
case 'f':
offset = strtoul(optarg, &pt, 0);
size = strtoul(pt+1, NULL, 0);
handle = 0;
if ((r = drmAddMap(fd, offset, size,
DRM_FRAME_BUFFER, 0, &handle))) {
drmError(r, argv[0]);
return 1;
}
printf("0x%08lx:0x%04lx added\n", offset, size);
printf("===== /proc/drm/1/meminfo =====\n");
sprintf(buf, "cat /proc/drm/1/meminfo");
system(buf);
break;
case 'r':
case 'R':
offset = strtoul(optarg, &pt, 0);
size = strtoul(pt+1, NULL, 0);
handle = 0;
if ((r = drmAddMap(fd, offset, size,
DRM_REGISTERS,
c == 'R' ? DRM_READ_ONLY : 0,
&handle))) {
drmError(r, argv[0]);
return 1;
}
printf("0x%08lx:0x%04lx added\n", offset, size);
printf("===== /proc/drm/1/meminfo =====\n");
sprintf(buf, "cat /proc/drm/1/meminfo");
system(buf);
break;
case 's':
size = strtoul(optarg, &pt, 0);
handle = 0;
if ((r = drmAddMap(fd, 0, size,
DRM_SHM, DRM_CONTAINS_LOCK,
&handle))) {
drmError(r, argv[0]);
return 1;
}
printf("0x%04lx byte shm added at 0x%08lx\n", size, handle);
sprintf(buf, "sysctl hw.graphics.0.vm");
system(buf);
break;
case 'P':
offset = strtoul(optarg, &pt, 0);
size = strtoul(pt+1, NULL, 0);
address = NULL;
if ((r = drmMap(fd, offset, size, &address))) {
drmError(r, argv[0]);
return 1;
}
printf("0x%08lx:0x%04lx mapped at %p for pid %d\n",
offset, size, address, getpid());
printf("===== hw.graphics.0.vma =====\n");
sprintf(buf, "sysctl hw.graphics.0.vma");
system(buf);
mprotect((void *)offset, size, PROT_READ);
printf("===== hw.graphics.0.vma =====\n");
sprintf(buf, "sysctl hw.graphics.0.vma");
system(buf);
break;
case 'w':
case 'W':
offset = strtoul(optarg, &pt, 0);
size = strtoul(pt+1, NULL, 0);
address = NULL;
if ((r = drmMap(fd, offset, size, &address))) {
drmError(r, argv[0]);
return 1;
}
printf("0x%08lx:0x%04lx mapped at %p for pid %d\n",
offset, size, address, getpid());
printf("===== /proc/%d/maps =====\n", getpid());
sprintf(buf, "cat /proc/%d/maps", getpid());
system(buf);
printf("===== /proc/drm/1/meminfo =====\n");
sprintf(buf, "cat /proc/drm/1/meminfo");
system(buf);
printf("===== /proc/drm/1/vmainfo =====\n");
sprintf(buf, "cat /proc/drm/1/vmainfo");
system(buf);
printf("===== READING =====\n");
for (i = 0; i < 0x10; i++)
printf("%02x ", (unsigned int)((unsigned char *)address)[i]);
printf("\n");
if (c == 'w') {
printf("===== WRITING =====\n");
for (i = 0; i < size; i+=2) {
((char *)address)[i] = i & 0xff;
((char *)address)[i+1] = i & 0xff;
}
}
printf("===== READING =====\n");
for (i = 0; i < 0x10; i++)
printf("%02x ", (unsigned int)((unsigned char *)address)[i]);
printf("\n");
printf("===== /proc/drm/1/vmainfo =====\n");
sprintf(buf, "cat /proc/drm/1/vmainfo");
system(buf);
break;
case 'L':
context = strtoul(optarg, &pt, 0);
offset = strtoul(pt+1, &pt, 0);
size = strtoul(pt+1, &pt, 0);
loops = strtoul(pt+1, NULL, 0);
address = NULL;
if ((r = drmMap(fd, offset, size, &address))) {
drmError(r, argv[0]);
return 1;
}
lock = address;
#if 1
{
int counter = 0;
struct timeval loop_start, loop_end;
struct timeval lock_start, lock_end;
double wt;
#define HISTOSIZE 9
int histo[HISTOSIZE];
int output = 0;
int fast = 0;
if (loops < 0) {
loops = -loops;
++output;
}
for (i = 0; i < HISTOSIZE; i++) histo[i] = 0;
gettimeofday(&loop_start, NULL);
for (i = 0; i < loops; i++) {
gettimeofday(&lock_start, NULL);
DRM_LIGHT_LOCK_COUNT(fd,lock,context,fast);
gettimeofday(&lock_end, NULL);
DRM_UNLOCK(fd,lock,context);
++counter;
wt = usec(&lock_end, &lock_start);
if (wt <= 2.5) ++histo[8];
if (wt < 5.0) ++histo[0];
else if (wt < 50.0) ++histo[1];
else if (wt < 500.0) ++histo[2];
else if (wt < 5000.0) ++histo[3];
else if (wt < 50000.0) ++histo[4];
else if (wt < 500000.0) ++histo[5];
else if (wt < 5000000.0) ++histo[6];
else ++histo[7];
if (output) printf( "%.2f uSec, %d fast\n", wt, fast);
}
gettimeofday(&loop_end, NULL);
printf( "Average wait time = %.2f usec, %d fast\n",
usec(&loop_end, &loop_start) / counter, fast);
printf( "%9d <= 2.5 uS\n", histo[8]);
printf( "%9d < 5 uS\n", histo[0]);
printf( "%9d < 50 uS\n", histo[1]);
printf( "%9d < 500 uS\n", histo[2]);
printf( "%9d < 5000 uS\n", histo[3]);
printf( "%9d < 50000 uS\n", histo[4]);
printf( "%9d < 500000 uS\n", histo[5]);
printf( "%9d < 5000000 uS\n", histo[6]);
printf( "%9d >= 5000000 uS\n", histo[7]);
}
#else
printf( "before lock: 0x%08x\n", lock->lock);
printf( "lock: 0x%08x\n", lock->lock);
sleep(5);
printf( "unlock: 0x%08x\n", lock->lock);
#endif
break;
default:
fprintf( stderr, "Usage: drmstat [options]\n" );
return 1;
}
return r;
}

@ -1,261 +0,0 @@
/* fops.c -- File operations for DRM -*- c -*-
* Created: Mon Jan 4 08:58:31 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Daryll Strauss <daryll@valinux.com>
*
*/
#define __NO_VERSION__
#include "drmP.h"
#include <sys/signalvar.h>
#include <sys/poll.h>
drm_file_t *drm_find_file_by_proc(drm_device_t *dev, struct proc *p)
{
uid_t uid = p->p_cred->p_svuid;
pid_t pid = p->p_pid;
drm_file_t *priv;
TAILQ_FOREACH(priv, &dev->files, link)
if (priv->pid == pid && priv->uid == uid)
return priv;
return NULL;
}
/* drm_open_helper is called from each driver's open entry point whenever a process opens /dev/drm. */
int drm_open_helper(dev_t kdev, int flags, int fmt, struct proc *p,
drm_device_t *dev)
{
int m = minor(kdev);
drm_file_t *priv;
if (flags & O_EXCL)
return EBUSY; /* No exclusive opens */
dev->flags = flags;
DRM_DEBUG("pid = %d, device = %p, minor = %d\n",
p->p_pid, dev->device, m);
priv = drm_find_file_by_proc(dev, p);
if (priv) {
priv->refs++;
} else {
priv = drm_alloc(sizeof(*priv), DRM_MEM_FILES);
memset(priv, 0, sizeof(*priv));
priv->uid = p->p_cred->p_svuid;
priv->pid = p->p_pid;
priv->refs = 1;
priv->minor = m;
priv->devXX = dev;
priv->ioctl_count = 0;
priv->authenticated = !suser(p);
lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, p);
TAILQ_INSERT_TAIL(&dev->files, priv, link);
lockmgr(&dev->dev_lock, LK_RELEASE, 0, p);
}
kdev->si_drv1 = dev;
return 0;
}
int drm_write(dev_t kdev, struct uio *uio, int ioflag)
{
struct proc *p = curproc;
drm_device_t *dev = kdev->si_drv1;
DRM_DEBUG("pid = %d, device = %p, open_count = %d\n",
p->p_pid, dev->device, dev->open_count);
return 0;
}
/* drm_close is called whenever a process closes /dev/drm*. */
int drm_close(dev_t kdev, int fflag, int devtype, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
drm_file_t *priv;
DRM_DEBUG("pid = %d, device = %p, open_count = %d\n",
p->p_pid, dev->device, dev->open_count);
priv = drm_find_file_by_proc(dev, p);
if (!priv) {
DRM_DEBUG("can't find authenticator\n");
return EINVAL;
}
if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)
&& dev->lock.pid == p->p_pid) {
DRM_ERROR("Process %d dead, freeing lock for context %d\n",
p->p_pid,
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
drm_lock_free(dev,
&dev->lock.hw_lock->lock,
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
/* FIXME: may require heavy-handed reset of
hardware at this point, possibly
processed via a callback to the X
server. */
}
drm_reclaim_buffers(dev, priv->pid);
funsetown(dev->buf_sigio);
lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, p);
priv = drm_find_file_by_proc(dev, p);
if (priv) {
priv->refs--;
if (!priv->refs) {
TAILQ_REMOVE(&dev->files, priv, link);
drm_free(priv, sizeof(*priv), DRM_MEM_FILES);
}
}
lockmgr(&dev->dev_lock, LK_RELEASE, 0, p);
return 0;
}
/* The drm_read and drm_write_string code (especially that which manages
the circular buffer) is based on Alessandro Rubini's LINUX DEVICE
DRIVERS (Cambridge: O'Reilly, 1998), pages 111-113.  A standalone sketch
of the buffer arithmetic follows drm_write_string below. */
ssize_t drm_read(dev_t kdev, struct uio *uio, int ioflag)
{
drm_device_t *dev = kdev->si_drv1;
int left;
int avail;
int send;
int cur;
int error = 0;
DRM_DEBUG("%p, %p\n", dev->buf_rp, dev->buf_wp);
while (dev->buf_rp == dev->buf_wp) {
DRM_DEBUG(" sleeping\n");
if (dev->flags & FASYNC) {
return EWOULDBLOCK;
}
error = tsleep(&dev->buf_rp, PZERO|PCATCH, "drmrd", 0);
if (error) {
DRM_DEBUG(" interrupted\n");
return error;
}
DRM_DEBUG(" awake\n");
}
left = (dev->buf_rp + DRM_BSZ - dev->buf_wp) % DRM_BSZ;
avail = DRM_BSZ - left;
send = DRM_MIN(avail, uio->uio_resid);
while (send) {
if (dev->buf_wp > dev->buf_rp) {
cur = DRM_MIN(send, dev->buf_wp - dev->buf_rp);
} else {
cur = DRM_MIN(send, dev->buf_end - dev->buf_rp);
}
error = uiomove(dev->buf_rp, cur, uio);
if (error)
break;
dev->buf_rp += cur;
if (dev->buf_rp == dev->buf_end) dev->buf_rp = dev->buf;
send -= cur;
}
wakeup(&dev->buf_wp);
return error;
}
int drm_write_string(drm_device_t *dev, const char *s)
{
int left = (dev->buf_rp + DRM_BSZ - dev->buf_wp) % DRM_BSZ;
int send = strlen(s);
int count;
DRM_DEBUG("%d left, %d to send (%p, %p)\n",
left, send, dev->buf_rp, dev->buf_wp);
if (left == 1 || dev->buf_wp != dev->buf_rp) {
DRM_ERROR("Buffer not empty (%d left, wp = %p, rp = %p)\n",
left,
dev->buf_wp,
dev->buf_rp);
}
while (send) {
if (dev->buf_wp >= dev->buf_rp) {
count = DRM_MIN(send, dev->buf_end - dev->buf_wp);
if (count == left) --count; /* Leave a hole */
} else {
count = DRM_MIN(send, dev->buf_rp - dev->buf_wp - 1);
}
strncpy(dev->buf_wp, s, count);
dev->buf_wp += count;
if (dev->buf_wp == dev->buf_end) dev->buf_wp = dev->buf;
send -= count;
}
if (dev->buf_selecting) {
dev->buf_selecting = 0;
selwakeup(&dev->buf_sel);
}
DRM_DEBUG("dev->buf_sigio=%p\n", dev->buf_sigio);
if (dev->buf_sigio) {
DRM_DEBUG("dev->buf_sigio->sio_pgid=%d\n", dev->buf_sigio->sio_pgid);
pgsigio(dev->buf_sigio, SIGIO, 0);
}
DRM_DEBUG("waking\n");
wakeup(&dev->buf_rp);
return 0;
}
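/* For illustration only -- a minimal userland sketch (not driver code) of
 * the "leave one hole" circular buffer arithmetic used by drm_read and
 * drm_write_string above.  With one slot kept free, rp == wp always means
 * "empty"; the expressions below are equivalent to the DRM_BSZ arithmetic
 * above once the buffer is known to be non-empty.  BSZ, rp and wp are
 * stand-ins for DRM_BSZ, dev->buf_rp and dev->buf_wp. */
#include <assert.h>
#include <stdio.h>

#define BSZ 8                           /* stand-in for DRM_BSZ */

static char ring[BSZ];
static int rp, wp;                      /* read and write indices */

static int ring_avail(void)             /* bytes ready to read */
{
        return (wp + BSZ - rp) % BSZ;   /* == BSZ - ((rp + BSZ - wp) % BSZ) when non-empty */
}

static int ring_space(void)             /* bytes that may still be written */
{
        return (rp + BSZ - wp - 1) % BSZ;   /* one slot always stays free */
}

static void ring_put(char c)
{
        assert(ring_space() > 0);
        ring[wp] = c;
        if (++wp == BSZ) wp = 0;        /* wrap, like buf_wp == buf_end above */
}

static char ring_get(void)
{
        char c;

        assert(ring_avail() > 0);
        c = ring[rp];
        if (++rp == BSZ) rp = 0;
        return c;
}

int main(void)
{
        const char *s = "drm";
        int i;

        for (i = 0; s[i] != '\0'; i++)
                ring_put(s[i]);
        printf("avail = %d, space = %d\n", ring_avail(), ring_space()); /* 3, 4 */
        while (ring_avail() > 0)
                putchar(ring_get());
        putchar('\n');
        return 0;
}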
int drm_poll(dev_t kdev, int events, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
int s;
int revents = 0;
s = spldrm();
if (events & (POLLIN | POLLRDNORM)) {
int left = (dev->buf_rp + DRM_BSZ - dev->buf_wp) % DRM_BSZ;
if (left > 0)
revents |= events & (POLLIN | POLLRDNORM);
else
selrecord(p, &dev->buf_sel);
}
splx(s);
return revents;
}

View File

@ -1,100 +0,0 @@
/* init.c -- Setup/Cleanup for DRM -*- c -*-
* Created: Mon Jan 4 08:58:31 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
*
*/
#define __NO_VERSION__
#include "drmP.h"
MODULE_VERSION(drm, 1);
int drm_flags = 0;
/* drm_parse_option parses a single option. See the description of
drm_parse_options below for details. */
static void drm_parse_option(char *s)
{
char *c, *r;
DRM_DEBUG("\"%s\"\n", s);
if (!s || !*s) return;
for (c = s; *c && *c != ':'; c++); /* find : or \0 */
if (*c) r = c + 1; else r = NULL; /* remember remainder */
*c = '\0'; /* terminate */
if (!strcmp(s, "noctx")) {
drm_flags |= DRM_FLAG_NOCTX;
DRM_INFO("Server-mediated context switching OFF\n");
return;
}
if (!strcmp(s, "debug")) {
drm_flags |= DRM_FLAG_DEBUG;
DRM_INFO("Debug messages ON\n");
return;
}
DRM_ERROR("\"%s\" is not a valid option\n", s);
return;
}
/* drm_parse_options parse the insmod "drm=" options, or the command-line
* options passed to the kernel via LILO. The grammar of the format is as
* follows:
*
* drm ::= 'drm=' option_list
* option_list ::= option [ ';' option_list ]
* option ::= 'device:' major
* | 'debug'
* | 'noctx'
* major ::= INTEGER
*
* Note that 's' contains option_list without the 'drm=' part.
*
* device=major,minor specifies the device number used for /dev/drm
* if major == 0 then the misc device is used
* if major == 0 and minor == 0 then dynamic misc allocation is used
* debug=on specifies that debugging messages will be printk'd
* debug=trace specifies that each function call will be logged via printk
* debug=off turns off all debugging options
*
*/
void drm_parse_options(char *s)
{
char *h, *t, *n;
DRM_DEBUG("\"%s\"\n", s ?: "");
if (!s || !*s) return;
for (h = t = n = s; h && *h; h = n) {
for (; *t && *t != ';'; t++); /* find ; or \0 */
if (*t) n = t + 1; else n = NULL; /* remember next */
*t = '\0'; /* terminate */
drm_parse_option(h); /* parse */
}
}
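/* For illustration only -- a userland sketch (not module code) of the
 * tokenization described in the comment above: the option string is split
 * on ';' and each piece is then split on ':' into a name and an optional
 * argument.  The sample string and the "device:145" argument are made up. */
#include <stdio.h>
#include <string.h>

static void parse_one(char *s)
{
        char *c, *r;

        if (!s || !*s)
                return;
        for (c = s; *c && *c != ':'; c++);      /* find ':' or '\0' */
        if (*c) r = c + 1; else r = NULL;       /* remainder, if any */
        *c = '\0';
        if (r)
                printf("option \"%s\", argument \"%s\"\n", s, r);
        else
                printf("option \"%s\"\n", s);
}

int main(void)
{
        char opts[] = "debug;noctx;device:145";  /* sample "drm=" option_list */
        char *h, *t, *n;

        for (h = t = n = opts; h && *h; h = n, t = h) {
                for (; *t && *t != ';'; t++);   /* find ';' or '\0' */
                if (*t) n = t + 1; else n = NULL;
                *t = '\0';
                parse_one(h);
        }
        return 0;
}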

View File

@ -1,120 +0,0 @@
/* ioctl.c -- IOCTL processing for DRM -*- c -*-
* Created: Fri Jan 8 09:01:26 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
*
*/
#define __NO_VERSION__
#include "drmP.h"
#include <sys/bus.h>
#include <pci/pcivar.h>
int
drm_irq_busid(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_irq_busid_t id;
devclass_t pci;
device_t bus, dev;
device_t *kids;
int error, i, num_kids;
id = *(drm_irq_busid_t *) data;
pci = devclass_find("pci");
if (!pci)
return ENOENT;
bus = devclass_get_device(pci, id.busnum);
if (!bus)
return ENOENT;
error = device_get_children(bus, &kids, &num_kids);
if (error)
return error;
dev = 0;
for (i = 0; i < num_kids; i++) {
dev = kids[i];
if (pci_get_slot(dev) == id.devnum
&& pci_get_function(dev) == id.funcnum)
break;
}
free(kids, M_TEMP);
if (i != num_kids)
id.irq = pci_get_irq(dev);
else
id.irq = 0;
DRM_DEBUG("%d:%d:%d => IRQ %d\n",
id.busnum, id.devnum, id.funcnum, id.irq);
*(drm_irq_busid_t *) data = id;
return 0;
}
int
drm_getunique(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
drm_unique_t u;
int error;
u = *(drm_unique_t *) data;
if (u.unique_len >= dev->unique_len) {
error = copyout(dev->unique, u.unique, dev->unique_len);
if (error)
return error;
}
u.unique_len = dev->unique_len;
*(drm_unique_t *) data = u;
return 0;
}
int
drm_setunique(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
drm_unique_t u;
int error;
if (dev->unique_len || dev->unique) return EBUSY;
u = *(drm_unique_t *) data;
dev->unique_len = u.unique_len;
dev->unique = drm_alloc(u.unique_len + 1, DRM_MEM_DRIVER);
error = copyin(u.unique, dev->unique, dev->unique_len);
if (error)
return error;
dev->unique[dev->unique_len] = '\0';
dev->devname = drm_alloc(strlen(dev->name) + strlen(dev->unique) + 2,
DRM_MEM_DRIVER);
sprintf(dev->devname, "%s@%s", dev->name, dev->unique);
return 0;
}
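/* For illustration only -- drm_setunique above is the kernel side of what
 * libdrm exposes as drmSetBusid()/drmGetBusid(); the X server normally
 * performs this once at startup with a bus ID string such as "PCI:1:0:0".
 * The driver name and bus ID below are examples only, and setting the bus
 * ID requires sufficient privilege. */
#include <stdio.h>
#include <xf86drm.h>

int main(void)
{
        int fd = drmOpen("tdfx", NULL);         /* any attached DRM driver name */
        char *busid;

        if (fd < 0) {
                fprintf(stderr, "drmOpen failed\n");
                return 1;
        }
        if (drmSetBusid(fd, "PCI:1:0:0") != 0)  /* fails if already set */
                fprintf(stderr, "drmSetBusid failed (perhaps already set)\n");
        if ((busid = drmGetBusid(fd)) != NULL) {
                printf("bus id: %s\n", busid);
                drmFreeBusid(busid);
        }
        drmClose(fd);
        return 0;
}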

View File

@ -1,278 +0,0 @@
/* lists.c -- Buffer list handling routines -*- c -*-
* Created: Mon Apr 19 20:54:22 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
*
*/
#define __NO_VERSION__
#include "drmP.h"
int drm_waitlist_create(drm_waitlist_t *bl, int count)
{
DRM_DEBUG("%d\n", count);
if (bl->count) return EINVAL;
bl->count = count;
bl->bufs = drm_alloc((bl->count + 2) * sizeof(*bl->bufs),
DRM_MEM_BUFLISTS);
bl->rp = bl->bufs;
bl->wp = bl->bufs;
bl->end = &bl->bufs[bl->count+1];
simple_lock_init(&bl->write_lock);
simple_lock_init(&bl->read_lock);
return 0;
}
int drm_waitlist_destroy(drm_waitlist_t *bl)
{
DRM_DEBUG("\n");
if (bl->rp != bl->wp) return EINVAL;
if (bl->bufs) drm_free(bl->bufs,
(bl->count + 2) * sizeof(*bl->bufs),
DRM_MEM_BUFLISTS);
bl->count = 0;
bl->bufs = NULL;
bl->rp = NULL;
bl->wp = NULL;
bl->end = NULL;
return 0;
}
int drm_waitlist_put(drm_waitlist_t *bl, drm_buf_t *buf)
{
int left;
int s;
left = DRM_LEFTCOUNT(bl);
DRM_DEBUG("put %d (%d left, rp = %p, wp = %p)\n",
buf->idx, left, bl->rp, bl->wp);
if (!left) {
DRM_ERROR("Overflow while adding buffer %d from pid %d\n",
buf->idx, buf->pid);
return EINVAL;
}
#if DRM_DMA_HISTOGRAM
getnanotime(&buf->time_queued);
#endif
buf->list = DRM_LIST_WAIT;
simple_lock(&bl->write_lock);
s = spldrm();
*bl->wp = buf;
if (++bl->wp >= bl->end) bl->wp = bl->bufs;
splx(s);
simple_unlock(&bl->write_lock);
return 0;
}
drm_buf_t *drm_waitlist_get(drm_waitlist_t *bl)
{
drm_buf_t *buf;
int s;
simple_lock(&bl->read_lock);
s = spldrm();
buf = *bl->rp;
if (bl->rp == bl->wp) {
splx(s);
simple_unlock(&bl->read_lock);
return NULL;
}
if (++bl->rp >= bl->end) bl->rp = bl->bufs;
splx(s);
simple_unlock(&bl->read_lock);
DRM_DEBUG("get %d\n", buf->idx);
return buf;
}
int drm_freelist_create(drm_freelist_t *bl, int count)
{
DRM_DEBUG("\n");
atomic_set(&bl->count, 0);
bl->next = NULL;
bl->waiting = 0;
bl->low_mark = 0;
bl->high_mark = 0;
atomic_set(&bl->wfh, 0);
/* bl->lock = SPIN_LOCK_UNLOCKED; */
++bl->initialized;
return 0;
}
int drm_freelist_destroy(drm_freelist_t *bl)
{
DRM_DEBUG("\n");
atomic_set(&bl->count, 0);
bl->next = NULL;
return 0;
}
int drm_freelist_put(drm_device_t *dev, drm_freelist_t *bl, drm_buf_t *buf)
{
unsigned int old;
unsigned int new;
char failed;
int count = 0;
drm_device_dma_t *dma = dev->dma;
if (!dma) {
DRM_ERROR("No DMA support\n");
return 1;
}
if (buf->waiting || buf->pending || buf->list == DRM_LIST_FREE) {
DRM_ERROR("Freed buffer %d: w%d, p%d, l%d\n",
buf->idx, buf->waiting, buf->pending, buf->list);
}
DRM_DEBUG("%d, count = %d, wfh = %d, w%d, p%d\n",
buf->idx, atomic_read(&bl->count), atomic_read(&bl->wfh),
buf->waiting, buf->pending);
if (!bl) return 1;
#if DRM_DMA_HISTOGRAM
getnanotime(&buf->time_freed);
drm_histogram_compute(dev, buf);
#endif
buf->list = DRM_LIST_FREE;
/*
do {
old = (unsigned long)bl->next;
buf->next = (void *)old;
new = (unsigned long)buf;
_DRM_CAS(&bl->next, old, new, failed);
if (++count > DRM_LOOPING_LIMIT) {
DRM_ERROR("Looping\n");
return 1;
}
} while (failed);
*/
simple_lock(&bl->lock);
buf->next = bl->next;
bl->next = buf;
simple_unlock(&bl->lock);
atomic_inc(&bl->count);
if (atomic_read(&bl->count) > dma->buf_count) {
DRM_ERROR("%d of %d buffers free after addition of %d\n",
atomic_read(&bl->count), dma->buf_count, buf->idx);
return 1;
}
/* Check for high water mark */
if (atomic_read(&bl->wfh) && atomic_read(&bl->count)>=bl->high_mark) {
atomic_set(&bl->wfh, 0);
if (bl->waiting)
wakeup(&bl->waiting);
}
return 0;
}
static drm_buf_t *drm_freelist_try(drm_freelist_t *bl)
{
unsigned int old;
unsigned int new;
char failed;
drm_buf_t *buf;
int count = 0;
if (!bl) return NULL;
/* Get buffer */
/*
do {
old = (unsigned int)bl->next;
if (!old) {
return NULL;
}
new = (unsigned long)bl->next->next;
_DRM_CAS(&bl->next, old, new, failed);
if (++count > DRM_LOOPING_LIMIT) {
DRM_ERROR("Looping\n");
return NULL;
}
} while (failed);
atomic_dec(&bl->count);
buf = (drm_buf_t *)old;
*/
simple_lock(&bl->lock);
if(!bl->next){
simple_unlock(&bl->lock);
return NULL;
}
buf = bl->next;
bl->next = bl->next->next;
simple_unlock(&bl->lock);
atomic_dec(&bl->count);
buf->next = NULL;
buf->list = DRM_LIST_NONE;
DRM_DEBUG("%d, count = %d, wfh = %d, w%d, p%d\n",
buf->idx, atomic_read(&bl->count), atomic_read(&bl->wfh),
buf->waiting, buf->pending);
if (buf->waiting || buf->pending) {
DRM_ERROR("Free buffer %d: w%d, p%d, l%d\n",
buf->idx, buf->waiting, buf->pending, buf->list);
}
return buf;
}
drm_buf_t *drm_freelist_get(drm_freelist_t *bl, int block)
{
drm_buf_t *buf = NULL;
int error;
if (!bl || !bl->initialized) return NULL;
/* Check for low water mark */
if (atomic_read(&bl->count) <= bl->low_mark) /* Became low */
atomic_set(&bl->wfh, 1);
if (atomic_read(&bl->wfh)) {
DRM_DEBUG("Block = %d, count = %d, wfh = %d\n",
block, atomic_read(&bl->count),
atomic_read(&bl->wfh));
if (block) {
atomic_inc(&bl->waiting);
for (;;) {
if (!atomic_read(&bl->wfh)
&& (buf = drm_freelist_try(bl))) break;
error = tsleep(&bl->waiting, PZERO|PCATCH,
"drmfg", 0);
if (error)
break;
}
atomic_dec(&bl->waiting);
}
return buf;
}
DRM_DEBUG("Count = %d, wfh = %d\n",
atomic_read(&bl->count), atomic_read(&bl->wfh));
return drm_freelist_try(bl);
}
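/* For illustration only -- a plain-integer trace (not driver code) of the
 * low/high watermark hysteresis used above: once the free count falls to
 * low_mark, the wfh ("wait for high") flag is set and drm_freelist_get()
 * blocks; drm_freelist_put() clears wfh again only after the count has
 * climbed back to high_mark.  The counts and marks below are arbitrary. */
#include <stdio.h>

int main(void)
{
        int count = 8, low_mark = 2, high_mark = 6, wfh = 0;
        int step;

        for (step = 0; step < 12; step++) {
                if (step < 7) {                 /* drain phase: one "get" per step */
                        if (!wfh)
                                count--;        /* a blocked getter takes nothing */
                        if (count <= low_mark)
                                wfh = 1;
                } else {                        /* refill phase: one "put" per step */
                        count++;
                        if (wfh && count >= high_mark)
                                wfh = 0;
                }
                printf("step %2d: count = %d, wfh = %d (%s)\n", step, count, wfh,
                       wfh ? "getters sleep" : "getters run");
        }
        return 0;
}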

View File

@ -1,223 +0,0 @@
/* lock.c -- IOCTLs for locking -*- c -*-
* Created: Tue Feb 2 08:37:54 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
*
*/
#define __NO_VERSION__
#include "drmP.h"
int
drm_block(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
DRM_DEBUG("\n");
return 0;
}
int
drm_unblock(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
DRM_DEBUG("\n");
return 0;
}
int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
{
unsigned int old;
unsigned int new;
char failed;
DRM_DEBUG("%d attempts\n", context);
do {
old = *lock;
if (old & _DRM_LOCK_HELD) new = old | _DRM_LOCK_CONT;
else new = context | _DRM_LOCK_HELD;
_DRM_CAS(lock, old, new, failed);
} while (failed);
if (_DRM_LOCKING_CONTEXT(old) == context) {
if (old & _DRM_LOCK_HELD) {
if (context != DRM_KERNEL_CONTEXT) {
DRM_ERROR("%d holds heavyweight lock\n",
context);
}
return 0;
}
}
if (new == (context | _DRM_LOCK_HELD)) {
/* Have lock */
DRM_DEBUG("%d\n", context);
return 1;
}
DRM_DEBUG("%d unable to get lock held by %d\n",
context, _DRM_LOCKING_CONTEXT(old));
return 0;
}
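/* For illustration only -- a userland sketch (not driver code) of the
 * compare-and-swap loop above.  The hardware lock is one 32-bit word in
 * the shared SAREA: the top bit marks it held, the next bit marks
 * contention, and the low bits carry the holder's context, which is the
 * layout the _DRM_LOCK_* macros assume.  C11 atomics stand in for
 * _DRM_CAS here. */
#include <stdatomic.h>
#include <stdio.h>

#define LOCK_HELD 0x80000000U           /* stand-in for _DRM_LOCK_HELD */
#define LOCK_CONT 0x40000000U           /* stand-in for _DRM_LOCK_CONT */
#define LOCK_CTX(x) ((x) & ~(LOCK_HELD | LOCK_CONT))

static int lock_take(atomic_uint *lock, unsigned int context)
{
        unsigned int old, new;

        do {
                old = atomic_load(lock);
                if (old & LOCK_HELD)
                        new = old | LOCK_CONT;          /* mark contention */
                else
                        new = context | LOCK_HELD;      /* try to take it */
        } while (!atomic_compare_exchange_weak(lock, &old, new));
        return new == (context | LOCK_HELD);            /* 1 iff we now hold it */
}

int main(void)
{
        atomic_uint lock = 0;
        unsigned int word;

        printf("context 3 takes the lock: %d\n", lock_take(&lock, 3));  /* 1 */
        printf("context 4 takes the lock: %d\n", lock_take(&lock, 4));  /* 0, sets CONT */
        word = atomic_load(&lock);
        printf("lock word 0x%08x, holder %u\n", word, LOCK_CTX(word));
        return 0;
}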
/* This takes a lock forcibly and hands it to context. Should ONLY be used
inside *_unlock to give lock to kernel before calling *_dma_schedule. */
int drm_lock_transfer(drm_device_t *dev,
__volatile__ unsigned int *lock, unsigned int context)
{
unsigned int old;
unsigned int new;
char failed;
dev->lock.pid = 0;
do {
old = *lock;
new = context | _DRM_LOCK_HELD;
_DRM_CAS(lock, old, new, failed);
} while (failed);
DRM_DEBUG("%d => %d\n", _DRM_LOCKING_CONTEXT(old), context);
return 1;
}
int drm_lock_free(drm_device_t *dev,
__volatile__ unsigned int *lock, unsigned int context)
{
unsigned int old;
unsigned int new;
char failed;
pid_t pid = dev->lock.pid;
DRM_DEBUG("%d\n", context);
dev->lock.pid = 0;
do {
old = *lock;
new = 0;
_DRM_CAS(lock, old, new, failed);
} while (failed);
if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
DRM_ERROR("%d freed heavyweight lock held by %d (pid %d)\n",
context,
_DRM_LOCKING_CONTEXT(old),
pid);
return 1;
}
wakeup(&dev->lock.lock_queue);
return 0;
}
static int drm_flush_queue(drm_device_t *dev, int context)
{
int ret = 0;
int error;
drm_queue_t *q = dev->queuelist[context];
DRM_DEBUG("\n");
atomic_inc(&q->use_count);
if (atomic_read(&q->use_count) > 1) {
atomic_inc(&q->block_write);
atomic_inc(&q->block_count);
for (;;) {
if (!DRM_BUFCOUNT(&q->waitlist)) break;
error = tsleep(&q->flush_queue, PCATCH|PZERO, "drmfq", 0);
if (error)
return error;
}
atomic_dec(&q->block_count);
}
atomic_dec(&q->use_count);
atomic_inc(&q->total_flushed);
/* NOTE: block_write is still incremented!
Use drm_flush_unblock_queue to decrement. */
return ret;
}
static int drm_flush_unblock_queue(drm_device_t *dev, int context)
{
drm_queue_t *q = dev->queuelist[context];
DRM_DEBUG("\n");
atomic_inc(&q->use_count);
if (atomic_read(&q->use_count) > 1) {
if (atomic_read(&q->block_write)) {
atomic_dec(&q->block_write);
wakeup(&q->write_queue);
}
}
atomic_dec(&q->use_count);
return 0;
}
int drm_flush_block_and_flush(drm_device_t *dev, int context,
drm_lock_flags_t flags)
{
int ret = 0;
int i;
DRM_DEBUG("\n");
if (flags & _DRM_LOCK_FLUSH) {
ret = drm_flush_queue(dev, DRM_KERNEL_CONTEXT);
if (!ret) ret = drm_flush_queue(dev, context);
}
if (flags & _DRM_LOCK_FLUSH_ALL) {
for (i = 0; !ret && i < dev->queue_count; i++) {
ret = drm_flush_queue(dev, i);
}
}
return ret;
}
int drm_flush_unblock(drm_device_t *dev, int context, drm_lock_flags_t flags)
{
int ret = 0;
int i;
DRM_DEBUG("\n");
if (flags & _DRM_LOCK_FLUSH) {
ret = drm_flush_unblock_queue(dev, DRM_KERNEL_CONTEXT);
if (!ret) ret = drm_flush_unblock_queue(dev, context);
}
if (flags & _DRM_LOCK_FLUSH_ALL) {
for (i = 0; !ret && i < dev->queue_count; i++) {
ret = drm_flush_unblock_queue(dev, i);
}
}
return ret;
}
int drm_finish(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
int ret = 0;
drm_lock_t lock;
DRM_DEBUG("\n");
lock = *(drm_lock_t *) data;
ret = drm_flush_block_and_flush(dev, lock.context, lock.flags);
drm_flush_unblock(dev, lock.context, lock.flags);
return ret;
}

View File

@ -1,458 +0,0 @@
/* memory.c -- Memory management wrappers for DRM -*- c -*-
* Created: Thu Feb 4 14:00:34 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
*
*/
#define __NO_VERSION__
#include "drmP.h"
#include <vm/vm.h>
#include <vm/pmap.h>
#ifdef DRM_AGP
#include <sys/agpio.h>
#endif
MALLOC_DEFINE(M_DRM, "drm", "DRM Data Structures");
typedef struct drm_mem_stats {
const char *name;
int succeed_count;
int free_count;
int fail_count;
unsigned long bytes_allocated;
unsigned long bytes_freed;
} drm_mem_stats_t;
#ifdef SMP
static struct simplelock drm_mem_lock;
#endif
static unsigned long drm_ram_available = 0;
static unsigned long drm_ram_used = 0;
static drm_mem_stats_t drm_mem_stats[] = {
[DRM_MEM_DMA] = { "dmabufs" },
[DRM_MEM_SAREA] = { "sareas" },
[DRM_MEM_DRIVER] = { "driver" },
[DRM_MEM_MAGIC] = { "magic" },
[DRM_MEM_IOCTLS] = { "ioctltab" },
[DRM_MEM_MAPS] = { "maplist" },
[DRM_MEM_VMAS] = { "vmalist" },
[DRM_MEM_BUFS] = { "buflist" },
[DRM_MEM_SEGS] = { "seglist" },
[DRM_MEM_PAGES] = { "pagelist" },
[DRM_MEM_FILES] = { "files" },
[DRM_MEM_QUEUES] = { "queues" },
[DRM_MEM_CMDS] = { "commands" },
[DRM_MEM_MAPPINGS] = { "mappings" },
[DRM_MEM_BUFLISTS] = { "buflists" },
[DRM_MEM_AGPLISTS] = { "agplist" },
[DRM_MEM_TOTALAGP] = { "totalagp" },
[DRM_MEM_BOUNDAGP] = { "boundagp" },
[DRM_MEM_CTXBITMAP] = { "ctxbitmap"},
{ NULL, 0, } /* Last entry must be null */
};
void drm_mem_init(void)
{
drm_mem_stats_t *mem;
for (mem = drm_mem_stats; mem->name; ++mem) {
mem->succeed_count = 0;
mem->free_count = 0;
mem->fail_count = 0;
mem->bytes_allocated = 0;
mem->bytes_freed = 0;
}
drm_ram_available = 0; /* si.totalram; */
drm_ram_used = 0;
}
/* drm_mem_info is called whenever a process reads the hw.dri.<unit>.mem sysctl. */
static int _drm_mem_info DRM_SYSCTL_HANDLER_ARGS
{
drm_mem_stats_t *pt;
char buf[128];
int error;
DRM_SYSCTL_PRINT(" total counts "
" | outstanding \n");
DRM_SYSCTL_PRINT("type alloc freed fail bytes freed"
" | allocs bytes\n\n");
DRM_SYSCTL_PRINT("%-9.9s %5d %5d %4d %10lu |\n",
"system", 0, 0, 0, drm_ram_available);
DRM_SYSCTL_PRINT("%-9.9s %5d %5d %4d %10lu |\n",
"locked", 0, 0, 0, drm_ram_used);
DRM_SYSCTL_PRINT("\n");
for (pt = drm_mem_stats; pt->name; pt++) {
DRM_SYSCTL_PRINT("%-9.9s %5d %5d %4d %10lu %10lu | %6d %10ld\n",
pt->name,
pt->succeed_count,
pt->free_count,
pt->fail_count,
pt->bytes_allocated,
pt->bytes_freed,
pt->succeed_count - pt->free_count,
(long)pt->bytes_allocated
- (long)pt->bytes_freed);
}
SYSCTL_OUT(req, "", 1);
return 0;
}
int drm_mem_info DRM_SYSCTL_HANDLER_ARGS
{
int ret;
simple_lock(&drm_mem_lock);
ret = _drm_mem_info(oidp, arg1, arg2, req);
simple_unlock(&drm_mem_lock);
return ret;
}
void *drm_alloc(size_t size, int area)
{
void *pt;
if (!size) {
DRM_MEM_ERROR(area, "Allocating 0 bytes\n");
return NULL;
}
if (!(pt = malloc(size, M_DRM, M_NOWAIT))) {
simple_lock(&drm_mem_lock);
++drm_mem_stats[area].fail_count;
simple_unlock(&drm_mem_lock);
return NULL;
}
simple_lock(&drm_mem_lock);
++drm_mem_stats[area].succeed_count;
drm_mem_stats[area].bytes_allocated += size;
simple_unlock(&drm_mem_lock);
return pt;
}
void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area)
{
void *pt;
if (!(pt = drm_alloc(size, area))) return NULL;
if (oldpt && oldsize) {
memcpy(pt, oldpt, oldsize);
drm_free(oldpt, oldsize, area);
}
return pt;
}
char *drm_strdup(const char *s, int area)
{
char *pt;
int length = s ? strlen(s) : 0;
if (!(pt = drm_alloc(length+1, area))) return NULL;
strcpy(pt, s);
return pt;
}
void drm_strfree(char *s, int area)
{
unsigned int size;
if (!s) return;
size = 1 + (s ? strlen(s) : 0);
drm_free((void *)s, size, area);
}
void drm_free(void *pt, size_t size, int area)
{
int alloc_count;
int free_count;
if (!pt) DRM_MEM_ERROR(area, "Attempt to free NULL pointer\n");
else free(pt, M_DRM);
simple_lock(&drm_mem_lock);
drm_mem_stats[area].bytes_freed += size;
free_count = ++drm_mem_stats[area].free_count;
alloc_count = drm_mem_stats[area].succeed_count;
simple_unlock(&drm_mem_lock);
if (free_count > alloc_count) {
DRM_MEM_ERROR(area, "Excess frees: %d frees, %d allocs\n",
free_count, alloc_count);
}
}
unsigned long drm_alloc_pages(int order, int area)
{
vm_offset_t address;
unsigned long bytes = PAGE_SIZE << order;
unsigned long addr;
unsigned int sz;
simple_lock(&drm_mem_lock);
if (drm_ram_used > +(DRM_RAM_PERCENT * drm_ram_available) / 100) {
simple_unlock(&drm_mem_lock);
return 0;
}
simple_unlock(&drm_mem_lock);
address = (vm_offset_t) contigmalloc(bytes, M_DRM, M_WAITOK, 0, ~0, 1, 0);
if (!address) {
simple_lock(&drm_mem_lock);
++drm_mem_stats[area].fail_count;
simple_unlock(&drm_mem_lock);
return 0;
}
simple_lock(&drm_mem_lock);
++drm_mem_stats[area].succeed_count;
drm_mem_stats[area].bytes_allocated += bytes;
drm_ram_used += bytes;
simple_unlock(&drm_mem_lock);
/* Zero outside the lock */
memset((void *)address, 0, bytes);
/* Reserve */
for (addr = address, sz = bytes;
sz > 0;
addr += PAGE_SIZE, sz -= PAGE_SIZE) {
/* mem_map_reserve(MAP_NR(addr));*/
}
return address;
}
void drm_free_pages(unsigned long address, int order, int area)
{
unsigned long bytes = PAGE_SIZE << order;
int alloc_count;
int free_count;
unsigned long addr;
unsigned int sz;
if (!address) {
DRM_MEM_ERROR(area, "Attempt to free address 0\n");
} else {
/* Unreserve */
for (addr = address, sz = bytes;
sz > 0;
addr += PAGE_SIZE, sz -= PAGE_SIZE) {
/* mem_map_unreserve(MAP_NR(addr));*/
}
contigfree((void *) address, bytes, M_DRM);
}
simple_lock(&drm_mem_lock);
free_count = ++drm_mem_stats[area].free_count;
alloc_count = drm_mem_stats[area].succeed_count;
drm_mem_stats[area].bytes_freed += bytes;
drm_ram_used -= bytes;
simple_unlock(&drm_mem_lock);
if (free_count > alloc_count) {
DRM_MEM_ERROR(area,
"Excess frees: %d frees, %d allocs\n",
free_count, alloc_count);
}
}
void *drm_ioremap(unsigned long offset, unsigned long size)
{
void *pt;
if (!size) {
DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
"Mapping 0 bytes at 0x%08lx\n", offset);
return NULL;
}
if (!(pt = pmap_mapdev(offset, size))) {
simple_lock(&drm_mem_lock);
++drm_mem_stats[DRM_MEM_MAPPINGS].fail_count;
simple_unlock(&drm_mem_lock);
return NULL;
}
simple_lock(&drm_mem_lock);
++drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
drm_mem_stats[DRM_MEM_MAPPINGS].bytes_allocated += size;
simple_unlock(&drm_mem_lock);
return pt;
}
void drm_ioremapfree(void *pt, unsigned long size)
{
int alloc_count;
int free_count;
if (!pt)
DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
"Attempt to free NULL pointer\n");
else
pmap_unmapdev((vm_offset_t) pt, size);
simple_lock(&drm_mem_lock);
drm_mem_stats[DRM_MEM_MAPPINGS].bytes_freed += size;
free_count = ++drm_mem_stats[DRM_MEM_MAPPINGS].free_count;
alloc_count = drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
simple_unlock(&drm_mem_lock);
if (free_count > alloc_count) {
DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
"Excess frees: %d frees, %d allocs\n",
free_count, alloc_count);
}
}
#ifdef DRM_AGP
void *drm_alloc_agp(int pages, u_int32_t type)
{
device_t dev = agp_find_device();
void *handle;
if (!dev)
return NULL;
if (!pages) {
DRM_MEM_ERROR(DRM_MEM_TOTALAGP, "Allocating 0 pages\n");
return NULL;
}
if ((handle = agp_alloc_memory(dev, type, pages << AGP_PAGE_SHIFT))) {
simple_lock(&drm_mem_lock);
++drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count;
drm_mem_stats[DRM_MEM_TOTALAGP].bytes_allocated
+= pages << PAGE_SHIFT;
simple_unlock(&drm_mem_lock);
return handle;
}
simple_lock(&drm_mem_lock);
++drm_mem_stats[DRM_MEM_TOTALAGP].fail_count;
simple_unlock(&drm_mem_lock);
return NULL;
}
int drm_free_agp(void *handle, int pages)
{
device_t dev = agp_find_device();
int alloc_count;
int free_count;
int retval = EINVAL;
if (!dev)
return EINVAL;
if (!handle) {
DRM_MEM_ERROR(DRM_MEM_TOTALAGP,
"Attempt to free NULL AGP handle\n");
return retval;
}
agp_free_memory(dev, handle);
simple_lock(&drm_mem_lock);
free_count = ++drm_mem_stats[DRM_MEM_TOTALAGP].free_count;
alloc_count = drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count;
drm_mem_stats[DRM_MEM_TOTALAGP].bytes_freed
+= pages << PAGE_SHIFT;
simple_unlock(&drm_mem_lock);
if (free_count > alloc_count) {
DRM_MEM_ERROR(DRM_MEM_TOTALAGP,
"Excess frees: %d frees, %d allocs\n",
free_count, alloc_count);
}
return 0;
}
int drm_bind_agp(void *handle, unsigned int start)
{
device_t dev = agp_find_device();
int retcode = EINVAL;
struct agp_memory_info info;
DRM_DEBUG("drm_bind_agp called\n");
if (!dev)
return EINVAL;
if (!handle) {
DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
"Attempt to bind NULL AGP handle\n");
return retcode;
}
if (!(retcode = agp_bind_memory(dev, handle,
start << AGP_PAGE_SHIFT))) {
simple_lock(&drm_mem_lock);
++drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count;
agp_memory_info(dev, handle, &info);
drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_allocated
+= info.ami_size;
simple_unlock(&drm_mem_lock);
DRM_DEBUG("drm_agp.bind_memory: retcode %d\n", retcode);
return retcode;
}
simple_lock(&drm_mem_lock);
++drm_mem_stats[DRM_MEM_BOUNDAGP].fail_count;
simple_unlock(&drm_mem_lock);
return retcode;
}
int drm_unbind_agp(void *handle)
{
device_t dev = agp_find_device();
int alloc_count;
int free_count;
int retcode = EINVAL;
struct agp_memory_info info;
if (!dev)
return EINVAL;
if (!handle) {
DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
"Attempt to unbind NULL AGP handle\n");
return retcode;
}
agp_memory_info(dev, handle, &info);
if ((retcode = agp_unbind_memory(dev, handle)))
return retcode;
simple_lock(&drm_mem_lock);
free_count = ++drm_mem_stats[DRM_MEM_BOUNDAGP].free_count;
alloc_count = drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count;
drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_freed += info.ami_size;
simple_unlock(&drm_mem_lock);
if (free_count > alloc_count) {
DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
"Excess frees: %d frees, %d allocs\n",
free_count, alloc_count);
}
return retcode;
}
#endif

View File

@ -1,568 +0,0 @@
/* proc.c -- /proc support for DRM -*- c -*-
* Created: Mon Jan 11 09:48:47 1999 by faith@precisioninsight.com
* Revised: Fri Aug 20 11:31:48 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/proc.c,v 1.4 1999/08/20 15:36:46 faith Exp $
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/bsd/drm/kernel/drm/proc.c,v 1.2 2001/03/02 02:45:38 dawes Exp $
*
*/
#define __NO_VERSION__
#include "drmP.h"
static struct proc_dir_entry *drm_root = NULL;
static struct proc_dir_entry *drm_dev_root = NULL;
static char drm_slot_name[64];
static int drm_name_info(char *buf, char **start, off_t offset,
int len, int *eof, void *data);
static int drm_vm_info(char *buf, char **start, off_t offset,
int len, int *eof, void *data);
static int drm_clients_info(char *buf, char **start, off_t offset,
int len, int *eof, void *data);
static int drm_queues_info(char *buf, char **start, off_t offset,
int len, int *eof, void *data);
static int drm_bufs_info(char *buf, char **start, off_t offset,
int len, int *eof, void *data);
#if DRM_DEBUG_CODE
static int drm_vma_info(char *buf, char **start, off_t offset,
int len, int *eof, void *data);
#endif
#if DRM_DMA_HISTOGRAM
static int drm_histo_info(char *buf, char **start, off_t offset,
int len, int *eof, void *data);
#endif
struct drm_proc_list {
const char *name;
int (*f)(char *, char **, off_t, int, int *, void *);
} drm_proc_list[] = {
{ "name", drm_name_info },
{ "mem", drm_mem_info },
{ "vm", drm_vm_info },
{ "clients", drm_clients_info },
{ "queues", drm_queues_info },
{ "bufs", drm_bufs_info },
#if DRM_DEBUG_CODE
{ "vma", drm_vma_info },
#endif
#if DRM_DMA_HISTOGRAM
{ "histo", drm_histo_info },
#endif
};
#define DRM_PROC_ENTRIES (sizeof(drm_proc_list)/sizeof(drm_proc_list[0]))
int drm_proc_init(drm_device_t *dev)
{
struct proc_dir_entry *ent;
int i, j;
drm_root = create_proc_entry("graphics", S_IFDIR, NULL);
if (!drm_root) {
DRM_ERROR("Cannot create /proc/graphics\n");
return -1;
}
/* Instead of doing this search, we should
add some global support for /proc/graphics. */
for (i = 0; i < 8; i++) {
sprintf(drm_slot_name, "graphics/%d", i);
drm_dev_root = create_proc_entry(drm_slot_name, S_IFDIR, NULL);
if (!drm_dev_root) {
DRM_ERROR("Cannot create /proc/%s\n", drm_slot_name);
remove_proc_entry("graphics", NULL);
return -1;
}
if (drm_dev_root->nlink == 2) break;
drm_dev_root = NULL;
}
if (!drm_dev_root) {
DRM_ERROR("Cannot find slot in /proc/graphics\n");
return -1;
}
for (i = 0; i < DRM_PROC_ENTRIES; i++) {
ent = create_proc_entry(drm_proc_list[i].name,
S_IFREG|S_IRUGO, drm_dev_root);
if (!ent) {
DRM_ERROR("Cannot create /proc/%s/%s\n",
drm_slot_name, drm_proc_list[i].name);
for (j = 0; j < i; j++)
remove_proc_entry(drm_proc_list[i].name,
drm_dev_root);
remove_proc_entry(drm_slot_name, NULL);
remove_proc_entry("graphics", NULL);
return -1;
}
ent->read_proc = drm_proc_list[i].f;
ent->data = dev;
}
return 0;
}
int drm_proc_cleanup(void)
{
int i;
if (drm_root) {
if (drm_dev_root) {
for (i = 0; i < DRM_PROC_ENTRIES; i++) {
remove_proc_entry(drm_proc_list[i].name,
drm_dev_root);
}
remove_proc_entry(drm_slot_name, NULL);
}
remove_proc_entry("graphics", NULL);
remove_proc_entry(DRM_NAME, NULL);
}
drm_root = drm_dev_root = NULL;
return 0;
}
static int drm_name_info(char *buf, char **start, off_t offset, int len,
int *eof, void *data)
{
drm_device_t *dev = (drm_device_t *)data;
if (offset > 0) return 0; /* no partial requests */
len = 0;
*eof = 1;
if (dev->unique) {
DRM_PROC_PRINT("%s 0x%x %s\n",
dev->name, dev->device, dev->unique);
} else {
DRM_PROC_PRINT("%s 0x%x\n", dev->name, dev->device);
}
return len;
}
static int _drm_vm_info(char *buf, char **start, off_t offset, int len,
int *eof, void *data)
{
drm_device_t *dev = (drm_device_t *)data;
drm_map_t *map;
const char *types[] = { "FB", "REG", "SHM", "AGP" };
const char *type;
int i;
if (offset > 0) return 0; /* no partial requests */
len = 0;
*eof = 1;
DRM_PROC_PRINT("slot offset size type flags "
"address mtrr\n\n");
for (i = 0; i < dev->map_count; i++) {
map = dev->maplist[i];
if (map->type < 0 || map->type > 3) type = "??";
else type = types[map->type];
DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ",
i,
map->offset,
map->size,
type,
map->flags,
(unsigned long)map->handle);
if (map->mtrr < 0) {
DRM_PROC_PRINT("none\n");
} else {
DRM_PROC_PRINT("%4d\n", map->mtrr);
}
}
return len;
}
static int drm_vm_info(char *buf, char **start, off_t offset, int len,
int *eof, void *data)
{
drm_device_t *dev = (drm_device_t *)data;
int ret;
down(&dev->struct_sem);
ret = _drm_vm_info(buf, start, offset, len, eof, data);
up(&dev->struct_sem);
return ret;
}
static int _drm_queues_info(char *buf, char **start, off_t offset, int len,
int *eof, void *data)
{
drm_device_t *dev = (drm_device_t *)data;
int i;
drm_queue_t *q;
if (offset > 0) return 0; /* no partial requests */
len = 0;
*eof = 1;
DRM_PROC_PRINT(" ctx/flags use fin"
" blk/rw/rwf wait flushed queued"
" locks\n\n");
for (i = 0; i < dev->queue_count; i++) {
q = dev->queuelist[i];
atomic_inc(&q->use_count);
DRM_PROC_PRINT_RET(atomic_dec(&q->use_count),
"%5d/0x%03x %5d %5d"
" %5d/%c%c/%c%c%c %5Zd %10d %10d %10d\n",
i,
q->flags,
atomic_read(&q->use_count),
atomic_read(&q->finalization),
atomic_read(&q->block_count),
atomic_read(&q->block_read) ? 'r' : '-',
atomic_read(&q->block_write) ? 'w' : '-',
waitqueue_active(&q->read_queue) ? 'r':'-',
waitqueue_active(&q->write_queue) ? 'w':'-',
waitqueue_active(&q->flush_queue) ? 'f':'-',
DRM_BUFCOUNT(&q->waitlist),
atomic_read(&q->total_flushed),
atomic_read(&q->total_queued),
atomic_read(&q->total_locks));
atomic_dec(&q->use_count);
}
return len;
}
static int drm_queues_info(char *buf, char **start, off_t offset, int len,
int *eof, void *data)
{
drm_device_t *dev = (drm_device_t *)data;
int ret;
down(&dev->struct_sem);
ret = _drm_queues_info(buf, start, offset, len, eof, data);
up(&dev->struct_sem);
return ret;
}
/* drm_bufs_info is called whenever a process reads
/proc/graphics/<minor>/bufs. */
static int _drm_bufs_info(char *buf, char **start, off_t offset, int len,
int *eof, void *data)
{
drm_device_t *dev = (drm_device_t *)data;
drm_device_dma_t *dma = dev->dma;
int i;
if (!dma) return 0;
if (offset > 0) return 0; /* no partial requests */
len = 0;
*eof = 1;
DRM_PROC_PRINT(" o size count free segs pages kB\n\n");
for (i = 0; i <= DRM_MAX_ORDER; i++) {
if (dma->bufs[i].buf_count)
DRM_PROC_PRINT("%2d %8d %5d %5d %5d %5d %5ld\n",
i,
dma->bufs[i].buf_size,
dma->bufs[i].buf_count,
atomic_read(&dma->bufs[i]
.freelist.count),
dma->bufs[i].seg_count,
dma->bufs[i].seg_count
*(1 << dma->bufs[i].page_order),
(dma->bufs[i].seg_count
* (1 << dma->bufs[i].page_order))
* PAGE_SIZE / 1024);
}
DRM_PROC_PRINT("\n");
for (i = 0; i < dma->buf_count; i++) {
if (i && !(i%32)) DRM_PROC_PRINT("\n");
DRM_PROC_PRINT(" %d", dma->buflist[i]->list);
}
DRM_PROC_PRINT("\n");
return len;
}
static int drm_bufs_info(char *buf, char **start, off_t offset, int len,
int *eof, void *data)
{
drm_device_t *dev = (drm_device_t *)data;
int ret;
down(&dev->struct_sem);
ret = _drm_bufs_info(buf, start, offset, len, eof, data);
up(&dev->struct_sem);
return ret;
}
static int _drm_clients_info(char *buf, char **start, off_t offset, int len,
int *eof, void *data)
{
drm_device_t *dev = (drm_device_t *)data;
drm_file_t *priv;
if (offset > 0) return 0; /* no partial requests */
len = 0;
*eof = 1;
DRM_PROC_PRINT("a dev pid uid magic ioctls\n\n");
for (priv = dev->file_first; priv; priv = priv->next) {
DRM_PROC_PRINT("%c %3d %5d %5d %10u %10lu\n",
priv->authenticated ? 'y' : 'n',
priv->minor,
priv->pid,
priv->uid,
priv->magic,
priv->ioctl_count);
}
return len;
}
static int drm_clients_info(char *buf, char **start, off_t offset, int len,
int *eof, void *data)
{
drm_device_t *dev = (drm_device_t *)data;
int ret;
down(&dev->struct_sem);
ret = _drm_clients_info(buf, start, offset, len, eof, data);
up(&dev->struct_sem);
return ret;
}
#if DRM_DEBUG_CODE
static int _drm_vma_info(char *buf, char **start, off_t offset, int len,
int *eof, void *data)
{
drm_device_t *dev = (drm_device_t *)data;
drm_vma_entry_t *pt;
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte;
unsigned long i;
struct vm_area_struct *vma;
unsigned long address;
#if defined(__i386__)
unsigned int pgprot;
#endif
if (offset > 0) return 0; /* no partial requests */
len = 0;
*eof = 1;
DRM_PROC_PRINT("vma use count: %d, high_memory = %p, 0x%08lx\n",
atomic_read(&dev->vma_count),
high_memory, virt_to_phys(high_memory));
for (pt = dev->vmalist; pt; pt = pt->next) {
if (!(vma = pt->vma)) continue;
DRM_PROC_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx",
pt->pid,
vma->vm_start,
vma->vm_end,
vma->vm_flags & VM_READ ? 'r' : '-',
vma->vm_flags & VM_WRITE ? 'w' : '-',
vma->vm_flags & VM_EXEC ? 'x' : '-',
vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
vma->vm_flags & VM_LOCKED ? 'l' : '-',
vma->vm_flags & VM_IO ? 'i' : '-',
vma->vm_offset );
#if defined(__i386__)
pgprot = pgprot_val(vma->vm_page_prot);
DRM_PROC_PRINT(" %c%c%c%c%c%c%c%c%c",
pgprot & _PAGE_PRESENT ? 'p' : '-',
pgprot & _PAGE_RW ? 'w' : 'r',
pgprot & _PAGE_USER ? 'u' : 's',
pgprot & _PAGE_PWT ? 't' : 'b',
pgprot & _PAGE_PCD ? 'u' : 'c',
pgprot & _PAGE_ACCESSED ? 'a' : '-',
pgprot & _PAGE_DIRTY ? 'd' : '-',
pgprot & _PAGE_4M ? 'm' : 'k',
pgprot & _PAGE_GLOBAL ? 'g' : 'l' );
#endif
DRM_PROC_PRINT("\n");
for (i = vma->vm_start; i < vma->vm_end; i += PAGE_SIZE) {
pgd = pgd_offset(vma->vm_mm, i);
pmd = pmd_offset(pgd, i);
pte = pte_offset(pmd, i);
if (pte_present(*pte)) {
address = __pa(pte_page(*pte))
+ (i & (PAGE_SIZE-1));
DRM_PROC_PRINT(" 0x%08lx -> 0x%08lx"
" %c%c%c%c%c\n",
i,
address,
pte_read(*pte) ? 'r' : '-',
pte_write(*pte) ? 'w' : '-',
pte_exec(*pte) ? 'x' : '-',
pte_dirty(*pte) ? 'd' : '-',
pte_young(*pte) ? 'a' : '-' );
} else {
DRM_PROC_PRINT(" 0x%08lx\n", i);
}
}
}
return len;
}
static int drm_vma_info(char *buf, char **start, off_t offset, int len,
int *eof, void *data)
{
drm_device_t *dev = (drm_device_t *)data;
int ret;
down(&dev->struct_sem);
ret = _drm_vma_info(buf, start, offset, len, eof, data);
up(&dev->struct_sem);
return ret;
}
#endif
#if DRM_DMA_HISTOGRAM
static int _drm_histo_info(char *buf, char **start, off_t offset, int len,
int *eof, void *data)
{
drm_device_t *dev = (drm_device_t *)data;
drm_device_dma_t *dma = dev->dma;
int i;
unsigned long slot_value = DRM_DMA_HISTOGRAM_INITIAL;
unsigned long prev_value = 0;
drm_buf_t *buffer;
if (offset > 0) return 0; /* no partial requests */
len = 0;
*eof = 1;
DRM_PROC_PRINT("general statistics:\n");
DRM_PROC_PRINT("total %10u\n", atomic_read(&dev->histo.total));
DRM_PROC_PRINT("open %10u\n", atomic_read(&dev->total_open));
DRM_PROC_PRINT("close %10u\n", atomic_read(&dev->total_close));
DRM_PROC_PRINT("ioctl %10u\n", atomic_read(&dev->total_ioctl));
DRM_PROC_PRINT("irq %10u\n", atomic_read(&dev->total_irq));
DRM_PROC_PRINT("ctx %10u\n", atomic_read(&dev->total_ctx));
DRM_PROC_PRINT("\nlock statistics:\n");
DRM_PROC_PRINT("locks %10u\n", atomic_read(&dev->total_locks));
DRM_PROC_PRINT("unlocks %10u\n", atomic_read(&dev->total_unlocks));
DRM_PROC_PRINT("contends %10u\n", atomic_read(&dev->total_contends));
DRM_PROC_PRINT("sleeps %10u\n", atomic_read(&dev->total_sleeps));
if (dma) {
DRM_PROC_PRINT("\ndma statistics:\n");
DRM_PROC_PRINT("prio %10u\n",
atomic_read(&dma->total_prio));
DRM_PROC_PRINT("bytes %10u\n",
atomic_read(&dma->total_bytes));
DRM_PROC_PRINT("dmas %10u\n",
atomic_read(&dma->total_dmas));
DRM_PROC_PRINT("missed:\n");
DRM_PROC_PRINT(" dma %10u\n",
atomic_read(&dma->total_missed_dma));
DRM_PROC_PRINT(" lock %10u\n",
atomic_read(&dma->total_missed_lock));
DRM_PROC_PRINT(" free %10u\n",
atomic_read(&dma->total_missed_free));
DRM_PROC_PRINT(" sched %10u\n",
atomic_read(&dma->total_missed_sched));
DRM_PROC_PRINT("tried %10u\n",
atomic_read(&dma->total_tried));
DRM_PROC_PRINT("hit %10u\n",
atomic_read(&dma->total_hit));
DRM_PROC_PRINT("lost %10u\n",
atomic_read(&dma->total_lost));
buffer = dma->next_buffer;
if (buffer) {
DRM_PROC_PRINT("next_buffer %7d\n", buffer->idx);
} else {
DRM_PROC_PRINT("next_buffer none\n");
}
buffer = dma->this_buffer;
if (buffer) {
DRM_PROC_PRINT("this_buffer %7d\n", buffer->idx);
} else {
DRM_PROC_PRINT("this_buffer none\n");
}
}
DRM_PROC_PRINT("\nvalues:\n");
if (dev->lock.hw_lock) {
DRM_PROC_PRINT("lock 0x%08x\n",
dev->lock.hw_lock->lock);
} else {
DRM_PROC_PRINT("lock none\n");
}
DRM_PROC_PRINT("context_flag 0x%08x\n", dev->context_flag);
DRM_PROC_PRINT("interrupt_flag 0x%08x\n", dev->interrupt_flag);
DRM_PROC_PRINT("dma_flag 0x%08x\n", dev->dma_flag);
DRM_PROC_PRINT("queue_count %10d\n", dev->queue_count);
DRM_PROC_PRINT("last_context %10d\n", dev->last_context);
DRM_PROC_PRINT("last_switch %10lu\n", dev->last_switch);
DRM_PROC_PRINT("last_checked %10d\n", dev->last_checked);
DRM_PROC_PRINT("\n q2d d2c c2f"
" q2c q2f dma sch"
" ctx lacq lhld\n\n");
for (i = 0; i < DRM_DMA_HISTOGRAM_SLOTS; i++) {
DRM_PROC_PRINT("%s %10lu %10u %10u %10u %10u %10u"
" %10u %10u %10u %10u %10u\n",
i == DRM_DMA_HISTOGRAM_SLOTS - 1 ? ">=" : "< ",
i == DRM_DMA_HISTOGRAM_SLOTS - 1
? prev_value : slot_value ,
atomic_read(&dev->histo
.queued_to_dispatched[i]),
atomic_read(&dev->histo
.dispatched_to_completed[i]),
atomic_read(&dev->histo
.completed_to_freed[i]),
atomic_read(&dev->histo
.queued_to_completed[i]),
atomic_read(&dev->histo
.queued_to_freed[i]),
atomic_read(&dev->histo.dma[i]),
atomic_read(&dev->histo.schedule[i]),
atomic_read(&dev->histo.ctx[i]),
atomic_read(&dev->histo.lacq[i]),
atomic_read(&dev->histo.lhld[i]));
prev_value = slot_value;
slot_value = DRM_DMA_HISTOGRAM_NEXT(slot_value);
}
return len;
}
static int drm_histo_info(char *buf, char **start, off_t offset, int len,
int *eof, void *data)
{
drm_device_t *dev = (drm_device_t *)data;
int ret;
down(&dev->struct_sem);
ret = _drm_histo_info(buf, start, offset, len, eof, data);
up(&dev->struct_sem);
return ret;
}
#endif

View File

@ -1,554 +0,0 @@
/* sysctl.c -- sysctl support for DRM -*- c -*-
* Created: Mon Jan 11 09:48:47 1999 by faith@precisioninsight.com
* Revised: Fri Aug 20 11:31:48 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* $PI$
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/bsd/drm/kernel/drm/sysctl.c,v 1.2 2001/03/02 02:45:38 dawes Exp $
*
*/
#define __NO_VERSION__
#include "drmP.h"
#include <sys/sysctl.h>
SYSCTL_NODE(_hw, OID_AUTO, dri, CTLFLAG_RW, 0, "DRI Graphics");
static int drm_name_info DRM_SYSCTL_HANDLER_ARGS;
static int drm_vm_info DRM_SYSCTL_HANDLER_ARGS;
static int drm_clients_info DRM_SYSCTL_HANDLER_ARGS;
static int drm_queues_info DRM_SYSCTL_HANDLER_ARGS;
static int drm_bufs_info DRM_SYSCTL_HANDLER_ARGS;
#if DRM_DEBUG_CODExx /* deliberately never defined: the vma code below uses Linux MM interfaces */
static int drm_vma_info DRM_SYSCTL_HANDLER_ARGS;
#endif
#if DRM_DMA_HISTOGRAM
static int drm_histo_info DRM_SYSCTL_HANDLER_ARGS;
#endif
struct drm_sysctl_list {
const char *name;
int (*f) DRM_SYSCTL_HANDLER_ARGS;
} drm_sysctl_list[] = {
{ "name", drm_name_info },
{ "mem", drm_mem_info },
{ "vm", drm_vm_info },
{ "clients", drm_clients_info },
{ "queues", drm_queues_info },
{ "bufs", drm_bufs_info },
#if DRM_DEBUG_CODExx
{ "vma", drm_vma_info },
#endif
#if DRM_DMA_HISTOGRAM
{ "histo", drm_histo_info },
#endif
};
#define DRM_SYSCTL_ENTRIES (sizeof(drm_sysctl_list)/sizeof(drm_sysctl_list[0]))
struct drm_sysctl_info {
struct sysctl_oid oids[DRM_SYSCTL_ENTRIES + 1];
struct sysctl_oid_list list;
char name[2];
};
int drm_sysctl_init(drm_device_t *dev)
{
struct drm_sysctl_info *info;
struct sysctl_oid *oid;
struct sysctl_oid *top;
int i;
/* Find the next free unit number under hw.dri */
i = 0;
SLIST_FOREACH(oid, &sysctl__hw_dri_children, oid_link) {
if (i <= oid->oid_arg2)
i = oid->oid_arg2 + 1;
}
info = drm_alloc(sizeof *info, DRM_MEM_DRIVER);
dev->sysctl = info;
/* Construct the hw.dri.<unit> node */
info->name[0] = '0' + i;
info->name[1] = 0;
oid = &info->oids[DRM_SYSCTL_ENTRIES];
bzero(oid, sizeof(*oid));
oid->oid_parent = &sysctl__hw_dri_children;
oid->oid_number = OID_AUTO;
oid->oid_kind = CTLTYPE_NODE | CTLFLAG_RW;
oid->oid_arg1 = &info->list;
oid->oid_arg2 = i;
oid->oid_name = info->name;
oid->oid_handler = 0;
oid->oid_fmt = "N";
SLIST_INIT(&info->list);
sysctl_register_oid(oid);
top = oid;
for (i = 0; i < DRM_SYSCTL_ENTRIES; i++) {
oid = &info->oids[i];
bzero(oid, sizeof(*oid));
oid->oid_parent = top->oid_arg1;
oid->oid_number = OID_AUTO;
oid->oid_kind = CTLTYPE_INT | CTLFLAG_RD;
oid->oid_arg1 = dev;
oid->oid_arg2 = 0;
oid->oid_name = drm_sysctl_list[i].name;
oid->oid_handler = drm_sysctl_list[i].f;
oid->oid_fmt = "A";
sysctl_register_oid(oid);
}
return 0;
}
int drm_sysctl_cleanup(drm_device_t *dev)
{
int i;
DRM_DEBUG("dev->sysctl=%p\n", dev->sysctl);
for (i = 0; i < DRM_SYSCTL_ENTRIES + 1; i++)
sysctl_unregister_oid(&dev->sysctl->oids[i]);
drm_free(dev->sysctl, sizeof *dev->sysctl, DRM_MEM_DRIVER);
dev->sysctl = NULL;
return 0;
}
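/* For illustration only -- the oids registered in drm_sysctl_init() above
 * show up as hw.dri.<unit>.<entry>, so the handlers can be exercised from
 * userland with sysctl(8) or, as sketched below, sysctlbyname(3).  The
 * unit number 0 assumes a single DRM device is attached. */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int main(void)
{
        char buf[256];
        size_t len = sizeof(buf);

        if (sysctlbyname("hw.dri.0.name", buf, &len, NULL, 0) == -1) {
                perror("sysctlbyname(hw.dri.0.name)");
                return 1;
        }
        printf("%s\n", buf);    /* drm_name_info formats "<name> 0x<udev> <busid>" */
        return 0;
}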
static int drm_name_info DRM_SYSCTL_HANDLER_ARGS
{
drm_device_t *dev = arg1;
char buf[128];
int error;
if (dev->unique) {
DRM_SYSCTL_PRINT("%s 0x%x %s\n",
dev->name, dev2udev(dev->devnode), dev->unique);
} else {
DRM_SYSCTL_PRINT("%s 0x%x\n", dev->name, dev2udev(dev->devnode));
}
SYSCTL_OUT(req, "", 1);
return 0;
}
static int _drm_vm_info DRM_SYSCTL_HANDLER_ARGS
{
drm_device_t *dev = arg1;
drm_map_t *map;
const char *types[] = { "FB", "REG", "SHM", "AGP" };
const char *type;
int i;
char buf[128];
int error;
DRM_SYSCTL_PRINT("slot offset size type flags "
"address mtrr\n\n");
error = SYSCTL_OUT(req, buf, strlen(buf));
if (error) return error;
for (i = 0; i < dev->map_count; i++) {
map = dev->maplist[i];
if (map->type < 0 || map->type > 3) type = "??";
else type = types[map->type];
DRM_SYSCTL_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ",
i,
map->offset,
map->size,
type,
map->flags,
(unsigned long)map->handle);
if (map->mtrr < 0) {
DRM_SYSCTL_PRINT("none\n");
} else {
DRM_SYSCTL_PRINT("%4d\n", map->mtrr);
}
}
SYSCTL_OUT(req, "", 1);
return 0;
}
static int drm_vm_info DRM_SYSCTL_HANDLER_ARGS
{
drm_device_t *dev = arg1;
int ret;
lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curproc);
ret = _drm_vm_info(oidp, arg1, arg2, req);
lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
return ret;
}
static int _drm_queues_info DRM_SYSCTL_HANDLER_ARGS
{
drm_device_t *dev = arg1;
int i;
drm_queue_t *q;
char buf[128];
int error;
DRM_SYSCTL_PRINT(" ctx/flags use fin"
" blk/rw/rwf wait flushed queued"
" locks\n\n");
for (i = 0; i < dev->queue_count; i++) {
q = dev->queuelist[i];
atomic_inc(&q->use_count);
DRM_SYSCTL_PRINT_RET(atomic_dec(&q->use_count),
"%5d/0x%03x %5d %5d"
" %5d/%c%c/%c%c%c %5Zd %10d %10d %10d\n",
i,
q->flags,
atomic_read(&q->use_count),
atomic_read(&q->finalization),
atomic_read(&q->block_count),
atomic_read(&q->block_read) ? 'r' : '-',
atomic_read(&q->block_write) ? 'w' : '-',
q->read_queue ? 'r':'-',
q->write_queue ? 'w':'-',
q->flush_queue ? 'f':'-',
DRM_BUFCOUNT(&q->waitlist),
atomic_read(&q->total_flushed),
atomic_read(&q->total_queued),
atomic_read(&q->total_locks));
atomic_dec(&q->use_count);
}
SYSCTL_OUT(req, "", 1);
return 0;
}
static int drm_queues_info DRM_SYSCTL_HANDLER_ARGS
{
drm_device_t *dev = arg1;
int ret;
lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curproc);
ret = _drm_queues_info(oidp, arg1, arg2, req);
lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
return ret;
}
/* drm_bufs_info is called whenever a process reads
hw.dri.0.bufs. */
static int _drm_bufs_info DRM_SYSCTL_HANDLER_ARGS
{
drm_device_t *dev = arg1;
drm_device_dma_t *dma = dev->dma;
int i;
char buf[128];
int error;
if (!dma) return 0;
DRM_SYSCTL_PRINT(" o size count free segs pages kB\n\n");
for (i = 0; i <= DRM_MAX_ORDER; i++) {
if (dma->bufs[i].buf_count)
DRM_SYSCTL_PRINT("%2d %8d %5d %5d %5d %5d %5d\n",
i,
dma->bufs[i].buf_size,
dma->bufs[i].buf_count,
atomic_read(&dma->bufs[i]
.freelist.count),
dma->bufs[i].seg_count,
dma->bufs[i].seg_count
*(1 << dma->bufs[i].page_order),
(dma->bufs[i].seg_count
* (1 << dma->bufs[i].page_order))
* PAGE_SIZE / 1024);
}
DRM_SYSCTL_PRINT("\n");
for (i = 0; i < dma->buf_count; i++) {
if (i && !(i%32)) DRM_SYSCTL_PRINT("\n");
DRM_SYSCTL_PRINT(" %d", dma->buflist[i]->list);
}
DRM_SYSCTL_PRINT("\n");
SYSCTL_OUT(req, "", 1);
return 0;
}
static int drm_bufs_info DRM_SYSCTL_HANDLER_ARGS
{
drm_device_t *dev = arg1;
int ret;
lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curproc);
ret = _drm_bufs_info(oidp, arg1, arg2, req);
lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
return ret;
}
static int _drm_clients_info DRM_SYSCTL_HANDLER_ARGS
{
drm_device_t *dev = arg1;
drm_file_t *priv;
char buf[128];
int error;
DRM_SYSCTL_PRINT("a dev pid uid magic ioctls\n\n");
TAILQ_FOREACH(priv, &dev->files, link) {
DRM_SYSCTL_PRINT("%c %3d %5d %5d %10u %10lu\n",
priv->authenticated ? 'y' : 'n',
priv->minor,
priv->pid,
priv->uid,
priv->magic,
priv->ioctl_count);
}
SYSCTL_OUT(req, "", 1);
return 0;
}
static int drm_clients_info DRM_SYSCTL_HANDLER_ARGS
{
drm_device_t *dev = arg1;
int ret;
lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curproc);
ret = _drm_clients_info(oidp, arg1, arg2, req);
lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
return ret;
}
#if DRM_DEBUG_CODExx
static int _drm_vma_info DRM_SYSCTL_HANDLER_ARGS
{
drm_device_t *dev = arg1;
drm_vma_entry_t *pt;
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte;
unsigned long i;
struct vm_area_struct *vma;
unsigned long address;
#if defined(__i386__)
unsigned int pgprot;
#endif
char buf[128];
int error;
DRM_SYSCTL_PRINT("vma use count: %d, high_memory = %p, 0x%08lx\n",
atomic_read(&dev->vma_count),
high_memory, virt_to_phys(high_memory));
for (pt = dev->vmalist; pt; pt = pt->next) {
if (!(vma = pt->vma)) continue;
DRM_SYSCTL_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx",
pt->pid,
vma->vm_start,
vma->vm_end,
vma->vm_flags & VM_READ ? 'r' : '-',
vma->vm_flags & VM_WRITE ? 'w' : '-',
vma->vm_flags & VM_EXEC ? 'x' : '-',
vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
vma->vm_flags & VM_LOCKED ? 'l' : '-',
vma->vm_flags & VM_IO ? 'i' : '-',
vma->vm_offset );
#if defined(__i386__)
pgprot = pgprot_val(vma->vm_page_prot);
DRM_SYSCTL_PRINT(" %c%c%c%c%c%c%c%c%c",
pgprot & _PAGE_PRESENT ? 'p' : '-',
pgprot & _PAGE_RW ? 'w' : 'r',
pgprot & _PAGE_USER ? 'u' : 's',
pgprot & _PAGE_PWT ? 't' : 'b',
pgprot & _PAGE_PCD ? 'u' : 'c',
pgprot & _PAGE_ACCESSED ? 'a' : '-',
pgprot & _PAGE_DIRTY ? 'd' : '-',
pgprot & _PAGE_4M ? 'm' : 'k',
pgprot & _PAGE_GLOBAL ? 'g' : 'l' );
#endif
DRM_SYSCTL_PRINT("\n");
for (i = vma->vm_start; i < vma->vm_end; i += PAGE_SIZE) {
pgd = pgd_offset(vma->vm_mm, i);
pmd = pmd_offset(pgd, i);
pte = pte_offset(pmd, i);
if (pte_present(*pte)) {
address = __pa(pte_page(*pte))
+ (i & (PAGE_SIZE-1));
DRM_SYSCTL_PRINT(" 0x%08lx -> 0x%08lx"
" %c%c%c%c%c\n",
i,
address,
pte_read(*pte) ? 'r' : '-',
pte_write(*pte) ? 'w' : '-',
pte_exec(*pte) ? 'x' : '-',
pte_dirty(*pte) ? 'd' : '-',
pte_young(*pte) ? 'a' : '-' );
} else {
DRM_SYSCTL_PRINT(" 0x%08lx\n", i);
}
}
}
SYSCTL_OUT(req, "", 1);
return 0;
}
static int drm_vma_info DRM_SYSCTL_HANDLER_ARGS
{
drm_device_t *dev = arg1;
int ret;
lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curproc);
ret = _drm_vma_info(oidp, arg1, arg2, req);
lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
return ret;
}
#endif
#if DRM_DMA_HISTOGRAM
static int _drm_histo_info DRM_SYSCTL_HANDLER_ARGS
{
drm_device_t *dev = arg1;
drm_device_dma_t *dma = dev->dma;
int i;
unsigned long slot_value = DRM_DMA_HISTOGRAM_INITIAL;
unsigned long prev_value = 0;
drm_buf_t *buffer;
char buf[128];
int error;
DRM_SYSCTL_PRINT("general statistics:\n");
DRM_SYSCTL_PRINT("total %10u\n", atomic_read(&dev->histo.total));
DRM_SYSCTL_PRINT("open %10u\n", atomic_read(&dev->total_open));
DRM_SYSCTL_PRINT("close %10u\n", atomic_read(&dev->total_close));
DRM_SYSCTL_PRINT("ioctl %10u\n", atomic_read(&dev->total_ioctl));
DRM_SYSCTL_PRINT("irq %10u\n", atomic_read(&dev->total_irq));
DRM_SYSCTL_PRINT("ctx %10u\n", atomic_read(&dev->total_ctx));
DRM_SYSCTL_PRINT("\nlock statistics:\n");
DRM_SYSCTL_PRINT("locks %10u\n", atomic_read(&dev->total_locks));
DRM_SYSCTL_PRINT("unlocks %10u\n", atomic_read(&dev->total_unlocks));
DRM_SYSCTL_PRINT("contends %10u\n", atomic_read(&dev->total_contends));
DRM_SYSCTL_PRINT("sleeps %10u\n", atomic_read(&dev->total_sleeps));
if (dma) {
DRM_SYSCTL_PRINT("\ndma statistics:\n");
DRM_SYSCTL_PRINT("prio %10u\n",
atomic_read(&dma->total_prio));
DRM_SYSCTL_PRINT("bytes %10u\n",
atomic_read(&dma->total_bytes));
DRM_SYSCTL_PRINT("dmas %10u\n",
atomic_read(&dma->total_dmas));
DRM_SYSCTL_PRINT("missed:\n");
DRM_SYSCTL_PRINT(" dma %10u\n",
atomic_read(&dma->total_missed_dma));
DRM_SYSCTL_PRINT(" lock %10u\n",
atomic_read(&dma->total_missed_lock));
DRM_SYSCTL_PRINT(" free %10u\n",
atomic_read(&dma->total_missed_free));
DRM_SYSCTL_PRINT(" sched %10u\n",
atomic_read(&dma->total_missed_sched));
DRM_SYSCTL_PRINT("tried %10u\n",
atomic_read(&dma->total_tried));
DRM_SYSCTL_PRINT("hit %10u\n",
atomic_read(&dma->total_hit));
DRM_SYSCTL_PRINT("lost %10u\n",
atomic_read(&dma->total_lost));
buffer = dma->next_buffer;
if (buffer) {
DRM_SYSCTL_PRINT("next_buffer %7d\n", buffer->idx);
} else {
DRM_SYSCTL_PRINT("next_buffer none\n");
}
buffer = dma->this_buffer;
if (buffer) {
DRM_SYSCTL_PRINT("this_buffer %7d\n", buffer->idx);
} else {
DRM_SYSCTL_PRINT("this_buffer none\n");
}
}
DRM_SYSCTL_PRINT("\nvalues:\n");
if (dev->lock.hw_lock) {
DRM_SYSCTL_PRINT("lock 0x%08x\n",
dev->lock.hw_lock->lock);
} else {
DRM_SYSCTL_PRINT("lock none\n");
}
DRM_SYSCTL_PRINT("context_flag 0x%08lx\n", dev->context_flag);
DRM_SYSCTL_PRINT("interrupt_flag 0x%08lx\n", dev->interrupt_flag);
DRM_SYSCTL_PRINT("dma_flag 0x%08lx\n", dev->dma_flag);
DRM_SYSCTL_PRINT("queue_count %10d\n", dev->queue_count);
DRM_SYSCTL_PRINT("last_context %10d\n", dev->last_context);
DRM_SYSCTL_PRINT("last_switch %10u\n", dev->last_switch);
DRM_SYSCTL_PRINT("last_checked %10d\n", dev->last_checked);
DRM_SYSCTL_PRINT("\n q2d d2c c2f"
" q2c q2f dma sch"
" ctx lacq lhld\n\n");
for (i = 0; i < DRM_DMA_HISTOGRAM_SLOTS; i++) {
DRM_SYSCTL_PRINT("%s %10lu %10u %10u %10u %10u %10u"
" %10u %10u %10u %10u %10u\n",
i == DRM_DMA_HISTOGRAM_SLOTS - 1 ? ">=" : "< ",
i == DRM_DMA_HISTOGRAM_SLOTS - 1
? prev_value : slot_value ,
atomic_read(&dev->histo
.queued_to_dispatched[i]),
atomic_read(&dev->histo
.dispatched_to_completed[i]),
atomic_read(&dev->histo
.completed_to_freed[i]),
atomic_read(&dev->histo
.queued_to_completed[i]),
atomic_read(&dev->histo
.queued_to_freed[i]),
atomic_read(&dev->histo.dma[i]),
atomic_read(&dev->histo.schedule[i]),
atomic_read(&dev->histo.ctx[i]),
atomic_read(&dev->histo.lacq[i]),
atomic_read(&dev->histo.lhld[i]));
prev_value = slot_value;
slot_value = DRM_DMA_HISTOGRAM_NEXT(slot_value);
}
SYSCTL_OUT(req, "", 1);
return 0;
}
static int drm_histo_info DRM_SYSCTL_HANDLER_ARGS
{
drm_device_t *dev = arg1;
int ret;
lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curproc);
ret = _drm_histo_info(oidp, arg1, arg2, req);
lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
return ret;
}
#endif


@@ -1,104 +0,0 @@
/* vm.c -- Memory mapping for DRM -*- c -*-
* Created: Mon Jan 4 08:58:31 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
*
*/
#define __NO_VERSION__
#include "drmP.h"
#include <vm/vm.h>
#include <vm/pmap.h>
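/*
 * Both entry points below implement the d_mmap interface: they are
 * handed a byte offset into the device and must return a physical page
 * index (hence the atop() conversions) or -1 on failure.  drm_dma_mmap()
 * covers offsets that fall inside the DMA buffer area by looking the
 * page up in dma->pagelist; drm_mmap() handles everything else by
 * searching the map list and dispatching on the map type.
 */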
static int drm_dma_mmap(dev_t kdev, vm_offset_t offset, int prot)
{
drm_device_t *dev = kdev->si_drv1;
drm_device_dma_t *dma = dev->dma;
unsigned long physical;
unsigned long page;
if (!dma) return -1; /* Error */
if (!dma->pagelist) return -1; /* Nothing allocated */
page = offset >> PAGE_SHIFT;
physical = dma->pagelist[page];
DRM_DEBUG("0x%08x (page %lu) => 0x%08lx\n", offset, page, physical);
return atop(physical);
}
int drm_mmap(dev_t kdev, vm_offset_t offset, int prot)
{
drm_device_t *dev = kdev->si_drv1;
drm_map_t *map = NULL;
int i;
/* DRM_DEBUG("offset = 0x%x\n", offset); */
if (dev->dma
&& offset >= 0
&& offset < ptoa(dev->dma->page_count))
return drm_dma_mmap(kdev, offset, prot);
/* A sequential search of a linked list is
fine here because: 1) there will only be
about 5-10 entries in the list and, 2) a
DRI client only has to do this mapping
once, so it doesn't have to be optimized
for performance, even if the list was a
bit longer. */
for (i = 0; i < dev->map_count; i++) {
map = dev->maplist[i];
/* DRM_DEBUG("considering 0x%x..0x%x\n", map->offset, map->offset + map->size - 1); */
if (offset >= map->offset
&& offset < map->offset + map->size) break;
}
if (i >= dev->map_count) {
DRM_DEBUG("can't find map\n");
return -1;
}
if (!map || ((map->flags&_DRM_RESTRICTED) && suser(curproc))) {
DRM_DEBUG("restricted map\n");
return -1;
}
switch (map->type) {
case _DRM_FRAME_BUFFER:
case _DRM_REGISTERS:
case _DRM_AGP:
return atop(offset);
case _DRM_SHM:
return atop(vtophys(offset));
default:
return -1; /* This should never happen. */
}
DRM_DEBUG("bailing out\n");
return -1;
}
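/*
 * Rough userland counterpart (illustrative only; the device node name
 * "/dev/drm0" and the map offset/size -- which a client normally learns
 * from the X server and the DRM map ioctls -- are assumptions, not
 * something this file defines):
 *
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *
 *	int fd = open("/dev/drm0", O_RDWR);
 *	void *regs = mmap(NULL, map_size, PROT_READ | PROT_WRITE,
 *	                  MAP_SHARED, fd, map_offset);
 *
 * Pages touched through such a mapping are translated by drm_mmap()
 * above.
 */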


@@ -1,8 +1,8 @@
/* drmP.h -- Private header for Direct Rendering Manager -*- c -*- /* drmP.h -- Private header for Direct Rendering Manager -*- linux-c -*-
* Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com * Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com
* Revised: Tue Oct 12 08:51:07 1999 by faith@precisioninsight.com
* *
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All rights reserved. * All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
@@ -19,137 +19,67 @@
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* DEALINGS IN THE SOFTWARE. * OTHER DEALINGS IN THE SOFTWARE.
*
* $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drmP.h,v 1.58 1999/08/30 13:05:00 faith Exp $
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/bsd/drm/kernel/drmP.h,v 1.3 2001/03/06 16:45:26 dawes Exp $
* *
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Gareth Hughes <gareth@valinux.com>
*/ */
#ifndef _DRM_P_H_ #ifndef _DRM_P_H_
#define _DRM_P_H_ #define _DRM_P_H_
#ifdef _KERNEL #if defined(_KERNEL) || defined(__KERNEL__)
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/fcntl.h>
#include <sys/uio.h>
#include <sys/filio.h>
#include <sys/sysctl.h>
#include <sys/select.h>
#include <sys/bus.h>
#if __FreeBSD_version >= 400005
#include <sys/taskqueue.h>
#endif
#if __FreeBSD_version >= 400006 /* DRM template customization defaults
#define DRM_AGP
#endif
#ifdef DRM_AGP
#include <pci/agpvar.h>
#endif
#include "drm.h"
typedef u_int32_t atomic_t;
typedef u_int32_t cycles_t;
typedef u_int32_t spinlock_t;
#define atomic_set(p, v) (*(p) = (v))
#define atomic_read(p) (*(p))
#define atomic_inc(p) atomic_add_int(p, 1)
#define atomic_dec(p) atomic_subtract_int(p, 1)
#define atomic_add(n, p) atomic_add_int(p, n)
#define atomic_sub(n, p) atomic_subtract_int(p, n)
/* The version number here is a guess */
#if __FreeBSD_version >= 500010
#define callout_init(a) callout_init(a, 0)
#endif
/* Fake this */
static __inline u_int32_t
test_and_set_bit(int b, volatile u_int32_t *p)
{
int s = splhigh();
u_int32_t m = 1<<b;
u_int32_t r = *p & m;
*p |= m;
splx(s);
return r;
}
static __inline void
clear_bit(int b, volatile u_int32_t *p)
{
atomic_clear_int(p + (b >> 5), 1 << (b & 0x1f));
}
static __inline void
set_bit(int b, volatile u_int32_t *p)
{
atomic_set_int(p + (b >> 5), 1 << (b & 0x1f));
}
static __inline int
test_bit(int b, volatile u_int32_t *p)
{
return p[b >> 5] & (1 << (b & 0x1f));
}
static __inline int
find_first_zero_bit(volatile u_int32_t *p, int max)
{
int b;
for (b = 0; b < max; b += 32) {
if (p[b >> 5] != ~0) {
for (;;) {
if ((p[b >> 5] & (1 << (b & 0x1f))) == 0)
return b;
b++;
}
}
}
return max;
}
#define spldrm() spltty()
#define memset(p, v, s) bzero(p, s)
/*
* Fake out the module macros for versions of FreeBSD where they don't
* exist.
*/ */
#if __FreeBSD_version < 400002 #ifndef __HAVE_AGP
#define __HAVE_AGP 0
#define MODULE_VERSION(a,b) struct __hack #endif
#define MODULE_DEPEND(a,b,c,d,e) struct __hack #ifndef __HAVE_MTRR
#define __HAVE_MTRR 0
#endif
#ifndef __HAVE_CTX_BITMAP
#define __HAVE_CTX_BITMAP 0
#endif
#ifndef __HAVE_DMA
#define __HAVE_DMA 0
#endif
#ifndef __HAVE_DMA_IRQ
#define __HAVE_DMA_IRQ 0
#endif
#ifndef __HAVE_DMA_WAITLIST
#define __HAVE_DMA_WAITLIST 0
#endif
#ifndef __HAVE_DMA_FREELIST
#define __HAVE_DMA_FREELIST 0
#endif
#ifndef __HAVE_DMA_HISTOGRAM
#define __HAVE_DMA_HISTOGRAM 0
#endif #endif
#define DRM_DEBUG_CODE 0 /* Include debugging code (if > 1, then #define DRM_DEBUG_CODE 0 /* Include debugging code (if > 1, then
also include looping detection. */ also include looping detection. */
#define DRM_DMA_HISTOGRAM 1 /* Make histogram of DMA latency. */
typedef struct drm_device drm_device_t;
typedef struct drm_file drm_file_t;
/* There's undoubtably more of this file to go into these OS dependent ones. */
#include "drm_os_freebsd.h"
#include "drm.h"
/* Begin the DRM... */
#define DRM_HASH_SIZE 16 /* Size of key hash table */ #define DRM_HASH_SIZE 16 /* Size of key hash table */
#define DRM_KERNEL_CONTEXT 0 /* Change drm_resctx if changed */ #define DRM_KERNEL_CONTEXT 0 /* Change drm_resctx if changed */
#define DRM_RESERVED_CONTEXTS 1 /* Change drm_resctx if changed */ #define DRM_RESERVED_CONTEXTS 1 /* Change drm_resctx if changed */
#define DRM_LOOPING_LIMIT 5000000 #define DRM_LOOPING_LIMIT 5000000
#define DRM_BSZ 1024 /* Buffer size for /dev/drm? output */ #define DRM_BSZ 1024 /* Buffer size for /dev/drm? output */
#define DRM_TIME_SLICE (hz/20) /* Time slice for GLXContexts */
#define DRM_LOCK_SLICE 1 /* Time slice for lock, in jiffies */ #define DRM_LOCK_SLICE 1 /* Time slice for lock, in jiffies */
#define DRM_FLAG_DEBUG 0x01 #define DRM_FLAG_DEBUG 0x01
@@ -174,63 +104,27 @@ find_first_zero_bit(volatile u_int32_t *p, int max)
#define DRM_MEM_TOTALAGP 16 #define DRM_MEM_TOTALAGP 16
#define DRM_MEM_BOUNDAGP 17 #define DRM_MEM_BOUNDAGP 17
#define DRM_MEM_CTXBITMAP 18 #define DRM_MEM_CTXBITMAP 18
#define DRM_MEM_STUB 19
#define DRM_MEM_SGLISTS 20
#define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8) #define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
/* Backward compatibility section */ /* Backward compatibility section */
/* _PAGE_WT changed to _PAGE_PWT in 2.2.6 */
#ifndef _PAGE_PWT #ifndef _PAGE_PWT
/* The name of _PAGE_WT was changed to
_PAGE_PWT in Linux 2.2.6 */
#define _PAGE_PWT _PAGE_WT #define _PAGE_PWT _PAGE_WT
#endif #endif
#define __drm_dummy_lock(lock) (*(__volatile__ unsigned int *)lock) /* Mapping helper macros */
#define _DRM_CAS(lock,old,new,__ret) \ #define DRM_IOREMAP(map) \
(map)->handle = DRM(ioremap)( (map)->offset, (map)->size )
#define DRM_IOREMAPFREE(map) \
do { \ do { \
int __dummy; /* Can't mark eax as clobbered */ \ if ( (map)->handle && (map)->size ) \
__asm__ __volatile__( \ DRM(ioremapfree)( (map)->handle, (map)->size ); \
"lock ; cmpxchg %4,%1\n\t" \
"setnz %0" \
: "=d" (__ret), \
"=m" (__drm_dummy_lock(lock)), \
"=a" (__dummy) \
: "2" (old), \
"r" (new)); \
} while (0) } while (0)
/* Macros to make printk easier */
#define DRM_ERROR(fmt, arg...) \
printf("error: " "[" DRM_NAME ":" __FUNCTION__ "] *ERROR* " fmt , ##arg)
#define DRM_MEM_ERROR(area, fmt, arg...) \
printf("error: " "[" DRM_NAME ":" __FUNCTION__ ":%s] *ERROR* " fmt , \
drm_mem_stats[area].name , ##arg)
#define DRM_INFO(fmt, arg...) printf("info: " "[" DRM_NAME "] " fmt , ##arg)
#if DRM_DEBUG_CODE
#define DRM_DEBUG(fmt, arg...) \
do { \
if (drm_flags&DRM_FLAG_DEBUG) \
printf("[" DRM_NAME ":" __FUNCTION__ "] " fmt , \
##arg); \
} while (0)
#else
#define DRM_DEBUG(fmt, arg...) do { } while (0)
#endif
#define DRM_PROC_LIMIT (PAGE_SIZE-80)
#define DRM_SYSCTL_PRINT(fmt, arg...) \
snprintf(buf, sizeof(buf), fmt, ##arg); \
error = SYSCTL_OUT(req, buf, strlen(buf)); \
if (error) return error;
#define DRM_SYSCTL_PRINT_RET(ret, fmt, arg...) \
snprintf(buf, sizeof(buf), fmt, ##arg); \
error = SYSCTL_OUT(req, buf, strlen(buf)); \
if (error) { ret; return error; }
/* Internal types and structures */ /* Internal types and structures */
#define DRM_ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0])) #define DRM_ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0]))
#define DRM_MIN(a,b) ((a)<(b)?(a):(b)) #define DRM_MIN(a,b) ((a)<(b)?(a):(b))
@@ -240,6 +134,16 @@ find_first_zero_bit(volatile u_int32_t *p, int max)
#define DRM_BUFCOUNT(x) ((x)->count - DRM_LEFTCOUNT(x)) #define DRM_BUFCOUNT(x) ((x)->count - DRM_LEFTCOUNT(x))
#define DRM_WAITCOUNT(dev,idx) DRM_BUFCOUNT(&dev->queuelist[idx]->waitlist) #define DRM_WAITCOUNT(dev,idx) DRM_BUFCOUNT(&dev->queuelist[idx]->waitlist)
#define DRM_GET_PRIV_SAREA(_dev, _ctx, _map) do { \
(_map) = (_dev)->context_sareas[_ctx]; \
} while(0)
typedef struct drm_pci_list {
u16 vendor;
u16 device;
} drm_pci_list_t;
typedef struct drm_ioctl_desc { typedef struct drm_ioctl_desc {
d_ioctl_t *func; d_ioctl_t *func;
int auth_needed; int auth_needed;
@@ -279,7 +183,7 @@ typedef struct drm_buf {
struct drm_buf *next; /* Kernel-only: used for free list */ struct drm_buf *next; /* Kernel-only: used for free list */
__volatile__ int waiting; /* On kernel DMA queue */ __volatile__ int waiting; /* On kernel DMA queue */
__volatile__ int pending; /* On hardware DMA queue */ __volatile__ int pending; /* On hardware DMA queue */
int dma_wait; /* Processes waiting */ wait_queue_head_t dma_wait; /* Processes waiting */
pid_t pid; /* PID of holding process */ pid_t pid; /* PID of holding process */
int context; /* Kernel queue for this buffer */ int context; /* Kernel queue for this buffer */
int while_locked;/* Dispatch this buffer while locked */ int while_locked;/* Dispatch this buffer while locked */
@@ -292,15 +196,15 @@ typedef struct drm_buf {
DRM_LIST_RECLAIM = 5 DRM_LIST_RECLAIM = 5
} list; /* Which list we're on */ } list; /* Which list we're on */
void *dev_private;
int dev_priv_size;
#if DRM_DMA_HISTOGRAM #if DRM_DMA_HISTOGRAM
struct timespec time_queued; /* Queued to kernel DMA queue */ cycles_t time_queued; /* Queued to kernel DMA queue */
struct timespec time_dispatched; /* Dispatched to hardware */ cycles_t time_dispatched; /* Dispatched to hardware */
struct timespec time_completed; /* Completed by hardware */ cycles_t time_completed; /* Completed by hardware */
struct timespec time_freed; /* Back on freelist */ cycles_t time_freed; /* Back on freelist */
#endif #endif
int dev_priv_size; /* Size of buffer private stoarge */
void *dev_private; /* Per-buffer private storage */
} drm_buf_t; } drm_buf_t;
#if DRM_DMA_HISTOGRAM #if DRM_DMA_HISTOGRAM
@@ -332,8 +236,8 @@ typedef struct drm_waitlist {
drm_buf_t **rp; /* Read pointer */ drm_buf_t **rp; /* Read pointer */
drm_buf_t **wp; /* Write pointer */ drm_buf_t **wp; /* Write pointer */
drm_buf_t **end; /* End pointer */ drm_buf_t **end; /* End pointer */
spinlock_t read_lock; DRM_OS_SPINTYPE read_lock;
spinlock_t write_lock; DRM_OS_SPINTYPE write_lock;
} drm_waitlist_t; } drm_waitlist_t;
typedef struct drm_freelist { typedef struct drm_freelist {
@@ -341,11 +245,11 @@ typedef struct drm_freelist {
atomic_t count; /* Number of free buffers */ atomic_t count; /* Number of free buffers */
drm_buf_t *next; /* End pointer */ drm_buf_t *next; /* End pointer */
int waiting; /* Processes waiting on free bufs */ wait_queue_head_t waiting; /* Processes waiting on free bufs */
int low_mark; /* Low water mark */ int low_mark; /* Low water mark */
int high_mark; /* High water mark */ int high_mark; /* High water mark */
atomic_t wfh; /* If waiting for high mark */ atomic_t wfh; /* If waiting for high mark */
struct simplelock lock; /* hope this doesn't need to be linux compatible */ DRM_OS_SPINTYPE lock;
} drm_freelist_t; } drm_freelist_t;
typedef struct drm_buf_entry { typedef struct drm_buf_entry {
@@ -365,7 +269,7 @@ typedef struct drm_hw_lock {
} drm_hw_lock_t; } drm_hw_lock_t;
typedef TAILQ_HEAD(drm_file_list, drm_file) drm_file_list_t; typedef TAILQ_HEAD(drm_file_list, drm_file) drm_file_list_t;
typedef struct drm_file { struct drm_file {
TAILQ_ENTRY(drm_file) link; TAILQ_ENTRY(drm_file) link;
int authenticated; int authenticated;
int minor; int minor;
@@ -375,33 +279,35 @@ typedef struct drm_file {
drm_magic_t magic; drm_magic_t magic;
unsigned long ioctl_count; unsigned long ioctl_count;
struct drm_device *devXX; struct drm_device *devXX;
} drm_file_t; };
typedef struct drm_queue { typedef struct drm_queue {
atomic_t use_count; /* Outstanding uses (+1) */ atomic_t use_count; /* Outstanding uses (+1) */
atomic_t finalization; /* Finalization in progress */ atomic_t finalization; /* Finalization in progress */
atomic_t block_count; /* Count of processes waiting */ atomic_t block_count; /* Count of processes waiting */
atomic_t block_read; /* Queue blocked for reads */ atomic_t block_read; /* Queue blocked for reads */
int read_queue; /* Processes waiting on block_read */ wait_queue_head_t read_queue; /* Processes waiting on block_read */
atomic_t block_write; /* Queue blocked for writes */ atomic_t block_write; /* Queue blocked for writes */
int write_queue; /* Processes waiting on block_write */ wait_queue_head_t write_queue; /* Processes waiting on block_write */
#if 1
atomic_t total_queued; /* Total queued statistic */ atomic_t total_queued; /* Total queued statistic */
atomic_t total_flushed;/* Total flushes statistic */ atomic_t total_flushed;/* Total flushes statistic */
atomic_t total_locks; /* Total locks statistics */ atomic_t total_locks; /* Total locks statistics */
#endif
drm_ctx_flags_t flags; /* Context preserving and 2D-only */ drm_ctx_flags_t flags; /* Context preserving and 2D-only */
drm_waitlist_t waitlist; /* Pending buffers */ drm_waitlist_t waitlist; /* Pending buffers */
int flush_queue; /* Processes waiting until flush */ wait_queue_head_t flush_queue; /* Processes waiting until flush */
} drm_queue_t; } drm_queue_t;
typedef struct drm_lock_data { typedef struct drm_lock_data {
drm_hw_lock_t *hw_lock; /* Hardware lock */ drm_hw_lock_t *hw_lock; /* Hardware lock */
pid_t pid; /* PID of lock holder (0=kernel) */ pid_t pid; /* PID of lock holder (0=kernel) */
int lock_queue; /* Queue of blocked processes */ wait_queue_head_t lock_queue; /* Queue of blocked processes */
unsigned long lock_time; /* Time of last lock in jiffies */ unsigned long lock_time; /* Time of last lock in jiffies */
} drm_lock_data_t; } drm_lock_data_t;
typedef struct drm_device_dma { typedef struct drm_device_dma {
#if 0
/* Performance Counters */ /* Performance Counters */
atomic_t total_prio; /* Total DRM_DMA_PRIORITY */ atomic_t total_prio; /* Total DRM_DMA_PRIORITY */
atomic_t total_bytes; /* Total bytes DMA'd */ atomic_t total_bytes; /* Total bytes DMA'd */
@@ -415,27 +321,28 @@ typedef struct drm_device_dma {
atomic_t total_tried; /* Tried next_buffer */ atomic_t total_tried; /* Tried next_buffer */
atomic_t total_hit; /* Sent next_buffer */ atomic_t total_hit; /* Sent next_buffer */
atomic_t total_lost; /* Lost interrupt */ atomic_t total_lost; /* Lost interrupt */
#endif
drm_buf_entry_t bufs[DRM_MAX_ORDER+1]; drm_buf_entry_t bufs[DRM_MAX_ORDER+1];
int buf_count; int buf_count;
drm_buf_t **buflist; /* Vector of pointers info bufs */ drm_buf_t **buflist; /* Vector of pointers info bufs */
int seg_count; int seg_count;
int page_count; int page_count;
vm_offset_t *pagelist; unsigned long *pagelist;
unsigned long byte_count; unsigned long byte_count;
enum { enum {
_DRM_DMA_USE_AGP = 0x01 _DRM_DMA_USE_AGP = 0x01,
_DRM_DMA_USE_SG = 0x02
} flags; } flags;
/* DMA support */ /* DMA support */
drm_buf_t *this_buffer; /* Buffer being sent */ drm_buf_t *this_buffer; /* Buffer being sent */
drm_buf_t *next_buffer; /* Selected buffer to send */ drm_buf_t *next_buffer; /* Selected buffer to send */
drm_queue_t *next_queue; /* Queue from which buffer selected*/ drm_queue_t *next_queue; /* Queue from which buffer selected*/
int waiting; /* Processes waiting on free bufs */ wait_queue_head_t waiting; /* Processes waiting on free bufs */
} drm_device_dma_t; } drm_device_dma_t;
#ifdef DRM_AGP #if __REALLY_HAVE_AGP
typedef struct drm_agp_mem { typedef struct drm_agp_mem {
void *handle; void *handle;
unsigned long bound; /* address */ unsigned long bound; /* address */
@@ -454,11 +361,30 @@ typedef struct drm_agp_head {
int acquired; int acquired;
unsigned long base; unsigned long base;
int agp_mtrr; int agp_mtrr;
int cant_use_aperture;
unsigned long page_mask;
} drm_agp_head_t; } drm_agp_head_t;
#endif #endif
typedef struct drm_device { typedef struct drm_sg_mem {
unsigned long handle;
void *virtual;
int pages;
struct page **pagelist;
} drm_sg_mem_t;
typedef struct drm_sigdata {
int context;
drm_hw_lock_t *lock;
} drm_sigdata_t;
typedef TAILQ_HEAD(drm_map_list, drm_map_list_entry) drm_map_list_t;
typedef struct drm_map_list_entry {
TAILQ_ENTRY(drm_map_list_entry) link;
drm_map_t *map;
} drm_map_list_entry_t;
struct drm_device {
const char *name; /* Simple driver name */ const char *name; /* Simple driver name */
char *unique; /* Unique identifier: e.g., busid */ char *unique; /* Unique identifier: e.g., busid */
int unique_len; /* Length of unique field */ int unique_len; /* Length of unique field */
@@ -472,9 +398,8 @@ typedef struct drm_device {
struct proc_dir_entry *root; /* Root for this device's entries */ struct proc_dir_entry *root; /* Root for this device's entries */
/* Locks */ /* Locks */
struct simplelock count_lock; /* For inuse, open_count, buf_use */ DRM_OS_SPINTYPE count_lock; /* For inuse, open_count, buf_use */
struct lock dev_lock; /* For others */ struct lock dev_lock; /* For others */
/* Usage Counters */ /* Usage Counters */
int open_count; /* Outstanding files open */ int open_count; /* Outstanding files open */
atomic_t ioctl_count; /* Outstanding IOCTLs pending */ atomic_t ioctl_count; /* Outstanding IOCTLs pending */
@@ -482,26 +407,22 @@ typedef struct drm_device {
int buf_use; /* Buffers in use -- cannot alloc */ int buf_use; /* Buffers in use -- cannot alloc */
atomic_t buf_alloc; /* Buffer allocation in progress */ atomic_t buf_alloc; /* Buffer allocation in progress */
/* Performance Counters */ /* Performance counters */
atomic_t total_open; unsigned long counters;
atomic_t total_close; drm_stat_type_t types[15];
atomic_t total_ioctl; atomic_t counts[15];
atomic_t total_irq; /* Total interruptions */
atomic_t total_ctx; /* Total context switches */
atomic_t total_locks;
atomic_t total_unlocks;
atomic_t total_contends;
atomic_t total_sleeps;
/* Authentication */ /* Authentication */
drm_file_list_t files; drm_file_list_t files;
drm_magic_head_t magiclist[DRM_HASH_SIZE]; drm_magic_head_t magiclist[DRM_HASH_SIZE];
/* Memory management */ /* Memory management */
drm_map_t **maplist; /* Vector of pointers to regions */ drm_map_list_t *maplist; /* Linked list of regions */
int map_count; /* Number of mappable regions */ int map_count; /* Number of mappable regions */
drm_map_t **context_sareas;
int max_context;
drm_vma_entry_t *vmalist; /* List of vmas (for debugging) */ drm_vma_entry_t *vmalist; /* List of vmas (for debugging) */
drm_lock_data_t lock; /* Information on hardware lock */ drm_lock_data_t lock; /* Information on hardware lock */
@@ -513,22 +434,23 @@ typedef struct drm_device {
drm_device_dma_t *dma; /* Optional pointer for DMA support */ drm_device_dma_t *dma; /* Optional pointer for DMA support */
/* Context support */ /* Context support */
struct resource *irq; /* Interrupt used by board */ int irq; /* Interrupt used by board */
struct resource *irqr; /* Resource for interrupt used by board */
void *irqh; /* Handle from bus_setup_intr */ void *irqh; /* Handle from bus_setup_intr */
__volatile__ long context_flag; /* Context swapping flag */ __volatile__ long context_flag; /* Context swapping flag */
__volatile__ long interrupt_flag;/* Interruption handler flag */ __volatile__ long interrupt_flag; /* Interruption handler flag */
__volatile__ long dma_flag; /* DMA dispatch flag */ __volatile__ long dma_flag; /* DMA dispatch flag */
struct callout timer; /* Timer for delaying ctx switch */ struct callout timer; /* Timer for delaying ctx switch */
int context_wait; /* Processes waiting on ctx switch */ wait_queue_head_t context_wait; /* Processes waiting on ctx switch */
int last_checked; /* Last context checked for DMA */ int last_checked; /* Last context checked for DMA */
int last_context; /* Last current context */ int last_context; /* Last current context */
int last_switch; /* Time at last context switch */ unsigned long last_switch; /* jiffies at last context switch */
#if __FreeBSD_version >= 400005 #if __FreeBSD_version >= 400005
struct task task; struct task task;
#endif #endif
struct timespec ctx_start; cycles_t ctx_start;
struct timespec lck_start; cycles_t lck_start;
#if DRM_DMA_HISTOGRAM #if __HAVE_DMA_HISTOGRAM
drm_histogram_t histo; drm_histogram_t histo;
#endif #endif
@@ -540,196 +462,171 @@ typedef struct drm_device {
char *buf_end; /* End pointer */ char *buf_end; /* End pointer */
struct sigio *buf_sigio; /* Processes waiting for SIGIO */ struct sigio *buf_sigio; /* Processes waiting for SIGIO */
struct selinfo buf_sel; /* Workspace for select/poll */ struct selinfo buf_sel; /* Workspace for select/poll */
int buf_readers; /* Processes waiting to read */ int buf_selecting;/* True if poll sleeper */
int buf_writers; /* Processes waiting to ctx switch */ wait_queue_head_t buf_readers; /* Processes waiting to read */
int buf_selecting; /* True if poll sleeper */ wait_queue_head_t buf_writers; /* Processes waiting to ctx switch */
/* Sysctl support */ /* Sysctl support */
struct drm_sysctl_info *sysctl; struct drm_sysctl_info *sysctl;
#ifdef DRM_AGP #if __REALLY_HAVE_AGP
drm_agp_head_t *agp; drm_agp_head_t *agp;
#endif #endif
u_int32_t *ctx_bitmap; struct pci_dev *pdev;
void *dev_private; #ifdef __alpha__
} drm_device_t; #if LINUX_VERSION_CODE < 0x020403
struct pci_controler *hose;
/* Internal function definitions */
/* Misc. support (init.c) */
extern int drm_flags;
extern void drm_parse_options(char *s);
/* Device support (fops.c) */
extern drm_file_t *drm_find_file_by_proc(drm_device_t *dev, struct proc *p);
extern int drm_open_helper(dev_t kdev, int flags, int fmt, struct proc *p,
drm_device_t *dev);
extern d_close_t drm_close;
extern d_read_t drm_read;
extern d_write_t drm_write;
extern d_poll_t drm_poll;
extern int drm_fsetown(dev_t kdev, u_long cmd, caddr_t data,
int flags, struct proc *p);
extern int drm_fgetown(dev_t kdev, u_long cmd, caddr_t data,
int flags, struct proc *p);
extern int drm_write_string(drm_device_t *dev, const char *s);
#if 0
/* Mapping support (vm.c) */
extern unsigned long drm_vm_nopage(struct vm_area_struct *vma,
unsigned long address,
int write_access);
extern unsigned long drm_vm_shm_nopage(struct vm_area_struct *vma,
unsigned long address,
int write_access);
extern unsigned long drm_vm_dma_nopage(struct vm_area_struct *vma,
unsigned long address,
int write_access);
extern void drm_vm_open(struct vm_area_struct *vma);
extern void drm_vm_close(struct vm_area_struct *vma);
extern int drm_mmap_dma(struct file *filp,
struct vm_area_struct *vma);
#endif
extern d_mmap_t drm_mmap;
/* Proc support (proc.c) */
extern int drm_sysctl_init(drm_device_t *dev);
extern int drm_sysctl_cleanup(drm_device_t *dev);
/* Memory management support (memory.c) */
extern void drm_mem_init(void);
#if __FreeBSD_version < 411000
#define DRM_SYSCTL_HANDLER_ARGS SYSCTL_HANDLER_ARGS
#else #else
#define DRM_SYSCTL_HANDLER_ARGS (SYSCTL_HANDLER_ARGS) struct pci_controller *hose;
#endif #endif
extern int drm_mem_info DRM_SYSCTL_HANDLER_ARGS;
extern void *drm_alloc(size_t size, int area);
extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size,
int area);
extern char *drm_strdup(const char *s, int area);
extern void drm_strfree(char *s, int area);
extern void drm_free(void *pt, size_t size, int area);
extern unsigned long drm_alloc_pages(int order, int area);
extern void drm_free_pages(unsigned long address, int order,
int area);
extern void *drm_ioremap(unsigned long offset, unsigned long size);
extern void drm_ioremapfree(void *pt, unsigned long size);
#ifdef DRM_AGP
extern void *drm_alloc_agp(int pages, u_int32_t type);
extern int drm_free_agp(void *handle, int pages);
extern int drm_bind_agp(void *handle, unsigned int start);
extern int drm_unbind_agp(void *handle);
#endif #endif
drm_sg_mem_t *sg; /* Scatter gather memory */
unsigned long *ctx_bitmap;
void *dev_private;
drm_sigdata_t sigdata; /* For block_all_signals */
sigset_t sigmask;
};
/* Buffer management support (bufs.c) */ extern int DRM(flags);
extern int drm_order(unsigned long size); extern void DRM(parse_options)( char *s );
extern d_ioctl_t drm_addmap; extern int DRM(cpu_valid)( void );
extern d_ioctl_t drm_addbufs;
extern d_ioctl_t drm_infobufs;
extern d_ioctl_t drm_markbufs;
extern d_ioctl_t drm_freebufs;
extern d_ioctl_t drm_mapbufs;
/* Authentication (drm_auth.h) */
/* Buffer list management support (lists.c) */ extern int DRM(add_magic)(drm_device_t *dev, drm_file_t *priv,
extern int drm_waitlist_create(drm_waitlist_t *bl, int count);
extern int drm_waitlist_destroy(drm_waitlist_t *bl);
extern int drm_waitlist_put(drm_waitlist_t *bl, drm_buf_t *buf);
extern drm_buf_t *drm_waitlist_get(drm_waitlist_t *bl);
extern int drm_freelist_create(drm_freelist_t *bl, int count);
extern int drm_freelist_destroy(drm_freelist_t *bl);
extern int drm_freelist_put(drm_device_t *dev, drm_freelist_t *bl,
drm_buf_t *buf);
extern drm_buf_t *drm_freelist_get(drm_freelist_t *bl, int block);
/* DMA support (gen_dma.c) */
extern void drm_dma_setup(drm_device_t *dev);
extern void drm_dma_takedown(drm_device_t *dev);
extern void drm_free_buffer(drm_device_t *dev, drm_buf_t *buf);
extern void drm_reclaim_buffers(drm_device_t *dev, pid_t pid);
extern int drm_context_switch(drm_device_t *dev, int old, int new);
extern int drm_context_switch_complete(drm_device_t *dev, int new);
extern void drm_wakeup(drm_device_t *dev, drm_buf_t *buf);
extern void drm_clear_next_buffer(drm_device_t *dev);
extern int drm_select_queue(drm_device_t *dev,
void (*wrapper)(void *));
extern int drm_dma_enqueue(drm_device_t *dev, drm_dma_t *dma);
extern int drm_dma_get_buffers(drm_device_t *dev, drm_dma_t *dma);
#if DRM_DMA_HISTOGRAM
extern int drm_histogram_slot(struct timespec *ts);
extern void drm_histogram_compute(drm_device_t *dev, drm_buf_t *buf);
#endif
/* Misc. IOCTL support (ioctl.c) */
extern d_ioctl_t drm_irq_busid;
extern d_ioctl_t drm_getunique;
extern d_ioctl_t drm_setunique;
/* Context IOCTL support (context.c) */
extern d_ioctl_t drm_resctx;
extern d_ioctl_t drm_addctx;
extern d_ioctl_t drm_modctx;
extern d_ioctl_t drm_getctx;
extern d_ioctl_t drm_switchctx;
extern d_ioctl_t drm_newctx;
extern d_ioctl_t drm_rmctx;
/* Drawable IOCTL support (drawable.c) */
extern d_ioctl_t drm_adddraw;
extern d_ioctl_t drm_rmdraw;
/* Authentication IOCTL support (auth.c) */
extern int drm_add_magic(drm_device_t *dev, drm_file_t *priv,
drm_magic_t magic); drm_magic_t magic);
extern int drm_remove_magic(drm_device_t *dev, drm_magic_t magic); extern int DRM(remove_magic)(drm_device_t *dev, drm_magic_t magic);
extern d_ioctl_t drm_getmagic;
extern d_ioctl_t drm_authmagic;
/* Driver support (drm_drv.h) */
extern int DRM(version)( DRM_OS_IOCTL );
extern int DRM(write_string)(drm_device_t *dev, const char *s);
/* Locking IOCTL support (lock.c) */ /* Memory management support (drm_memory.h) */
extern d_ioctl_t drm_block; extern void DRM(mem_init)(void);
extern d_ioctl_t drm_unblock; extern void *DRM(alloc)(size_t size, int area);
extern int drm_lock_take(__volatile__ unsigned int *lock, extern void *DRM(realloc)(void *oldpt, size_t oldsize, size_t size,
int area);
extern char *DRM(strdup)(const char *s, int area);
extern void DRM(strfree)(char *s, int area);
extern void DRM(free)(void *pt, size_t size, int area);
extern unsigned long DRM(alloc_pages)(int order, int area);
extern void DRM(free_pages)(unsigned long address, int order,
int area);
extern void *DRM(ioremap)(unsigned long offset, unsigned long size);
extern void DRM(ioremapfree)(void *pt, unsigned long size);
#if __REALLY_HAVE_AGP
extern agp_memory *DRM(alloc_agp)(int pages, u32 type);
extern int DRM(free_agp)(agp_memory *handle, int pages);
extern int DRM(bind_agp)(agp_memory *handle, unsigned int start);
extern int DRM(unbind_agp)(agp_memory *handle);
#endif
extern int DRM(context_switch)(drm_device_t *dev, int old, int new);
extern int DRM(context_switch_complete)(drm_device_t *dev, int new);
#if __HAVE_CTX_BITMAP
extern int DRM(ctxbitmap_init)( drm_device_t *dev );
extern void DRM(ctxbitmap_cleanup)( drm_device_t *dev );
extern void DRM(ctxbitmap_free)( drm_device_t *dev, int ctx_handle );
extern int DRM(ctxbitmap_next)( drm_device_t *dev );
#endif
/* Locking IOCTL support (drm_lock.h) */
extern int DRM(lock_take)(__volatile__ unsigned int *lock,
unsigned int context); unsigned int context);
extern int drm_lock_transfer(drm_device_t *dev, extern int DRM(lock_transfer)(drm_device_t *dev,
__volatile__ unsigned int *lock, __volatile__ unsigned int *lock,
unsigned int context); unsigned int context);
extern int drm_lock_free(drm_device_t *dev, extern int DRM(lock_free)(drm_device_t *dev,
__volatile__ unsigned int *lock, __volatile__ unsigned int *lock,
unsigned int context); unsigned int context);
extern d_ioctl_t drm_finish; extern int DRM(flush_unblock)(drm_device_t *dev, int context,
extern int drm_flush_unblock(drm_device_t *dev, int context,
drm_lock_flags_t flags); drm_lock_flags_t flags);
extern int drm_flush_block_and_flush(drm_device_t *dev, int context, extern int DRM(flush_block_and_flush)(drm_device_t *dev, int context,
drm_lock_flags_t flags); drm_lock_flags_t flags);
extern int DRM(notifier)(void *priv);
/* Context Bitmap support (ctxbitmap.c) */ /* Buffer management support (drm_bufs.h) */
extern int drm_ctxbitmap_init(drm_device_t *dev); extern int DRM(order)( unsigned long size );
extern void drm_ctxbitmap_cleanup(drm_device_t *dev);
extern int drm_ctxbitmap_next(drm_device_t *dev);
extern void drm_ctxbitmap_free(drm_device_t *dev, int ctx_handle);
#ifdef DRM_AGP #if __HAVE_DMA
/* AGP/GART support (agpsupport.c) */ /* DMA support (drm_dma.h) */
extern drm_agp_head_t *drm_agp_init(void); extern int DRM(dma_setup)(drm_device_t *dev);
extern d_ioctl_t drm_agp_acquire; extern void DRM(dma_takedown)(drm_device_t *dev);
extern d_ioctl_t drm_agp_release; extern void DRM(free_buffer)(drm_device_t *dev, drm_buf_t *buf);
extern d_ioctl_t drm_agp_enable; extern void DRM(reclaim_buffers)(drm_device_t *dev, pid_t pid);
extern d_ioctl_t drm_agp_info; #if __HAVE_OLD_DMA
extern d_ioctl_t drm_agp_alloc; /* GH: This is a dirty hack for now...
extern d_ioctl_t drm_agp_free; */
extern d_ioctl_t drm_agp_unbind; extern void DRM(clear_next_buffer)(drm_device_t *dev);
extern d_ioctl_t drm_agp_bind; extern int DRM(select_queue)(drm_device_t *dev,
void (*wrapper)(unsigned long));
extern int DRM(dma_enqueue)(drm_device_t *dev, drm_dma_t *dma);
extern int DRM(dma_get_buffers)(drm_device_t *dev, drm_dma_t *dma);
#endif
#if __HAVE_DMA_IRQ
extern int DRM(irq_install)( drm_device_t *dev, int irq );
extern int DRM(irq_uninstall)( drm_device_t *dev );
extern void DRM(dma_service)( DRM_OS_IRQ_ARGS );
#if __HAVE_DMA_IRQ_BH
extern void DRM(dma_immediate_bh)( DRM_OS_TASKQUEUE_ARGS );
#endif #endif
#endif #endif
#if DRM_DMA_HISTOGRAM
extern int DRM(histogram_slot)(unsigned long count);
extern void DRM(histogram_compute)(drm_device_t *dev, drm_buf_t *buf);
#endif
/* Buffer list support (drm_lists.h) */
#if __HAVE_DMA_WAITLIST
extern int DRM(waitlist_create)(drm_waitlist_t *bl, int count);
extern int DRM(waitlist_destroy)(drm_waitlist_t *bl);
extern int DRM(waitlist_put)(drm_waitlist_t *bl, drm_buf_t *buf);
extern drm_buf_t *DRM(waitlist_get)(drm_waitlist_t *bl);
#endif
#if __HAVE_DMA_FREELIST
extern int DRM(freelist_create)(drm_freelist_t *bl, int count);
extern int DRM(freelist_destroy)(drm_freelist_t *bl);
extern int DRM(freelist_put)(drm_device_t *dev, drm_freelist_t *bl,
drm_buf_t *buf);
extern drm_buf_t *DRM(freelist_get)(drm_freelist_t *bl, int block);
#endif
#endif /* __HAVE_DMA */
#if __REALLY_HAVE_AGP
/* AGP/GART support (drm_agpsupport.h) */
extern drm_agp_head_t *DRM(agp_init)(void);
extern void DRM(agp_uninit)(void);
extern void DRM(agp_do_release)(void);
extern agp_memory *DRM(agp_allocate_memory)(size_t pages, u32 type);
extern int DRM(agp_free_memory)(agp_memory *handle);
extern int DRM(agp_bind_memory)(agp_memory *handle, off_t start);
extern int DRM(agp_unbind_memory)(agp_memory *handle);
#endif
/* Proc support (drm_proc.h) */
extern struct proc_dir_entry *DRM(proc_init)(drm_device_t *dev,
int minor,
struct proc_dir_entry *root,
struct proc_dir_entry **dev_root);
extern int DRM(proc_cleanup)(int minor,
struct proc_dir_entry *root,
struct proc_dir_entry *dev_root);
#if __HAVE_SG
/* Scatter Gather Support (drm_scatter.h) */
extern void DRM(sg_cleanup)(drm_sg_mem_t *entry);
#endif
#if __REALLY_HAVE_SG
/* ATI PCIGART support (ati_pcigart.h) */
extern int DRM(ati_pcigart_init)(drm_device_t *dev,
unsigned long *addr,
dma_addr_t *bus_addr);
extern int DRM(ati_pcigart_cleanup)(drm_device_t *dev,
unsigned long addr,
dma_addr_t bus_addr);
#endif
#endif /* __KERNEL__ */
#endif #endif


@@ -1,10 +1,10 @@
# $FreeBSD$ # $FreeBSD$
KMOD = gamma KMOD = gamma
NOMAN= YES
SRCS = gamma_drv.c gamma_dma.c SRCS = gamma_drv.c gamma_dma.c
SRCS += device_if.h bus_if.h pci_if.h SRCS += device_if.h bus_if.h pci_if.h opt_drm_linux.h
CFLAGS += ${DEBUG_FLAGS} -I. -I.. CFLAGS += ${DEBUG_FLAGS} -I. -I..
KMODDEPS = drm
@: @:
ln -sf /sys @ ln -sf /sys @
@@ -12,4 +12,14 @@ KMODDEPS = drm
machine: machine:
ln -sf /sys/i386/include machine ln -sf /sys/i386/include machine
.if ${MACHINE_ARCH} == "i386"
# This line enables linux ioctl handling
# If you want support for this uncomment this line
#TDFX_OPTS= "\#define DRM_LINUX" 1
.endif
opt_drm_linux.h:
touch opt_drm_linux.h
echo $(TDFX_OPTS) >> opt_drm_linux.h
.include <bsd.kmod.mk> .include <bsd.kmod.mk>

bsd/i810_drm.h (new file)

@@ -0,0 +1,201 @@
#ifndef _I810_DRM_H_
#define _I810_DRM_H_
/* WARNING: These defines must be the same as what the Xserver uses.
* If you change them, you must change the defines in the Xserver.
*/
#ifndef _I810_DEFINES_
#define _I810_DEFINES_
#define I810_DMA_BUF_ORDER 12
#define I810_DMA_BUF_SZ (1<<I810_DMA_BUF_ORDER)
#define I810_DMA_BUF_NR 256
#define I810_NR_SAREA_CLIPRECTS 8
/* Each region is a minimum of 64k, and there are at most 64 of them.
*/
#define I810_NR_TEX_REGIONS 64
#define I810_LOG_MIN_TEX_REGION_SIZE 16
#endif
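/*
 * For reference, the sizes implied by the defines above: each DMA
 * buffer is 1 << I810_DMA_BUF_ORDER = 4096 bytes, so the 256 buffers
 * cover 1 MB in total; the smallest texture region is
 * 1 << I810_LOG_MIN_TEX_REGION_SIZE = 64 KB, which is where the
 * "minimum of 64k" figure for the (at most 64) regions below comes from.
 */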
#define I810_UPLOAD_TEX0IMAGE 0x1 /* handled clientside */
#define I810_UPLOAD_TEX1IMAGE 0x2 /* handled clientside */
#define I810_UPLOAD_CTX 0x4
#define I810_UPLOAD_BUFFERS 0x8
#define I810_UPLOAD_TEX0 0x10
#define I810_UPLOAD_TEX1 0x20
#define I810_UPLOAD_CLIPRECTS 0x40
/* Indices into buf.Setup where various bits of state are mirrored per
* context and per buffer. These can be fired at the card as a unit,
* or in a piecewise fashion as required.
*/
/* Destbuffer state
* - backbuffer linear offset and pitch -- invariant in the current dri
* - zbuffer linear offset and pitch -- also invariant
* - drawing origin in back and depth buffers.
*
* Keep the depth/back buffer state here to accommodate private buffers
* in the future.
*/
#define I810_DESTREG_DI0 0 /* CMD_OP_DESTBUFFER_INFO (2 dwords) */
#define I810_DESTREG_DI1 1
#define I810_DESTREG_DV0 2 /* GFX_OP_DESTBUFFER_VARS (2 dwords) */
#define I810_DESTREG_DV1 3
#define I810_DESTREG_DR0 4 /* GFX_OP_DRAWRECT_INFO (4 dwords) */
#define I810_DESTREG_DR1 5
#define I810_DESTREG_DR2 6
#define I810_DESTREG_DR3 7
#define I810_DESTREG_DR4 8
#define I810_DEST_SETUP_SIZE 10
/* Context state
*/
#define I810_CTXREG_CF0 0 /* GFX_OP_COLOR_FACTOR */
#define I810_CTXREG_CF1 1
#define I810_CTXREG_ST0 2 /* GFX_OP_STIPPLE */
#define I810_CTXREG_ST1 3
#define I810_CTXREG_VF 4 /* GFX_OP_VERTEX_FMT */
#define I810_CTXREG_MT 5 /* GFX_OP_MAP_TEXELS */
#define I810_CTXREG_MC0 6 /* GFX_OP_MAP_COLOR_STAGES - stage 0 */
#define I810_CTXREG_MC1 7 /* GFX_OP_MAP_COLOR_STAGES - stage 1 */
#define I810_CTXREG_MC2 8 /* GFX_OP_MAP_COLOR_STAGES - stage 2 */
#define I810_CTXREG_MA0 9 /* GFX_OP_MAP_ALPHA_STAGES - stage 0 */
#define I810_CTXREG_MA1 10 /* GFX_OP_MAP_ALPHA_STAGES - stage 1 */
#define I810_CTXREG_MA2 11 /* GFX_OP_MAP_ALPHA_STAGES - stage 2 */
#define I810_CTXREG_SDM 12 /* GFX_OP_SRC_DEST_MONO */
#define I810_CTXREG_FOG 13 /* GFX_OP_FOG_COLOR */
#define I810_CTXREG_B1 14 /* GFX_OP_BOOL_1 */
#define I810_CTXREG_B2 15 /* GFX_OP_BOOL_2 */
#define I810_CTXREG_LCS 16 /* GFX_OP_LINEWIDTH_CULL_SHADE_MODE */
#define I810_CTXREG_PV 17 /* GFX_OP_PV_RULE -- Invariant! */
#define I810_CTXREG_ZA 18 /* GFX_OP_ZBIAS_ALPHAFUNC */
#define I810_CTXREG_AA 19 /* GFX_OP_ANTIALIAS */
#define I810_CTX_SETUP_SIZE 20
/* Texture state (per tex unit)
*/
#define I810_TEXREG_MI0 0 /* GFX_OP_MAP_INFO (4 dwords) */
#define I810_TEXREG_MI1 1
#define I810_TEXREG_MI2 2
#define I810_TEXREG_MI3 3
#define I810_TEXREG_MF 4 /* GFX_OP_MAP_FILTER */
#define I810_TEXREG_MLC 5 /* GFX_OP_MAP_LOD_CTL */
#define I810_TEXREG_MLL 6 /* GFX_OP_MAP_LOD_LIMITS */
#define I810_TEXREG_MCS 7 /* GFX_OP_MAP_COORD_SETS ??? */
#define I810_TEX_SETUP_SIZE 8
#define I810_FRONT 0x1
#define I810_BACK 0x2
#define I810_DEPTH 0x4
typedef struct _drm_i810_init {
enum {
I810_INIT_DMA = 0x01,
I810_CLEANUP_DMA = 0x02
} func;
#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0)
int ring_map_idx;
int buffer_map_idx;
#else
unsigned int mmio_offset;
unsigned int buffers_offset;
#endif
int sarea_priv_offset;
unsigned int ring_start;
unsigned int ring_end;
unsigned int ring_size;
unsigned int front_offset;
unsigned int back_offset;
unsigned int depth_offset;
unsigned int overlay_offset;
unsigned int overlay_physical;
unsigned int w;
unsigned int h;
unsigned int pitch;
unsigned int pitch_bits;
} drm_i810_init_t;
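/*
 * The CONFIG_XFREE86_VERSION conditional above tracks an interface
 * change: servers older than XFree86 4.1 identify the register and DMA
 * buffer mappings by their index in the map list (ring_map_idx,
 * buffer_map_idx), while newer servers pass the map offsets
 * (mmio_offset, buffers_offset) directly.
 */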
/* Warning: If you change the SAREA structure you must change the Xserver
* structure as well */
typedef struct _drm_i810_tex_region {
unsigned char next, prev; /* indices to form a circular LRU */
unsigned char in_use; /* owned by a client, or free? */
int age; /* tracked by clients to update local LRU's */
} drm_i810_tex_region_t;
typedef struct _drm_i810_sarea {
unsigned int ContextState[I810_CTX_SETUP_SIZE];
unsigned int BufferState[I810_DEST_SETUP_SIZE];
unsigned int TexState[2][I810_TEX_SETUP_SIZE];
unsigned int dirty;
unsigned int nbox;
drm_clip_rect_t boxes[I810_NR_SAREA_CLIPRECTS];
/* Maintain an LRU of contiguous regions of texture space. If
* you think you own a region of texture memory, and it has an
* age different to the one you set, then you are mistaken and
* it has been stolen by another client. If global texAge
* hasn't changed, there is no need to walk the list.
*
* These regions can be used as a proxy for the fine-grained
* texture information of other clients - by maintaining them
* in the same lru which is used to age their own textures,
* clients have an approximate lru for the whole of global
* texture space, and can make informed decisions as to which
* areas to kick out. There is no need to choose whether to
* kick out your own texture or someone else's - simply eject
* them all in LRU order.
*/
drm_i810_tex_region_t texList[I810_NR_TEX_REGIONS+1];
/* Last elt is the sentinel */
int texAge; /* last time texture was uploaded */
int last_enqueue; /* last time a buffer was enqueued */
int last_dispatch; /* age of the most recently dispatched buffer */
int last_quiescent; /* */
int ctxOwner; /* last context to upload state */
int vertex_prim;
} drm_i810_sarea_t;
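/*
 * Client-side sketch of the ownership check described above.  The
 * my_last_age bookkeeping is the client's own and not part of this
 * header: a client remembers the age it stamped into a region and,
 * before reusing the texture, verifies that no other client has stolen
 * the region in the meantime.
 *
 *	drm_i810_sarea_t *sarea;	(the mapped shared area)
 *	int r;				(region this client last used)
 *
 *	if (sarea->texList[r].age != my_last_age[r]) {
 *		(region r was stolen -- re-upload the texture first)
 *	}
 */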
typedef struct _drm_i810_clear {
int clear_color;
int clear_depth;
int flags;
} drm_i810_clear_t;
/* These may be placeholders if we have more cliprects than
* I810_NR_SAREA_CLIPRECTS. In that case, the client sets discard to
* false, indicating that the buffer will be dispatched again with a
* new set of cliprects.
*/
typedef struct _drm_i810_vertex {
int idx; /* buffer index */
int used; /* nr bytes in use */
int discard; /* client is finished with the buffer? */
} drm_i810_vertex_t;
typedef struct _drm_i810_copy_t {
int idx; /* buffer index */
int used; /* nr bytes in use */
void *address; /* Address to copy from */
} drm_i810_copy_t;
typedef struct drm_i810_dma {
void *virtual;
int request_idx;
int request_size;
int granted;
} drm_i810_dma_t;
#endif /* _I810_DRM_H_ */


@@ -1,604 +0,0 @@
/* mga_bufs.c -- IOCTLs to manage buffers
* Created: Thu Jan 6 01:47:26 2000 by jhartmann@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors: Rickard E. (Rik) Faith <faith@valinux.com>
* Jeff Hartmann <jhartmann@valinux.com>
*
*
*/
#define __NO_VERSION__
#include "drmP.h"
#include "mga_drv.h"
#include <sys/mman.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
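/*
 * The two helpers below are the AGP and PCI flavours of the ADDBUFS
 * ioctl for the MGA driver.  mga_addbufs_agp() carves the AGP range
 * starting at request.agp_start into `count' buffers of 1 << order
 * bytes each (drm_order(), from the DRM core, effectively rounds the
 * requested size up to the next power of two), records them in
 * dma->buflist and seeds a per-order freelist.  mga_addbufs_pci() does
 * the same for PCI DMA, but must allocate the backing pages itself with
 * drm_alloc_pages() and track them in entry->seglist and dma->pagelist.
 */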
static int
mga_addbufs_agp(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
drm_device_dma_t *dma = dev->dma;
drm_buf_desc_t request;
drm_buf_entry_t *entry;
drm_buf_t *buf;
unsigned long offset;
unsigned long agp_offset;
int count;
int order;
int size;
int alignment;
int page_order;
int total;
int byte_count;
int i;
if (!dma) return EINVAL;
request = *(drm_buf_desc_t *) data;
count = request.count;
order = drm_order(request.size);
size = 1 << order;
agp_offset = request.agp_start;
alignment = (request.flags & _DRM_PAGE_ALIGN) ? round_page(size) :size;
page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
total = PAGE_SIZE << page_order;
byte_count = 0;
DRM_DEBUG("count: %d\n", count);
DRM_DEBUG("order: %d\n", order);
DRM_DEBUG("size: %d\n", size);
DRM_DEBUG("agp_offset: %ld\n", agp_offset);
DRM_DEBUG("alignment: %d\n", alignment);
DRM_DEBUG("page_order: %d\n", page_order);
DRM_DEBUG("total: %d\n", total);
DRM_DEBUG("byte_count: %d\n", byte_count);
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return EINVAL;
if (dev->queue_count) return EBUSY; /* Not while in use */
simple_lock(&dev->count_lock);
if (dev->buf_use) {
simple_unlock(&dev->count_lock);
return EBUSY;
}
atomic_inc(&dev->buf_alloc);
simple_unlock(&dev->count_lock);
lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curproc);
entry = &dma->bufs[order];
if (entry->buf_count) {
lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
atomic_dec(&dev->buf_alloc);
return ENOMEM; /* May only call once for each order */
}
entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
DRM_MEM_BUFS);
if (!entry->buflist) {
lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
atomic_dec(&dev->buf_alloc);
return ENOMEM;
}
memset(entry->buflist, 0, count * sizeof(*entry->buflist));
entry->buf_size = size;
entry->page_order = page_order;
offset = 0;
while(entry->buf_count < count) {
buf = &entry->buflist[entry->buf_count];
buf->idx = dma->buf_count + entry->buf_count;
buf->total = alignment;
buf->order = order;
buf->used = 0;
DRM_DEBUG("offset : %ld\n", offset);
buf->offset = offset; /* Hrm */
buf->bus_address = dev->agp->base + agp_offset + offset;
buf->address = (void *)(agp_offset + offset + dev->agp->base);
buf->next = NULL;
buf->waiting = 0;
buf->pending = 0;
buf->dma_wait = 0;
buf->pid = 0;
buf->dev_private = drm_alloc(sizeof(drm_mga_buf_priv_t), DRM_MEM_BUFS);
buf->dev_priv_size = sizeof(drm_mga_buf_priv_t);
#if DRM_DMA_HISTOGRAM
timespecclear(&buf->time_queued);
timespecclear(&buf->time_dispatched);
timespecclear(&buf->time_completed);
timespecclear(&buf->time_freed);
#endif
offset = offset + alignment;
entry->buf_count++;
byte_count += PAGE_SIZE << page_order;
DRM_DEBUG("buffer %d @ %p\n",
entry->buf_count, buf->address);
}
dma->buflist = drm_realloc(dma->buflist,
dma->buf_count * sizeof(*dma->buflist),
(dma->buf_count + entry->buf_count)
* sizeof(*dma->buflist),
DRM_MEM_BUFS);
for (i = dma->buf_count; i < dma->buf_count + entry->buf_count; i++)
dma->buflist[i] = &entry->buflist[i - dma->buf_count];
dma->buf_count += entry->buf_count;
DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
dma->byte_count += byte_count;
DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
drm_freelist_create(&entry->freelist, entry->buf_count);
for (i = 0; i < entry->buf_count; i++) {
drm_freelist_put(dev, &entry->freelist, &entry->buflist[i]);
}
lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
request.count = entry->buf_count;
request.size = size;
*(drm_buf_desc_t *) data = request;
atomic_dec(&dev->buf_alloc);
DRM_DEBUG("count: %d\n", count);
DRM_DEBUG("order: %d\n", order);
DRM_DEBUG("size: %d\n", size);
DRM_DEBUG("agp_offset: %ld\n", agp_offset);
DRM_DEBUG("alignment: %d\n", alignment);
DRM_DEBUG("page_order: %d\n", page_order);
DRM_DEBUG("total: %d\n", total);
DRM_DEBUG("byte_count: %d\n", byte_count);
dma->flags = _DRM_DMA_USE_AGP;
DRM_DEBUG("dma->flags : %x\n", dma->flags);
return 0;
}
static int
mga_addbufs_pci(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
drm_device_dma_t *dma = dev->dma;
drm_buf_desc_t request;
int count;
int order;
int size;
int total;
int page_order;
drm_buf_entry_t *entry;
unsigned long page;
drm_buf_t *buf;
int alignment;
unsigned long offset;
int i;
int byte_count;
int page_count;
if (!dma) return EINVAL;
request = *(drm_buf_desc_t *) data;
count = request.count;
order = drm_order(request.size);
size = 1 << order;
DRM_DEBUG("count = %d, size = %d (%d), order = %d, queue_count = %d\n",
request.count, request.size, size, order, dev->queue_count);
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return EINVAL;
if (dev->queue_count) return EBUSY; /* Not while in use */
alignment = (request.flags & _DRM_PAGE_ALIGN) ? round_page(size) :size;
page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
total = PAGE_SIZE << page_order;
simple_lock(&dev->count_lock);
if (dev->buf_use) {
simple_unlock(&dev->count_lock);
return EBUSY;
}
atomic_inc(&dev->buf_alloc);
simple_unlock(&dev->count_lock);
lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curproc);
entry = &dma->bufs[order];
if (entry->buf_count) {
lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
atomic_dec(&dev->buf_alloc);
return ENOMEM; /* May only call once for each order */
}
entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
DRM_MEM_BUFS);
if (!entry->buflist) {
lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
atomic_dec(&dev->buf_alloc);
return ENOMEM;
}
memset(entry->buflist, 0, count * sizeof(*entry->buflist));
entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
DRM_MEM_SEGS);
if (!entry->seglist) {
drm_free(entry->buflist,
count * sizeof(*entry->buflist),
DRM_MEM_BUFS);
lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
atomic_dec(&dev->buf_alloc);
return ENOMEM;
}
memset(entry->seglist, 0, count * sizeof(*entry->seglist));
dma->pagelist = drm_realloc(dma->pagelist,
dma->page_count * sizeof(*dma->pagelist),
(dma->page_count + (count << page_order))
* sizeof(*dma->pagelist),
DRM_MEM_PAGES);
DRM_DEBUG("pagelist: %d entries\n",
dma->page_count + (count << page_order));
entry->buf_size = size;
entry->page_order = page_order;
byte_count = 0;
page_count = 0;
while (entry->buf_count < count) {
if (!(page = drm_alloc_pages(page_order, DRM_MEM_DMA))) break;
entry->seglist[entry->seg_count++] = page;
for (i = 0; i < (1 << page_order); i++) {
DRM_DEBUG("page %d @ 0x%08lx\n",
dma->page_count + page_count,
page + PAGE_SIZE * i);
dma->pagelist[dma->page_count + page_count++]
= page + PAGE_SIZE * i;
}
for (offset = 0;
offset + size <= total && entry->buf_count < count;
offset += alignment, ++entry->buf_count) {
buf = &entry->buflist[entry->buf_count];
buf->idx = dma->buf_count + entry->buf_count;
buf->total = alignment;
buf->order = order;
buf->used = 0;
buf->offset = (dma->byte_count + byte_count + offset);
buf->address = (void *)(page + offset);
buf->next = NULL;
buf->waiting = 0;
buf->pending = 0;
buf->dma_wait = 0;
buf->pid = 0;
#if DRM_DMA_HISTOGRAM
timespecclear(&buf->time_queued);
timespecclear(&buf->time_dispatched);
timespecclear(&buf->time_completed);
timespecclear(&buf->time_freed);
#endif
DRM_DEBUG("buffer %d @ %p\n",
entry->buf_count, buf->address);
}
byte_count += PAGE_SIZE << page_order;
}
dma->buflist = drm_realloc(dma->buflist,
dma->buf_count * sizeof(*dma->buflist),
(dma->buf_count + entry->buf_count)
* sizeof(*dma->buflist),
DRM_MEM_BUFS);
for (i = dma->buf_count; i < dma->buf_count + entry->buf_count; i++)
dma->buflist[i] = &entry->buflist[i - dma->buf_count];
dma->buf_count += entry->buf_count;
dma->seg_count += entry->seg_count;
dma->page_count += entry->seg_count << page_order;
dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
drm_freelist_create(&entry->freelist, entry->buf_count);
for (i = 0; i < entry->buf_count; i++) {
drm_freelist_put(dev, &entry->freelist, &entry->buflist[i]);
}
lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
request.count = entry->buf_count;
request.size = size;
*(drm_buf_desc_t *) data = request;
atomic_dec(&dev->buf_alloc);
return 0;
}
int
mga_addbufs(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_buf_desc_t request;
request = *(drm_buf_desc_t *) data;
if(request.flags & _DRM_AGP_BUFFER)
return mga_addbufs_agp(kdev, cmd, data, flags, p);
else
return mga_addbufs_pci(kdev, cmd, data, flags, p);
}
int
mga_infobufs(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
drm_device_dma_t *dma = dev->dma;
drm_buf_info_t request;
int i;
int count;
int error;
if (!dma) return EINVAL;
simple_lock(&dev->count_lock);
if (atomic_read(&dev->buf_alloc)) {
simple_unlock(&dev->count_lock);
return EBUSY;
}
++dev->buf_use; /* Can't allocate more after this call */
simple_unlock(&dev->count_lock);
request = *(drm_buf_info_t *) data;
for (i = 0, count = 0; i < DRM_MAX_ORDER+1; i++) {
if (dma->bufs[i].buf_count) ++count;
}
DRM_DEBUG("count = %d\n", count);
if (request.count >= count) {
for (i = 0, count = 0; i < DRM_MAX_ORDER+1; i++) {
if (dma->bufs[i].buf_count) {
error = copyout(&dma->bufs[i].buf_count,
&request.list[count].count,
sizeof(dma->bufs[0]
.buf_count));
if (error) return error;
error = copyout(&dma->bufs[i].buf_size,
&request.list[count].size,
sizeof(dma->bufs[0]
.buf_size));
if (error) return error;
error = copyout(&dma->bufs[i]
.freelist.low_mark,
&request.list[count].low_mark,
sizeof(dma->bufs[0]
.freelist.low_mark));
if (error) return error;
error = copyout(&dma->bufs[i]
.freelist.high_mark,
&request.list[count].high_mark,
sizeof(dma->bufs[0]
.freelist.high_mark));
if (error) return error;
DRM_DEBUG("%d %d %d %d %d\n",
i,
dma->bufs[i].buf_count,
dma->bufs[i].buf_size,
dma->bufs[i].freelist.low_mark,
dma->bufs[i].freelist.high_mark);
++count;
}
}
}
request.count = count;
*(drm_buf_info_t *) data = request;
return 0;
}
int
mga_markbufs(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
drm_device_dma_t *dma = dev->dma;
drm_buf_desc_t request;
int order;
drm_buf_entry_t *entry;
if (!dma) return EINVAL;
request = *(drm_buf_desc_t *) data;
DRM_DEBUG("%d, %d, %d\n",
request.size, request.low_mark, request.high_mark);
order = drm_order(request.size);
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return EINVAL;
entry = &dma->bufs[order];
if (request.low_mark < 0 || request.low_mark > entry->buf_count)
return EINVAL;
if (request.high_mark < 0 || request.high_mark > entry->buf_count)
return EINVAL;
entry->freelist.low_mark = request.low_mark;
entry->freelist.high_mark = request.high_mark;
return 0;
}
int
mga_freebufs(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
drm_device_dma_t *dma = dev->dma;
drm_buf_free_t request;
int i;
int error;
int idx;
drm_buf_t *buf;
if (!dma) return EINVAL;
request = *(drm_buf_free_t *) data;
DRM_DEBUG("%d\n", request.count);
for (i = 0; i < request.count; i++) {
error = copyin(&request.list[i],
&idx,
sizeof(idx));
if (error) return error;
if (idx < 0 || idx >= dma->buf_count) {
DRM_ERROR("Index %d (of %d max)\n",
idx, dma->buf_count - 1);
return EINVAL;
}
buf = dma->buflist[idx];
if (buf->pid != p->p_pid) {
DRM_ERROR("Process %d freeing buffer owned by %d\n",
p->p_pid, buf->pid);
return EINVAL;
}
drm_free_buffer(dev, buf);
}
return 0;
}
int
mga_mapbufs(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
drm_device_dma_t *dma = dev->dma;
int retcode = 0;
const int zero = 0;
vm_offset_t virtual;
vm_offset_t address;
drm_buf_map_t request;
int i;
if (!dma) return EINVAL;
DRM_DEBUG("\n");
simple_lock(&dev->count_lock);
if (atomic_read(&dev->buf_alloc)) {
simple_unlock(&dev->count_lock);
DRM_DEBUG("Busy\n");
return EBUSY;
}
++dev->buf_use; /* Can't allocate more after this call */
simple_unlock(&dev->count_lock);
request = *(drm_buf_map_t *) data;
DRM_DEBUG("mga_mapbufs\n");
DRM_DEBUG("dma->flags : %x\n", dma->flags);
if (request.count >= dma->buf_count) {
if(dma->flags & _DRM_DMA_USE_AGP) {
drm_mga_private_t *dev_priv = dev->dev_private;
drm_map_t *map = NULL;
map = dev->maplist[dev_priv->buffer_map_idx];
if (!map) {
DRM_DEBUG("map is null\n");
retcode = EINVAL;
goto done;
}
DRM_DEBUG("map->offset : %lx\n", map->offset);
DRM_DEBUG("map->size : %lx\n", map->size);
DRM_DEBUG("map->type : %d\n", map->type);
DRM_DEBUG("map->flags : %x\n", map->flags);
DRM_DEBUG("map->handle : %p\n", map->handle);
DRM_DEBUG("map->mtrr : %d\n", map->mtrr);
virtual = 0;
retcode = vm_mmap(&p->p_vmspace->vm_map,
&virtual,
map->size,
PROT_READ|PROT_WRITE, VM_PROT_ALL,
MAP_SHARED,
SLIST_FIRST(&kdev->si_hlist),
map->offset);
} else {
virtual = 0;
retcode = vm_mmap(&p->p_vmspace->vm_map,
&virtual,
round_page(dma->byte_count),
PROT_READ|PROT_WRITE, VM_PROT_ALL,
MAP_SHARED,
SLIST_FIRST(&kdev->si_hlist),
0);
}
if (retcode) {
/* Real error */
DRM_DEBUG("mmap error\n");
goto done;
}
request.virtual = (void *)virtual;
for (i = 0; i < dma->buf_count; i++) {
retcode = copyout(&dma->buflist[i]->idx,
&request.list[i].idx,
sizeof(request.list[0].idx));
if (retcode) goto done;
retcode = copyout(&dma->buflist[i]->total,
&request.list[i].total,
sizeof(request.list[0].total));
if (retcode) goto done;
retcode = copyout(&zero,
&request.list[i].used,
sizeof(request.list[0].used));
if (retcode) goto done;
address = virtual + dma->buflist[i]->offset;
retcode = copyout(&address,
&request.list[i].address,
sizeof(address));
if (retcode) goto done;
}
}
done:
request.count = dma->buf_count;
DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode);
*(drm_buf_map_t *) data = request;
DRM_DEBUG("retcode : %d\n", retcode);
return retcode;
}
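
For orientation, here is a minimal, hypothetical user-space sketch (not part of this commit; fd, error handling and the drm_buf_pub_t element type are assumed from the era's drm.h) showing how a client consumes the mapbufs path above: it asks mga_mapbufs to mmap the buffer pool into the process and then reads back the per-buffer idx/total/address table.

/* Hedged sketch only: fd is an already-authenticated descriptor on the mga
 * device node, and nbufs must be at least the kernel's dma->buf_count or
 * mga_mapbufs will skip the mapping step. */
#include <sys/ioctl.h>
#include <stdlib.h>
#include "drm.h"

static void *map_mga_buffers(int fd, int nbufs, drm_buf_pub_t **listp)
{
        drm_buf_map_t bm;

        bm.count   = nbufs;
        bm.virtual = NULL;                        /* filled in via vm_mmap() */
        bm.list    = calloc(nbufs, sizeof(*bm.list));
        if (bm.list == NULL)
                return NULL;
        if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &bm) < 0) {
                free(bm.list);
                return NULL;
        }
        *listp = bm.list;                         /* idx/total/address per buffer */
        return bm.virtual;                        /* base of the mapped pool */
}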


@@ -1,200 +0,0 @@
/* mga_context.c -- IOCTLs for mga contexts
* Created: Mon Dec 13 09:51:35 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Author: Rickard E. (Rik) Faith <faith@valinux.com>
* Jeff Hartmann <jhartmann@valinux.com>
*
*/
#define __NO_VERSION__
#include "drmP.h"
#include "mga_drv.h"
static int mga_alloc_queue(drm_device_t *dev)
{
int temp = drm_ctxbitmap_next(dev);
DRM_DEBUG("mga_alloc_queue: %d\n", temp);
return temp;
}
int mga_context_switch(drm_device_t *dev, int old, int new)
{
char buf[64];
atomic_inc(&dev->total_ctx);
if (test_and_set_bit(0, &dev->context_flag)) {
DRM_ERROR("Reentering -- FIXME\n");
return EBUSY;
}
#if DRM_DMA_HISTOGRAM
getnanotime(&dev->ctx_start);
#endif
DRM_DEBUG("Context switch from %d to %d\n", old, new);
if (new == dev->last_context) {
clear_bit(0, &dev->context_flag);
return 0;
}
if (drm_flags & DRM_FLAG_NOCTX) {
mga_context_switch_complete(dev, new);
} else {
sprintf(buf, "C %d %d\n", old, new);
drm_write_string(dev, buf);
}
return 0;
}
int mga_context_switch_complete(drm_device_t *dev, int new)
{
dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
dev->last_switch = ticks;
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
DRM_ERROR("Lock isn't held after context switch\n");
}
/* If a context switch is ever initiated
when the kernel holds the lock, release
that lock here. */
#if DRM_DMA_HISTOGRAM
{
struct timespec ts;
getnanotime(&ts);
timespecsub(&ts, &dev->lck_start);
atomic_inc(&dev->histo.ctx[drm_histogram_slot(&ts)]);
}
#endif
clear_bit(0, &dev->context_flag);
wakeup(&dev->context_wait);
return 0;
}
int
mga_resctx(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_ctx_res_t res;
drm_ctx_t ctx;
int i, error;
DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS);
res = *(drm_ctx_res_t *) data;
if (res.count >= DRM_RESERVED_CONTEXTS) {
memset(&ctx, 0, sizeof(ctx));
for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
ctx.handle = i;
error = copyout(&i, &res.contexts[i], sizeof(i));
if (error) return error;
}
}
res.count = DRM_RESERVED_CONTEXTS;
*(drm_ctx_res_t *) data = res;
return 0;
}
int
mga_addctx(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
drm_ctx_t ctx;
ctx = *(drm_ctx_t *) data;
if ((ctx.handle = mga_alloc_queue(dev)) == DRM_KERNEL_CONTEXT) {
/* Skip kernel's context and get a new one. */
ctx.handle = mga_alloc_queue(dev);
}
if (ctx.handle == -1) {
DRM_DEBUG("Not enough free contexts.\n");
/* Should this return EBUSY instead? */
return ENOMEM;
}
DRM_DEBUG("%d\n", ctx.handle);
*(drm_ctx_t *) data = ctx;
return 0;
}
int
mga_modctx(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
/* This does nothing for the mga */
return 0;
}
int mga_getctx(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_ctx_t ctx;
ctx = *(drm_ctx_t *) data;
/* This is 0, because we don't handle any context flags */
ctx.flags = 0;
*(drm_ctx_t *) data = ctx;
return 0;
}
int mga_switchctx(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
drm_ctx_t ctx;
ctx = *(drm_ctx_t *) data;
DRM_DEBUG("%d\n", ctx.handle);
return mga_context_switch(dev, dev->last_context, ctx.handle);
}
int mga_newctx(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
drm_ctx_t ctx;
ctx = *(drm_ctx_t *) data;
DRM_DEBUG("%d\n", ctx.handle);
mga_context_switch_complete(dev, ctx.handle);
return 0;
}
int mga_rmctx(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
drm_ctx_t ctx;
ctx = *(drm_ctx_t *) data;
DRM_DEBUG("%d\n", ctx.handle);
/*
if(ctx.handle == DRM_KERNEL_CONTEXT+1)
priv->remove_auth_on_close = 1;
*/
if(ctx.handle != DRM_KERNEL_CONTEXT ) {
drm_ctxbitmap_free(dev, ctx.handle);
}
return 0;
}
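
As a usage note, a hedged sketch (assumed user-space code, not from this commit) of the context lifecycle these handlers implement: DRM_IOCTL_ADD_CTX reaches mga_addctx, which hands back a context-bitmap slot, and DRM_IOCTL_RM_CTX reaches mga_rmctx, which returns it.

/* Hedged sketch: fd is an authenticated DRM file descriptor. */
#include <sys/ioctl.h>
#include "drm.h"

static int with_mga_context(int fd)
{
        drm_ctx_t ctx;

        ctx.flags = 0;
        if (ioctl(fd, DRM_IOCTL_ADD_CTX, &ctx) < 0)
                return -1;                        /* no free context slots */
        /* ... hand ctx.handle to the rendering code ... */
        return ioctl(fd, DRM_IOCTL_RM_CTX, &ctx); /* frees the bitmap slot */
}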

File diff suppressed because it is too large


@@ -1,4 +1,4 @@
/* mga_drv.c -- Matrox g200/g400 driver /* mga_drv.c -- Matrox G200/G400 driver -*- linux-c -*-
* Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com * Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com
* *
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
@@ -19,696 +19,80 @@
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* DEALINGS IN THE SOFTWARE. * OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Rickard E. (Rik) Faith <faith@valinux.com>
* Jeff Hartmann <jhartmann@valinux.com>
*
* *
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Gareth Hughes <gareth@valinux.com>
*/ */
#include <sys/types.h>
#include <sys/bus.h>
#include <pci/pcivar.h>
#include <opt_drm_linux.h>
#include "mga.h"
#include "drmP.h" #include "drmP.h"
#include "mga_drv.h" #include "mga_drv.h"
#include <pci/pcivar.h> #define DRIVER_AUTHOR "Gareth Hughes, VA Linux Systems Inc."
MODULE_DEPEND(mga, drm, 1, 1, 1); #define DRIVER_NAME "mga"
MODULE_DEPEND(mga, agp, 1, 1, 1); #define DRIVER_DESC "Matrox G200/G400"
#define DRIVER_DATE "20010321"
#define MGA_NAME "mga" #define DRIVER_MAJOR 3
#define MGA_DESC "Matrox g200/g400" #define DRIVER_MINOR 0
#define MGA_DATE "20000928" #define DRIVER_PATCHLEVEL 2
#define MGA_MAJOR 2
#define MGA_MINOR 0
#define MGA_PATCHLEVEL 0
drm_ctx_t mga_res_ctx; /* List acquired from http://www.yourvote.com/pci/pcihdr.h and xc/xc/programs/Xserver/hw/xfree86/common/xf86PciInfo.h
* Please report to anholt@teleport.com inaccuracies or if a chip you have works that is marked unsupported here.
static int mga_probe(device_t dev) */
{ drm_chipinfo_t DRM(devicelist)[] = {
const char *s = 0; {0x102b, 0x0520, 0, "Matrox G200 (PCI)"},
{0x102b, 0x0521, 1, "Matrox G200 (AGP)"},
switch (pci_get_devid(dev)) { {0x102b, 0x0525, 1, "Matrox G400 (AGP)"},
case 0x0525102b: {0, 0, 0, NULL}
s = "Matrox MGA G400 AGP graphics accelerator";
break;
case 0x0521102b:
s = "Matrox MGA G200 AGP graphics accelerator";
break;
}
if (s) {
device_set_desc(dev, s);
return 0;
}
return ENXIO;
}
static int mga_attach(device_t dev)
{
return mga_init(dev);
}
static int mga_detach(device_t dev)
{
mga_cleanup(dev);
return 0;
}
static device_method_t mga_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, mga_probe),
DEVMETHOD(device_attach, mga_attach),
DEVMETHOD(device_detach, mga_detach),
{ 0, 0 }
}; };
static driver_t mga_driver = { #define DRIVER_IOCTLS \
"drm", [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { mga_dma_buffers, 1, 0 }, \
mga_methods, [DRM_IOCTL_NR(DRM_IOCTL_MGA_INIT)] = { mga_dma_init, 1, 1 }, \
sizeof(drm_device_t), [DRM_IOCTL_NR(DRM_IOCTL_MGA_FLUSH)] = { mga_dma_flush, 1, 0 }, \
}; [DRM_IOCTL_NR(DRM_IOCTL_MGA_RESET)] = { mga_dma_reset, 1, 0 }, \
[DRM_IOCTL_NR(DRM_IOCTL_MGA_SWAP)] = { mga_dma_swap, 1, 0 }, \
static devclass_t mga_devclass; [DRM_IOCTL_NR(DRM_IOCTL_MGA_CLEAR)] = { mga_dma_clear, 1, 0 }, \
#define MGA_SOFTC(unit) \ [DRM_IOCTL_NR(DRM_IOCTL_MGA_VERTEX)] = { mga_dma_vertex, 1, 0 }, \
((drm_device_t *) devclass_get_softc(mga_devclass, unit)) [DRM_IOCTL_NR(DRM_IOCTL_MGA_INDICES)] = { mga_dma_indices, 1, 0 }, \
[DRM_IOCTL_NR(DRM_IOCTL_MGA_ILOAD)] = { mga_dma_iload, 1, 0 }, \
DRIVER_MODULE(if_mga, pci, mga_driver, mga_devclass, 0, 0); [DRM_IOCTL_NR(DRM_IOCTL_MGA_BLIT)] = { mga_dma_blit, 1, 0 },
#define CDEV_MAJOR 145
/* mga_drv.c */ #define __HAVE_COUNTERS 3
static struct cdevsw mga_cdevsw = { #define __HAVE_COUNTER6 _DRM_STAT_IRQ
/* open */ mga_open, #define __HAVE_COUNTER7 _DRM_STAT_PRIMARY
/* close */ mga_close, #define __HAVE_COUNTER8 _DRM_STAT_SECONDARY
/* read */ drm_read,
/* write */ drm_write,
/* ioctl */ mga_ioctl, #include "drm_agpsupport.h"
/* poll */ drm_poll, #include "drm_auth.h"
/* mmap */ drm_mmap, #include "drm_bufs.h"
/* strategy */ nostrategy, #include "drm_context.h"
/* name */ "mga", #include "drm_dma.h"
/* maj */ CDEV_MAJOR, #include "drm_drawable.h"
/* dump */ nodump, #include "drm_drv.h"
/* psize */ nopsize,
/* flags */ D_TTY | D_TRACKCLOSE,
/* bmaj */ -1 #include "drm_fops.h"
}; #include "drm_init.h"
#include "drm_ioctl.h"
static drm_ioctl_desc_t mga_ioctls[] = { #include "drm_lock.h"
[DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { mga_version, 0, 0 }, #include "drm_memory.h"
[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique, 0, 0 }, #include "drm_vm.h"
[DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { drm_getmagic, 0, 0 }, #include "drm_sysctl.h"
[DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { drm_irq_busid, 0, 1 },
DRIVER_MODULE(mga, pci, mga_driver, mga_devclass, 0, 0);
[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { drm_block, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_unblock, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = { mga_control, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { mga_addbufs, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { mga_markbufs, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { mga_infobufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { mga_mapbufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { mga_freebufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { mga_addctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { mga_rmctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { mga_modctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { mga_getctx, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { mga_switchctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { mga_newctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { mga_resctx, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { drm_rmdraw, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { mga_dma, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { mga_lock, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { mga_unlock, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_finish, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { drm_agp_acquire, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { drm_agp_release, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { drm_agp_enable, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { drm_agp_info, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = { drm_agp_alloc, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { drm_agp_free, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { drm_agp_bind, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { drm_agp_unbind, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_MGA_INIT)] = { mga_dma_init, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_MGA_SWAP)] = { mga_swap_bufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_MGA_CLEAR)] = { mga_clear_bufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_MGA_ILOAD)] = { mga_iload, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_MGA_VERTEX)] = { mga_vertex, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_MGA_FLUSH)] = { mga_flush_ioctl, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_MGA_INDICES)] = { mga_indices, 1, 0 },
};
#define MGA_IOCTL_COUNT DRM_ARRAY_SIZE(mga_ioctls)
static int mga_setup(drm_device_t *dev)
{
int i;
device_busy(dev->device);
atomic_set(&dev->ioctl_count, 0);
atomic_set(&dev->vma_count, 0);
dev->buf_use = 0;
atomic_set(&dev->buf_alloc, 0);
drm_dma_setup(dev);
atomic_set(&dev->total_open, 0);
atomic_set(&dev->total_close, 0);
atomic_set(&dev->total_ioctl, 0);
atomic_set(&dev->total_irq, 0);
atomic_set(&dev->total_ctx, 0);
atomic_set(&dev->total_locks, 0);
atomic_set(&dev->total_unlocks, 0);
atomic_set(&dev->total_contends, 0);
atomic_set(&dev->total_sleeps, 0);
for (i = 0; i < DRM_HASH_SIZE; i++) {
dev->magiclist[i].head = NULL;
dev->magiclist[i].tail = NULL;
}
dev->maplist = NULL;
dev->map_count = 0;
dev->vmalist = NULL;
dev->lock.hw_lock = NULL;
dev->lock.lock_queue = 0;
dev->queue_count = 0;
dev->queue_reserved = 0;
dev->queue_slots = 0;
dev->queuelist = NULL;
dev->irq = 0;
dev->context_flag = 0;
dev->interrupt_flag = 0;
dev->dma_flag = 0;
dev->last_context = 0;
dev->last_switch = 0;
dev->last_checked = 0;
callout_init(&dev->timer);
dev->context_wait = 0;
timespecclear(&dev->ctx_start);
timespecclear(&dev->lck_start);
dev->buf_rp = dev->buf;
dev->buf_wp = dev->buf;
dev->buf_end = dev->buf + DRM_BSZ;
bzero(&dev->buf_sel, sizeof dev->buf_sel);
dev->buf_sigio = NULL;
dev->buf_readers = 0;
dev->buf_writers = 0;
dev->buf_selecting = 0;
DRM_DEBUG("\n");
/* The kernel's context could be created here, but is now created
in drm_dma_enqueue. This is more resource-efficient for
hardware that does not do DMA, but may mean that
drm_select_queue fails between the time the interrupt is
initialized and the time the queues are initialized. */
return 0;
}
static int mga_takedown(drm_device_t *dev)
{
int i;
drm_magic_entry_t *pt, *next;
drm_map_t *map;
drm_vma_entry_t *vma, *vma_next;
DRM_DEBUG("\n");
if (dev->irq) mga_irq_uninstall(dev);
lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curproc);
callout_stop(&dev->timer);
if (dev->devname) {
drm_free(dev->devname, strlen(dev->devname)+1, DRM_MEM_DRIVER);
dev->devname = NULL;
}
if (dev->unique) {
drm_free(dev->unique, strlen(dev->unique)+1, DRM_MEM_DRIVER);
dev->unique = NULL;
dev->unique_len = 0;
}
/* Clear pid list */
for (i = 0; i < DRM_HASH_SIZE; i++) {
for (pt = dev->magiclist[i].head; pt; pt = next) {
next = pt->next;
drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
}
dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
}
/* Clear AGP information */
if (dev->agp) {
drm_agp_mem_t *entry;
drm_agp_mem_t *nexte;
/* Remove AGP resources, but leave dev->agp
intact until cleanup is called. */
for (entry = dev->agp->memory; entry; entry = nexte) {
nexte = entry->next;
if (entry->bound) drm_unbind_agp(entry->handle);
drm_free_agp(entry->handle, entry->pages);
drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
}
dev->agp->memory = NULL;
if (dev->agp->acquired)
agp_release(dev->agp->agpdev);
dev->agp->acquired = 0;
dev->agp->enabled = 0;
}
/* Clear vma list (only built for debugging) */
if (dev->vmalist) {
for (vma = dev->vmalist; vma; vma = vma_next) {
vma_next = vma->next;
drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
}
dev->vmalist = NULL;
}
/* Clear map area and mtrr information */
if (dev->maplist) {
for (i = 0; i < dev->map_count; i++) {
map = dev->maplist[i];
switch (map->type) {
case _DRM_REGISTERS:
case _DRM_FRAME_BUFFER:
#ifdef CONFIG_MTRR
if (map->mtrr >= 0) {
int retcode;
retcode = mtrr_del(map->mtrr,
map->offset,
map->size);
DRM_DEBUG("mtrr_del = %d\n", retcode);
}
#endif
drm_ioremapfree(map->handle, map->size);
break;
case _DRM_SHM:
drm_free_pages((unsigned long)map->handle,
drm_order(map->size)
- PAGE_SHIFT,
DRM_MEM_SAREA);
break;
case _DRM_AGP:
break;
}
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
}
drm_free(dev->maplist,
dev->map_count * sizeof(*dev->maplist),
DRM_MEM_MAPS);
dev->maplist = NULL;
dev->map_count = 0;
}
if (dev->queuelist) {
for (i = 0; i < dev->queue_count; i++) {
drm_waitlist_destroy(&dev->queuelist[i]->waitlist);
if (dev->queuelist[i]) {
drm_free(dev->queuelist[i],
sizeof(*dev->queuelist[0]),
DRM_MEM_QUEUES);
dev->queuelist[i] = NULL;
}
}
drm_free(dev->queuelist,
dev->queue_slots * sizeof(*dev->queuelist),
DRM_MEM_QUEUES);
dev->queuelist = NULL;
}
drm_dma_takedown(dev);
dev->queue_count = 0;
if (dev->lock.hw_lock) {
dev->lock.hw_lock = NULL; /* SHM removed */
dev->lock.pid = 0;
wakeup(&dev->lock.lock_queue);
}
lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
return 0;
}
/* mga_init is called via mga_attach at module load time, */
int
mga_init(device_t nbdev)
{
int retcode;
drm_device_t *dev = device_get_softc(nbdev);
DRM_DEBUG("\n");
memset((void *)dev, 0, sizeof(*dev));
simple_lock_init(&dev->count_lock);
lockinit(&dev->dev_lock, PZERO, "drmlk", 0, 0);
#if 0
drm_parse_options(mga);
#endif
dev->device = nbdev;
dev->devnode = make_dev(&mga_cdevsw,
device_get_unit(nbdev),
DRM_DEV_UID,
DRM_DEV_GID,
DRM_DEV_MODE,
MGA_NAME);
dev->name = MGA_NAME;
DRM_DEBUG("doing mem init\n");
drm_mem_init();
DRM_DEBUG("doing proc init\n");
drm_sysctl_init(dev);
TAILQ_INIT(&dev->files);
DRM_DEBUG("doing agp init\n");
dev->agp = drm_agp_init();
if(dev->agp == NULL) {
DRM_INFO("The mga drm module requires the agp module"
" to function correctly\nPlease load the agp"
" module before you load the mga module\n");
drm_sysctl_cleanup(dev);
mga_takedown(dev);
return ENOMEM;
}
#if 0
dev->agp->agp_mtrr = mtrr_add(dev->agp->agp_info.aper_base,
dev->agp->agp_info.aper_size * 1024 * 1024,
MTRR_TYPE_WRCOMB,
1);
#endif
DRM_DEBUG("doing ctxbitmap init\n");
if((retcode = drm_ctxbitmap_init(dev))) {
DRM_ERROR("Cannot allocate memory for context bitmap.\n");
drm_sysctl_cleanup(dev);
mga_takedown(dev);
return retcode;
}
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
MGA_NAME,
MGA_MAJOR,
MGA_MINOR,
MGA_PATCHLEVEL,
MGA_DATE,
device_get_unit(nbdev));
return 0;
}
/* mga_cleanup is called via cleanup_module at module unload time. */
void mga_cleanup(device_t nbdev)
{
drm_device_t *dev = device_get_softc(nbdev);
DRM_DEBUG("\n");
drm_sysctl_cleanup(dev);
destroy_dev(dev->devnode);
DRM_INFO("Module unloaded\n");
drm_ctxbitmap_cleanup(dev);
mga_dma_cleanup(dev);
#if 0
if(dev->agp && dev->agp->agp_mtrr) {
int retval;
retval = mtrr_del(dev->agp->agp_mtrr,
dev->agp->agp_info.aper_base,
dev->agp->agp_info.aper_size * 1024*1024);
DRM_DEBUG("mtrr_del = %d\n", retval);
}
#endif
mga_takedown(dev);
if (dev->agp) {
drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS);
dev->agp = NULL;
}
}
int
mga_version(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_version_t version;
int len;
version = *(drm_version_t *) data;
#define DRM_COPY(name,value) \
len = strlen(value); \
if (len > name##_len) len = name##_len; \
name##_len = strlen(value); \
if (len && name) { \
int error = copyout(value, name, len); \
if (error) return error; \
}
version.version_major = MGA_MAJOR;
version.version_minor = MGA_MINOR;
version.version_patchlevel = MGA_PATCHLEVEL;
DRM_COPY(version.name, MGA_NAME);
DRM_COPY(version.date, MGA_DATE);
DRM_COPY(version.desc, MGA_DESC);
*(drm_version_t *) data = version;
return 0;
}
int
mga_open(dev_t kdev, int flags, int fmt, struct proc *p)
{
drm_device_t *dev = MGA_SOFTC(minor(kdev));
int retcode = 0;
DRM_DEBUG("open_count = %d\n", dev->open_count);
device_busy(dev->device);
if (!(retcode = drm_open_helper(kdev, flags, fmt, p, dev))) {
atomic_inc(&dev->total_open);
simple_lock(&dev->count_lock);
if (!dev->open_count++) {
simple_unlock(&dev->count_lock);
retcode = mga_setup(dev);
}
simple_unlock(&dev->count_lock);
}
device_unbusy(dev->device);
return retcode;
}
int
mga_close(dev_t kdev, int flags, int fmt, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
drm_file_t *priv;
int retcode = 0;
DRM_DEBUG("pid = %d, open_count = %d\n",
p->p_pid, dev->open_count);
priv = drm_find_file_by_proc(dev, p);
if (!priv) {
DRM_DEBUG("can't find authenticator\n");
return EINVAL;
}
if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)
&& dev->lock.pid == p->p_pid) {
mga_reclaim_buffers(dev, priv->pid);
DRM_ERROR("Process %d dead, freeing lock for context %d\n",
p->p_pid,
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
drm_lock_free(dev,
&dev->lock.hw_lock->lock,
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
/* FIXME: may require heavy-handed reset of
hardware at this point, possibly
processed via a callback to the X
server. */
} else if (dev->lock.hw_lock) {
/* The lock is required to reclaim buffers */
for (;;) {
if (!dev->lock.hw_lock) {
/* Device has been unregistered */
retcode = EINTR;
break;
}
if (drm_lock_take(&dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT)) {
dev->lock.pid = p->p_pid;
dev->lock.lock_time = ticks;
atomic_inc(&dev->total_locks);
break; /* Got lock */
}
/* Contention */
atomic_inc(&dev->total_sleeps);
retcode = tsleep(&dev->lock.lock_queue,
PZERO|PCATCH,
"drmlk2",
0);
if (retcode)
break;
}
if(!retcode) {
mga_reclaim_buffers(dev, priv->pid);
drm_lock_free(dev, &dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT);
}
}
funsetown(dev->buf_sigio);
lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, p);
priv = drm_find_file_by_proc(dev, p);
if (priv) {
priv->refs--;
if (!priv->refs) {
TAILQ_REMOVE(&dev->files, priv, link);
drm_free(priv, sizeof(*priv), DRM_MEM_FILES);
}
}
lockmgr(&dev->dev_lock, LK_RELEASE, 0, p);
atomic_inc(&dev->total_close);
simple_lock(&dev->count_lock);
if (!--dev->open_count) {
if (atomic_read(&dev->ioctl_count) || dev->blocked) {
DRM_ERROR("Device busy: %d %d\n",
atomic_read(&dev->ioctl_count),
dev->blocked);
simple_unlock(&dev->count_lock);
return EBUSY;
}
simple_unlock(&dev->count_lock);
device_unbusy(dev->device);
return mga_takedown(dev);
}
simple_unlock(&dev->count_lock);
return retcode;
}
/* mga_ioctl is called whenever a process performs an ioctl on /dev/drm. */
int
mga_ioctl(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
int nr = DRM_IOCTL_NR(cmd);
drm_device_t *dev = kdev->si_drv1;
drm_file_t *priv;
int retcode = 0;
drm_ioctl_desc_t *ioctl;
d_ioctl_t *func;
DRM_DEBUG("dev=%p\n", dev);
priv = drm_find_file_by_proc(dev, p);
if (!priv) {
DRM_DEBUG("can't find authenticator\n");
return EINVAL;
}
atomic_inc(&dev->ioctl_count);
atomic_inc(&dev->total_ioctl);
++priv->ioctl_count;
DRM_DEBUG("pid = %d, cmd = 0x%02lx, nr = 0x%02x, auth = %d\n",
p->p_pid, cmd, nr, priv->authenticated);
switch (cmd) {
case FIONBIO:
atomic_dec(&dev->ioctl_count);
return 0;
case FIOASYNC:
atomic_dec(&dev->ioctl_count);
dev->flags |= FASYNC;
return 0;
case FIOSETOWN:
atomic_dec(&dev->ioctl_count);
return fsetown(*(int *)data, &dev->buf_sigio);
case FIOGETOWN:
atomic_dec(&dev->ioctl_count);
*(int *) data = fgetown(dev->buf_sigio);
return 0;
}
if (nr >= MGA_IOCTL_COUNT) {
retcode = EINVAL;
} else {
ioctl = &mga_ioctls[nr];
func = ioctl->func;
if (!func) {
DRM_DEBUG("no function\n");
retcode = EINVAL;
} else if ((ioctl->root_only && suser(p))
|| (ioctl->auth_needed && !priv->authenticated)) {
retcode = EACCES;
} else {
retcode = (func)(kdev, cmd, data, flags, p);
}
}
atomic_dec(&dev->ioctl_count);
return retcode;
}
int
mga_unlock(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
drm_lock_t lock;
int s;
lock = *(drm_lock_t *) data;
if (lock.context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
p->p_pid, lock.context);
return EINVAL;
}
DRM_DEBUG("%d frees lock (%d holds)\n",
lock.context,
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
atomic_inc(&dev->total_unlocks);
if (_DRM_LOCK_IS_CONT(dev->lock.hw_lock->lock))
atomic_inc(&dev->total_contends);
drm_lock_transfer(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT);
s = splsofttq();
mga_dma_schedule(dev, 1);
splx(s);
if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT)) {
DRM_ERROR("\n");
}
return 0;
}
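
For reference, a hedged user-space sketch (names and error handling assumed, not part of this commit) of the lock discipline that the mga_lock/mga_unlock entries in the ioctl table above expect around DMA submission:

/* Hedged sketch: fd and ctx_handle come from earlier open/addctx calls;
 * the kernel rejects DRM_KERNEL_CONTEXT here, as mga_unlock shows above. */
#include <sys/ioctl.h>
#include "drm.h"

static int with_hw_lock(int fd, int ctx_handle)
{
        drm_lock_t lk;

        lk.context = ctx_handle;                  /* must not be DRM_KERNEL_CONTEXT */
        lk.flags   = 0;
        if (ioctl(fd, DRM_IOCTL_LOCK, &lk) < 0)
                return -1;
        /* ... queue buffers with DRM_IOCTL_DMA here ... */
        return ioctl(fd, DRM_IOCTL_UNLOCK, &lk);
}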

File diff suppressed because it is too large

File diff suppressed because it is too large

bsd/mga_drm.h (new file)

@@ -0,0 +1,310 @@
/* mga_drm.h -- Public header for the Matrox g200/g400 driver -*- linux-c -*-
* Created: Tue Jan 25 01:50:01 1999 by jhartmann@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Jeff Hartmann <jhartmann@valinux.com>
* Keith Whitwell <keithw@valinux.com>
*
* Rewritten by:
* Gareth Hughes <gareth@valinux.com>
*/
#ifndef __MGA_DRM_H__
#define __MGA_DRM_H__
/* WARNING: If you change any of these defines, make sure to change the
* defines in the Xserver file (mga_sarea.h)
*/
#ifndef __MGA_SAREA_DEFINES__
#define __MGA_SAREA_DEFINES__
/* WARP pipe flags
*/
#define MGA_F 0x1 /* fog */
#define MGA_A 0x2 /* alpha */
#define MGA_S 0x4 /* specular */
#define MGA_T2 0x8 /* multitexture */
#define MGA_WARP_TGZ 0
#define MGA_WARP_TGZF (MGA_F)
#define MGA_WARP_TGZA (MGA_A)
#define MGA_WARP_TGZAF (MGA_F|MGA_A)
#define MGA_WARP_TGZS (MGA_S)
#define MGA_WARP_TGZSF (MGA_S|MGA_F)
#define MGA_WARP_TGZSA (MGA_S|MGA_A)
#define MGA_WARP_TGZSAF (MGA_S|MGA_F|MGA_A)
#define MGA_WARP_T2GZ (MGA_T2)
#define MGA_WARP_T2GZF (MGA_T2|MGA_F)
#define MGA_WARP_T2GZA (MGA_T2|MGA_A)
#define MGA_WARP_T2GZAF (MGA_T2|MGA_A|MGA_F)
#define MGA_WARP_T2GZS (MGA_T2|MGA_S)
#define MGA_WARP_T2GZSF (MGA_T2|MGA_S|MGA_F)
#define MGA_WARP_T2GZSA (MGA_T2|MGA_S|MGA_A)
#define MGA_WARP_T2GZSAF (MGA_T2|MGA_S|MGA_F|MGA_A)
#define MGA_MAX_G200_PIPES 8 /* no multitex */
#define MGA_MAX_G400_PIPES 16
#define MGA_MAX_WARP_PIPES MGA_MAX_G400_PIPES
#define MGA_WARP_UCODE_SIZE 32768 /* in bytes */
#define MGA_CARD_TYPE_G200 1
#define MGA_CARD_TYPE_G400 2
#define MGA_FRONT 0x1
#define MGA_BACK 0x2
#define MGA_DEPTH 0x4
/* What needs to be changed for the current vertex dma buffer?
*/
#define MGA_UPLOAD_CONTEXT 0x1
#define MGA_UPLOAD_TEX0 0x2
#define MGA_UPLOAD_TEX1 0x4
#define MGA_UPLOAD_PIPE 0x8
#define MGA_UPLOAD_TEX0IMAGE 0x10 /* handled client-side */
#define MGA_UPLOAD_TEX1IMAGE 0x20 /* handled client-side */
#define MGA_UPLOAD_2D 0x40
#define MGA_WAIT_AGE 0x80 /* handled client-side */
#define MGA_UPLOAD_CLIPRECTS 0x100 /* handled client-side */
#if 0
#define MGA_DMA_FLUSH 0x200 /* set when someone gets the lock
quiescent */
#endif
/* 128 buffers of 64k each, total 8 meg.
*/
#define MGA_BUFFER_SIZE (1 << 16)
#define MGA_NUM_BUFFERS 128
/* Keep these small for testing.
*/
#define MGA_NR_SAREA_CLIPRECTS 8
/* 2 heaps (1 for card, 1 for agp), each divided into up to 128
* regions, subject to a minimum region size of (1<<16) == 64k.
*
* Clients may subdivide regions internally, but when sharing between
* clients, the region size is the minimum granularity.
*/
#define MGA_CARD_HEAP 0
#define MGA_AGP_HEAP 1
#define MGA_NR_TEX_HEAPS 2
#define MGA_NR_TEX_REGIONS 16
#define MGA_LOG_MIN_TEX_REGION_SIZE 16
#endif /* __MGA_SAREA_DEFINES__ */
/* Setup registers for 3D context
*/
typedef struct {
unsigned int dstorg;
unsigned int maccess;
unsigned int plnwt;
unsigned int dwgctl;
unsigned int alphactrl;
unsigned int fogcolor;
unsigned int wflag;
unsigned int tdualstage0;
unsigned int tdualstage1;
unsigned int fcol;
unsigned int stencil;
unsigned int stencilctl;
} drm_mga_context_regs_t;
/* Setup registers for 2D, X server
*/
typedef struct {
unsigned int pitch;
} drm_mga_server_regs_t;
/* Setup registers for each texture unit
*/
typedef struct {
unsigned int texctl;
unsigned int texctl2;
unsigned int texfilter;
unsigned int texbordercol;
unsigned int texorg;
unsigned int texwidth;
unsigned int texheight;
unsigned int texorg1;
unsigned int texorg2;
unsigned int texorg3;
unsigned int texorg4;
} drm_mga_texture_regs_t;
/* General aging mechanism
*/
typedef struct {
unsigned int head; /* Position of head pointer */
unsigned int wrap; /* Primary DMA wrap count */
} drm_mga_age_t;
typedef struct _drm_mga_sarea {
/* The channel for communication of state information to the kernel
* on firing a vertex dma buffer.
*/
drm_mga_context_regs_t context_state;
drm_mga_server_regs_t server_state;
drm_mga_texture_regs_t tex_state[2];
unsigned int warp_pipe;
unsigned int dirty;
unsigned int vertsize;
/* The current cliprects, or a subset thereof.
*/
drm_clip_rect_t boxes[MGA_NR_SAREA_CLIPRECTS];
unsigned int nbox;
/* Information about the most recently used 3d drawable. The
* client fills in the req_* fields, the server fills in the
* exported_ fields and puts the cliprects into boxes, above.
*
* The client clears the exported_drawable field before
* clobbering the boxes data.
*/
unsigned int req_drawable; /* the X drawable id */
unsigned int req_draw_buffer; /* MGA_FRONT or MGA_BACK */
unsigned int exported_drawable;
unsigned int exported_index;
unsigned int exported_stamp;
unsigned int exported_buffers;
unsigned int exported_nfront;
unsigned int exported_nback;
int exported_back_x, exported_front_x, exported_w;
int exported_back_y, exported_front_y, exported_h;
drm_clip_rect_t exported_boxes[MGA_NR_SAREA_CLIPRECTS];
/* Counters for aging textures and for client-side throttling.
*/
unsigned int status[4];
unsigned int last_wrap;
drm_mga_age_t last_frame;
unsigned int last_enqueue; /* last time a buffer was enqueued */
unsigned int last_dispatch; /* age of the most recently dispatched buffer */
unsigned int last_quiescent; /* */
/* LRU lists for texture memory in agp space and on the card.
*/
drm_tex_region_t texList[MGA_NR_TEX_HEAPS][MGA_NR_TEX_REGIONS+1];
unsigned int texAge[MGA_NR_TEX_HEAPS];
/* Mechanism to validate card state.
*/
int ctxOwner;
} drm_mga_sarea_t;
/* WARNING: If you change any of these defines, make sure to change the
* defines in the Xserver file (xf86drmMga.h)
*/
typedef struct _drm_mga_warp_index {
int installed;
unsigned long phys_addr;
int size;
} drm_mga_warp_index_t;
typedef struct drm_mga_init {
enum {
MGA_INIT_DMA = 0x01,
MGA_CLEANUP_DMA = 0x02
} func;
unsigned long sarea_priv_offset;
int chipset;
int sgram;
unsigned int maccess;
unsigned int fb_cpp;
unsigned int front_offset, front_pitch;
unsigned int back_offset, back_pitch;
unsigned int depth_cpp;
unsigned int depth_offset, depth_pitch;
unsigned int texture_offset[MGA_NR_TEX_HEAPS];
unsigned int texture_size[MGA_NR_TEX_HEAPS];
unsigned long fb_offset;
unsigned long mmio_offset;
unsigned long status_offset;
unsigned long warp_offset;
unsigned long primary_offset;
unsigned long buffers_offset;
} drm_mga_init_t;
typedef struct drm_mga_fullscreen {
enum {
MGA_INIT_FULLSCREEN = 0x01,
MGA_CLEANUP_FULLSCREEN = 0x02
} func;
} drm_mga_fullscreen_t;
typedef struct drm_mga_clear {
unsigned int flags;
unsigned int clear_color;
unsigned int clear_depth;
unsigned int color_mask;
unsigned int depth_mask;
} drm_mga_clear_t;
typedef struct drm_mga_vertex {
int idx; /* buffer to queue */
int used; /* bytes in use */
int discard; /* client finished with buffer? */
} drm_mga_vertex_t;
typedef struct drm_mga_indices {
int idx; /* buffer to queue */
unsigned int start;
unsigned int end;
int discard; /* client finished with buffer? */
} drm_mga_indices_t;
typedef struct drm_mga_iload {
int idx;
unsigned int dstorg;
unsigned int length;
} drm_mga_iload_t;
typedef struct _drm_mga_blit {
unsigned int planemask;
unsigned int srcorg;
unsigned int dstorg;
int src_pitch, dst_pitch;
int delta_sx, delta_sy;
int delta_dx, delta_dy;
int height, ydir; /* flip image vertically */
int source_pitch, dest_pitch;
} drm_mga_blit_t;
#endif
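
As an illustration of how the request structures above are consumed, a hedged user-space sketch (not from this commit; the DRM_IOCTL_MGA_VERTEX request code is assumed to come from the accompanying drm.h) that queues one vertex buffer:

/* Hedged sketch: fd is an authenticated DRM fd, idx a buffer index obtained
 * from DRM_IOCTL_MAP_BUFS, and used the number of vertex bytes written. */
#include <sys/ioctl.h>
#include "drm.h"
#include "mga_drm.h"

static int mga_fire_vertex(int fd, int idx, int used, int discard)
{
        drm_mga_vertex_t v;

        v.idx     = idx;      /* buffer to queue              */
        v.used    = used;     /* bytes in use                 */
        v.discard = discard;  /* client finished with buffer? */
        return ioctl(fd, DRM_IOCTL_MGA_VERTEX, &v);
}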

bsd/r128_drm.h (new file)

@@ -0,0 +1,287 @@
/* r128_drm.h -- Public header for the r128 driver -*- linux-c -*-
* Created: Wed Apr 5 19:24:19 2000 by kevin@precisioninsight.com
*
* Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Gareth Hughes <gareth@valinux.com>
* Kevin E. Martin <martin@valinux.com>
*/
#ifndef __R128_DRM_H__
#define __R128_DRM_H__
/* WARNING: If you change any of these defines, make sure to change the
* defines in the X server file (r128_sarea.h)
*/
#ifndef __R128_SAREA_DEFINES__
#define __R128_SAREA_DEFINES__
/* What needs to be changed for the current vertex buffer?
*/
#define R128_UPLOAD_CONTEXT 0x001
#define R128_UPLOAD_SETUP 0x002
#define R128_UPLOAD_TEX0 0x004
#define R128_UPLOAD_TEX1 0x008
#define R128_UPLOAD_TEX0IMAGES 0x010
#define R128_UPLOAD_TEX1IMAGES 0x020
#define R128_UPLOAD_CORE 0x040
#define R128_UPLOAD_MASKS 0x080
#define R128_UPLOAD_WINDOW 0x100
#define R128_UPLOAD_CLIPRECTS 0x200 /* handled client-side */
#define R128_REQUIRE_QUIESCENCE 0x400
#define R128_UPLOAD_ALL 0x7ff
#define R128_FRONT 0x1
#define R128_BACK 0x2
#define R128_DEPTH 0x4
/* Primitive types
*/
#define R128_POINTS 0x1
#define R128_LINES 0x2
#define R128_LINE_STRIP 0x3
#define R128_TRIANGLES 0x4
#define R128_TRIANGLE_FAN 0x5
#define R128_TRIANGLE_STRIP 0x6
/* Vertex/indirect buffer size
*/
#define R128_BUFFER_SIZE 16384
/* Byte offsets for indirect buffer data
*/
#define R128_INDEX_PRIM_OFFSET 20
#define R128_HOSTDATA_BLIT_OFFSET 32
/* Keep these small for testing.
*/
#define R128_NR_SAREA_CLIPRECTS 12
/* There are 2 heaps (local/AGP). Each region within a heap is a
* minimum of 64k, and there are at most 64 of them per heap.
*/
#define R128_LOCAL_TEX_HEAP 0
#define R128_AGP_TEX_HEAP 1
#define R128_NR_TEX_HEAPS 2
#define R128_NR_TEX_REGIONS 64
#define R128_LOG_TEX_GRANULARITY 16
#define R128_NR_CONTEXT_REGS 12
#define R128_MAX_TEXTURE_LEVELS 11
#define R128_MAX_TEXTURE_UNITS 2
#endif /* __R128_SAREA_DEFINES__ */
typedef struct {
/* Context state - can be written in one large chunk */
unsigned int dst_pitch_offset_c;
unsigned int dp_gui_master_cntl_c;
unsigned int sc_top_left_c;
unsigned int sc_bottom_right_c;
unsigned int z_offset_c;
unsigned int z_pitch_c;
unsigned int z_sten_cntl_c;
unsigned int tex_cntl_c;
unsigned int misc_3d_state_cntl_reg;
unsigned int texture_clr_cmp_clr_c;
unsigned int texture_clr_cmp_msk_c;
unsigned int fog_color_c;
/* Texture state */
unsigned int tex_size_pitch_c;
unsigned int constant_color_c;
/* Setup state */
unsigned int pm4_vc_fpu_setup;
unsigned int setup_cntl;
/* Mask state */
unsigned int dp_write_mask;
unsigned int sten_ref_mask_c;
unsigned int plane_3d_mask_c;
/* Window state */
unsigned int window_xy_offset;
/* Core state */
unsigned int scale_3d_cntl;
} drm_r128_context_regs_t;
/* Setup registers for each texture unit
*/
typedef struct {
unsigned int tex_cntl;
unsigned int tex_combine_cntl;
unsigned int tex_size_pitch;
unsigned int tex_offset[R128_MAX_TEXTURE_LEVELS];
unsigned int tex_border_color;
} drm_r128_texture_regs_t;
typedef struct drm_r128_sarea {
/* The channel for communication of state information to the kernel
* on firing a vertex buffer.
*/
drm_r128_context_regs_t context_state;
drm_r128_texture_regs_t tex_state[R128_MAX_TEXTURE_UNITS];
unsigned int dirty;
unsigned int vertsize;
unsigned int vc_format;
/* The current cliprects, or a subset thereof.
*/
drm_clip_rect_t boxes[R128_NR_SAREA_CLIPRECTS];
unsigned int nbox;
/* Counters for client-side throttling of rendering clients.
*/
unsigned int last_frame;
unsigned int last_dispatch;
drm_tex_region_t tex_list[R128_NR_TEX_HEAPS][R128_NR_TEX_REGIONS+1];
int tex_age[R128_NR_TEX_HEAPS];
int ctx_owner;
} drm_r128_sarea_t;
/* WARNING: If you change any of these defines, make sure to change the
* defines in the Xserver file (xf86drmR128.h)
*/
typedef struct drm_r128_init {
enum {
R128_INIT_CCE = 0x01,
R128_CLEANUP_CCE = 0x02
} func;
#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0)
int sarea_priv_offset;
#else
unsigned long sarea_priv_offset;
#endif
int is_pci;
int cce_mode;
int cce_secure;
int ring_size;
int usec_timeout;
unsigned int fb_bpp;
unsigned int front_offset, front_pitch;
unsigned int back_offset, back_pitch;
unsigned int depth_bpp;
unsigned int depth_offset, depth_pitch;
unsigned int span_offset;
#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0)
unsigned int fb_offset;
unsigned int mmio_offset;
unsigned int ring_offset;
unsigned int ring_rptr_offset;
unsigned int buffers_offset;
unsigned int agp_textures_offset;
#else
unsigned long fb_offset;
unsigned long mmio_offset;
unsigned long ring_offset;
unsigned long ring_rptr_offset;
unsigned long buffers_offset;
unsigned long agp_textures_offset;
#endif
} drm_r128_init_t;
typedef struct drm_r128_cce_stop {
int flush;
int idle;
} drm_r128_cce_stop_t;
typedef struct drm_r128_clear {
unsigned int flags;
#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0)
int x, y, w, h;
#endif
unsigned int clear_color;
unsigned int clear_depth;
#if CONFIG_XFREE86_VERSION >= XFREE86_VERSION(4,1,0,0)
unsigned int color_mask;
unsigned int depth_mask;
#endif
} drm_r128_clear_t;
typedef struct drm_r128_vertex {
int prim;
int idx; /* Index of vertex buffer */
int count; /* Number of vertices in buffer */
int discard; /* Client finished with buffer? */
} drm_r128_vertex_t;
typedef struct drm_r128_indices {
int prim;
int idx;
int start;
int end;
int discard; /* Client finished with buffer? */
} drm_r128_indices_t;
typedef struct drm_r128_blit {
int idx;
int pitch;
int offset;
int format;
unsigned short x, y;
unsigned short width, height;
} drm_r128_blit_t;
typedef struct drm_r128_depth {
enum {
R128_WRITE_SPAN = 0x01,
R128_WRITE_PIXELS = 0x02,
R128_READ_SPAN = 0x03,
R128_READ_PIXELS = 0x04
} func;
int n;
int *x;
int *y;
unsigned int *buffer;
unsigned char *mask;
} drm_r128_depth_t;
typedef struct drm_r128_stipple {
unsigned int *mask;
} drm_r128_stipple_t;
typedef struct drm_r128_indirect {
int idx;
int start;
int end;
int discard;
} drm_r128_indirect_t;
typedef struct drm_r128_fullscreen {
enum {
R128_INIT_FULLSCREEN = 0x01,
R128_CLEANUP_FULLSCREEN = 0x02
} func;
} drm_r128_fullscreen_t;
#endif
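
Similarly, a hedged sketch (assumes an XFree86 build environment that defines CONFIG_XFREE86_VERSION at >= 4.1.0 so the color_mask/depth_mask branch of drm_r128_clear_t above is selected, and a DRM_IOCTL_R128_CLEAR request code from the accompanying drm.h) of a full-buffer clear issued through this interface:

/* Hedged sketch: clears front, back and depth buffers in one ioctl. */
#include <sys/ioctl.h>
#include "drm.h"
#include "r128_drm.h"

static int r128_clear_all(int fd)
{
        drm_r128_clear_t c;

        c.flags       = R128_FRONT | R128_BACK | R128_DEPTH;
        c.clear_color = 0x00000000;
        c.clear_depth = 0x00ffffff;   /* assumes a 24-bit depth buffer */
        c.color_mask  = 0xffffffff;
        c.depth_mask  = 0xffffffff;
        return ioctl(fd, DRM_IOCTL_R128_CLEAR, &c);
}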


@@ -1,10 +1,10 @@
# $FreeBSD$ # $FreeBSD$
KMOD = tdfx KMOD= tdfx
SRCS = tdfx_drv.c tdfx_context.c NOMAN= YES
SRCS += device_if.h bus_if.h pci_if.h SRCS= tdfx_drv.c
CFLAGS += ${DEBUG_FLAGS} -I. -I.. SRCS+= device_if.h bus_if.h pci_if.h opt_drm_linux.h
KMODDEPS = drm CFLAGS+= ${DEBUG_FLAGS} -I. -I..
@: @:
ln -sf /sys @ ln -sf /sys @
@@ -12,4 +12,14 @@ KMODDEPS = drm
machine: machine:
ln -sf /sys/i386/include machine ln -sf /sys/i386/include machine
.if ${MACHINE_ARCH} == "i386"
# This line enables linux ioctl handling
# If you want support for this, uncomment this line
#TDFX_OPTS= "\#define DRM_LINUX" 1
.endif
opt_drm_linux.h:
touch opt_drm_linux.h
echo $(TDFX_OPTS) >> opt_drm_linux.h
.include <bsd.kmod.mk> .include <bsd.kmod.mk>
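
For reference, with the commented TDFX_OPTS line enabled on i386, the opt_drm_linux.h target above would generate roughly the following header (a sketch; the exact output depends on make/shell quoting):

#define DRM_LINUX 1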


@@ -1,204 +0,0 @@
/* tdfx_context.c -- IOCTLs for tdfx contexts -*- c -*-
* Created: Thu Oct 7 10:50:22 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Daryll Strauss <daryll@valinux.com>
*
*/
#include "drmP.h"
#include "tdfx_drv.h"
extern drm_ctx_t tdfx_res_ctx;
static int tdfx_alloc_queue(drm_device_t *dev)
{
return drm_ctxbitmap_next(dev);
}
int tdfx_context_switch(drm_device_t *dev, int old, int new)
{
char buf[64];
atomic_inc(&dev->total_ctx);
if (test_and_set_bit(0, &dev->context_flag)) {
DRM_ERROR("Reentering -- FIXME\n");
return -EBUSY;
}
#if DRM_DMA_HISTOGRAM
getnanotime(&dev->ctx_start);
#endif
DRM_DEBUG("Context switch from %d to %d\n", old, new);
if (new == dev->last_context) {
clear_bit(0, &dev->context_flag);
return 0;
}
if (drm_flags & DRM_FLAG_NOCTX) {
tdfx_context_switch_complete(dev, new);
} else {
sprintf(buf, "C %d %d\n", old, new);
drm_write_string(dev, buf);
}
return 0;
}
int tdfx_context_switch_complete(drm_device_t *dev, int new)
{
dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
dev->last_switch = ticks;
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
DRM_ERROR("Lock isn't held after context switch\n");
}
/* If a context switch is ever initiated
when the kernel holds the lock, release
that lock here. */
#if DRM_DMA_HISTOGRAM
{
struct timespec ts;
getnanotime(&ts);
timespecsub(&ts, &dev->lck_start);
atomic_inc(&dev->histo.ctx[drm_histogram_slot(&ts)]);
}
#endif
clear_bit(0, &dev->context_flag);
wakeup(&dev->context_wait);
return 0;
}
int
tdfx_resctx(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_ctx_res_t res;
drm_ctx_t ctx;
int i, error;
DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS);
res = *(drm_ctx_res_t *) data;
if (res.count >= DRM_RESERVED_CONTEXTS) {
memset(&ctx, 0, sizeof(ctx));
for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
ctx.handle = i;
error = copyout(&i, &res.contexts[i], sizeof(i));
if (error) return error;
}
}
res.count = DRM_RESERVED_CONTEXTS;
*(drm_ctx_res_t *) data = res;
return 0;
}
int
tdfx_addctx(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
drm_ctx_t ctx;
ctx = *(drm_ctx_t *) data;
if ((ctx.handle = tdfx_alloc_queue(dev)) == DRM_KERNEL_CONTEXT) {
/* Skip kernel's context and get a new one. */
ctx.handle = tdfx_alloc_queue(dev);
}
DRM_DEBUG("%d\n", ctx.handle);
if (ctx.handle == -1) {
DRM_DEBUG("Not enough free contexts.\n");
/* Should this return -EBUSY instead? */
return ENOMEM;
}
*(drm_ctx_t *) data = ctx;
return 0;
}
int
tdfx_modctx(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_ctx_t ctx;
ctx = *(drm_ctx_t *) data;
if (ctx.flags==_DRM_CONTEXT_PRESERVED)
tdfx_res_ctx.handle=ctx.handle;
return 0;
}
int
tdfx_getctx(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_ctx_t ctx;
ctx = *(drm_ctx_t *) data;
/* This is 0, because we don't handle any context flags */
ctx.flags = 0;
*(drm_ctx_t *) data = ctx;
return 0;
}
int
tdfx_switchctx(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
drm_ctx_t ctx;
ctx = *(drm_ctx_t *) data;
DRM_DEBUG("%d\n", ctx.handle);
return tdfx_context_switch(dev, dev->last_context, ctx.handle);
}
int
tdfx_newctx(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
drm_ctx_t ctx;
ctx = *(drm_ctx_t *) data;
DRM_DEBUG("%d\n", ctx.handle);
tdfx_context_switch_complete(dev, ctx.handle);
return 0;
}
int
tdfx_rmctx(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
drm_ctx_t ctx;
ctx = *(drm_ctx_t *) data;
drm_ctxbitmap_free(dev, ctx.handle);
return 0;
}


@@ -1,4 +1,4 @@
/* tdfx.c -- tdfx driver -*- c -*- /* tdfx_drv.c -- tdfx driver -*- linux-c -*-
* Created: Thu Oct 7 10:38:32 1999 by faith@precisioninsight.com * Created: Thu Oct 7 10:38:32 1999 by faith@precisioninsight.com
* *
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
@ -27,715 +27,74 @@
* Authors: * Authors:
* Rickard E. (Rik) Faith <faith@valinux.com> * Rickard E. (Rik) Faith <faith@valinux.com>
* Daryll Strauss <daryll@valinux.com> * Daryll Strauss <daryll@valinux.com>
* * Gareth Hughes <gareth@valinux.com>
*/ */
#include "drmP.h"
#include "tdfx_drv.h"
#include <sys/types.h>
#include <sys/bus.h>
#include <pci/pcivar.h> #include <pci/pcivar.h>
#include <opt_drm_linux.h>
MODULE_DEPEND(tdfx, drm, 1, 1, 1); #include "tdfx.h"
#ifdef DRM_AGP #include "drmP.h"
MODULE_DEPEND(tdfx, agp, 1, 1, 1);
#define DRIVER_AUTHOR "VA Linux Systems Inc."
#define DRIVER_NAME "tdfx"
#define DRIVER_DESC "3dfx Banshee/Voodoo3+"
#define DRIVER_DATE "20010216"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0
#ifndef PCI_VENDOR_ID_3DFX
#define PCI_VENDOR_ID_3DFX 0x121A
#endif
#ifndef PCI_DEVICE_ID_3DFX_VOODOO5
#define PCI_DEVICE_ID_3DFX_VOODOO5 0x0009
#endif
#ifndef PCI_DEVICE_ID_3DFX_VOODOO4
#define PCI_DEVICE_ID_3DFX_VOODOO4 0x0007
#endif
#ifndef PCI_DEVICE_ID_3DFX_VOODOO3_3000 /* Voodoo3 3000 */
#define PCI_DEVICE_ID_3DFX_VOODOO3_3000 0x0005
#endif
#ifndef PCI_DEVICE_ID_3DFX_VOODOO3_2000 /* Voodoo3 2000 */
#define PCI_DEVICE_ID_3DFX_VOODOO3_2000 0x0004
#endif
#ifndef PCI_DEVICE_ID_3DFX_BANSHEE
#define PCI_DEVICE_ID_3DFX_BANSHEE 0x0003
#endif #endif
#define TDFX_NAME "tdfx" /* List acquired from http://www.yourvote.com/pci/pcihdr.h and xc/xc/programs/Xserver/hw/xfree86/common/xf86PciInfo.h
#define TDFX_DESC "tdfx" * Please report inaccuracies to anholt@teleport.com, or let him know if a chip marked unsupported here actually works.
#define TDFX_DATE "20000928" */
#define TDFX_MAJOR 1 drm_chipinfo_t DRM(devicelist)[] = {
#define TDFX_MINOR 0 {0x121a, 0x0003, 1, "3dfx Voodoo Banshee"},
#define TDFX_PATCHLEVEL 0 {0x121a, 0x0004, 1, "3dfx Voodoo3 2000"},
{0x121a, 0x0005, 1, "3dfx Voodoo3 3000"},
static int tdfx_init(device_t nbdev); {0x121a, 0x0007, 1, "3dfx Voodoo4"},
static void tdfx_cleanup(device_t nbdev); {0x121a, 0x0009, 1, "3dfx Voodoo5"},
{0, 0, 0, NULL}
drm_ctx_t tdfx_res_ctx;
static int tdfx_probe(device_t dev)
{
const char *s = 0;
switch (pci_get_devid(dev)) {
case 0x0003121a:
s = "3Dfx Voodoo Banshee graphics accelerator";
break;
case 0x0005121a:
s = "3Dfx Voodoo 3 graphics accelerator";
break;
case 0x0009121a:
s = "3Dfx Voodoo 5 graphics accelerator";
break;
}
if (s) {
device_set_desc(dev, s);
return 0;
}
return ENXIO;
}
static int tdfx_attach(device_t dev)
{
tdfx_init(dev);
return 0;
}
static int tdfx_detach(device_t dev)
{
tdfx_cleanup(dev);
return 0;
}
static device_method_t tdfx_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, tdfx_probe),
DEVMETHOD(device_attach, tdfx_attach),
DEVMETHOD(device_detach, tdfx_detach),
{ 0, 0 }
}; };
static driver_t tdfx_driver = {
"drm",
tdfx_methods,
sizeof(drm_device_t),
};
static devclass_t tdfx_devclass; #include "drm_auth.h"
#define TDFX_SOFTC(unit) \ #include "drm_bufs.h"
((drm_device_t *) devclass_get_softc(tdfx_devclass, unit)) #include "drm_context.h"
#include "drm_dma.h"
#include "drm_drawable.h"
#include "drm_drv.h"
DRIVER_MODULE(if_tdfx, pci, tdfx_driver, tdfx_devclass, 0, 0);
#define CDEV_MAJOR 145 #include "drm_fops.h"
/* tdfx_drv.c */ #include "drm_init.h"
static d_open_t tdfx_open; #include "drm_ioctl.h"
static d_close_t tdfx_close; #include "drm_lock.h"
static d_ioctl_t tdfx_version; #include "drm_memory.h"
static d_ioctl_t tdfx_ioctl; #include "drm_vm.h"
static d_ioctl_t tdfx_lock; #include "drm_sysctl.h"
static d_ioctl_t tdfx_unlock;
static struct cdevsw tdfx_cdevsw = { DRIVER_MODULE(tdfx, pci, tdfx_driver, tdfx_devclass, 0, 0);
/* open */ tdfx_open,
/* close */ tdfx_close,
/* read */ drm_read,
/* write */ drm_write,
/* ioctl */ tdfx_ioctl,
/* poll */ drm_poll,
/* mmap */ drm_mmap,
/* strategy */ nostrategy,
/* name */ "tdfx",
/* maj */ CDEV_MAJOR,
/* dump */ nodump,
/* psize */ nopsize,
/* flags */ D_TTY | D_TRACKCLOSE,
/* bmaj */ -1
};
static drm_ioctl_desc_t tdfx_ioctls[] = {
[DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { tdfx_version, 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique, 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { drm_getmagic, 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { drm_irq_busid, 0, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { drm_block, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_unblock, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { tdfx_addctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { tdfx_rmctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { tdfx_modctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { tdfx_getctx, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { tdfx_switchctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { tdfx_newctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { tdfx_resctx, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { drm_rmdraw, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { tdfx_lock, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { tdfx_unlock, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_finish, 1, 0 },
#ifdef DRM_AGP
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = {drm_agp_acquire, 1, 1},
[DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = {drm_agp_release, 1, 1},
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = {drm_agp_enable, 1, 1},
[DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = {drm_agp_info, 1, 1},
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = {drm_agp_alloc, 1, 1},
[DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = {drm_agp_free, 1, 1},
[DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = {drm_agp_bind, 1, 1},
[DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = {drm_agp_unbind, 1, 1},
#endif
};
#define TDFX_IOCTL_COUNT DRM_ARRAY_SIZE(tdfx_ioctls)
static int
tdfx_setup(drm_device_t *dev)
{
int i;
device_busy(dev->device);
atomic_set(&dev->ioctl_count, 0);
atomic_set(&dev->vma_count, 0);
dev->buf_use = 0;
atomic_set(&dev->buf_alloc, 0);
atomic_set(&dev->total_open, 0);
atomic_set(&dev->total_close, 0);
atomic_set(&dev->total_ioctl, 0);
atomic_set(&dev->total_irq, 0);
atomic_set(&dev->total_ctx, 0);
atomic_set(&dev->total_locks, 0);
atomic_set(&dev->total_unlocks, 0);
atomic_set(&dev->total_contends, 0);
atomic_set(&dev->total_sleeps, 0);
for (i = 0; i < DRM_HASH_SIZE; i++) {
dev->magiclist[i].head = NULL;
dev->magiclist[i].tail = NULL;
}
dev->maplist = NULL;
dev->map_count = 0;
dev->vmalist = NULL;
dev->lock.hw_lock = NULL;
dev->lock.lock_queue = 0;
dev->queue_count = 0;
dev->queue_reserved = 0;
dev->queue_slots = 0;
dev->queuelist = NULL;
dev->irq = 0;
dev->context_flag = 0;
dev->interrupt_flag = 0;
dev->dma = 0;
dev->dma_flag = 0;
dev->last_context = 0;
dev->last_switch = 0;
dev->last_checked = 0;
callout_init(&dev->timer);
dev->context_wait = 0;
timespecclear(&dev->ctx_start);
timespecclear(&dev->lck_start);
dev->buf_rp = dev->buf;
dev->buf_wp = dev->buf;
dev->buf_end = dev->buf + DRM_BSZ;
bzero(&dev->buf_sel, sizeof dev->buf_sel);
dev->buf_sigio = NULL;
dev->buf_readers = 0;
dev->buf_writers = 0;
dev->buf_selecting = 0;
tdfx_res_ctx.handle=-1;
DRM_DEBUG("\n");
/* The kernel's context could be created here, but is now created
in drm_dma_enqueue. This is more resource-efficient for
hardware that does not do DMA, but may mean that
drm_select_queue fails between the time the interrupt is
initialized and the time the queues are initialized. */
return 0;
}
static int
tdfx_takedown(drm_device_t *dev)
{
int i;
drm_magic_entry_t *pt, *next;
drm_map_t *map;
drm_vma_entry_t *vma, *vma_next;
DRM_DEBUG("\n");
lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curproc);
callout_stop(&dev->timer);
if (dev->devname) {
drm_free(dev->devname, strlen(dev->devname)+1, DRM_MEM_DRIVER);
dev->devname = NULL;
}
if (dev->unique) {
drm_free(dev->unique, strlen(dev->unique)+1, DRM_MEM_DRIVER);
dev->unique = NULL;
dev->unique_len = 0;
}
/* Clear pid list */
for (i = 0; i < DRM_HASH_SIZE; i++) {
for (pt = dev->magiclist[i].head; pt; pt = next) {
next = pt->next;
drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
}
dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
}
#ifdef DRM_AGP
/* Clear AGP information */
if (dev->agp) {
drm_agp_mem_t *temp;
drm_agp_mem_t *temp_next;
temp = dev->agp->memory;
while(temp != NULL) {
temp_next = temp->next;
drm_free_agp(temp->handle, temp->pages);
drm_free(temp, sizeof(*temp), DRM_MEM_AGPLISTS);
temp = temp_next;
}
if (dev->agp->acquired)
agp_release(dev->agp->agpdev);
drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS);
dev->agp = NULL;
}
#endif
/* Clear vma list (only built for debugging) */
if (dev->vmalist) {
for (vma = dev->vmalist; vma; vma = vma_next) {
vma_next = vma->next;
drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
}
dev->vmalist = NULL;
}
/* Clear map area and mtrr information */
if (dev->maplist) {
for (i = 0; i < dev->map_count; i++) {
map = dev->maplist[i];
switch (map->type) {
case _DRM_REGISTERS:
case _DRM_FRAME_BUFFER:
#ifdef CONFIG_MTRR
if (map->mtrr >= 0) {
int retcode;
retcode = mtrr_del(map->mtrr,
map->offset,
map->size);
DRM_DEBUG("mtrr_del = %d\n", retcode);
}
#endif
drm_ioremapfree(map->handle, map->size);
break;
case _DRM_SHM:
drm_free_pages((unsigned long)map->handle,
drm_order(map->size)
- PAGE_SHIFT,
DRM_MEM_SAREA);
break;
case _DRM_AGP:
break; /* XXX */
}
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
}
drm_free(dev->maplist,
dev->map_count * sizeof(*dev->maplist),
DRM_MEM_MAPS);
dev->maplist = NULL;
dev->map_count = 0;
}
if (dev->lock.hw_lock) {
dev->lock.hw_lock = NULL; /* SHM removed */
dev->lock.pid = 0;
wakeup(&dev->lock.lock_queue);
}
lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
return 0;
}
/* tdfx_init is called via tdfx_attach at module load time. */
static int
tdfx_init(device_t nbdev)
{
drm_device_t *dev = device_get_softc(nbdev);
int retcode;
DRM_DEBUG("\n");
memset((void *)dev, 0, sizeof(*dev));
simple_lock_init(&dev->count_lock);
lockinit(&dev->dev_lock, PZERO, "drmlk", 0, 0);
#if 0
drm_parse_options(tdfx);
#endif
dev->device = nbdev;
dev->devnode = make_dev(&tdfx_cdevsw,
device_get_unit(nbdev),
DRM_DEV_UID,
DRM_DEV_GID,
DRM_DEV_MODE,
TDFX_NAME);
dev->name = TDFX_NAME;
drm_mem_init();
drm_sysctl_init(dev);
TAILQ_INIT(&dev->files);
#ifdef DRM_AGP
dev->agp = drm_agp_init();
#endif
if((retcode = drm_ctxbitmap_init(dev))) {
DRM_ERROR("Cannot allocate memory for context bitmap.\n");
drm_sysctl_cleanup(dev);
tdfx_takedown(dev);
return retcode;
}
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
TDFX_NAME,
TDFX_MAJOR,
TDFX_MINOR,
TDFX_PATCHLEVEL,
TDFX_DATE,
device_get_unit(nbdev));
return 0;
}
/* tdfx_cleanup is called via tdfx_detach at module unload time. */
static void
tdfx_cleanup(device_t nbdev)
{
drm_device_t *dev = device_get_softc(nbdev);
DRM_DEBUG("\n");
drm_sysctl_cleanup(dev);
destroy_dev(dev->devnode);
DRM_INFO("Module unloaded\n");
drm_ctxbitmap_cleanup(dev);
tdfx_takedown(dev);
}
static int
tdfx_version(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_version_t version;
int len;
version = *(drm_version_t *) data;
#define DRM_COPY(name,value) \
len = strlen(value); \
if (len > name##_len) len = name##_len; \
name##_len = strlen(value); \
if (len && name) { \
int error = copyout(value, name, len); \
if (error) return error; \
}
version.version_major = TDFX_MAJOR;
version.version_minor = TDFX_MINOR;
version.version_patchlevel = TDFX_PATCHLEVEL;
DRM_COPY(version.name, TDFX_NAME);
DRM_COPY(version.date, TDFX_DATE);
DRM_COPY(version.desc, TDFX_DESC);
*(drm_version_t *) data = version;
return 0;
}
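
The DRM_COPY macro above gives DRM_IOCTL_VERSION its usual two-pass behaviour: the *_len fields are always rewritten with the real string lengths, and the strings are copied out only when the caller supplied buffers. A hedged user-space sketch of that pattern, assuming an open DRM descriptor (allocation failures not checked):
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include "drm.h"    /* drm_version_t, DRM_IOCTL_VERSION */
static void print_drm_version(int fd)
{
    drm_version_t v;
    memset(&v, 0, sizeof(v));
    ioctl(fd, DRM_IOCTL_VERSION, &v);    /* pass 1: lengths only */
    v.name = malloc(v.name_len + 1);
    v.date = malloc(v.date_len + 1);
    v.desc = malloc(v.desc_len + 1);
    ioctl(fd, DRM_IOCTL_VERSION, &v);    /* pass 2: copy the strings */
    v.name[v.name_len] = v.date[v.date_len] = v.desc[v.desc_len] = '\0';
    printf("%s %d.%d.%d (%s): %s\n", v.name, v.version_major,
           v.version_minor, v.version_patchlevel, v.date, v.desc);
    free(v.name); free(v.date); free(v.desc);
}
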
static int
tdfx_open(dev_t kdev, int flags, int fmt, struct proc *p)
{
drm_device_t *dev = TDFX_SOFTC(minor(kdev));
int retcode = 0;
DRM_DEBUG("open_count = %d\n", dev->open_count);
device_busy(dev->device);
if (!(retcode = drm_open_helper(kdev, flags, fmt, p, dev))) {
atomic_inc(&dev->total_open);
simple_lock(&dev->count_lock);
if (!dev->open_count++) {
simple_unlock(&dev->count_lock);
retcode = tdfx_setup(dev);
}
simple_unlock(&dev->count_lock);
}
device_unbusy(dev->device);
return retcode;
}
static int
tdfx_close(dev_t kdev, int flags, int fmt, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
int retcode = 0;
DRM_DEBUG("open_count = %d\n", dev->open_count);
if (!(retcode = drm_close(kdev, flags, fmt, p))) {
atomic_inc(&dev->total_close);
simple_lock(&dev->count_lock);
if (!--dev->open_count) {
if (atomic_read(&dev->ioctl_count) || dev->blocked) {
DRM_ERROR("Device busy: %d %d\n",
atomic_read(&dev->ioctl_count),
dev->blocked);
simple_unlock(&dev->count_lock);
return EBUSY;
}
simple_unlock(&dev->count_lock);
device_unbusy(dev->device);
return tdfx_takedown(dev);
}
simple_unlock(&dev->count_lock);
}
return retcode;
}
/* tdfx_ioctl is called whenever a process performs an ioctl on /dev/drm. */
static int
tdfx_ioctl(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
int nr = DRM_IOCTL_NR(cmd);
drm_device_t *dev = kdev->si_drv1;
drm_file_t *priv;
int retcode = 0;
drm_ioctl_desc_t *ioctl;
d_ioctl_t *func;
DRM_DEBUG("dev=%p\n", dev);
priv = drm_find_file_by_proc(dev, p);
if (!priv) {
DRM_DEBUG("can't find authenticator\n");
return EINVAL;
}
atomic_inc(&dev->ioctl_count);
atomic_inc(&dev->total_ioctl);
++priv->ioctl_count;
DRM_DEBUG("pid = %d, cmd = 0x%02lx, nr = 0x%02x, auth = %d\n",
p->p_pid, cmd, nr, priv->authenticated);
switch (cmd) {
case FIONBIO:
atomic_dec(&dev->ioctl_count);
return 0;
case FIOASYNC:
atomic_dec(&dev->ioctl_count);
dev->flags |= FASYNC;
return 0;
case FIOSETOWN:
atomic_dec(&dev->ioctl_count);
return fsetown(*(int *)data, &dev->buf_sigio);
case FIOGETOWN:
atomic_dec(&dev->ioctl_count);
*(int *) data = fgetown(dev->buf_sigio);
return 0;
}
if (nr >= TDFX_IOCTL_COUNT) {
retcode = EINVAL;
} else {
ioctl = &tdfx_ioctls[nr];
func = ioctl->func;
if (!func) {
DRM_DEBUG("no function\n");
retcode = EINVAL;
} else if ((ioctl->root_only && suser(p))
|| (ioctl->auth_needed && !priv->authenticated)) {
retcode = EACCES;
} else {
retcode = (func)(kdev, cmd, data, flags, p);
}
}
atomic_dec(&dev->ioctl_count);
return retcode;
}
static int
tdfx_lock(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
int ret = 0;
drm_lock_t lock;
#if DRM_DMA_HISTOGRAM
getnanotime(&dev->lck_start);
#endif
lock = *(drm_lock_t *) data;
if (lock.context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
p->p_pid, lock.context);
return EINVAL;
}
DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
lock.context, p->p_pid, dev->lock.hw_lock->lock,
lock.flags);
#if 0
/* dev->queue_count == 0 right now for
tdfx. FIXME? */
if (lock.context < 0 || lock.context >= dev->queue_count)
return EINVAL;
#endif
if (!ret) {
#if 0
if (_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)
!= lock.context) {
long j = ticks - dev->lock.lock_time;
if (lock.context == tdfx_res_ctx.handle &&
j >= 0 && j < DRM_LOCK_SLICE) {
/* Can't take lock if we just had it and
there is contention. */
DRM_DEBUG("%d (pid %d) delayed j=%d dev=%d ticks=%d\n",
lock.context, p->p_pid, j,
dev->lock.lock_time, ticks);
ret = tsleep(&never, PZERO|PCATCH, "drmlk1",
DRM_LOCK_SLICE - j);
if (ret)
return ret;
DRM_DEBUG("ticks=%d\n", ticks);
}
}
#endif
for (;;) {
if (!dev->lock.hw_lock) {
/* Device has been unregistered */
ret = EINTR;
break;
}
if (drm_lock_take(&dev->lock.hw_lock->lock,
lock.context)) {
dev->lock.pid = p->p_pid;
dev->lock.lock_time = ticks;
atomic_inc(&dev->total_locks);
break; /* Got lock */
}
/* Contention */
atomic_inc(&dev->total_sleeps);
ret = tsleep(&dev->lock.lock_queue,
PZERO|PCATCH,
"drmlk2",
0);
if (ret)
break;
}
}
#if 0
if (!ret && dev->last_context != lock.context &&
lock.context != tdfx_res_ctx.handle &&
dev->last_context != tdfx_res_ctx.handle) {
add_wait_queue(&dev->context_wait, &entry);
current->state = TASK_INTERRUPTIBLE;
/* PRE: dev->last_context != lock.context */
tdfx_context_switch(dev, dev->last_context, lock.context);
/* POST: we will wait for the context
switch and will dispatch on a later call
when dev->last_context == lock.context
NOTE WE HOLD THE LOCK THROUGHOUT THIS
TIME! */
current->policy |= SCHED_YIELD;
schedule();
current->state = TASK_RUNNING;
remove_wait_queue(&dev->context_wait, &entry);
if (signal_pending(current)) {
ret = EINTR;
} else if (dev->last_context != lock.context) {
DRM_ERROR("Context mismatch: %d %d\n",
dev->last_context, lock.context);
}
}
#endif
if (!ret) {
if (lock.flags & _DRM_LOCK_READY) {
/* Wait for space in DMA/FIFO */
}
if (lock.flags & _DRM_LOCK_QUIESCENT) {
/* Make hardware quiescent */
#if 0
tdfx_quiescent(dev);
#endif
}
}
#if 0
DRM_ERROR("pid = %5d, old counter = %5ld\n",
p->p_pid, current->counter);
#endif
#if 0
while (current->counter > 25)
current->counter >>= 1; /* decrease time slice */
DRM_ERROR("pid = %5d, new counter = %5ld\n",
p->p_pid, current->counter);
#endif
DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");
#if DRM_DMA_HISTOGRAM
{
struct timespec ts;
getnanotime(&ts);
timespecsub(&ts, &dev->lck_start);
atomic_inc(&dev->histo.lhld[drm_histogram_slot(&ts)]);
}
#endif
return ret;
}
static int
tdfx_unlock(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
drm_lock_t lock;
lock = *(drm_lock_t *) data;
if (lock.context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
p->p_pid, lock.context);
return EINVAL;
}
DRM_DEBUG("%d frees lock (%d holds)\n",
lock.context,
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
atomic_inc(&dev->total_unlocks);
if (_DRM_LOCK_IS_CONT(dev->lock.hw_lock->lock))
atomic_inc(&dev->total_contends);
drm_lock_transfer(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT);
/* FIXME: Try to send data to card here */
if (!dev->context_flag) {
if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT)) {
DRM_ERROR("\n");
}
}
return 0;
}
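
Taken together, tdfx_lock() and tdfx_unlock() give clients the usual heavyweight-lock bracket around hardware access. A hedged sketch of the client side, assuming an authenticated descriptor and a context handle obtained from DRM_IOCTL_ADD_CTX (the kernel context itself is rejected above):
#include <sys/ioctl.h>
#include "drm.h"    /* drm_lock_t, DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK */
static int with_hw_lock(int fd, int ctx)
{
    drm_lock_t lock;
    lock.context = ctx;
    lock.flags   = _DRM_LOCK_READY | _DRM_LOCK_QUIESCENT;
    if (ioctl(fd, DRM_IOCTL_LOCK, &lock) != 0)    /* may sleep in "drmlk2" */
        return -1;
    /* ... touch the hardware ... */
    lock.flags = 0;
    return ioctl(fd, DRM_IOCTL_UNLOCK, &lock);
}
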

View File

@ -1,45 +0,0 @@
/* tdfx_drv.h -- Private header for tdfx driver -*- c -*-
* Created: Thu Oct 7 10:40:04 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*
*/
#ifndef _TDFX_DRV_H_
#define _TDFX_DRV_H_
/* tdfx_context.c */
extern d_ioctl_t tdfx_resctx;
extern d_ioctl_t tdfx_addctx;
extern d_ioctl_t tdfx_modctx;
extern d_ioctl_t tdfx_getctx;
extern d_ioctl_t tdfx_switchctx;
extern d_ioctl_t tdfx_newctx;
extern d_ioctl_t tdfx_rmctx;
extern int tdfx_context_switch(drm_device_t *dev, int old, int new);
extern int tdfx_context_switch_complete(drm_device_t *dev, int new);
#endif

View File

@ -27,7 +27,7 @@
* Authors: Rickard E. (Rik) Faith <faith@valinux.com> * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
* Kevin E. Martin <martin@valinux.com> * Kevin E. Martin <martin@valinux.com>
* *
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/xf86drm.c,v 1.24 2001/08/18 02:51:13 dawes Exp $ * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/xf86drm.c,v 1.25 2001/08/27 17:40:59 dawes Exp $
* *
*/ */

View File

@ -3,13 +3,14 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
O_TARGET := drm.o O_TARGET := drm.o
list-multi := gamma.o tdfx.o r128.o mga.o i810.o ffb.o radeon.o list-multi := gamma.o tdfx.o r128.o mga.o i810.o i830.o ffb.o radeon.o
gamma-objs := gamma_drv.o gamma_dma.o gamma-objs := gamma_drv.o gamma_dma.o
tdfx-objs := tdfx_drv.o tdfx-objs := tdfx_drv.o
r128-objs := r128_drv.o r128_cce.o r128_state.o r128-objs := r128_drv.o r128_cce.o r128_state.o
mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o
i810-objs := i810_drv.o i810_dma.o i810-objs := i810_drv.o i810_dma.o
i830-objs := i830_drv.o i830_dma.o
radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o
ffb-objs := ffb_drv.o ffb_context.o ffb-objs := ffb_drv.o ffb_context.o
@ -19,6 +20,7 @@ obj-$(CONFIG_DRM_R128) += r128.o
obj-$(CONFIG_DRM_RADEON)+= radeon.o obj-$(CONFIG_DRM_RADEON)+= radeon.o
obj-$(CONFIG_DRM_MGA) += mga.o obj-$(CONFIG_DRM_MGA) += mga.o
obj-$(CONFIG_DRM_I810) += i810.o obj-$(CONFIG_DRM_I810) += i810.o
obj-$(CONFIG_DRM_I830) += i830.o
obj-$(CONFIG_DRM_FFB) += ffb.o obj-$(CONFIG_DRM_FFB) += ffb.o
include $(TOPDIR)/Rules.make include $(TOPDIR)/Rules.make
@ -35,6 +37,9 @@ mga.o: $(mga-objs) $(lib)
i810.o: $(i810-objs) $(lib) i810.o: $(i810-objs) $(lib)
$(LD) -r -o $@ $(i810-objs) $(lib) $(LD) -r -o $@ $(i810-objs) $(lib)
i830.o: $(i830-objs) $(lib)
$(LD) -r -o $@ $(i830-objs) $(lib)
r128.o: $(r128-objs) $(lib) r128.o: $(r128-objs) $(lib)
$(LD) -r -o $@ $(r128-objs) $(lib) $(LD) -r -o $@ $(r128-objs) $(lib)

View File

@ -27,6 +27,10 @@
* Authors: * Authors:
* Rickard E. (Rik) Faith <faith@valinux.com> * Rickard E. (Rik) Faith <faith@valinux.com>
* Gareth Hughes <gareth@valinux.com> * Gareth Hughes <gareth@valinux.com>
* ChangeLog:
* 2001-11-16 Torsten Duwe <duwe@caldera.de>
* added context constructor/destructor hooks,
* needed by SiS driver's memory management.
*/ */
#define __NO_VERSION__ #define __NO_VERSION__
@ -316,6 +320,10 @@ int DRM(addctx)( struct inode *inode, struct file *filp,
/* Should this return -EBUSY instead? */ /* Should this return -EBUSY instead? */
return -ENOMEM; return -ENOMEM;
} }
#ifdef DRIVER_CTX_CTOR
if ( ctx.handle != DRM_KERNEL_CONTEXT )
DRIVER_CTX_CTOR(ctx.handle); /* XXX: also pass dev ? */
#endif
if ( copy_to_user( (drm_ctx_t *)arg, &ctx, sizeof(ctx) ) ) if ( copy_to_user( (drm_ctx_t *)arg, &ctx, sizeof(ctx) ) )
return -EFAULT; return -EFAULT;
@ -390,6 +398,9 @@ int DRM(rmctx)( struct inode *inode, struct file *filp,
priv->remove_auth_on_close = 1; priv->remove_auth_on_close = 1;
} }
if ( ctx.handle != DRM_KERNEL_CONTEXT ) { if ( ctx.handle != DRM_KERNEL_CONTEXT ) {
#ifdef DRIVER_CTX_DTOR
DRIVER_CTX_DTOR(ctx.handle); /* XXX: also pass dev ? */
#endif
DRM(ctxbitmap_free)( dev, ctx.handle ); DRM(ctxbitmap_free)( dev, ctx.handle );
} }
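
The new hooks are opt-in: a driver that needs per-context bookkeeping defines DRIVER_CTX_CTOR/DRIVER_CTX_DTOR before the drm_context.h template is pulled in, which is exactly what the sis.h hunk later in this commit does with sis_init_context()/sis_final_context(). A minimal sketch of that wiring (driver names here are illustrative):
/* In the driver's private header, before the DRM templates are included: */
extern int mydrv_init_context(int context);    /* illustrative names */
extern int mydrv_final_context(int context);
#define DRIVER_CTX_CTOR mydrv_init_context     /* runs from DRM(addctx) */
#define DRIVER_CTX_DTOR mydrv_final_context    /* runs from DRM(rmctx)  */
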

View File

@ -125,21 +125,31 @@ ssize_t DRM(read)(struct file *filp, char *buf, size_t count, loff_t *off)
int avail; int avail;
int send; int send;
int cur; int cur;
DECLARE_WAITQUEUE(wait, current);
DRM_DEBUG("%p, %p\n", dev->buf_rp, dev->buf_wp); DRM_DEBUG("%p, %p\n", dev->buf_rp, dev->buf_wp);
add_wait_queue(&dev->buf_readers, &wait);
set_current_state(TASK_INTERRUPTIBLE);
while (dev->buf_rp == dev->buf_wp) { while (dev->buf_rp == dev->buf_wp) {
DRM_DEBUG(" sleeping\n"); DRM_DEBUG(" sleeping\n");
if (filp->f_flags & O_NONBLOCK) { if (filp->f_flags & O_NONBLOCK) {
remove_wait_queue(&dev->buf_readers, &wait);
set_current_state(TASK_RUNNING);
return -EAGAIN; return -EAGAIN;
} }
interruptible_sleep_on(&dev->buf_readers); schedule(); /* wait for dev->buf_readers */
if (signal_pending(current)) { if (signal_pending(current)) {
DRM_DEBUG(" interrupted\n"); DRM_DEBUG(" interrupted\n");
remove_wait_queue(&dev->buf_readers, &wait);
set_current_state(TASK_RUNNING);
return -ERESTARTSYS; return -ERESTARTSYS;
} }
DRM_DEBUG(" awake\n"); DRM_DEBUG(" awake\n");
set_current_state(TASK_INTERRUPTIBLE);
} }
remove_wait_queue(&dev->buf_readers, &wait);
set_current_state(TASK_RUNNING);
left = (dev->buf_rp + DRM_BSZ - dev->buf_wp) % DRM_BSZ; left = (dev->buf_rp + DRM_BSZ - dev->buf_wp) % DRM_BSZ;
avail = DRM_BSZ - left; avail = DRM_BSZ - left;
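
The hunk above replaces interruptible_sleep_on(), which can miss a wakeup that fires between testing the condition and going to sleep, with the open-coded wait-queue idiom: register on the queue and mark the task INTERRUPTIBLE before re-testing. Reduced to its skeleton (the condition and queue names are placeholders):
DECLARE_WAITQUEUE(wait, current);
add_wait_queue(&queue, &wait);
set_current_state(TASK_INTERRUPTIBLE);
while (!condition) {
    if (signal_pending(current))
        break;                    /* caller then returns -ERESTARTSYS */
    schedule();                   /* sleeps until wake_up(&queue) */
    set_current_state(TASK_INTERRUPTIBLE);
}
set_current_state(TASK_RUNNING);
remove_wait_queue(&queue, &wait);
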

View File

@ -86,6 +86,7 @@ static inline void i810_print_status_page(drm_device_t *dev)
DRM_DEBUG( "hw_status: LpRing Head ptr : %x\n", temp[1]); DRM_DEBUG( "hw_status: LpRing Head ptr : %x\n", temp[1]);
DRM_DEBUG( "hw_status: IRing Head ptr : %x\n", temp[2]); DRM_DEBUG( "hw_status: IRing Head ptr : %x\n", temp[2]);
DRM_DEBUG( "hw_status: Reserved : %x\n", temp[3]); DRM_DEBUG( "hw_status: Reserved : %x\n", temp[3]);
DRM_DEBUG( "hw_status: Last Render: %x\n", temp[4]);
DRM_DEBUG( "hw_status: Driver Counter : %d\n", temp[5]); DRM_DEBUG( "hw_status: Driver Counter : %d\n", temp[5]);
for(i = 6; i < dma->buf_count + 6; i++) { for(i = 6; i < dma->buf_count + 6; i++) {
DRM_DEBUG( "buffer status idx : %d used: %d\n", i - 6, temp[i]); DRM_DEBUG( "buffer status idx : %d used: %d\n", i - 6, temp[i]);
@ -471,6 +472,9 @@ static int i810_dma_initialize(drm_device_t *dev,
dev_priv->back_offset = init->back_offset; dev_priv->back_offset = init->back_offset;
dev_priv->depth_offset = init->depth_offset; dev_priv->depth_offset = init->depth_offset;
dev_priv->overlay_offset = init->overlay_offset;
dev_priv->overlay_physical = init->overlay_physical;
dev_priv->front_di1 = init->front_offset | init->pitch_bits; dev_priv->front_di1 = init->front_offset | init->pitch_bits;
dev_priv->back_di1 = init->back_offset | init->pitch_bits; dev_priv->back_di1 = init->back_offset | init->pitch_bits;
dev_priv->zi1 = init->depth_offset | init->pitch_bits; dev_priv->zi1 = init->depth_offset | init->pitch_bits;
@ -1259,3 +1263,156 @@ int i810_docopy(struct inode *inode, struct file *filp, unsigned int cmd,
if(VM_DONTCOPY == 0) return 1; if(VM_DONTCOPY == 0) return 1;
return 0; return 0;
} }
static void i810_dma_dispatch_mc(drm_device_t *dev, drm_buf_t *buf, int used,
unsigned int last_render)
{
drm_i810_private_t *dev_priv = dev->dev_private;
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
unsigned long address = (unsigned long)buf->bus_address;
unsigned long start = address - dev->agp->base;
int u;
RING_LOCALS;
i810_kernel_lost_context(dev);
u = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
I810_BUF_HARDWARE);
if(u != I810_BUF_CLIENT) {
DRM_DEBUG("MC found buffer that isn't mine!\n");
}
if (used > 4*1024)
used = 0;
sarea_priv->dirty = 0x7f;
DRM_DEBUG("dispatch mc addr 0x%lx, used 0x%x\n",
address, used);
dev_priv->counter++;
DRM_DEBUG("dispatch counter : %ld\n", dev_priv->counter);
DRM_DEBUG("i810_dma_dispatch_mc\n");
DRM_DEBUG("start : %lx\n", start);
DRM_DEBUG("used : %d\n", used);
DRM_DEBUG("start + used - 4 : %ld\n", start + used - 4);
if (buf_priv->currently_mapped == I810_BUF_MAPPED) {
if (used & 4) {
*(u32 *)((u32)buf_priv->virtual + used) = 0;
used += 4;
}
i810_unmap_buffer(buf);
}
BEGIN_LP_RING(4);
OUT_RING( CMD_OP_BATCH_BUFFER );
OUT_RING( start | BB1_PROTECTED );
OUT_RING( start + used - 4 );
OUT_RING( 0 );
ADVANCE_LP_RING();
BEGIN_LP_RING(8);
OUT_RING( CMD_STORE_DWORD_IDX );
OUT_RING( buf_priv->my_use_idx );
OUT_RING( I810_BUF_FREE );
OUT_RING( 0 );
OUT_RING( CMD_STORE_DWORD_IDX );
OUT_RING( 16 );
OUT_RING( last_render );
OUT_RING( 0 );
ADVANCE_LP_RING();
}
int i810_dma_mc(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_device_dma_t *dma = dev->dma;
drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
u32 *hw_status = (u32 *)dev_priv->hw_status_page;
drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
dev_priv->sarea_priv;
drm_i810_mc_t mc;
if (copy_from_user(&mc, (drm_i810_mc_t *)arg, sizeof(mc)))
return -EFAULT;
if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
DRM_ERROR("i810_dma_mc called without lock held\n");
return -EINVAL;
}
i810_dma_dispatch_mc(dev, dma->buflist[mc.idx], mc.used,
mc.last_render );
atomic_add(mc.used, &dev->counts[_DRM_STAT_SECONDARY]);
atomic_inc(&dev->counts[_DRM_STAT_DMA]);
sarea_priv->last_enqueue = dev_priv->counter-1;
sarea_priv->last_dispatch = (int) hw_status[5];
return 0;
}
int i810_rstatus(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
return (int)(((u32 *)(dev_priv->hw_status_page))[4]);
}
int i810_ov0_info(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
drm_i810_overlay_t data;
data.offset = dev_priv->overlay_offset;
data.physical = dev_priv->overlay_physical;
if (copy_to_user((drm_i810_overlay_t *)arg, &data, sizeof(data)))
return -EFAULT;
return 0;
}
int i810_fstatus(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
DRM_ERROR("i810_fstatus called without lock held\n");
return -EINVAL;
}
return I810_READ(0x30008);
}
int i810_ov0_flip(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
DRM_ERROR("i810_ov0_flip called without lock held\n");
return -EINVAL;
}
/* Tell the overlay to update */
I810_WRITE(0x30000, dev_priv->overlay_physical | 0x80000000);
return 0;
}
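
The two overlay ioctls are meant to be used as a pair from an authenticated client: DRM_IOCTL_I810_OV0INFO reports where the overlay register block lives, and DRM_IOCTL_I810_OV0FLIP (with the heavyweight lock held, as checked above) asks the chip to latch newly written overlay registers. A hedged sketch:
#include <sys/ioctl.h>
#include "drm.h"    /* drm_i810_overlay_t and the i810 ioctl numbers */
static int flip_overlay(int fd)
{
    drm_i810_overlay_t ov;
    if (ioctl(fd, DRM_IOCTL_I810_OV0INFO, &ov) != 0)
        return -1;
    /* ... client updates the overlay registers described by
     *     ov.offset / ov.physical, holding the DRM lock ... */
    return ioctl(fd, DRM_IOCTL_I810_OV0FLIP, 0);
}
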

View File

@ -112,6 +112,8 @@ typedef struct _drm_i810_init {
unsigned int front_offset; unsigned int front_offset;
unsigned int back_offset; unsigned int back_offset;
unsigned int depth_offset; unsigned int depth_offset;
unsigned int overlay_offset;
unsigned int overlay_physical;
unsigned int w; unsigned int w;
unsigned int h; unsigned int h;
unsigned int pitch; unsigned int pitch;
@ -196,4 +198,18 @@ typedef struct drm_i810_dma {
int granted; int granted;
} drm_i810_dma_t; } drm_i810_dma_t;
typedef struct _drm_i810_overlay_t {
unsigned int offset; /* Address of the Overlay Regs */
unsigned int physical;
} drm_i810_overlay_t;
typedef struct _drm_i810_mc {
int idx; /* buffer index */
int used; /* nr bytes in use */
int num_blocks; /* number of GFXBlocks */
int *length; /* List of lengths for GFXBlocks (FUTURE)*/
unsigned int last_render; /* Last Render Request */
} drm_i810_mc_t;
#endif /* _I810_DRM_H_ */ #endif /* _I810_DRM_H_ */
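
drm_i810_mc_t is the argument of the new DRM_IOCTL_I810_MC ioctl (0x4c in the drm.h hunk below), which hands a client-built batch to i810_dma_dispatch_mc() above. A hedged sketch of the submission call, assuming the buffer index and byte count come from the normal GETBUF path:
#include <stddef.h>
#include <sys/ioctl.h>
#include "drm.h"    /* pulls in i810_drm.h */
static int submit_mc(int fd, int idx, int used, unsigned int last_render)
{
    drm_i810_mc_t mc;
    mc.idx         = idx;          /* buffer from DRM_IOCTL_I810_GETBUF */
    mc.used        = used;         /* bytes actually filled in */
    mc.num_blocks  = 0;            /* GFXBlock list not consumed yet */
    mc.length      = NULL;
    mc.last_render = last_render;  /* readable back via I810_RSTATUS */
    return ioctl(fd, DRM_IOCTL_I810_MC, &mc);
}
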

View File

@ -39,10 +39,10 @@
#define DRIVER_NAME "i810" #define DRIVER_NAME "i810"
#define DRIVER_DESC "Intel i810" #define DRIVER_DESC "Intel i810"
#define DRIVER_DATE "20010917" #define DRIVER_DATE "20010920"
#define DRIVER_MAJOR 1 #define DRIVER_MAJOR 1
#define DRIVER_MINOR 1 #define DRIVER_MINOR 2
#define DRIVER_PATCHLEVEL 0 #define DRIVER_PATCHLEVEL 0
#define DRIVER_IOCTLS \ #define DRIVER_IOCTLS \
@ -54,7 +54,12 @@
[DRM_IOCTL_NR(DRM_IOCTL_I810_GETBUF)] = { i810_getbuf, 1, 0 }, \ [DRM_IOCTL_NR(DRM_IOCTL_I810_GETBUF)] = { i810_getbuf, 1, 0 }, \
[DRM_IOCTL_NR(DRM_IOCTL_I810_SWAP)] = { i810_swap_bufs, 1, 0 }, \ [DRM_IOCTL_NR(DRM_IOCTL_I810_SWAP)] = { i810_swap_bufs, 1, 0 }, \
[DRM_IOCTL_NR(DRM_IOCTL_I810_COPY)] = { i810_copybuf, 1, 0 }, \ [DRM_IOCTL_NR(DRM_IOCTL_I810_COPY)] = { i810_copybuf, 1, 0 }, \
[DRM_IOCTL_NR(DRM_IOCTL_I810_DOCOPY)] = { i810_docopy, 1, 0 }, [DRM_IOCTL_NR(DRM_IOCTL_I810_DOCOPY)] = { i810_docopy, 1, 0 }, \
[DRM_IOCTL_NR(DRM_IOCTL_I810_OV0INFO)] = { i810_ov0_info, 1, 0 }, \
[DRM_IOCTL_NR(DRM_IOCTL_I810_FSTATUS)] = { i810_fstatus, 1, 0 }, \
[DRM_IOCTL_NR(DRM_IOCTL_I810_OV0FLIP)] = { i810_ov0_flip, 1, 0 }, \
[DRM_IOCTL_NR(DRM_IOCTL_I810_MC)] = { i810_dma_mc, 1, 1 }, \
[DRM_IOCTL_NR(DRM_IOCTL_I810_RSTATUS)] = { i810_rstatus, 1, 0 }
#define __HAVE_COUNTERS 4 #define __HAVE_COUNTERS 4

View File

@ -73,6 +73,8 @@ typedef struct drm_i810_private {
int back_offset; int back_offset;
int depth_offset; int depth_offset;
int overlay_offset;
int overlay_physical;
int w, h; int w, h;
int pitch; int pitch;
} drm_i810_private_t; } drm_i810_private_t;
@ -94,6 +96,18 @@ extern int i810_copybuf(struct inode *inode, struct file *filp,
extern int i810_docopy(struct inode *inode, struct file *filp, extern int i810_docopy(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg); unsigned int cmd, unsigned long arg);
extern int i810_rstatus(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int i810_ov0_info(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int i810_fstatus(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int i810_ov0_flip(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int i810_dma_mc(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern void i810_dma_quiescent(drm_device_t *dev); extern void i810_dma_quiescent(drm_device_t *dev);
#define I810_VERBOSE 0 #define I810_VERBOSE 0

View File

@ -40,12 +40,12 @@
#define DRIVER_PATCHLEVEL 0 #define DRIVER_PATCHLEVEL 0
#define DRIVER_IOCTLS \ #define DRIVER_IOCTLS \
[DRM_IOCTL_NR(SIS_IOCTL_FB_ALLOC)] = { sis_fb_alloc, 1, 1 }, \ [DRM_IOCTL_NR(SIS_IOCTL_FB_ALLOC)] = { sis_fb_alloc, 1, 0 }, \
[DRM_IOCTL_NR(SIS_IOCTL_FB_FREE)] = { sis_fb_free, 1, 1 }, \ [DRM_IOCTL_NR(SIS_IOCTL_FB_FREE)] = { sis_fb_free, 1, 0 }, \
/* AGP Memory Management */ \ /* AGP Memory Management */ \
[DRM_IOCTL_NR(SIS_IOCTL_AGP_INIT)] = { sisp_agp_init, 1, 1 }, \ [DRM_IOCTL_NR(SIS_IOCTL_AGP_INIT)] = { sisp_agp_init, 1, 0 }, \
[DRM_IOCTL_NR(SIS_IOCTL_AGP_ALLOC)] = { sisp_agp_alloc, 1, 1 }, \ [DRM_IOCTL_NR(SIS_IOCTL_AGP_ALLOC)] = { sisp_agp_alloc, 1, 0 }, \
[DRM_IOCTL_NR(SIS_IOCTL_AGP_FREE)] = { sisp_agp_free, 1, 1 } [DRM_IOCTL_NR(SIS_IOCTL_AGP_FREE)] = { sisp_agp_free, 1, 0 }
#if 0 /* these don't appear to be defined */ #if 0 /* these don't appear to be defined */
/* SIS Stereo */ /* SIS Stereo */
[DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = { sis_control, 1, 1 }, [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = { sis_control, 1, 1 },

View File

@ -3,13 +3,14 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
O_TARGET := drm.o O_TARGET := drm.o
list-multi := gamma.o tdfx.o r128.o mga.o i810.o ffb.o radeon.o list-multi := gamma.o tdfx.o r128.o mga.o i810.o i830.o ffb.o radeon.o
gamma-objs := gamma_drv.o gamma_dma.o gamma-objs := gamma_drv.o gamma_dma.o
tdfx-objs := tdfx_drv.o tdfx-objs := tdfx_drv.o
r128-objs := r128_drv.o r128_cce.o r128_state.o r128-objs := r128_drv.o r128_cce.o r128_state.o
mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o
i810-objs := i810_drv.o i810_dma.o i810-objs := i810_drv.o i810_dma.o
i830-objs := i830_drv.o i830_dma.o
radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o
ffb-objs := ffb_drv.o ffb_context.o ffb-objs := ffb_drv.o ffb_context.o
@ -19,6 +20,7 @@ obj-$(CONFIG_DRM_R128) += r128.o
obj-$(CONFIG_DRM_RADEON)+= radeon.o obj-$(CONFIG_DRM_RADEON)+= radeon.o
obj-$(CONFIG_DRM_MGA) += mga.o obj-$(CONFIG_DRM_MGA) += mga.o
obj-$(CONFIG_DRM_I810) += i810.o obj-$(CONFIG_DRM_I810) += i810.o
obj-$(CONFIG_DRM_I830) += i830.o
obj-$(CONFIG_DRM_FFB) += ffb.o obj-$(CONFIG_DRM_FFB) += ffb.o
include $(TOPDIR)/Rules.make include $(TOPDIR)/Rules.make
@ -35,6 +37,9 @@ mga.o: $(mga-objs) $(lib)
i810.o: $(i810-objs) $(lib) i810.o: $(i810-objs) $(lib)
$(LD) -r -o $@ $(i810-objs) $(lib) $(LD) -r -o $@ $(i810-objs) $(lib)
i830.o: $(i830-objs) $(lib)
$(LD) -r -o $@ $(i830-objs) $(lib)
r128.o: $(r128-objs) $(lib) r128.o: $(r128-objs) $(lib)
$(LD) -r -o $@ $(r128-objs) $(lib) $(LD) -r -o $@ $(r128-objs) $(lib)

View File

@ -31,15 +31,6 @@
# like this: # like this:
# make TREE=/usr/my-kernel-tree/include # make TREE=/usr/my-kernel-tree/include
# #
#
# ***** NOTE NOTE NOTE NOTE NOTE *****
# Because some distributions patch 2.2.x kernels to make kill_fasync have
# three parameters, this script tries to determine, via the examination of
# header files, if your kernel has been patched. If this detection is
# incorrect, you can override the value on the command line, like this:
# make PARAMS=2
# or
# make PARAMS=3
.SUFFIXES: .SUFFIXES:
@ -132,10 +123,6 @@ MODVERSIONS := $(shell gcc -E -nostdinc -I$(TREE) picker.c 2>/dev/null \
| grep -s 'MODVERSIONS = ' | cut -d' ' -f3) | grep -s 'MODVERSIONS = ' | cut -d' ' -f3)
AGP := $(shell gcc -E -nostdinc -I$(TREE) picker.c 2>/dev/null \ AGP := $(shell gcc -E -nostdinc -I$(TREE) picker.c 2>/dev/null \
| grep -s 'AGP = ' | cut -d' ' -f3) | grep -s 'AGP = ' | cut -d' ' -f3)
SIS := $(shell gcc -E -nostdinc -I$(TREE) picker.c 2>/dev/null \
| grep -s 'SIS = ' | cut -d' ' -f3)
PARAMS := $(shell if fgrep kill_fasync $(TREE)/linux/fs.h 2>/dev/null \
| egrep -q '(band|int, int)'; then echo 3; else echo 2; fi)
MACHINE := $(shell echo `uname -m`) MACHINE := $(shell echo `uname -m`)
ifeq ($(AGP),0) ifeq ($(AGP),0)
AGP := $(shell gcc -E -nostdinc -I$(TREE) picker.c 2>/dev/null \ AGP := $(shell gcc -E -nostdinc -I$(TREE) picker.c 2>/dev/null \
@ -148,9 +135,11 @@ DRMTEMPLATES += drm_agpsupport.h
MODS += mga.o MODS += mga.o
ifeq ($(MACHINE),i386) ifeq ($(MACHINE),i386)
MODS += i810.o MODS += i810.o
MODS += i830.o
endif endif
ifeq ($(MACHINE),i686) ifeq ($(MACHINE),i686)
MODS += i810.o MODS += i810.o
MODS += i830.o
endif endif
MGAOBJS = mga_drv.o mga_dma.o mga_state.o mga_warp.o MGAOBJS = mga_drv.o mga_dma.o mga_state.o mga_warp.o
@ -159,29 +148,23 @@ MGAHEADERS = mga.h mga_drv.h mga_drm.h $(DRMHEADERS) $(DRMTEMPLATES)
I810OBJS = i810_drv.o i810_dma.o I810OBJS = i810_drv.o i810_dma.o
I810HEADERS = i810.h i810_drv.h i810_drm.h $(DRMHEADERS) $(DRMTEMPLATES) I810HEADERS = i810.h i810_drv.h i810_drm.h $(DRMHEADERS) $(DRMTEMPLATES)
I830OBJS = i830_drv.o i830_dma.o
I830HEADERS = i830.h i830_drv.h i830_drm.h $(DRMHEADERS) $(DRMTEMPLATES)
endif endif
ifeq ($(MACHINE),alpha) ifeq ($(MACHINE),alpha)
MODCFLAGS+= -ffixed-8 -mno-fp-regs -mcpu=ev56 -Wa,-mev6 MODCFLAGS+= -ffixed-8 -mno-fp-regs -mcpu=ev56 -Wa,-mev6
endif endif
ifeq ($(SIS),1)
# It appears that the SiS driver makes calls to sis_malloc and sis_free, and
# that these calls are only defined if CONFIG_FB_SIS is selected. So, key
# off that to determine if we should attempt to build the SiS driver.
#
# A better way would be to detect the appropriate definitions in the header
# file to see if we can, at least, compile the driver.
MODS += sis.o MODS += sis.o
SISOBJS= sis_drv.o sis_mm.o sis_ds.o SISOBJS= sis_drv.o sis_mm.o sis_ds.o
SISHEADERS= sis_drv.h sis_drm.h $(DRMHEADERS) SISHEADERS= sis_drv.h sis_drm.h $(DRMHEADERS)
MODCFLAGS += -DCONFIG_DRM_SIS MODCFLAGS += -DCONFIG_DRM_SIS
endif
all::;@echo === KERNEL HEADERS IN $(TREE) all::;@echo === KERNEL HEADERS IN $(TREE)
all::;@echo === SMP=${SMP} MODULES=${MODULES} MODVERSIONS=${MODVERSIONS} AGP=${AGP} all::;@echo === SMP=${SMP} MODULES=${MODULES} MODVERSIONS=${MODVERSIONS} AGP=${AGP}
all::;@echo === kill_fasync has $(PARAMS) parameters
all::;@echo === Compiling for machine $(MACHINE) all::;@echo === Compiling for machine $(MACHINE)
all::;@echo === WARNING all::;@echo === WARNING
all::;@echo === WARNING Use 2.4.x kernels ONLY ! all::;@echo === WARNING Use 2.4.x kernels ONLY !
@ -206,9 +189,6 @@ endif
ifeq ($(MODVERSIONS),1) ifeq ($(MODVERSIONS),1)
MODCFLAGS += -DMODVERSIONS -include $(TREE)/linux/modversions.h MODCFLAGS += -DMODVERSIONS -include $(TREE)/linux/modversions.h
endif endif
ifeq ($(PARAMS),3)
MODCFLAGS += -DKILLFASYNCHASTHREEPARAMETERS
endif
# **** End of configuration # **** End of configuration
@ -252,6 +232,11 @@ i810_drv.o: i810_drv.c
i810.o: $(I810OBJS) $(LIBS) i810.o: $(I810OBJS) $(LIBS)
$(LD) -r $^ -o $@ $(LD) -r $^ -o $@
i830_drv.o: i830_drv.c
$(CC) $(MODCFLAGS) -DEXPORT_SYMTAB -I$(TREE) -c $< -o $@
i830.o: $(I830OBJS) $(LIBS)
$(LD) -r $^ -o $@
endif endif
.PHONY: ChangeLog .PHONY: ChangeLog
@ -272,6 +257,7 @@ $(TDFXOBJS): $(TDFXHEADERS)
ifeq ($(AGP),1) ifeq ($(AGP),1)
$(MGAOBJS): $(MGAHEADERS) $(MGAOBJS): $(MGAHEADERS)
$(I810OBJS): $(I810HEADERS) $(I810OBJS): $(I810HEADERS)
$(I830OBJS): $(I830HEADERS)
$(R128OBJS): $(R128HEADERS) $(R128OBJS): $(R128HEADERS)
$(RADEONOBJS): $(RADEONHEADERS) $(RADEONOBJS): $(RADEONHEADERS)
endif endif

View File

@ -104,9 +104,8 @@ typedef struct drm_tex_region {
#include "i810_drm.h" #include "i810_drm.h"
#include "r128_drm.h" #include "r128_drm.h"
#include "radeon_drm.h" #include "radeon_drm.h"
#ifdef CONFIG_DRM_SIS
#include "sis_drm.h" #include "sis_drm.h"
#endif #include "i830_drm.h"
typedef struct drm_version { typedef struct drm_version {
int version_major; /* Major version */ int version_major; /* Major version */
@ -449,6 +448,12 @@ typedef struct drm_scatter_gather {
#define DRM_IOCTL_I810_SWAP DRM_IO( 0x46) #define DRM_IOCTL_I810_SWAP DRM_IO( 0x46)
#define DRM_IOCTL_I810_COPY DRM_IOW( 0x47, drm_i810_copy_t) #define DRM_IOCTL_I810_COPY DRM_IOW( 0x47, drm_i810_copy_t)
#define DRM_IOCTL_I810_DOCOPY DRM_IO( 0x48) #define DRM_IOCTL_I810_DOCOPY DRM_IO( 0x48)
#define DRM_IOCTL_I810_OV0INFO DRM_IOR( 0x49, drm_i810_overlay_t)
#define DRM_IOCTL_I810_FSTATUS DRM_IO ( 0x4a)
#define DRM_IOCTL_I810_OV0FLIP DRM_IO ( 0x4b)
#define DRM_IOCTL_I810_MC DRM_IOW( 0x4c, drm_i810_mc_t)
#define DRM_IOCTL_I810_RSTATUS DRM_IO ( 0x4d )
/* Rage 128 specific ioctls */ /* Rage 128 specific ioctls */
#define DRM_IOCTL_R128_INIT DRM_IOW( 0x40, drm_r128_init_t) #define DRM_IOCTL_R128_INIT DRM_IOW( 0x40, drm_r128_init_t)
@ -483,7 +488,6 @@ typedef struct drm_scatter_gather {
#define DRM_IOCTL_RADEON_INDIRECT DRM_IOWR(0x4d, drm_radeon_indirect_t) #define DRM_IOCTL_RADEON_INDIRECT DRM_IOWR(0x4d, drm_radeon_indirect_t)
#define DRM_IOCTL_RADEON_TEXTURE DRM_IOWR(0x4e, drm_radeon_texture_t) #define DRM_IOCTL_RADEON_TEXTURE DRM_IOWR(0x4e, drm_radeon_texture_t)
#ifdef CONFIG_DRM_SIS
/* SiS specific ioctls */ /* SiS specific ioctls */
#define SIS_IOCTL_FB_ALLOC DRM_IOWR(0x44, drm_sis_mem_t) #define SIS_IOCTL_FB_ALLOC DRM_IOWR(0x44, drm_sis_mem_t)
#define SIS_IOCTL_FB_FREE DRM_IOW( 0x45, drm_sis_mem_t) #define SIS_IOCTL_FB_FREE DRM_IOW( 0x45, drm_sis_mem_t)
@ -493,6 +497,16 @@ typedef struct drm_scatter_gather {
#define SIS_IOCTL_FLIP DRM_IOW( 0x48, drm_sis_flip_t) #define SIS_IOCTL_FLIP DRM_IOW( 0x48, drm_sis_flip_t)
#define SIS_IOCTL_FLIP_INIT DRM_IO( 0x49) #define SIS_IOCTL_FLIP_INIT DRM_IO( 0x49)
#define SIS_IOCTL_FLIP_FINAL DRM_IO( 0x50) #define SIS_IOCTL_FLIP_FINAL DRM_IO( 0x50)
#endif
/* I830 specific ioctls */
#define DRM_IOCTL_I830_INIT DRM_IOW( 0x40, drm_i830_init_t)
#define DRM_IOCTL_I830_VERTEX DRM_IOW( 0x41, drm_i830_vertex_t)
#define DRM_IOCTL_I830_CLEAR DRM_IOW( 0x42, drm_i830_clear_t)
#define DRM_IOCTL_I830_FLUSH DRM_IO ( 0x43)
#define DRM_IOCTL_I830_GETAGE DRM_IO ( 0x44)
#define DRM_IOCTL_I830_GETBUF DRM_IOWR(0x45, drm_i830_dma_t)
#define DRM_IOCTL_I830_SWAP DRM_IO ( 0x46)
#define DRM_IOCTL_I830_COPY DRM_IOW( 0x47, drm_i830_copy_t)
#define DRM_IOCTL_I830_DOCOPY DRM_IO ( 0x48)
#endif #endif

View File

@ -27,6 +27,10 @@
* Authors: * Authors:
* Rickard E. (Rik) Faith <faith@valinux.com> * Rickard E. (Rik) Faith <faith@valinux.com>
* Gareth Hughes <gareth@valinux.com> * Gareth Hughes <gareth@valinux.com>
* ChangeLog:
* 2001-11-16 Torsten Duwe <duwe@caldera.de>
* added context constructor/destructor hooks,
* needed by SiS driver's memory management.
*/ */
#define __NO_VERSION__ #define __NO_VERSION__
@ -316,6 +320,10 @@ int DRM(addctx)( struct inode *inode, struct file *filp,
/* Should this return -EBUSY instead? */ /* Should this return -EBUSY instead? */
return -ENOMEM; return -ENOMEM;
} }
#ifdef DRIVER_CTX_CTOR
if ( ctx.handle != DRM_KERNEL_CONTEXT )
DRIVER_CTX_CTOR(ctx.handle); /* XXX: also pass dev ? */
#endif
if ( copy_to_user( (drm_ctx_t *)arg, &ctx, sizeof(ctx) ) ) if ( copy_to_user( (drm_ctx_t *)arg, &ctx, sizeof(ctx) ) )
return -EFAULT; return -EFAULT;
@ -390,6 +398,9 @@ int DRM(rmctx)( struct inode *inode, struct file *filp,
priv->remove_auth_on_close = 1; priv->remove_auth_on_close = 1;
} }
if ( ctx.handle != DRM_KERNEL_CONTEXT ) { if ( ctx.handle != DRM_KERNEL_CONTEXT ) {
#ifdef DRIVER_CTX_DTOR
DRIVER_CTX_DTOR(ctx.handle); /* XXX: also pass dev ? */
#endif
DRM(ctxbitmap_free)( dev, ctx.handle ); DRM(ctxbitmap_free)( dev, ctx.handle );
} }

View File

@ -125,21 +125,31 @@ ssize_t DRM(read)(struct file *filp, char *buf, size_t count, loff_t *off)
int avail; int avail;
int send; int send;
int cur; int cur;
DECLARE_WAITQUEUE(wait, current);
DRM_DEBUG("%p, %p\n", dev->buf_rp, dev->buf_wp); DRM_DEBUG("%p, %p\n", dev->buf_rp, dev->buf_wp);
add_wait_queue(&dev->buf_readers, &wait);
set_current_state(TASK_INTERRUPTIBLE);
while (dev->buf_rp == dev->buf_wp) { while (dev->buf_rp == dev->buf_wp) {
DRM_DEBUG(" sleeping\n"); DRM_DEBUG(" sleeping\n");
if (filp->f_flags & O_NONBLOCK) { if (filp->f_flags & O_NONBLOCK) {
remove_wait_queue(&dev->buf_readers, &wait);
set_current_state(TASK_RUNNING);
return -EAGAIN; return -EAGAIN;
} }
interruptible_sleep_on(&dev->buf_readers); schedule(); /* wait for dev->buf_readers */
if (signal_pending(current)) { if (signal_pending(current)) {
DRM_DEBUG(" interrupted\n"); DRM_DEBUG(" interrupted\n");
remove_wait_queue(&dev->buf_readers, &wait);
set_current_state(TASK_RUNNING);
return -ERESTARTSYS; return -ERESTARTSYS;
} }
DRM_DEBUG(" awake\n"); DRM_DEBUG(" awake\n");
set_current_state(TASK_INTERRUPTIBLE);
} }
remove_wait_queue(&dev->buf_readers, &wait);
set_current_state(TASK_RUNNING);
left = (dev->buf_rp + DRM_BSZ - dev->buf_wp) % DRM_BSZ; left = (dev->buf_rp + DRM_BSZ - dev->buf_wp) % DRM_BSZ;
avail = DRM_BSZ - left; avail = DRM_BSZ - left;

View File

@ -86,6 +86,7 @@ static inline void i810_print_status_page(drm_device_t *dev)
DRM_DEBUG( "hw_status: LpRing Head ptr : %x\n", temp[1]); DRM_DEBUG( "hw_status: LpRing Head ptr : %x\n", temp[1]);
DRM_DEBUG( "hw_status: IRing Head ptr : %x\n", temp[2]); DRM_DEBUG( "hw_status: IRing Head ptr : %x\n", temp[2]);
DRM_DEBUG( "hw_status: Reserved : %x\n", temp[3]); DRM_DEBUG( "hw_status: Reserved : %x\n", temp[3]);
DRM_DEBUG( "hw_status: Last Render: %x\n", temp[4]);
DRM_DEBUG( "hw_status: Driver Counter : %d\n", temp[5]); DRM_DEBUG( "hw_status: Driver Counter : %d\n", temp[5]);
for(i = 6; i < dma->buf_count + 6; i++) { for(i = 6; i < dma->buf_count + 6; i++) {
DRM_DEBUG( "buffer status idx : %d used: %d\n", i - 6, temp[i]); DRM_DEBUG( "buffer status idx : %d used: %d\n", i - 6, temp[i]);
@ -471,6 +472,9 @@ static int i810_dma_initialize(drm_device_t *dev,
dev_priv->back_offset = init->back_offset; dev_priv->back_offset = init->back_offset;
dev_priv->depth_offset = init->depth_offset; dev_priv->depth_offset = init->depth_offset;
dev_priv->overlay_offset = init->overlay_offset;
dev_priv->overlay_physical = init->overlay_physical;
dev_priv->front_di1 = init->front_offset | init->pitch_bits; dev_priv->front_di1 = init->front_offset | init->pitch_bits;
dev_priv->back_di1 = init->back_offset | init->pitch_bits; dev_priv->back_di1 = init->back_offset | init->pitch_bits;
dev_priv->zi1 = init->depth_offset | init->pitch_bits; dev_priv->zi1 = init->depth_offset | init->pitch_bits;
@ -1259,3 +1263,156 @@ int i810_docopy(struct inode *inode, struct file *filp, unsigned int cmd,
if(VM_DONTCOPY == 0) return 1; if(VM_DONTCOPY == 0) return 1;
return 0; return 0;
} }
static void i810_dma_dispatch_mc(drm_device_t *dev, drm_buf_t *buf, int used,
unsigned int last_render)
{
drm_i810_private_t *dev_priv = dev->dev_private;
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
unsigned long address = (unsigned long)buf->bus_address;
unsigned long start = address - dev->agp->base;
int u;
RING_LOCALS;
i810_kernel_lost_context(dev);
u = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
I810_BUF_HARDWARE);
if(u != I810_BUF_CLIENT) {
DRM_DEBUG("MC found buffer that isn't mine!\n");
}
if (used > 4*1024)
used = 0;
sarea_priv->dirty = 0x7f;
DRM_DEBUG("dispatch mc addr 0x%lx, used 0x%x\n",
address, used);
dev_priv->counter++;
DRM_DEBUG("dispatch counter : %ld\n", dev_priv->counter);
DRM_DEBUG("i810_dma_dispatch_mc\n");
DRM_DEBUG("start : %lx\n", start);
DRM_DEBUG("used : %d\n", used);
DRM_DEBUG("start + used - 4 : %ld\n", start + used - 4);
if (buf_priv->currently_mapped == I810_BUF_MAPPED) {
if (used & 4) {
*(u32 *)((u32)buf_priv->virtual + used) = 0;
used += 4;
}
i810_unmap_buffer(buf);
}
BEGIN_LP_RING(4);
OUT_RING( CMD_OP_BATCH_BUFFER );
OUT_RING( start | BB1_PROTECTED );
OUT_RING( start + used - 4 );
OUT_RING( 0 );
ADVANCE_LP_RING();
BEGIN_LP_RING(8);
OUT_RING( CMD_STORE_DWORD_IDX );
OUT_RING( buf_priv->my_use_idx );
OUT_RING( I810_BUF_FREE );
OUT_RING( 0 );
OUT_RING( CMD_STORE_DWORD_IDX );
OUT_RING( 16 );
OUT_RING( last_render );
OUT_RING( 0 );
ADVANCE_LP_RING();
}
int i810_dma_mc(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_device_dma_t *dma = dev->dma;
drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
u32 *hw_status = (u32 *)dev_priv->hw_status_page;
drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
dev_priv->sarea_priv;
drm_i810_mc_t mc;
if (copy_from_user(&mc, (drm_i810_mc_t *)arg, sizeof(mc)))
return -EFAULT;
if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
DRM_ERROR("i810_dma_mc called without lock held\n");
return -EINVAL;
}
i810_dma_dispatch_mc(dev, dma->buflist[mc.idx], mc.used,
mc.last_render );
atomic_add(mc.used, &dev->counts[_DRM_STAT_SECONDARY]);
atomic_inc(&dev->counts[_DRM_STAT_DMA]);
sarea_priv->last_enqueue = dev_priv->counter-1;
sarea_priv->last_dispatch = (int) hw_status[5];
return 0;
}
int i810_rstatus(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
return (int)(((u32 *)(dev_priv->hw_status_page))[4]);
}
int i810_ov0_info(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
drm_i810_overlay_t data;
data.offset = dev_priv->overlay_offset;
data.physical = dev_priv->overlay_physical;
if (copy_to_user((drm_i810_overlay_t *)arg, &data, sizeof(data)))
return -EFAULT;
return 0;
}
int i810_fstatus(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
DRM_ERROR("i810_fstatus called without lock held\n");
return -EINVAL;
}
return I810_READ(0x30008);
}
int i810_ov0_flip(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
DRM_ERROR("i810_ov0_flip called without lock held\n");
return -EINVAL;
}
/* Tell the overlay to update */
I810_WRITE(0x30000, dev_priv->overlay_physical | 0x80000000);
return 0;
}

View File

@ -112,6 +112,8 @@ typedef struct _drm_i810_init {
unsigned int front_offset; unsigned int front_offset;
unsigned int back_offset; unsigned int back_offset;
unsigned int depth_offset; unsigned int depth_offset;
unsigned int overlay_offset;
unsigned int overlay_physical;
unsigned int w; unsigned int w;
unsigned int h; unsigned int h;
unsigned int pitch; unsigned int pitch;
@ -196,4 +198,18 @@ typedef struct drm_i810_dma {
int granted; int granted;
} drm_i810_dma_t; } drm_i810_dma_t;
typedef struct _drm_i810_overlay_t {
unsigned int offset; /* Address of the Overlay Regs */
unsigned int physical;
} drm_i810_overlay_t;
typedef struct _drm_i810_mc {
int idx; /* buffer index */
int used; /* nr bytes in use */
int num_blocks; /* number of GFXBlocks */
int *length; /* List of lengths for GFXBlocks (FUTURE)*/
unsigned int last_render; /* Last Render Request */
} drm_i810_mc_t;
#endif /* _I810_DRM_H_ */ #endif /* _I810_DRM_H_ */

View File

@ -39,10 +39,10 @@
#define DRIVER_NAME "i810" #define DRIVER_NAME "i810"
#define DRIVER_DESC "Intel i810" #define DRIVER_DESC "Intel i810"
#define DRIVER_DATE "20010917" #define DRIVER_DATE "20010920"
#define DRIVER_MAJOR 1 #define DRIVER_MAJOR 1
#define DRIVER_MINOR 1 #define DRIVER_MINOR 2
#define DRIVER_PATCHLEVEL 0 #define DRIVER_PATCHLEVEL 0
#define DRIVER_IOCTLS \ #define DRIVER_IOCTLS \
@ -54,7 +54,12 @@
[DRM_IOCTL_NR(DRM_IOCTL_I810_GETBUF)] = { i810_getbuf, 1, 0 }, \ [DRM_IOCTL_NR(DRM_IOCTL_I810_GETBUF)] = { i810_getbuf, 1, 0 }, \
[DRM_IOCTL_NR(DRM_IOCTL_I810_SWAP)] = { i810_swap_bufs, 1, 0 }, \ [DRM_IOCTL_NR(DRM_IOCTL_I810_SWAP)] = { i810_swap_bufs, 1, 0 }, \
[DRM_IOCTL_NR(DRM_IOCTL_I810_COPY)] = { i810_copybuf, 1, 0 }, \ [DRM_IOCTL_NR(DRM_IOCTL_I810_COPY)] = { i810_copybuf, 1, 0 }, \
[DRM_IOCTL_NR(DRM_IOCTL_I810_DOCOPY)] = { i810_docopy, 1, 0 }, [DRM_IOCTL_NR(DRM_IOCTL_I810_DOCOPY)] = { i810_docopy, 1, 0 }, \
[DRM_IOCTL_NR(DRM_IOCTL_I810_OV0INFO)] = { i810_ov0_info, 1, 0 }, \
[DRM_IOCTL_NR(DRM_IOCTL_I810_FSTATUS)] = { i810_fstatus, 1, 0 }, \
[DRM_IOCTL_NR(DRM_IOCTL_I810_OV0FLIP)] = { i810_ov0_flip, 1, 0 }, \
[DRM_IOCTL_NR(DRM_IOCTL_I810_MC)] = { i810_dma_mc, 1, 1 }, \
[DRM_IOCTL_NR(DRM_IOCTL_I810_RSTATUS)] = { i810_rstatus, 1, 0 }
#define __HAVE_COUNTERS 4 #define __HAVE_COUNTERS 4

View File

@ -73,6 +73,8 @@ typedef struct drm_i810_private {
int back_offset; int back_offset;
int depth_offset; int depth_offset;
int overlay_offset;
int overlay_physical;
int w, h; int w, h;
int pitch; int pitch;
} drm_i810_private_t; } drm_i810_private_t;
@ -94,6 +96,18 @@ extern int i810_copybuf(struct inode *inode, struct file *filp,
extern int i810_docopy(struct inode *inode, struct file *filp, extern int i810_docopy(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg); unsigned int cmd, unsigned long arg);
extern int i810_rstatus(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int i810_ov0_info(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int i810_fstatus(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int i810_ov0_flip(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int i810_dma_mc(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern void i810_dma_quiescent(drm_device_t *dev); extern void i810_dma_quiescent(drm_device_t *dev);
#define I810_VERBOSE 0 #define I810_VERBOSE 0

View File

@ -22,14 +22,9 @@
#define CONFIG_AGP 0 #define CONFIG_AGP 0
#endif #endif
#ifndef CONFIG_FB_SIS
#define CONFIG_FB_SIS 0
#endif
SMP = CONFIG_SMP SMP = CONFIG_SMP
MODULES = CONFIG_MODULES MODULES = CONFIG_MODULES
MODVERSIONS = CONFIG_MODVERSIONS MODVERSIONS = CONFIG_MODVERSIONS
AGP = CONFIG_AGP AGP = CONFIG_AGP
AGP_MODULE = CONFIG_AGP_MODULE AGP_MODULE = CONFIG_AGP_MODULE
RELEASE = UTS_RELEASE RELEASE = UTS_RELEASE
SIS = CONFIG_FB_SIS

View File

@ -1058,7 +1058,7 @@ static int radeon_cp_dispatch_texture( drm_device_t *dev,
DRM_ERROR( "EFAULT on tex->image\n" ); DRM_ERROR( "EFAULT on tex->image\n" );
return -EFAULT; return -EFAULT;
} }
} else if ( size < 4 ) { } else if ( size < 4 && size > 0 ) {
size = 4; size = 4;
} }

View File

@ -24,7 +24,7 @@
* DEALINGS IN THE SOFTWARE. * DEALINGS IN THE SOFTWARE.
* *
*/ */
/* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/sis.h,v 1.1 2001/05/19 18:29:22 dawes Exp $ */ /* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/sis.h,v 1.2 2001/12/19 21:25:59 dawes Exp $ */
#ifndef __SIS_H__ #ifndef __SIS_H__
#define __SIS_H__ #define __SIS_H__
@ -47,4 +47,10 @@
#define DRIVER_AGP_BUFFERS_MAP( dev ) \ #define DRIVER_AGP_BUFFERS_MAP( dev ) \
((drm_sis_private_t *)((dev)->dev_private))->buffers ((drm_sis_private_t *)((dev)->dev_private))->buffers
extern int sis_init_context(int context);
extern int sis_final_context(int context);
#define DRIVER_CTX_CTOR sis_init_context
#define DRIVER_CTX_DTOR sis_final_context
#endif #endif

View File

@@ -40,12 +40,12 @@
 #define DRIVER_PATCHLEVEL 0
 #define DRIVER_IOCTLS \
-[DRM_IOCTL_NR(SIS_IOCTL_FB_ALLOC)] = { sis_fb_alloc, 1, 1 }, \
-[DRM_IOCTL_NR(SIS_IOCTL_FB_FREE)] = { sis_fb_free, 1, 1 }, \
+[DRM_IOCTL_NR(SIS_IOCTL_FB_ALLOC)] = { sis_fb_alloc, 1, 0 }, \
+[DRM_IOCTL_NR(SIS_IOCTL_FB_FREE)] = { sis_fb_free, 1, 0 }, \
 /* AGP Memory Management */ \
-[DRM_IOCTL_NR(SIS_IOCTL_AGP_INIT)] = { sisp_agp_init, 1, 1 }, \
-[DRM_IOCTL_NR(SIS_IOCTL_AGP_ALLOC)] = { sisp_agp_alloc, 1, 1 }, \
-[DRM_IOCTL_NR(SIS_IOCTL_AGP_FREE)] = { sisp_agp_free, 1, 1 }
+[DRM_IOCTL_NR(SIS_IOCTL_AGP_INIT)] = { sisp_agp_init, 1, 0 }, \
+[DRM_IOCTL_NR(SIS_IOCTL_AGP_ALLOC)] = { sisp_agp_alloc, 1, 0 }, \
+[DRM_IOCTL_NR(SIS_IOCTL_AGP_FREE)] = { sisp_agp_free, 1, 0 }
 #if 0 /* these don't appear to be defined */
 /* SIS Stereo */
 [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = { sis_control, 1, 1 },
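The only change in this table is the third field of each descriptor. In this generation of the DRM code each entry is roughly { handler, auth_needed, root_only }, so flipping the trailing 1 to 0 keeps the "authenticated client" requirement but drops the "must be root" one for the SiS memory ioctls; that reading of the flags is my interpretation, not something spelled out in the diff. A tiny standalone sketch of such a dispatch table (all names hypothetical):

    #include <stdio.h>

    typedef int (*ioctl_handler_t)(void *arg);

    /* Approximate shape of a DRM ioctl descriptor in this era. */
    typedef struct {
            ioctl_handler_t func;
            int             auth_needed;   /* caller must be an authenticated client */
            int             root_only;     /* caller must additionally be root       */
    } ioctl_desc_t;

    static int sis_fb_alloc_stub(void *arg) { (void)arg; return 0; }

    static const ioctl_desc_t table[] = {
            { sis_fb_alloc_stub, 1, 0 },   /* was { ..., 1, 1 } before this change */
    };

    static int dispatch(const ioctl_desc_t *d, int authed, int is_root, void *arg)
    {
            if (d->auth_needed && !authed)
                    return -1;             /* stand-in for an EACCES-style error */
            if (d->root_only && !is_root)
                    return -1;
            return d->func(arg);
    }

    int main(void)
    {
            /* An authenticated, non-root client can now reach sis_fb_alloc. */
            printf("%d\n", dispatch(&table[0], 1, 0, NULL));
            return 0;
    }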

View File

@@ -33,7 +33,7 @@
 #include <linux/delay.h>
 #include <linux/errno.h>
 #include <linux/kernel.h>
-#include <linux/malloc.h>
+#include <linux/slab.h>
 #include <linux/poll.h>
 #include <asm/io.h>
 #include <linux/pci.h>

View File

@@ -72,7 +72,7 @@ static int del_alloc_set(int context, int type, unsigned int val)
 }
 /* fb management via fb device */
-#if 0
+#if 1
 int sis_fb_alloc(struct inode *inode, struct file *filp, unsigned int cmd,
 unsigned long arg)
 {
@@ -90,7 +90,7 @@ int sis_fb_alloc(struct inode *inode, struct file *filp, unsigned int cmd,
 fb.offset = req.offset;
 fb.free = req.offset;
 if(!add_alloc_set(fb.context, VIDEO_TYPE, fb.free)){
-DRM_DEBUG("adding to allocation set fails");
+DRM_DEBUG("adding to allocation set fails\n");
 sis_free(req.offset);
 retval = -1;
 }
@@ -185,7 +185,7 @@ int sisp_agp_alloc(struct inode *inode, struct file *filp, unsigned int cmd,
 agp.offset = block->ofs;
 agp.free = (unsigned int)block;
 if(!add_alloc_set(agp.context, AGP_TYPE, agp.free)){
-DRM_DEBUG("adding to allocation set fails");
+DRM_DEBUG("adding to allocation set fails\n");
 mmFreeMem((PMemBlock)agp.free);
 retval = -1;
 }
@@ -279,9 +279,7 @@ int sis_final_context(int context)
 retval = setFirst(set, &item);
 while(retval){
 DRM_DEBUG("free video memory 0x%x\n", item);
-#if 0
 sis_free(item);
-#endif
 retval = setNext(set, &item);
 }
 setDestroy(set);
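With the #if 0 removed, sis_final_context now walks the dying context's allocation set and actually returns each video-memory block before destroying the set. The same bookkeeping pattern is sketched below with an ordinary linked list standing in for the setFirst/setNext/setDestroy API; every name in the sketch is a stand-in, not the driver's code.

    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for one remembered allocation. */
    struct alloc_node {
            unsigned int       item;    /* offset handed out by the allocator */
            struct alloc_node *next;
    };

    static void sis_free_stub(unsigned int item)
    {
            printf("free video memory 0x%x\n", item);
    }

    /* Teardown: free every block the context still owns, then the set itself. */
    static void final_context(struct alloc_node *set)
    {
            while (set) {
                    struct alloc_node *next = set->next;

                    sis_free_stub(set->item);   /* no longer skipped by #if 0 */
                    free(set);
                    set = next;
            }
    }

    int main(void)
    {
            struct alloc_node *b = malloc(sizeof(*b));
            struct alloc_node *a = malloc(sizeof(*a));

            b->item = 0x2000; b->next = NULL;
            a->item = 0x1000; a->next = b;
            final_context(a);
            return 0;
    }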

View File

@@ -104,9 +104,8 @@ typedef struct drm_tex_region {
 #include "i810_drm.h"
 #include "r128_drm.h"
 #include "radeon_drm.h"
-#ifdef CONFIG_DRM_SIS
 #include "sis_drm.h"
-#endif
+#include "i830_drm.h"
 typedef struct drm_version {
 int version_major; /* Major version */
@@ -449,6 +448,12 @@ typedef struct drm_scatter_gather {
 #define DRM_IOCTL_I810_SWAP DRM_IO( 0x46)
 #define DRM_IOCTL_I810_COPY DRM_IOW( 0x47, drm_i810_copy_t)
 #define DRM_IOCTL_I810_DOCOPY DRM_IO( 0x48)
+#define DRM_IOCTL_I810_OV0INFO DRM_IOR( 0x49, drm_i810_overlay_t)
+#define DRM_IOCTL_I810_FSTATUS DRM_IO ( 0x4a)
+#define DRM_IOCTL_I810_OV0FLIP DRM_IO ( 0x4b)
+#define DRM_IOCTL_I810_MC DRM_IOW( 0x4c, drm_i810_mc_t)
+#define DRM_IOCTL_I810_RSTATUS DRM_IO ( 0x4d )
 /* Rage 128 specific ioctls */
 #define DRM_IOCTL_R128_INIT DRM_IOW( 0x40, drm_r128_init_t)
@@ -483,7 +488,6 @@ typedef struct drm_scatter_gather {
 #define DRM_IOCTL_RADEON_INDIRECT DRM_IOWR(0x4d, drm_radeon_indirect_t)
 #define DRM_IOCTL_RADEON_TEXTURE DRM_IOWR(0x4e, drm_radeon_texture_t)
-#ifdef CONFIG_DRM_SIS
 /* SiS specific ioctls */
 #define SIS_IOCTL_FB_ALLOC DRM_IOWR(0x44, drm_sis_mem_t)
 #define SIS_IOCTL_FB_FREE DRM_IOW( 0x45, drm_sis_mem_t)
@@ -493,6 +497,16 @@ typedef struct drm_scatter_gather {
 #define SIS_IOCTL_FLIP DRM_IOW( 0x48, drm_sis_flip_t)
 #define SIS_IOCTL_FLIP_INIT DRM_IO( 0x49)
 #define SIS_IOCTL_FLIP_FINAL DRM_IO( 0x50)
-#endif
+/* I830 specific ioctls */
+#define DRM_IOCTL_I830_INIT DRM_IOW( 0x40, drm_i830_init_t)
+#define DRM_IOCTL_I830_VERTEX DRM_IOW( 0x41, drm_i830_vertex_t)
+#define DRM_IOCTL_I830_CLEAR DRM_IOW( 0x42, drm_i830_clear_t)
+#define DRM_IOCTL_I830_FLUSH DRM_IO ( 0x43)
+#define DRM_IOCTL_I830_GETAGE DRM_IO ( 0x44)
+#define DRM_IOCTL_I830_GETBUF DRM_IOWR(0x45, drm_i830_dma_t)
+#define DRM_IOCTL_I830_SWAP DRM_IO ( 0x46)
+#define DRM_IOCTL_I830_COPY DRM_IOW( 0x47, drm_i830_copy_t)
+#define DRM_IOCTL_I830_DOCOPY DRM_IO ( 0x48)
 #endif
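The new I830 numbers reuse the 0x40-0x48 device-private range, just as the i810 and r128 sets do. A minimal client-side sketch that flushes the ring through the new interface; the device node path is an assumption, and error handling is kept to the bare minimum.

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    #include "drm.h"    /* DRM_IOCTL_I830_FLUSH from the hunk above */

    int main(void)
    {
            int fd = open("/dev/dri/card0", O_RDWR);   /* path is an assumption */

            if (fd < 0)
                    return 1;
            /* Argument-less ioctl (DRM_IO), so no payload struct is needed. */
            ioctl(fd, DRM_IOCTL_I830_FLUSH, 0);
            close(fd);
            return 0;
    }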

View File

@@ -104,9 +104,8 @@ typedef struct drm_tex_region {
 #include "i810_drm.h"
 #include "r128_drm.h"
 #include "radeon_drm.h"
-#ifdef CONFIG_DRM_SIS
 #include "sis_drm.h"
-#endif
+#include "i830_drm.h"
 typedef struct drm_version {
 int version_major; /* Major version */
@@ -449,6 +448,12 @@ typedef struct drm_scatter_gather {
 #define DRM_IOCTL_I810_SWAP DRM_IO( 0x46)
 #define DRM_IOCTL_I810_COPY DRM_IOW( 0x47, drm_i810_copy_t)
 #define DRM_IOCTL_I810_DOCOPY DRM_IO( 0x48)
+#define DRM_IOCTL_I810_OV0INFO DRM_IOR( 0x49, drm_i810_overlay_t)
+#define DRM_IOCTL_I810_FSTATUS DRM_IO ( 0x4a)
+#define DRM_IOCTL_I810_OV0FLIP DRM_IO ( 0x4b)
+#define DRM_IOCTL_I810_MC DRM_IOW( 0x4c, drm_i810_mc_t)
+#define DRM_IOCTL_I810_RSTATUS DRM_IO ( 0x4d )
 /* Rage 128 specific ioctls */
 #define DRM_IOCTL_R128_INIT DRM_IOW( 0x40, drm_r128_init_t)
@@ -483,7 +488,6 @@ typedef struct drm_scatter_gather {
 #define DRM_IOCTL_RADEON_INDIRECT DRM_IOWR(0x4d, drm_radeon_indirect_t)
 #define DRM_IOCTL_RADEON_TEXTURE DRM_IOWR(0x4e, drm_radeon_texture_t)
-#ifdef CONFIG_DRM_SIS
 /* SiS specific ioctls */
 #define SIS_IOCTL_FB_ALLOC DRM_IOWR(0x44, drm_sis_mem_t)
 #define SIS_IOCTL_FB_FREE DRM_IOW( 0x45, drm_sis_mem_t)
@@ -493,6 +497,16 @@ typedef struct drm_scatter_gather {
 #define SIS_IOCTL_FLIP DRM_IOW( 0x48, drm_sis_flip_t)
 #define SIS_IOCTL_FLIP_INIT DRM_IO( 0x49)
 #define SIS_IOCTL_FLIP_FINAL DRM_IO( 0x50)
-#endif
+/* I830 specific ioctls */
+#define DRM_IOCTL_I830_INIT DRM_IOW( 0x40, drm_i830_init_t)
+#define DRM_IOCTL_I830_VERTEX DRM_IOW( 0x41, drm_i830_vertex_t)
+#define DRM_IOCTL_I830_CLEAR DRM_IOW( 0x42, drm_i830_clear_t)
+#define DRM_IOCTL_I830_FLUSH DRM_IO ( 0x43)
+#define DRM_IOCTL_I830_GETAGE DRM_IO ( 0x44)
+#define DRM_IOCTL_I830_GETBUF DRM_IOWR(0x45, drm_i830_dma_t)
+#define DRM_IOCTL_I830_SWAP DRM_IO ( 0x46)
+#define DRM_IOCTL_I830_COPY DRM_IOW( 0x47, drm_i830_copy_t)
+#define DRM_IOCTL_I830_DOCOPY DRM_IO ( 0x48)
 #endif