First DRI release of 3dfx driver.

main
Daryll Strauss 1999-12-05 23:10:37 +00:00
parent a8ab34ed47
commit b6a28bfe98
32 changed files with 9397 additions and 330 deletions


@ -1,6 +1,6 @@
/* xf86drm.c -- User-level interface to DRM device
* Created: Tue Jan 5 08:16:21 1999 by faith@precisioninsight.com
* Revised: Fri Jun 18 09:52:23 1999 by faith@precisioninsight.com
* Revised: Wed Aug 4 07:54:23 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All Rights Reserved.
@ -24,8 +24,8 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/xf86drm.c,v 1.41 1999/06/21 14:31:20 faith Exp $
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/xf86drm.c,v 1.3 1999/06/27 14:08:19 dawes Exp $
* $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/xf86drm.c,v 1.43 1999/08/04 18:14:43 faith Exp $
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/xf86drm.c,v 1.4 1999/09/25 14:37:49 dawes Exp $
*
*/
@ -101,63 +101,6 @@ static char *drmStrdup(const char *s)
}
/* drm_lookup searches file for a line tagged with name, and returns the
value for that tag. If busid is NULL, the file format is that used for
/proc/devices and /proc/misc, or the first part of /proc/drm/devices:
<value> <whitespace> <tag>
If the busid is non-NULL, the file format is the extended format used
for /proc/drm/devices:
<value> <whitespace> <name> <whitespace> <busid>
If both name and busid are non-NULL, both must match. If either is
NULL, then the other is matched.
*/
static int drm_lookup(const char *file, const char *name, const char *busid)
{
FILE *str;
char buf[128];
char *pt;
int name_match;
int busid_match;
char *namept = NULL;
char *busidpt = NULL;
if (!(str = fopen(file, "r"))) return DRM_ERR_NO_DEVICE;
while (fgets(buf, sizeof(buf)-1, str)) {
buf[sizeof(buf)-1] = '\0';
for (pt = buf; *pt && isspace(*pt); ++pt); /* skip whitespace */
for (; *pt && !isspace(*pt); ++pt); /* next space or null */
if (isspace(pt[0]) && pt[1]) {
pt++;
for (; *pt && isspace(*pt); ++pt); /* skip whitespace */
namept = pt;
for (; *pt && !isspace(*pt); ++pt); /* next space or null */
if (isspace(pt[0]) && pt[1]) { /* busid present */
*pt = '\0'; /* proper termination */
pt++;
for (; *pt && isspace(*pt); ++pt); /* skip whitespace */
busidpt = pt;
for (; *pt && !isspace(*pt); ++pt); /* next space or null */
}
*pt = '\0'; /* proper termination */
name_match = name ? 0 : 1; /* match if we don't care */
busid_match = busid ? 0 : 1; /* match if we don't care */
if (name && namept && !strcmp(name, namept)) ++name_match;
if (busid && busidpt && !strcmp(busid, busidpt)) ++busid_match;
if (name_match && busid_match) {
fclose(str);
return atoi(buf); /* stops at whitespace */
}
}
}
fclose(str);
return DRM_ERR_NO_DEVICE;
}
static unsigned long drmGetKeyFromFd(int fd)
{
#ifdef XFree86LOADER
@ -205,123 +148,128 @@ static int drm_open(const char *file)
int drmAvailable(void)
{
if (!access(DRM_PROC_DRM, R_OK)) return 1;
if (!access("/proc/graphics/0", R_OK)) return 1;
return 0;
}
/* drmGetMajor tries to find the major device number for /dev/drm by
searching /proc/devices. A negative value is returned on error. */
static int drmGetMajor(void)
{
int major;
if (!drmAvailable()) return DRM_ERR_NO_DEVICE;
if ((major = drm_lookup(DRM_PROC_DEVICES, DRM_NAME, NULL)) >= 0)
return major;
return DRM_ERR_NO_DEVICE;
}
/* drmGetMinor tries to find the minor device number for name by looking in
/proc/drm/devices. A negative value is returned on error. */
static int drmGetMinor(const char *name, const char *busid)
{
int minor;
char buf[128];
if (!drmAvailable()) return DRM_ERR_NO_DEVICE;
sprintf(buf, "/proc/%s/%s", DRM_NAME, DRM_DEVICES);
if ((minor = drm_lookup(buf, name, busid))) return minor;
return 0;
}
/* drmOpen looks up the specified name and/or busid in /proc/drm/devices,
and opens the device found. The entry in /dev is created if necessary
(and if root). A file descriptor is returned. On error, the return
value is negative. */
static int drmOpen(const char *name, const char *busid)
static int drmOpenDevice(const char *path, long dev,
mode_t mode, uid_t user, gid_t group)
{
#ifdef XFree86LOADER
struct xf86stat st;
#else
struct stat st;
#endif
char path[128];
int major;
int minor;
dev_t dev;
if (!drmAvailable()) return DRM_ERR_NO_DEVICE;
if ((major = drmGetMajor()) < 0) return major;
if ((minor = drmGetMinor(name,busid)) < 0) return minor;
dev = makedev(major, minor);
if (!stat(path, &st) && st.st_rdev == dev) return drm_open(path);
if (!minor) {
sprintf(path, "/dev/%s", DRM_NAME );
} else {
sprintf(path, "/dev/%s%d", DRM_NAME, minor-1 );
}
/* Check device major/minor for match */
if (!access(path, F_OK)) {
if (stat(path, &st)) return DRM_ERR_NO_ACCESS;
if (st.st_rdev == dev) {
#if defined(XFree86Server)
chmod(path,
xf86ConfigDRI.mode ? xf86ConfigDRI.mode : DRM_DEV_MODE);
chown(path, DRM_DEV_UID,
xf86ConfigDRI.group ? xf86ConfigDRI.group : DRM_DEV_GID);
#endif
#if defined(DRM_USE_MALLOC)
chmod(path, DRM_DEV_MODE);
chown(path, DRM_DEV_UID, DRM_DEV_GID);
#endif
return drm_open(path);
}
}
/* Doesn't exist or match failed, so we
have to be root to create it. */
if (geteuid()) return DRM_ERR_NOT_ROOT;
remove(path);
if (mknod(path, S_IFCHR | DRM_DEV_MODE, dev)) {
if (mknod(path, S_IFCHR, dev)) {
remove(path);
return DRM_ERR_NOT_ROOT;
}
#if defined(XFree86Server)
chmod(path, xf86ConfigDRI.mode ? xf86ConfigDRI.mode : DRM_DEV_MODE);
chown(path, DRM_DEV_UID,
xf86ConfigDRI.group ? xf86ConfigDRI.group : DRM_DEV_GID);
#endif
#if defined(DRM_USE_MALLOC)
chmod(path, DRM_DEV_MODE);
chown(path, DRM_DEV_UID, DRM_DEV_GID);
#endif
chown(path, user, group);
chmod(path, mode);
return drm_open(path);
}
/* drmOpenDRM returns a file descriptor for the main /dev/drm control
device. The entry in /dev is created if necessary (and if root). A
file descriptor is returned. On error, the return value is negative. */
int drmOpenDRM(void)
static int drmOpenByName(const char *name)
{
return drmOpen(DRM_NAME, NULL);
int i;
char proc_name[64];
char dev_name[64];
char buf[512];
mode_t mode = DRM_DEV_MODE;
mode_t dirmode;
gid_t group = DRM_DEV_GID;
uid_t user = DRM_DEV_UID;
int fd;
char *pt;
char *driver = NULL;
char *devstring;
long dev = 0;
int retcode;
#if defined(XFree86Server)
mode = xf86ConfigDRI.mode ? xf86ConfigDRI.mode : DRM_DEV_MODE;
group = xf86ConfigDRI.group ? xf86ConfigDRI.group : DRM_DEV_GID;
#endif
if (!geteuid()) {
dirmode = mode;
if (dirmode & S_IRUSR) dirmode |= S_IXUSR;
if (dirmode & S_IRGRP) dirmode |= S_IXGRP;
if (dirmode & S_IROTH) dirmode |= S_IXOTH;
dirmode &= ~(S_IWGRP | S_IWOTH);
mkdir("/dev/graphics", 0);
chown("/dev/graphics", user, group);
chmod("/dev/graphics", dirmode);
}
for (i = 0; i < 8; i++) {
sprintf(proc_name, "/proc/graphics/%d/name", i);
sprintf(dev_name, "/dev/graphics/card%d", i);
if ((fd = open(proc_name, 0, 0)) >= 0) {
retcode = read(fd, buf, sizeof(buf)-1);
close(fd);
if (retcode) {
buf[retcode-1] = '\0';
for (driver = pt = buf; *pt && *pt != ' '; ++pt)
;
if (*pt) { /* Device is next */
*pt = '\0';
if (!strcmp(driver, name)) { /* Match */
for (devstring = ++pt; *pt && *pt != ' '; ++pt)
;
if (*pt) { /* Found busid */
return drmOpenByBusid(++pt);
} else { /* No busid */
dev = strtol(devstring, NULL, 0);
return drmOpenDevice(dev_name, dev,
mode, user, group);
}
}
}
}
} else remove(dev_name);
}
return -1;
}
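Judging from the parsing loop above, each /proc/graphics/<n>/name entry is assumed to be a single newline-terminated line of the form "<driver> <device-number> [<busid>]": the first token is matched against the requested name, the second is parsed with strtol, and anything after a further space is treated as a bus id and handed to drmOpenByBusid. A purely illustrative entry might look like:
tdfx 0xa200 PCI:1:0:0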
static int drmOpenByBusid(const char *busid)
{
int i;
char dev_name[64];
char *buf;
int fd;
for (i = 0; i < 8; i++) {
sprintf(dev_name, "/dev/graphics/card%d", i);
if ((fd = drm_open(dev_name)) >= 0) {
buf = drmGetBusid(fd);
if (buf && !strcmp(buf, busid)) {
drmFreeBusid(buf);
return fd;
}
if (buf) drmFreeBusid(buf);
close(fd);
}
}
return -1;
}
/* drmClose closes the file descriptor returned from drmOpen. */
/* drmOpen looks up the specified name and busid, and opens the device
found. The entry in /dev/graphics is created if necessary (and if root).
A file descriptor is returned. On error, the return value is
negative. */
int drmCloseDRM(int fd)
int drmOpen(const char *name, const char *busid)
{
return close(fd);
if (busid) return drmOpenByBusid(busid);
return drmOpenByName(name);
}
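A minimal, hypothetical usage sketch of the reworked entry points (error handling is kept to a bare minimum, the "PCI:1:0:0" bus id below is illustrative, and the drmVersion fields are assumed to mirror the kernel's drm_version_t):

#include <stdio.h>
#include "xf86drm.h"

int main(void)
{
    /* Open by driver name; drmOpen(NULL, "PCI:1:0:0") would instead probe
       /dev/graphics/card0..7 for a matching bus id. */
    int           fd = drmOpen("tdfx", NULL);
    drmVersionPtr v;

    if (fd < 0) {
        fprintf(stderr, "drmOpen failed: %d\n", fd);
        return 1;
    }
    if ((v = drmGetVersion(fd))) {      /* fields assumed from drm_version_t */
        printf("%s %d.%d.%d (%s)\n", v->name, v->version_major,
               v->version_minor, v->version_patchlevel, v->date);
        drmFreeVersion(v);
    }
    drmClose(fd);
    return 0;
}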
void drmFreeVersion(drmVersionPtr v)
@ -404,137 +352,33 @@ drmVersionPtr drmGetVersion(int fd)
return retval;
}
void drmFreeVersionList(drmListPtr list)
void drmFreeBusid(const char *busid)
{
int i;
if (!list) return;
if (list->version) {
for (i = 0; i < list->count; i++) {
if (list->version[i].name) drmFree(list->version[i].name);
if (list->version[i].date) drmFree(list->version[i].date);
if (list->version[i].desc) drmFree(list->version[i].desc);
}
drmFree(list->version);
}
if (list->capability) drmFree(list->capability);
drmFree(list);
drmFree((void *)busid);
}
static void drmFreeKernelVersionList(drm_list_t *list)
char *drmGetBusid(int fd)
{
int i;
drm_unique_t u;
if (!list) return;
if (list->version) {
for (i = 0; i < list->count; i++) {
if (list->version[i].name) drmFree(list->version[i].name);
if (list->version[i].date) drmFree(list->version[i].date);
if (list->version[i].desc) drmFree(list->version[i].desc);
u.unique_len = 0;
u.unique = NULL;
}
drmFree(list->version);
}
drmFree(list);
if (ioctl(fd, DRM_IOCTL_GET_UNIQUE, &u)) return NULL;
u.unique = drmMalloc(u.unique_len + 1);
if (ioctl(fd, DRM_IOCTL_GET_UNIQUE, &u)) return NULL;
u.unique[u.unique_len] = '\0';
return u.unique;
}
/* drmList obtains a list of all drivers and capabilities via an ioctl. */
drmListPtr drmGetVersionList(int fd)
int drmSetBusid(int fd, const char *busid)
{
drmListPtr retval;
drm_list_t *list = drmMalloc(sizeof(*list));
int i;
drm_unique_t u;
list->count = 0;
u.unique = (char *)busid;
u.unique_len = strlen(busid);
/* First, get the count */
if (ioctl(fd, DRM_IOCTL_LIST, list)) {
drmFreeKernelVersionList(list);
return NULL;
}
/* Next, get the version sizes */
for (i = 0; i < list->count; i++) {
list->version
= drmMalloc(list->count * sizeof(*list->version));
list->version[i].name_len = 0;
list->version[i].name = NULL;
list->version[i].date_len = 0;
list->version[i].date = NULL;
list->version[i].desc_len = 0;
list->version[i].desc = NULL;
}
if (ioctl(fd, DRM_IOCTL_LIST, list)) {
drmFreeKernelVersionList(list);
return NULL;
}
/* Now, allocate space and get the data */
for (i = 0; i < list->count; i++) {
if (list->version[i].name_len)
list->version[i].name = drmMalloc(list->version[i].name_len + 1);
if (list->version[i].date_len)
list->version[i].date = drmMalloc(list->version[i].date_len + 1);
if (list->version[i].desc_len)
list->version[i].desc = drmMalloc(list->version[i].desc_len + 1);
}
if (ioctl(fd, DRM_IOCTL_LIST, list)) {
drmFreeKernelVersionList(list);
return NULL;
}
/* The results might not be null-terminated
strings, so terminate them. */
for (i = 0; i < list->count; i++) {
if (list->version[i].name_len)
list->version[i].name[list->version[i].name_len] = '\0';
if (list->version[i].date_len)
list->version[i].date[list->version[i].date_len] = '\0';
if (list->version[i].desc_len)
list->version[i].desc[list->version[i].desc_len] = '\0';
}
/* Now, copy it all back into the
client-visible data structure... */
retval = drmMalloc(sizeof(*retval));
retval->count = list->count;
retval->version = drmMalloc(list->count * sizeof(*retval->version));
retval->capability = drmMalloc(list->count * sizeof(*retval->capability));
for (i = 0; i < list->count; i++) {
drmCopyVersion(&retval->version[i], &list->version[i]);
}
drmFreeKernelVersionList(list);
return retval;
}
int drmCreateSub(int fd, const char *name, const char *busid)
{
drm_request_t request;
request.device_name = name;
request.device_busid = busid;
if (ioctl(fd, DRM_IOCTL_CREATE, &request)) {
return -errno;
}
return 0;
}
int drmDestroySub(int fd, const char *busid)
{
drm_request_t request;
request.device_busid = busid;
request.device_major = drmGetMajor();
request.device_minor = drmGetMinor(NULL, busid);
if (ioctl(fd, DRM_IOCTL_DESTROY, &request)) {
return -errno;
}
if (ioctl(fd, DRM_IOCTL_SET_UNIQUE, &u)) return -errno;
return 0;
}
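drmGetBusid issues DRM_IOCTL_GET_UNIQUE twice: first with a zero-length buffer so the kernel only reports the string length, then again with a buffer of that size. A hypothetical sketch of the intended round trip (the bus-id string is illustrative, and drmSetBusid needs root because SET_UNIQUE is a root-only ioctl):

#include <stdio.h>
#include "xf86drm.h"

static void example_record_busid(int fd)
{
    /* Server/root side: record the bus id once; clients can later read it
       back and reuse it with drmOpen(NULL, busid). */
    if (drmSetBusid(fd, "PCI:1:0:0") == 0) {   /* illustrative bus id */
        char *busid = drmGetBusid(fd);         /* two-pass GET_UNIQUE */
        if (busid) {
            printf("bus id: %s\n", busid);
            drmFreeBusid(busid);
        }
    }
}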
@ -634,12 +478,7 @@ int drmFreeBufs(int fd, int count, int *list)
return 0;
}
int drmOpenSub(const char *busid)
{
return drmOpen(NULL, busid);
}
int drmCloseSub(int fd)
int drmClose(int fd)
{
unsigned long key = drmGetKeyFromFd(fd);
drmHashEntry *entry = drmGetEntry(fd);
@ -923,58 +762,12 @@ int drmError(int err, const char *label)
return 1;
}
int drmCtlAddCommand(int fd, drmCtlDesc desc, int count, int *inst)
{
drm_control_t ctl;
ctl.func = DRM_ADD_COMMAND;
ctl.irq = 0;
ctl.count = count;
ctl.inst = inst;
switch (desc) {
case DRM_IH_PRE_INST: ctl.desc = _DRM_IH_PRE_INST; break;
case DRM_IH_POST_INST: ctl.desc = _DRM_IH_POST_INST; break;
case DRM_IH_SERVICE: ctl.desc = _DRM_IH_SERVICE; break;
case DRM_IH_PRE_UNINST: ctl.desc = _DRM_IH_PRE_UNINST; break;
case DRM_IH_POST_UNINST: ctl.desc = _DRM_IH_POST_UNINST; break;
case DRM_DMA_DISPATCH: ctl.desc = _DRM_DMA_DISPATCH; break;
case DRM_DMA_READY: ctl.desc = _DRM_DMA_READY; break;
case DRM_DMA_IS_READY: ctl.desc = _DRM_DMA_IS_READY; break;
case DRM_DMA_QUIESCENT: ctl.desc = _DRM_DMA_QUIESCENT; break;
default:
return -EINVAL;
}
if (ioctl(fd, DRM_IOCTL_CONTROL, &ctl)) return -errno;
return 0;
}
int drmCtlRemoveCommands(int fd)
{
drm_control_t ctl;
drm_desc_t i;
for (i = 0; i < DRM_DESC_MAX; i++) {
ctl.func = DRM_RM_COMMAND;
ctl.desc = i;
ctl.irq = 0;
ctl.count = 0;
ctl.inst = NULL;
ioctl(fd, DRM_IOCTL_CONTROL, &ctl);
}
return 0;
}
int drmCtlInstHandler(int fd, int irq)
{
drm_control_t ctl;
ctl.func = DRM_INST_HANDLER;
ctl.desc = 0; /* unused */
ctl.irq = irq;
ctl.count = 0;
ctl.inst = NULL;
if (ioctl(fd, DRM_IOCTL_CONTROL, &ctl)) return -errno;
return 0;
}
@ -984,10 +777,7 @@ int drmCtlUninstHandler(int fd)
drm_control_t ctl;
ctl.func = DRM_UNINST_HANDLER;
ctl.desc = 0; /* unused */
ctl.irq = 0;
ctl.count = 0;
ctl.inst = NULL;
if (ioctl(fd, DRM_IOCTL_CONTROL, &ctl)) return -errno;
return 0;
}

28
linux-core/Makefile.kernel Normal file

@ -0,0 +1,28 @@
#
# Makefile for the drm device driver. This driver provides support for
# the Direct Rendering Infrastructure (DRI) in XFree86 4.x.
#
# Note! Dependencies are done automagically by 'make dep', which also
# removes any old dependencies. DON'T put your own dependencies here
# unless it's something special (ie not a .c file).
#
# Note 2! The CFLAGS definitions are now inherited from the
# parent makes..
#
# $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/Makefile.kernel,v 1.2 1999/09/27 14:59:24 dawes Exp $
L_TARGET := libdrm.a
L_OBJS := init.o memory.o proc.o auth.o context.o drawable.o bufs.o \
lists.o lock.o ioctl.o fops.o vm.o dma.o
M_OBJS :=
ifdef CONFIG_DRM_GAMMA
M_OBJS += gamma.o
endif
include $(TOPDIR)/Rules.make
gamma.o: gamma_drv.o gamma_dma.o $(L_TARGET)
$(LD) $(LD_RFLAG) -r -o $@ gamma_drv.o gamma_dma.o -L. -ldrm

41
linux-core/README.drm Normal file

@ -0,0 +1,41 @@
The Direct Rendering Manager (drm) is a device-independent kernel-level
device driver that provides support for the XFree86 Direct Rendering
Infrastructure (DRI).
The DRM supports the Direct Rendering Infrastructure (DRI) in four major
ways:
1. The DRM provides synchronized access to the graphics hardware via
the use of an optimized two-tiered lock.
2. The DRM enforces the DRI security policy for access to the graphics
hardware by only allowing authenticated X11 clients access to
restricted regions of memory.
3. The DRM provides a generic DMA engine, complete with multiple
queues and the ability to detect the need for an OpenGL context
switch.
4. The DRM is extensible via the use of small device-specific modules
that rely extensively on the API exported by the DRM module.
Documentation on the DRI is available from:
http://precisioninsight.com/piinsights.html
For specific information about kernel-level support, see:
The Direct Rendering Manager, Kernel Support for the Direct Rendering
Infrastructure
http://precisioninsight.com/dr/drm.html
Hardware Locking for the Direct Rendering Infrastructure
http://precisioninsight.com/dr/locking.html
A Security Analysis of the Direct Rendering Infrastructure
http://precisioninsight.com/dr/security.html
$XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/README.drm,v 1.2 1999/09/27 14:59:24 dawes Exp $

585
linux-core/drmP.h Normal file

@ -0,0 +1,585 @@
/* drmP.h -- Private header for Direct Rendering Manager -*- linux-c -*-
* Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com
* Revised: Tue Oct 12 08:51:07 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drmP.h,v 1.58 1999/08/30 13:05:00 faith Exp $
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drmP.h,v 1.1 1999/09/25 14:37:59 dawes Exp $
*
*/
#ifndef _DRM_P_H_
#define _DRM_P_H_
#ifdef __KERNEL__
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/pci.h>
#include <linux/wrapper.h>
#include <linux/version.h>
#include <asm/io.h>
#include <asm/mman.h>
#include <asm/uaccess.h>
#ifdef CONFIG_MTRR
#include <asm/mtrr.h>
#endif
#include "drm.h"
#define DRM_DEBUG_CODE 2 /* Include debugging code (if > 1, then
also include looping detection). */
#define DRM_DMA_HISTOGRAM 1 /* Make histogram of DMA latency. */
#define DRM_HASH_SIZE 16 /* Size of key hash table */
#define DRM_KERNEL_CONTEXT 0 /* Change drm_resctx if changed */
#define DRM_RESERVED_CONTEXTS 1 /* Change drm_resctx if changed */
#define DRM_LOOPING_LIMIT 5000000
#define DRM_BSZ 1024 /* Buffer size for /dev/drm? output */
#define DRM_TIME_SLICE (HZ/20) /* Time slice for GLXContexts */
#define DRM_LOCK_SLICE 1 /* Time slice for lock, in jiffies */
#define DRM_FLAG_DEBUG 0x01
#define DRM_FLAG_NOCTX 0x02
#define DRM_MEM_DMA 0
#define DRM_MEM_SAREA 1
#define DRM_MEM_DRIVER 2
#define DRM_MEM_MAGIC 3
#define DRM_MEM_IOCTLS 4
#define DRM_MEM_MAPS 5
#define DRM_MEM_VMAS 6
#define DRM_MEM_BUFS 7
#define DRM_MEM_SEGS 8
#define DRM_MEM_PAGES 9
#define DRM_MEM_FILES 10
#define DRM_MEM_QUEUES 11
#define DRM_MEM_CMDS 12
#define DRM_MEM_MAPPINGS 13
#define DRM_MEM_BUFLISTS 14
/* Backward compatibility section */
#ifndef _PAGE_PWT
/* The name of _PAGE_WT was changed to
_PAGE_PWT in Linux 2.2.6 */
#define _PAGE_PWT _PAGE_WT
#endif
/* Wait queue declarations changed in 2.3.1 */
#ifndef DECLARE_WAITQUEUE
#define DECLARE_WAITQUEUE(w,c) struct wait_queue w = { c, NULL }
typedef struct wait_queue *wait_queue_head_t;
#define init_waitqueue_head(q) *q = NULL;
#endif
#define __drm_dummy_lock(lock) (*(__volatile__ unsigned int *)lock)
#define _DRM_CAS(lock,old,new,__ret) \
do { \
int __dummy; /* Can't mark eax as clobbered */ \
__asm__ __volatile__( \
"lock ; cmpxchg %4,%1\n\t" \
"setnz %0" \
: "=d" (__ret), \
"=m" (__drm_dummy_lock(lock)), \
"=a" (__dummy) \
: "2" (old), \
"r" (new)); \
} while (0)
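A hypothetical sketch of the retry loop this macro is meant to support; the real lock-acquisition code lives in lock.c (not part of this excerpt), and the _DRM_LOCK_HELD/_DRM_LOCK_CONT bits are assumed to come from drm.h:

static int example_lock_take(__volatile__ unsigned int *lock,
                             unsigned int context)
{
    /* _DRM_LOCK_HELD and _DRM_LOCK_CONT are assumed from drm.h. */
    unsigned int old, new;
    int          failed;

    do {
        old = *lock;
        if (old & _DRM_LOCK_HELD) new = old | _DRM_LOCK_CONT;    /* contended */
        else                      new = context | _DRM_LOCK_HELD;
        _DRM_CAS(lock, old, new, failed);  /* failed != 0 => lost the race */
    } while (failed);

    return !(old & _DRM_LOCK_HELD);        /* nonzero if we took the lock */
}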
/* Macros to make printk easier */
#define DRM_ERROR(fmt, arg...) \
printk(KERN_ERR "[" DRM_NAME ":" __FUNCTION__ "] *ERROR* " fmt , ##arg)
#define DRM_MEM_ERROR(area, fmt, arg...) \
printk(KERN_ERR "[" DRM_NAME ":" __FUNCTION__ ":%s] *ERROR* " fmt , \
drm_mem_stats[area].name , ##arg)
#define DRM_INFO(fmt, arg...) printk(KERN_INFO "[" DRM_NAME "] " fmt , ##arg)
#if DRM_DEBUG_CODE
#define DRM_DEBUG(fmt, arg...) \
do { \
if (drm_flags&DRM_FLAG_DEBUG) \
printk(KERN_DEBUG \
"[" DRM_NAME ":" __FUNCTION__ "] " fmt , \
##arg); \
} while (0)
#else
#define DRM_DEBUG(fmt, arg...) do { } while (0)
#endif
#define DRM_PROC_LIMIT (PAGE_SIZE-80)
#define DRM_PROC_PRINT(fmt, arg...) \
len += sprintf(&buf[len], fmt , ##arg); \
if (len > DRM_PROC_LIMIT) return len;
#define DRM_PROC_PRINT_RET(ret, fmt, arg...) \
len += sprintf(&buf[len], fmt , ##arg); \
if (len > DRM_PROC_LIMIT) { ret; return len; }
/* Internal types and structures */
#define DRM_ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0]))
#define DRM_MIN(a,b) ((a)<(b)?(a):(b))
#define DRM_MAX(a,b) ((a)>(b)?(a):(b))
#define DRM_LEFTCOUNT(x) (((x)->rp + (x)->count - (x)->wp) % ((x)->count + 1))
#define DRM_BUFCOUNT(x) ((x)->count - DRM_LEFTCOUNT(x))
#define DRM_WAITCOUNT(dev,idx) DRM_BUFCOUNT(&dev->queuelist[idx]->waitlist)
typedef int drm_ioctl_t(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
typedef struct drm_ioctl_desc {
drm_ioctl_t *func;
int auth_needed;
int root_only;
} drm_ioctl_desc_t;
typedef struct drm_devstate {
pid_t owner; /* X server pid holding x_lock */
} drm_devstate_t;
typedef struct drm_magic_entry {
drm_magic_t magic;
struct drm_file *priv;
struct drm_magic_entry *next;
} drm_magic_entry_t;
typedef struct drm_magic_head {
struct drm_magic_entry *head;
struct drm_magic_entry *tail;
} drm_magic_head_t;
typedef struct drm_vma_entry {
struct vm_area_struct *vma;
struct drm_vma_entry *next;
pid_t pid;
} drm_vma_entry_t;
typedef struct drm_buf {
int idx; /* Index into master buflist */
int total; /* Buffer size */
int order; /* log-base-2(total) */
int used; /* Amount of buffer in use (for DMA) */
unsigned long offset; /* Byte offset (used internally) */
void *address; /* Address of buffer */
struct drm_buf *next; /* Kernel-only: used for free list */
__volatile__ int waiting; /* On kernel DMA queue */
__volatile__ int pending; /* On hardware DMA queue */
wait_queue_head_t dma_wait; /* Processes waiting */
pid_t pid; /* PID of holding process */
int context; /* Kernel queue for this buffer */
int while_locked;/* Dispatch this buffer while locked */
enum {
DRM_LIST_NONE = 0,
DRM_LIST_FREE = 1,
DRM_LIST_WAIT = 2,
DRM_LIST_PEND = 3,
DRM_LIST_PRIO = 4,
DRM_LIST_RECLAIM = 5
} list; /* Which list we're on */
#if DRM_DMA_HISTOGRAM
cycles_t time_queued; /* Queued to kernel DMA queue */
cycles_t time_dispatched; /* Dispatched to hardware */
cycles_t time_completed; /* Completed by hardware */
cycles_t time_freed; /* Back on freelist */
#endif
} drm_buf_t;
#if DRM_DMA_HISTOGRAM
#define DRM_DMA_HISTOGRAM_SLOTS 9
#define DRM_DMA_HISTOGRAM_INITIAL 10
#define DRM_DMA_HISTOGRAM_NEXT(current) ((current)*10)
typedef struct drm_histogram {
atomic_t total;
atomic_t queued_to_dispatched[DRM_DMA_HISTOGRAM_SLOTS];
atomic_t dispatched_to_completed[DRM_DMA_HISTOGRAM_SLOTS];
atomic_t completed_to_freed[DRM_DMA_HISTOGRAM_SLOTS];
atomic_t queued_to_completed[DRM_DMA_HISTOGRAM_SLOTS];
atomic_t queued_to_freed[DRM_DMA_HISTOGRAM_SLOTS];
atomic_t dma[DRM_DMA_HISTOGRAM_SLOTS];
atomic_t schedule[DRM_DMA_HISTOGRAM_SLOTS];
atomic_t ctx[DRM_DMA_HISTOGRAM_SLOTS];
atomic_t lacq[DRM_DMA_HISTOGRAM_SLOTS];
atomic_t lhld[DRM_DMA_HISTOGRAM_SLOTS];
} drm_histogram_t;
#endif
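The constants above imply decade-wide buckets: slot 0 holds latencies below 10 cycles, slot 1 below 100, and so on, with the last slot absorbing everything larger. A hypothetical sketch of the slot function they imply (the real drm_histogram_slot belongs to the DMA support code, which is not shown in this excerpt):

static int example_histogram_slot(unsigned long count)
{
    unsigned long bound = DRM_DMA_HISTOGRAM_INITIAL;    /* 10 */
    int           slot;

    for (slot = 0; slot < DRM_DMA_HISTOGRAM_SLOTS; slot++) {
        if (count < bound) return slot;
        bound = DRM_DMA_HISTOGRAM_NEXT(bound);          /* bound *= 10 */
    }
    return DRM_DMA_HISTOGRAM_SLOTS - 1;                 /* overflow bucket */
}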
/* bufs is one longer than it has to be */
typedef struct drm_waitlist {
int count; /* Number of possible buffers */
drm_buf_t **bufs; /* List of pointers to buffers */
drm_buf_t **rp; /* Read pointer */
drm_buf_t **wp; /* Write pointer */
drm_buf_t **end; /* End pointer */
spinlock_t read_lock;
spinlock_t write_lock;
} drm_waitlist_t;
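A worked example of the count arithmetic behind DRM_LEFTCOUNT/DRM_BUFCOUNT defined earlier in this header (hypothetical, using indices instead of the drm_buf_t ** pointers for brevity): with count = 4 the bufs array holds count + 1 = 5 slots, and one slot always stays empty so that rp == wp unambiguously means "empty".

#include <stdio.h>

#define COUNT 4                                               /* usable slots */
#define LEFT(rp, wp)   (((rp) + COUNT - (wp)) % (COUNT + 1))  /* free slots */
#define QUEUED(rp, wp) (COUNT - LEFT(rp, wp))                 /* pending bufs */

int main(void)
{
    /* rp = 1, wp = 3: the buffers in slots 1 and 2 are still queued. */
    printf("free = %d, queued = %d\n", LEFT(1, 3), QUEUED(1, 3));  /* 2, 2 */
    /* rp == wp: list is empty, all four usable slots are free. */
    printf("free = %d, queued = %d\n", LEFT(2, 2), QUEUED(2, 2));  /* 4, 0 */
    return 0;
}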
typedef struct drm_freelist {
int initialized; /* Freelist in use */
atomic_t count; /* Number of free buffers */
drm_buf_t *next; /* End pointer */
wait_queue_head_t waiting; /* Processes waiting on free bufs */
int low_mark; /* Low water mark */
int high_mark; /* High water mark */
atomic_t wfh; /* If waiting for high mark */
} drm_freelist_t;
typedef struct drm_buf_entry {
int buf_size;
int buf_count;
drm_buf_t *buflist;
int seg_count;
int page_order;
unsigned long *seglist;
drm_freelist_t freelist;
} drm_buf_entry_t;
typedef struct drm_hw_lock {
__volatile__ unsigned int lock;
char padding[60]; /* Pad to cache line */
} drm_hw_lock_t;
typedef struct drm_file {
int authenticated;
int minor;
pid_t pid;
uid_t uid;
drm_magic_t magic;
unsigned long ioctl_count;
struct drm_file *next;
struct drm_file *prev;
struct drm_device *dev;
} drm_file_t;
typedef struct drm_queue {
atomic_t use_count; /* Outstanding uses (+1) */
atomic_t finalization; /* Finalization in progress */
atomic_t block_count; /* Count of processes waiting */
atomic_t block_read; /* Queue blocked for reads */
wait_queue_head_t read_queue; /* Processes waiting on block_read */
atomic_t block_write; /* Queue blocked for writes */
wait_queue_head_t write_queue; /* Processes waiting on block_write */
atomic_t total_queued; /* Total queued statistic */
atomic_t total_flushed;/* Total flushes statistic */
atomic_t total_locks; /* Total locks statistics */
drm_ctx_flags_t flags; /* Context preserving and 2D-only */
drm_waitlist_t waitlist; /* Pending buffers */
wait_queue_head_t flush_queue; /* Processes waiting until flush */
} drm_queue_t;
typedef struct drm_lock_data {
drm_hw_lock_t *hw_lock; /* Hardware lock */
pid_t pid; /* PID of lock holder (0=kernel) */
wait_queue_head_t lock_queue; /* Queue of blocked processes */
unsigned long lock_time; /* Time of last lock in jiffies */
} drm_lock_data_t;
typedef struct drm_device_dma {
/* Performance Counters */
atomic_t total_prio; /* Total DRM_DMA_PRIORITY */
atomic_t total_bytes; /* Total bytes DMA'd */
atomic_t total_dmas; /* Total DMA buffers dispatched */
atomic_t total_missed_dma; /* Missed drm_do_dma */
atomic_t total_missed_lock; /* Missed lock in drm_do_dma */
atomic_t total_missed_free; /* Missed drm_free_this_buffer */
atomic_t total_missed_sched;/* Missed drm_dma_schedule */
atomic_t total_tried; /* Tried next_buffer */
atomic_t total_hit; /* Sent next_buffer */
atomic_t total_lost; /* Lost interrupt */
drm_buf_entry_t bufs[DRM_MAX_ORDER+1];
int buf_count;
drm_buf_t **buflist; /* Vector of pointers into bufs */
int seg_count;
int page_count;
unsigned long *pagelist;
unsigned long byte_count;
/* DMA support */
drm_buf_t *this_buffer; /* Buffer being sent */
drm_buf_t *next_buffer; /* Selected buffer to send */
drm_queue_t *next_queue; /* Queue from which buffer selected*/
wait_queue_head_t waiting; /* Processes waiting on free bufs */
} drm_device_dma_t;
typedef struct drm_device {
const char *name; /* Simple driver name */
char *unique; /* Unique identifier: e.g., busid */
int unique_len; /* Length of unique field */
dev_t device; /* Device number for mknod */
char *devname; /* For /proc/interrupts */
int blocked; /* Blocked due to VC switch? */
struct proc_dir_entry *root; /* Root for this device's entries */
/* Locks */
spinlock_t count_lock; /* For inuse, open_count, buf_use */
struct semaphore struct_sem; /* For others */
/* Usage Counters */
int open_count; /* Outstanding files open */
atomic_t ioctl_count; /* Outstanding IOCTLs pending */
atomic_t vma_count; /* Outstanding vma areas open */
int buf_use; /* Buffers in use -- cannot alloc */
atomic_t buf_alloc; /* Buffer allocation in progress */
/* Performance Counters */
atomic_t total_open;
atomic_t total_close;
atomic_t total_ioctl;
atomic_t total_irq; /* Total interruptions */
atomic_t total_ctx; /* Total context switches */
atomic_t total_locks;
atomic_t total_unlocks;
atomic_t total_contends;
atomic_t total_sleeps;
/* Authentication */
drm_file_t *file_first;
drm_file_t *file_last;
drm_magic_head_t magiclist[DRM_HASH_SIZE];
/* Memory management */
drm_map_t **maplist; /* Vector of pointers to regions */
int map_count; /* Number of mappable regions */
drm_vma_entry_t *vmalist; /* List of vmas (for debugging) */
drm_lock_data_t lock; /* Information on hardware lock */
/* DMA queues (contexts) */
int queue_count; /* Number of active DMA queues */
int queue_reserved; /* Number of reserved DMA queues */
int queue_slots; /* Actual length of queuelist */
drm_queue_t **queuelist; /* Vector of pointers to DMA queues */
drm_device_dma_t *dma; /* Optional pointer for DMA support */
/* Context support */
int irq; /* Interrupt used by board */
__volatile__ int context_flag; /* Context swapping flag */
__volatile__ int interrupt_flag;/* Interruption handler flag */
__volatile__ int dma_flag; /* DMA dispatch flag */
struct timer_list timer; /* Timer for delaying ctx switch */
wait_queue_head_t context_wait; /* Processes waiting on ctx switch */
int last_checked; /* Last context checked for DMA */
int last_context; /* Last current context */
unsigned long last_switch; /* jiffies at last context switch */
struct tq_struct tq;
cycles_t ctx_start;
cycles_t lck_start;
#if DRM_DMA_HISTOGRAM
drm_histogram_t histo;
#endif
/* Callback to X server for context switch
and for heavy-handed reset. */
char buf[DRM_BSZ]; /* Output buffer */
char *buf_rp; /* Read pointer */
char *buf_wp; /* Write pointer */
char *buf_end; /* End pointer */
struct fasync_struct *buf_async;/* Processes waiting for SIGIO */
wait_queue_head_t buf_readers; /* Processes waiting to read */
wait_queue_head_t buf_writers; /* Processes waiting to ctx switch */
} drm_device_t;
/* Internal function definitions */
/* Misc. support (init.c) */
extern int drm_flags;
extern void drm_parse_options(char *s);
/* Device support (fops.c) */
extern int drm_open_helper(struct inode *inode, struct file *filp,
drm_device_t *dev);
extern int drm_flush(struct file *filp);
extern int drm_release(struct inode *inode, struct file *filp);
extern int drm_fasync(int fd, struct file *filp, int on);
extern ssize_t drm_read(struct file *filp, char *buf, size_t count,
loff_t *off);
extern int drm_write_string(drm_device_t *dev, const char *s);
/* Mapping support (vm.c) */
extern unsigned long drm_vm_nopage(struct vm_area_struct *vma,
unsigned long address,
int write_access);
extern unsigned long drm_vm_shm_nopage(struct vm_area_struct *vma,
unsigned long address,
int write_access);
extern unsigned long drm_vm_dma_nopage(struct vm_area_struct *vma,
unsigned long address,
int write_access);
extern void drm_vm_open(struct vm_area_struct *vma);
extern void drm_vm_close(struct vm_area_struct *vma);
extern int drm_mmap_dma(struct file *filp,
struct vm_area_struct *vma);
extern int drm_mmap(struct file *filp, struct vm_area_struct *vma);
/* Proc support (proc.c) */
extern int drm_proc_init(drm_device_t *dev);
extern int drm_proc_cleanup(void);
/* Memory management support (memory.c) */
extern void drm_mem_init(void);
extern int drm_mem_info(char *buf, char **start, off_t offset,
int len, int *eof, void *data);
extern void *drm_alloc(size_t size, int area);
extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size,
int area);
extern char *drm_strdup(const char *s, int area);
extern void drm_strfree(const char *s, int area);
extern void drm_free(void *pt, size_t size, int area);
extern unsigned long drm_alloc_pages(int order, int area);
extern void drm_free_pages(unsigned long address, int order,
int area);
extern void *drm_ioremap(unsigned long offset, unsigned long size);
extern void drm_ioremapfree(void *pt, unsigned long size);
/* Buffer management support (bufs.c) */
extern int drm_order(unsigned long size);
extern int drm_addmap(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_addbufs(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_infobufs(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_markbufs(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_freebufs(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_mapbufs(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
/* Buffer list management support (lists.c) */
extern int drm_waitlist_create(drm_waitlist_t *bl, int count);
extern int drm_waitlist_destroy(drm_waitlist_t *bl);
extern int drm_waitlist_put(drm_waitlist_t *bl, drm_buf_t *buf);
extern drm_buf_t *drm_waitlist_get(drm_waitlist_t *bl);
extern int drm_freelist_create(drm_freelist_t *bl, int count);
extern int drm_freelist_destroy(drm_freelist_t *bl);
extern int drm_freelist_put(drm_device_t *dev, drm_freelist_t *bl,
drm_buf_t *buf);
extern drm_buf_t *drm_freelist_get(drm_freelist_t *bl, int block);
/* DMA support (gen_dma.c) */
extern void drm_dma_setup(drm_device_t *dev);
extern void drm_dma_takedown(drm_device_t *dev);
extern void drm_free_buffer(drm_device_t *dev, drm_buf_t *buf);
extern void drm_reclaim_buffers(drm_device_t *dev, pid_t pid);
extern int drm_context_switch(drm_device_t *dev, int old, int new);
extern int drm_context_switch_complete(drm_device_t *dev, int new);
extern void drm_wakeup(drm_device_t *dev, drm_buf_t *buf);
extern void drm_clear_next_buffer(drm_device_t *dev);
extern int drm_select_queue(drm_device_t *dev,
void (*wrapper)(unsigned long));
extern int drm_dma_enqueue(drm_device_t *dev, drm_dma_t *dma);
extern int drm_dma_get_buffers(drm_device_t *dev, drm_dma_t *dma);
#if DRM_DMA_HISTOGRAM
extern int drm_histogram_slot(unsigned long count);
extern void drm_histogram_compute(drm_device_t *dev, drm_buf_t *buf);
#endif
/* Misc. IOCTL support (ioctl.c) */
extern int drm_irq_busid(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_getunique(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_setunique(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
/* Context IOCTL support (context.c) */
extern int drm_resctx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_addctx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_modctx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_getctx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_switchctx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_newctx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_rmctx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
/* Drawable IOCTL support (drawable.c) */
extern int drm_adddraw(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_rmdraw(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
/* Authentication IOCTL support (auth.c) */
extern int drm_add_magic(drm_device_t *dev, drm_file_t *priv,
drm_magic_t magic);
extern int drm_remove_magic(drm_device_t *dev, drm_magic_t magic);
extern int drm_getmagic(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_authmagic(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
/* Locking IOCTL support (lock.c) */
extern int drm_block(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_unblock(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_lock_take(__volatile__ unsigned int *lock,
unsigned int context);
extern int drm_lock_transfer(drm_device_t *dev,
__volatile__ unsigned int *lock,
unsigned int context);
extern int drm_lock_free(drm_device_t *dev,
__volatile__ unsigned int *lock,
unsigned int context);
extern int drm_finish(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_flush_unblock(drm_device_t *dev, int context,
drm_lock_flags_t flags);
extern int drm_flush_block_and_flush(drm_device_t *dev, int context,
drm_lock_flags_t flags);
#endif
#endif

653
linux-core/tdfx_drv.c Normal file

@ -0,0 +1,653 @@
/* tdfx.c -- tdfx driver -*- linux-c -*-
* Created: Thu Oct 7 10:38:32 1999 by faith@precisioninsight.com
* Revised: Tue Oct 12 08:51:35 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* $PI$
* $XFree86$
*
*/
#define EXPORT_SYMTAB
#include "drmP.h"
#include "tdfx_drv.h"
EXPORT_SYMBOL(tdfx_init);
EXPORT_SYMBOL(tdfx_cleanup);
#define TDFX_NAME "tdfx"
#define TDFX_DESC "tdfx"
#define TDFX_DATE "19991009"
#define TDFX_MAJOR 0
#define TDFX_MINOR 0
#define TDFX_PATCHLEVEL 1
static drm_device_t tdfx_device;
drm_ctx_t tdfx_res_ctx;
static struct file_operations tdfx_fops = {
open: tdfx_open,
flush: drm_flush,
release: tdfx_release,
ioctl: tdfx_ioctl,
mmap: drm_mmap,
read: drm_read,
fasync: drm_fasync,
};
static struct miscdevice tdfx_misc = {
minor: MISC_DYNAMIC_MINOR,
name: TDFX_NAME,
fops: &tdfx_fops,
};
static drm_ioctl_desc_t tdfx_ioctls[] = {
[DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { tdfx_version, 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique, 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { drm_getmagic, 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { drm_irq_busid, 0, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { drm_block, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_unblock, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { tdfx_addctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { tdfx_rmctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { tdfx_modctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { tdfx_getctx, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { tdfx_switchctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { tdfx_newctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { tdfx_resctx, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { drm_rmdraw, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { tdfx_lock, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { tdfx_unlock, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_finish, 1, 0 },
};
#define TDFX_IOCTL_COUNT DRM_ARRAY_SIZE(tdfx_ioctls)
#ifdef MODULE
int init_module(void);
void cleanup_module(void);
static char *tdfx = NULL;
MODULE_AUTHOR("Precision Insight, Inc., Cedar Park, Texas.");
MODULE_DESCRIPTION("tdfx");
MODULE_PARM(tdfx, "s");
/* init_module is called when insmod is used to load the module */
int init_module(void)
{
return tdfx_init();
}
/* cleanup_module is called when rmmod is used to unload the module */
void cleanup_module(void)
{
tdfx_cleanup();
}
#endif
#ifndef MODULE
/* tdfx_setup is called by the kernel to parse command-line options passed
* via the boot-loader (e.g., LILO). It calls the insmod option routine,
* drm_parse_options.
*
* This is not currently supported, since it requires changes to
* linux/init/main.c. */
void __init tdfx_setup(char *str, int *ints)
{
if (ints[0] != 0) {
DRM_ERROR("Illegal command line format, ignored\n");
return;
}
drm_parse_options(str);
}
#endif
static int tdfx_setup(drm_device_t *dev)
{
int i;
atomic_set(&dev->ioctl_count, 0);
atomic_set(&dev->vma_count, 0);
dev->buf_use = 0;
atomic_set(&dev->buf_alloc, 0);
atomic_set(&dev->total_open, 0);
atomic_set(&dev->total_close, 0);
atomic_set(&dev->total_ioctl, 0);
atomic_set(&dev->total_irq, 0);
atomic_set(&dev->total_ctx, 0);
atomic_set(&dev->total_locks, 0);
atomic_set(&dev->total_unlocks, 0);
atomic_set(&dev->total_contends, 0);
atomic_set(&dev->total_sleeps, 0);
for (i = 0; i < DRM_HASH_SIZE; i++) {
dev->magiclist[i].head = NULL;
dev->magiclist[i].tail = NULL;
}
dev->maplist = NULL;
dev->map_count = 0;
dev->vmalist = NULL;
dev->lock.hw_lock = NULL;
init_waitqueue_head(&dev->lock.lock_queue);
dev->queue_count = 0;
dev->queue_reserved = 0;
dev->queue_slots = 0;
dev->queuelist = NULL;
dev->irq = 0;
dev->context_flag = 0;
dev->interrupt_flag = 0;
dev->dma = 0;
dev->dma_flag = 0;
dev->last_context = 0;
dev->last_switch = 0;
dev->last_checked = 0;
init_timer(&dev->timer);
init_waitqueue_head(&dev->context_wait);
dev->ctx_start = 0;
dev->lck_start = 0;
dev->buf_rp = dev->buf;
dev->buf_wp = dev->buf;
dev->buf_end = dev->buf + DRM_BSZ;
dev->buf_async = NULL;
init_waitqueue_head(&dev->buf_readers);
init_waitqueue_head(&dev->buf_writers);
tdfx_res_ctx.handle=-1;
DRM_DEBUG("\n");
/* The kernel's context could be created here, but is now created
in drm_dma_enqueue. This is more resource-efficient for
hardware that does not do DMA, but may mean that
drm_select_queue fails between the time the interrupt is
initialized and the time the queues are initialized. */
return 0;
}
static int tdfx_takedown(drm_device_t *dev)
{
int i;
drm_magic_entry_t *pt, *next;
drm_map_t *map;
drm_vma_entry_t *vma, *vma_next;
DRM_DEBUG("\n");
down(&dev->struct_sem);
del_timer(&dev->timer);
if (dev->devname) {
drm_free(dev->devname, strlen(dev->devname)+1, DRM_MEM_DRIVER);
dev->devname = NULL;
}
if (dev->unique) {
drm_free(dev->unique, strlen(dev->unique)+1, DRM_MEM_DRIVER);
dev->unique = NULL;
dev->unique_len = 0;
}
/* Clear pid list */
for (i = 0; i < DRM_HASH_SIZE; i++) {
for (pt = dev->magiclist[i].head; pt; pt = next) {
next = pt->next;
drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
}
dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
}
/* Clear vma list (only built for debugging) */
if (dev->vmalist) {
for (vma = dev->vmalist; vma; vma = vma_next) {
vma_next = vma->next;
drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
}
dev->vmalist = NULL;
}
/* Clear map area and mtrr information */
if (dev->maplist) {
for (i = 0; i < dev->map_count; i++) {
map = dev->maplist[i];
switch (map->type) {
case _DRM_REGISTERS:
case _DRM_FRAME_BUFFER:
#ifdef CONFIG_MTRR
if (map->mtrr >= 0) {
int retcode;
retcode = mtrr_del(map->mtrr,
map->offset,
map->size);
DRM_DEBUG("mtrr_del = %d\n", retcode);
}
#endif
drm_ioremapfree(map->handle, map->size);
break;
case _DRM_SHM:
drm_free_pages((unsigned long)map->handle,
drm_order(map->size)
- PAGE_SHIFT,
DRM_MEM_SAREA);
break;
}
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
}
drm_free(dev->maplist,
dev->map_count * sizeof(*dev->maplist),
DRM_MEM_MAPS);
dev->maplist = NULL;
dev->map_count = 0;
}
if (dev->lock.hw_lock) {
dev->lock.hw_lock = NULL; /* SHM removed */
dev->lock.pid = 0;
wake_up_interruptible(&dev->lock.lock_queue);
}
up(&dev->struct_sem);
return 0;
}
/* tdfx_init is called via init_module at module load time, or via
* linux/init/main.c (this is not currently supported). */
int tdfx_init(void)
{
int retcode;
drm_device_t *dev = &tdfx_device;
DRM_DEBUG("\n");
memset((void *)dev, 0, sizeof(*dev));
dev->count_lock = SPIN_LOCK_UNLOCKED;
sema_init(&dev->struct_sem, 1);
#ifdef MODULE
drm_parse_options(tdfx);
#endif
if ((retcode = misc_register(&tdfx_misc))) {
DRM_ERROR("Cannot register \"%s\"\n", TDFX_NAME);
return retcode;
}
dev->device = MKDEV(MISC_MAJOR, tdfx_misc.minor);
dev->name = TDFX_NAME;
drm_mem_init();
drm_proc_init(dev);
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
TDFX_NAME,
TDFX_MAJOR,
TDFX_MINOR,
TDFX_PATCHLEVEL,
TDFX_DATE,
tdfx_misc.minor);
return 0;
}
/* tdfx_cleanup is called via cleanup_module at module unload time. */
void tdfx_cleanup(void)
{
drm_device_t *dev = &tdfx_device;
DRM_DEBUG("\n");
drm_proc_cleanup();
if (misc_deregister(&tdfx_misc)) {
DRM_ERROR("Cannot unload module\n");
} else {
DRM_INFO("Module unloaded\n");
}
tdfx_takedown(dev);
}
int tdfx_version(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_version_t version;
int len;
copy_from_user_ret(&version,
(drm_version_t *)arg,
sizeof(version),
-EFAULT);
#define DRM_COPY(name,value) \
len = strlen(value); \
if (len > name##_len) len = name##_len; \
name##_len = strlen(value); \
if (len && name) { \
copy_to_user_ret(name, value, len, -EFAULT); \
}
version.version_major = TDFX_MAJOR;
version.version_minor = TDFX_MINOR;
version.version_patchlevel = TDFX_PATCHLEVEL;
DRM_COPY(version.name, TDFX_NAME);
DRM_COPY(version.date, TDFX_DATE);
DRM_COPY(version.desc, TDFX_DESC);
copy_to_user_ret((drm_version_t *)arg,
&version,
sizeof(version),
-EFAULT);
return 0;
}
int tdfx_open(struct inode *inode, struct file *filp)
{
drm_device_t *dev = &tdfx_device;
int retcode = 0;
DRM_DEBUG("open_count = %d\n", dev->open_count);
if (!(retcode = drm_open_helper(inode, filp, dev))) {
MOD_INC_USE_COUNT;
atomic_inc(&dev->total_open);
spin_lock(&dev->count_lock);
if (!dev->open_count++) {
spin_unlock(&dev->count_lock);
return tdfx_setup(dev);
}
spin_unlock(&dev->count_lock);
}
return retcode;
}
int tdfx_release(struct inode *inode, struct file *filp)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
int retcode = 0;
DRM_DEBUG("open_count = %d\n", dev->open_count);
if (!(retcode = drm_release(inode, filp))) {
MOD_DEC_USE_COUNT;
atomic_inc(&dev->total_close);
spin_lock(&dev->count_lock);
if (!--dev->open_count) {
if (atomic_read(&dev->ioctl_count) || dev->blocked) {
DRM_ERROR("Device busy: %d %d\n",
atomic_read(&dev->ioctl_count),
dev->blocked);
spin_unlock(&dev->count_lock);
return -EBUSY;
}
spin_unlock(&dev->count_lock);
return tdfx_takedown(dev);
}
spin_unlock(&dev->count_lock);
}
return retcode;
}
/* tdfx_ioctl is called whenever a process performs an ioctl on /dev/drm. */
int tdfx_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
int nr = DRM_IOCTL_NR(cmd);
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
int retcode = 0;
drm_ioctl_desc_t *ioctl;
drm_ioctl_t *func;
atomic_inc(&dev->ioctl_count);
atomic_inc(&dev->total_ioctl);
++priv->ioctl_count;
DRM_DEBUG("pid = %d, cmd = 0x%02x, nr = 0x%02x, dev 0x%x, auth = %d\n",
current->pid, cmd, nr, dev->device, priv->authenticated);
if (nr >= TDFX_IOCTL_COUNT) {
retcode = -EINVAL;
} else {
ioctl = &tdfx_ioctls[nr];
func = ioctl->func;
if (!func) {
DRM_DEBUG("no function\n");
retcode = -EINVAL;
} else if ((ioctl->root_only && !capable(CAP_SYS_ADMIN))
|| (ioctl->auth_needed && !priv->authenticated)) {
retcode = -EACCES;
} else {
retcode = (func)(inode, filp, cmd, arg);
}
}
atomic_dec(&dev->ioctl_count);
return retcode;
}
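The auth_needed column in tdfx_ioctls above encodes the DRI authentication model: a client fetches a magic cookie with GET_MAGIC (no authentication needed), passes it to the X server out of band, and the server validates it with the root-only AUTH_MAGIC ioctl, after which the client's auth_needed ioctls are allowed. A hypothetical sketch, assuming the drmGetMagic/drmAuthMagic wrappers declared in xf86drm.h (in the real protocol the two calls are made by different processes):

#include "xf86drm.h"

int example_authenticate(int client_fd, int server_fd)
{
    drm_magic_t magic;    /* wrappers and type assumed from xf86drm.h/drm.h */

    if (drmGetMagic(client_fd, &magic))    /* DRM_IOCTL_GET_MAGIC: auth 0 */
        return -1;
    if (drmAuthMagic(server_fd, magic))    /* DRM_IOCTL_AUTH_MAGIC: root 1 */
        return -1;
    return 0;        /* auth_needed ioctls now succeed on client_fd */
}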
int tdfx_lock(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
DECLARE_WAITQUEUE(entry, current);
int ret = 0;
drm_lock_t lock;
#if DRM_DMA_HISTOGRAM
cycles_t start;
dev->lck_start = start = get_cycles();
#endif
copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);
if (lock.context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
current->pid, lock.context);
return -EINVAL;
}
DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
lock.context, current->pid, dev->lock.hw_lock->lock,
lock.flags);
#if 0
/* dev->queue_count == 0 right now for
tdfx. FIXME? */
if (lock.context < 0 || lock.context >= dev->queue_count)
return -EINVAL;
#endif
if (!ret) {
#if 0
if (_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)
!= lock.context) {
long j = jiffies - dev->lock.lock_time;
if (lock.context == tdfx_res_ctx.handle &&
j >= 0 && j < DRM_LOCK_SLICE) {
/* Can't take lock if we just had it and
there is contention. */
DRM_DEBUG("%d (pid %d) delayed j=%d dev=%d jiffies=%d\n",
lock.context, current->pid, j,
dev->lock.lock_time, jiffies);
current->state = TASK_INTERRUPTIBLE;
current->policy |= SCHED_YIELD;
schedule_timeout(DRM_LOCK_SLICE-j);
DRM_DEBUG("jiffies=%d\n", jiffies);
}
}
#endif
add_wait_queue(&dev->lock.lock_queue, &entry);
for (;;) {
if (!dev->lock.hw_lock) {
/* Device has been unregistered */
ret = -EINTR;
break;
}
if (drm_lock_take(&dev->lock.hw_lock->lock,
lock.context)) {
dev->lock.pid = current->pid;
dev->lock.lock_time = jiffies;
atomic_inc(&dev->total_locks);
break; /* Got lock */
}
/* Contention */
atomic_inc(&dev->total_sleeps);
current->state = TASK_INTERRUPTIBLE;
#if 1
current->policy |= SCHED_YIELD;
#endif
schedule();
if (signal_pending(current)) {
ret = -ERESTARTSYS;
break;
}
}
current->state = TASK_RUNNING;
remove_wait_queue(&dev->lock.lock_queue, &entry);
}
#if 0
if (!ret && dev->last_context != lock.context &&
lock.context != tdfx_res_ctx.handle &&
dev->last_context != tdfx_res_ctx.handle) {
add_wait_queue(&dev->context_wait, &entry);
current->state = TASK_INTERRUPTIBLE;
/* PRE: dev->last_context != lock.context */
tdfx_context_switch(dev, dev->last_context, lock.context);
/* POST: we will wait for the context
switch and will dispatch on a later call
when dev->last_context == lock.context
NOTE WE HOLD THE LOCK THROUGHOUT THIS
TIME! */
current->policy |= SCHED_YIELD;
schedule();
current->state = TASK_RUNNING;
remove_wait_queue(&dev->context_wait, &entry);
if (signal_pending(current)) {
ret = -EINTR;
} else if (dev->last_context != lock.context) {
DRM_ERROR("Context mismatch: %d %d\n",
dev->last_context, lock.context);
}
}
#endif
if (!ret) {
if (lock.flags & _DRM_LOCK_READY) {
/* Wait for space in DMA/FIFO */
}
if (lock.flags & _DRM_LOCK_QUIESCENT) {
/* Make hardware quiescent */
#if 0
tdfx_quiescent(dev);
#endif
}
}
#if 0
DRM_ERROR("pid = %5d, old counter = %5ld\n",
current->pid, current->counter);
#endif
if (lock.context != tdfx_res_ctx.handle) {
current->counter = 5;
current->priority = DEF_PRIORITY/4;
}
#if 0
while (current->counter > 25)
current->counter >>= 1; /* decrease time slice */
DRM_ERROR("pid = %5d, new counter = %5ld\n",
current->pid, current->counter);
#endif
DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");
#if DRM_DMA_HISTOGRAM
atomic_inc(&dev->histo.lacq[drm_histogram_slot(get_cycles() - start)]);
#endif
return ret;
}
int tdfx_unlock(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_lock_t lock;
copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);
if (lock.context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
current->pid, lock.context);
return -EINVAL;
}
DRM_DEBUG("%d frees lock (%d holds)\n",
lock.context,
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
atomic_inc(&dev->total_unlocks);
if (_DRM_LOCK_IS_CONT(dev->lock.hw_lock->lock))
atomic_inc(&dev->total_contends);
drm_lock_transfer(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT);
/* FIXME: Try to send data to card here */
if (!dev->context_flag) {
if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT)) {
DRM_ERROR("\n");
}
}
#if 0
current->policy |= SCHED_YIELD;
current->state = TASK_INTERRUPTIBLE;
schedule_timeout(1000);
#endif
if (lock.context != tdfx_res_ctx.handle) {
current->counter = 5;
current->priority = DEF_PRIORITY;
}
#if 0
current->state = TASK_INTERRUPTIBLE;
schedule_timeout(10);
#endif
return 0;
}

28
linux/Makefile.kernel Normal file

@ -0,0 +1,28 @@
#
# Makefile for the drm device driver. This driver provides support for
# the Direct Rendering Infrastructure (DRI) in XFree86 4.x.
#
# Note! Dependencies are done automagically by 'make dep', which also
# removes any old dependencies. DON'T put your own dependencies here
# unless it's something special (ie not a .c file).
#
# Note 2! The CFLAGS definitions are now inherited from the
# parent makes..
#
# $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/Makefile.kernel,v 1.2 1999/09/27 14:59:24 dawes Exp $
L_TARGET := libdrm.a
L_OBJS := init.o memory.o proc.o auth.o context.o drawable.o bufs.o \
lists.o lock.o ioctl.o fops.o vm.o dma.o
M_OBJS :=
ifdef CONFIG_DRM_GAMMA
M_OBJS += gamma.o
endif
include $(TOPDIR)/Rules.make
gamma.o: gamma_drv.o gamma_dma.o $(L_TARGET)
$(LD) $(LD_RFLAG) -r -o $@ gamma_drv.o gamma_dma.o -L. -ldrm

136
linux/Makefile.linux Normal file

@ -0,0 +1,136 @@
# Makefile -- For the Direct Rendering Manager module (drm)
# Created: Mon Jan 4 09:26:53 1999 by faith@precisioninsight.com
# Revised: Thu Oct 7 10:56:13 1999 by faith@precisioninsight.com
#
# Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/generic/Makefile.linux,v 1.23 1999/07/02 17:46:30 faith Exp $
# $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/generic/Makefile.linux,v 1.3 1999/06/27 14:08:21 dawes Exp $
#
.SUFFIXES:
# **** Start of SMP/MODVERSIONS detection
# *** Setup
LINUX=/usr/src/linux
AUTOCONF=$(LINUX)/include/linux/autoconf.h
# ** SMP
SMP := $(shell \
if grep -q '^\#define.*CONFIG_SMP.*1' $(AUTOCONF); \
then echo 1; else echo 0; fi)
# If that doesn't do automatic detection properly on your system,
# uncomment one of these lines:
#SMP := 0
#SMP := 1
# ** MODVERSIONS
MODVERSIONS := $(shell \
if grep -q '^\#define.*CONFIG_MODVERSIONS.*1' $(AUTOCONF); \
then echo 1; else echo 0; fi)
# If that doesn't do automatic detection properly on your system,
# uncomment one of these lines:
#MODVERSIONS := 0
#MODVERSIONS := 1
# **** End of SMP/MODVERSIONS detection
MODS= gamma.o tdfx.o
LIBS= libdrm.a
PROGS= drmstat
DRMOBJS= init.o memory.o proc.o auth.o context.o drawable.o bufs.o \
lists.o lock.o ioctl.o fops.o vm.o dma.o
DRMHEADERS= drm.h drmP.h
GAMMAOBJS= gamma_drv.o gamma_dma.o
GAMMAHEADERS= gamma_drv.h $(DRMHEADERS)
TDFXOBJS= tdfx_drv.o tdfx_context.o
TDFXHEADERS= tdfx_drv.h $(DRMHEADERS)
PROGOBJS= drmstat.po xf86drm.po xf86drmHash.po xf86drmRandom.po sigio.po
PROGHEADERS= xf86drm.h $(DRMHEADERS)
INC= /usr/include
CFLAGS= -O2 $(WARNINGS)
WARNINGS= -Wall -Wwrite-strings -Wpointer-arith -Wcast-align \
-Wstrict-prototypes -Wshadow -Wnested-externs \
-Winline -Wpointer-arith
MODCFLAGS= $(CFLAGS) -D__KERNEL__ -DMODULE -fomit-frame-pointer
PRGCFLAGS= $(CFLAGS) -g -ansi -pedantic -DPOSIX_C_SOURCE=199309L \
-D_POSIX_SOURCE -D_XOPEN_SOURCE -D_BSD_SOURCE -D_SVID_SOURCE \
-I../../../../../../include -I../../../../../../../../include \
-I../../../../../../../../programs/Xserver/hw/xfree86/common
PRGLIBS=
# **** Handle SMP/MODVERSIONS
ifeq ($(SMP),1)
MODCFLAGS += -D__SMP__
endif
ifeq ($(MODVERSIONS),1)
MODCFLAGS += -DMODVERSIONS -include /usr/include/linux/modversions.h
endif
# **** End of configuration
all: $(LIBS) $(MODS) $(PROGS)
libdrm.a: $(DRMOBJS)
-$(RM) -f $@
$(AR) rcs $@ $(DRMOBJS)
gamma.o: $(GAMMAOBJS) $(LIBS)
$(LD) -r $^ -o $@
tdfx.o: $(TDFXOBJS) $(LIBS)
$(LD) -r $^ -o $@
drmstat: $(PROGOBJS)
$(CC) $(PRGCFLAGS) $^ $(PRGLIBS) -o $@
.PHONY: ChangeLog
ChangeLog:
@rm -f Changelog
@rcs2log -i 2 -r -l \
| sed 's,@.*alephnull.com,@precisioninsight.com,' > ChangeLog
# .o files are used for modules
%.o: %.c
$(CC) $(MODCFLAGS) -c $< -o $@
%.po: %.c
$(CC) $(PRGCFLAGS) -DDRM_USE_MALLOC -c $< -o $@
$(DRMOBJS): $(DRMHEADERS)
$(GAMMAOBJS): $(GAMMAHEADERS)
$(TDFXOBJS): $(TDFXHEADERS)
$(PROGOBJS): $(PROGHEADERS)
clean:
rm -f *.o *.po *~ core $(PROGS)

41
linux/README.drm Normal file
View File

@@ -0,0 +1,41 @@
The Direct Rendering Manager (drm) is a device-independent kernel-level
device driver that provides support for the XFree86 Direct Rendering
Infrastructure (DRI).
The DRM supports the Direct Rendering Infrastructure (DRI) in four major
ways:
1. The DRM provides synchronized access to the graphics hardware via
the use of an optimized two-tiered lock.
2. The DRM enforces the DRI security policy for access to the graphics
hardware by only allowing authenticated X11 clients access to
restricted regions of memory.
3. The DRM provides a generic DMA engine, complete with multiple
queues and the ability to detect the need for an OpenGL context
switch.
4. The DRM is extensible via the use of small device-specific modules
that rely extensively on the API exported by the DRM module.
Documentation on the DRI is available from:
http://precisioninsight.com/piinsights.html
For specific information about kernel-level support, see:
The Direct Rendering Manager, Kernel Support for the Direct Rendering
Infrastructure
http://precisioninsight.com/dr/drm.html
Hardware Locking for the Direct Rendering Infrastructure
http://precisioninsight.com/dr/locking.html
A Security Analysis of the Direct Rendering Infrastructure
http://precisioninsight.com/dr/security.html
$XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/README.drm,v 1.2 1999/09/27 14:59:24 dawes Exp $

161
linux/auth.c Normal file
View File

@@ -0,0 +1,161 @@
/* auth.c -- IOCTLs for authentication -*- linux-c -*-
* Created: Tue Feb 2 08:37:54 1999 by faith@precisioninsight.com
* Revised: Fri Aug 20 11:31:48 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/auth.c,v 1.4 1999/08/30 13:05:00 faith Exp $
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/auth.c,v 1.1 1999/09/25 14:37:57 dawes Exp $
*
*/
#define __NO_VERSION__
#include "drmP.h"
static int drm_hash_magic(drm_magic_t magic)
{
return magic & (DRM_HASH_SIZE-1);
}
static drm_file_t *drm_find_file(drm_device_t *dev, drm_magic_t magic)
{
drm_file_t *retval = NULL;
drm_magic_entry_t *pt;
int hash = drm_hash_magic(magic);
down(&dev->struct_sem);
for (pt = dev->magiclist[hash].head; pt; pt = pt->next) {
if (pt->priv->authenticated) continue;
if (pt->magic == magic) {
retval = pt->priv;
break;
}
}
up(&dev->struct_sem);
return retval;
}
int drm_add_magic(drm_device_t *dev, drm_file_t *priv, drm_magic_t magic)
{
int hash;
drm_magic_entry_t *entry;
DRM_DEBUG("%d\n", magic);
hash = drm_hash_magic(magic);
entry = drm_alloc(sizeof(*entry), DRM_MEM_MAGIC);
if (!entry) return -ENOMEM;
entry->magic = magic;
entry->priv = priv;
entry->next = NULL;
down(&dev->struct_sem);
if (dev->magiclist[hash].tail) {
dev->magiclist[hash].tail->next = entry;
dev->magiclist[hash].tail = entry;
} else {
dev->magiclist[hash].head = entry;
dev->magiclist[hash].tail = entry;
}
up(&dev->struct_sem);
return 0;
}
int drm_remove_magic(drm_device_t *dev, drm_magic_t magic)
{
drm_magic_entry_t *prev = NULL;
drm_magic_entry_t *pt;
int hash;
DRM_DEBUG("%d\n", magic);
hash = drm_hash_magic(magic);
down(&dev->struct_sem);
for (pt = dev->magiclist[hash].head; pt; prev = pt, pt = pt->next) {
if (pt->magic == magic) {
if (dev->magiclist[hash].head == pt) {
dev->magiclist[hash].head = pt->next;
}
if (dev->magiclist[hash].tail == pt) {
dev->magiclist[hash].tail = prev;
}
if (prev) {
prev->next = pt->next;
}
up(&dev->struct_sem);
drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC); /* entry is off the list; free it */
return 0;
}
}
up(&dev->struct_sem);
return -EINVAL; /* not found; pt is NULL here, so there is nothing to free */
}
int drm_getmagic(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
static drm_magic_t sequence = 0;
static spinlock_t lock = SPIN_LOCK_UNLOCKED;
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_auth_t auth;
/* Find unique magic */
if (priv->magic) {
auth.magic = priv->magic;
} else {
spin_lock(&lock);
do {
if (!sequence) ++sequence; /* reserve 0 */
auth.magic = sequence++;
} while (drm_find_file(dev, auth.magic));
spin_unlock(&lock);
priv->magic = auth.magic;
drm_add_magic(dev, priv, auth.magic);
}
DRM_DEBUG("%u\n", auth.magic);
copy_to_user_ret((drm_auth_t *)arg, &auth, sizeof(auth), -EFAULT);
return 0;
}
int drm_authmagic(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_auth_t auth;
drm_file_t *file;
copy_from_user_ret(&auth, (drm_auth_t *)arg, sizeof(auth), -EFAULT);
DRM_DEBUG("%u\n", auth.magic);
if ((file = drm_find_file(dev, auth.magic))) {
file->authenticated = 1;
drm_remove_magic(dev, auth.magic);
return 0;
}
return -EINVAL;
}
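drm_getmagic() and drm_authmagic() above are the kernel half of the DRI authentication handshake described in README.drm: a direct-rendering client asks the kernel for a magic cookie, passes it to the X server out of band, and the server vouches for it so the client's open file becomes authenticated. The fragment below is a minimal user-space sketch of that flow against the drm.h added later in this commit; the device path, the cookie hand-off between the two processes, and the helper names are illustrative assumptions, not part of this change.
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "drm.h"

/* Client side: open the device and obtain a magic cookie for this fd. */
static int client_get_cookie(const char *dev_path, drm_magic_t *magic)
{
	drm_auth_t auth;
	int fd = open(dev_path, O_RDWR);	/* e.g. a /dev/drm* node (assumed) */

	if (fd < 0) return -1;
	if (ioctl(fd, DRM_IOCTL_GET_MAGIC, &auth)) {	/* drm_getmagic() */
		close(fd);
		return -1;
	}
	*magic = auth.magic;	/* hand this to the X server (DRI protocol) */
	return fd;		/* keep the fd open; it is what gets authenticated */
}

/* X-server side: vouch for a cookie received from a client. */
static int server_auth_cookie(int drm_fd, drm_magic_t magic)
{
	drm_auth_t auth;

	auth.magic = magic;
	return ioctl(drm_fd, DRM_IOCTL_AUTH_MAGIC, &auth);	/* drm_authmagic() */
}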

527
linux/bufs.c Normal file
View File

@@ -0,0 +1,527 @@
/* bufs.c -- IOCTLs to manage buffers -*- linux-c -*-
* Created: Tue Feb 2 08:37:54 1999 by faith@precisioninsight.com
* Revised: Fri Aug 20 22:48:10 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/bufs.c,v 1.8 1999/08/30 13:05:00 faith Exp $
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/bufs.c,v 1.1 1999/09/25 14:37:57 dawes Exp $
*
*/
#define __NO_VERSION__
#include "drmP.h"
#include "linux/un.h"
/* Compute order.  Can be made faster.  Returns the smallest n such that
   (1 << n) >= size, e.g. drm_order(4096) == 12 and drm_order(4097) == 13;
   used below to bucket DMA buffers and pages by power-of-two size. */
int drm_order(unsigned long size)
{
int order;
unsigned long tmp;
for (order = 0, tmp = size; tmp >>= 1; ++order);
if (size & ~(1 << order)) ++order;
return order;
}
int drm_addmap(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_map_t *map;
if (!(filp->f_mode & 3)) return -EACCES; /* Require read/write */
map = drm_alloc(sizeof(*map), DRM_MEM_MAPS);
if (!map) return -ENOMEM;
if (copy_from_user(map, (drm_map_t *)arg, sizeof(*map))) {
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
return -EFAULT;
}
DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n",
map->offset, map->size, map->type);
if ((map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
return -EINVAL;
}
map->mtrr = -1;
map->handle = 0;
switch (map->type) {
case _DRM_REGISTERS:
case _DRM_FRAME_BUFFER:
if (map->offset + map->size < map->offset
|| map->offset < virt_to_phys(high_memory)) {
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
return -EINVAL;
}
#ifdef CONFIG_MTRR
if (map->type == _DRM_FRAME_BUFFER
|| (map->flags & _DRM_WRITE_COMBINING)) {
map->mtrr = mtrr_add(map->offset, map->size,
MTRR_TYPE_WRCOMB, 1);
}
#endif
map->handle = drm_ioremap(map->offset, map->size);
break;
case _DRM_SHM:
DRM_DEBUG("%ld %d\n", map->size, drm_order(map->size));
map->handle = (void *)drm_alloc_pages(drm_order(map->size)
- PAGE_SHIFT,
DRM_MEM_SAREA);
if (!map->handle) {
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
return -ENOMEM;
}
map->offset = (unsigned long)map->handle;
if (map->flags & _DRM_CONTAINS_LOCK) {
dev->lock.hw_lock = map->handle; /* Pointer to lock */
}
break;
default:
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
return -EINVAL;
}
down(&dev->struct_sem);
if (dev->maplist) {
++dev->map_count;
dev->maplist = drm_realloc(dev->maplist,
(dev->map_count-1)
* sizeof(*dev->maplist),
dev->map_count
* sizeof(*dev->maplist),
DRM_MEM_MAPS);
} else {
dev->map_count = 1;
dev->maplist = drm_alloc(dev->map_count*sizeof(*dev->maplist),
DRM_MEM_MAPS);
}
dev->maplist[dev->map_count-1] = map;
up(&dev->struct_sem);
copy_to_user_ret((drm_map_t *)arg, map, sizeof(*map), -EFAULT);
if (map->type != _DRM_SHM) {
copy_to_user_ret(&((drm_map_t *)arg)->handle,
&map->offset,
sizeof(map->offset),
-EFAULT);
}
return 0;
}
int drm_addbufs(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_device_dma_t *dma = dev->dma;
drm_buf_desc_t request;
int count;
int order;
int size;
int total;
int page_order;
drm_buf_entry_t *entry;
unsigned long page;
drm_buf_t *buf;
int alignment;
unsigned long offset;
int i;
int byte_count;
int page_count;
if (!dma) return -EINVAL;
copy_from_user_ret(&request,
(drm_buf_desc_t *)arg,
sizeof(request),
-EFAULT);
count = request.count;
order = drm_order(request.size);
size = 1 << order;
DRM_DEBUG("count = %d, size = %d (%d), order = %d, queue_count = %d\n",
request.count, request.size, size, order, dev->queue_count);
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL;
if (dev->queue_count) return -EBUSY; /* Not while in use */
alignment = (request.flags & DRM_PAGE_ALIGN) ? PAGE_ALIGN(size) : size;
page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
total = PAGE_SIZE << page_order;
spin_lock(&dev->count_lock);
if (dev->buf_use) {
spin_unlock(&dev->count_lock);
return -EBUSY;
}
atomic_inc(&dev->buf_alloc);
spin_unlock(&dev->count_lock);
down(&dev->struct_sem);
entry = &dma->bufs[order];
if (entry->buf_count) {
up(&dev->struct_sem);
atomic_dec(&dev->buf_alloc);
return -ENOMEM; /* May only call once for each order */
}
entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
DRM_MEM_BUFS);
if (!entry->buflist) {
up(&dev->struct_sem);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
memset(entry->buflist, 0, count * sizeof(*entry->buflist));
entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
DRM_MEM_SEGS);
if (!entry->seglist) {
drm_free(entry->buflist,
count * sizeof(*entry->buflist),
DRM_MEM_BUFS);
up(&dev->struct_sem);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
memset(entry->seglist, 0, count * sizeof(*entry->seglist));
dma->pagelist = drm_realloc(dma->pagelist,
dma->page_count * sizeof(*dma->pagelist),
(dma->page_count + (count << page_order))
* sizeof(*dma->pagelist),
DRM_MEM_PAGES);
DRM_DEBUG("pagelist: %d entries\n",
dma->page_count + (count << page_order));
entry->buf_size = size;
entry->page_order = page_order;
byte_count = 0;
page_count = 0;
while (entry->buf_count < count) {
if (!(page = drm_alloc_pages(page_order, DRM_MEM_DMA))) break;
entry->seglist[entry->seg_count++] = page;
for (i = 0; i < (1 << page_order); i++) {
DRM_DEBUG("page %d @ 0x%08lx\n",
dma->page_count + page_count,
page + PAGE_SIZE * i);
dma->pagelist[dma->page_count + page_count++]
= page + PAGE_SIZE * i;
}
for (offset = 0;
offset + size <= total && entry->buf_count < count;
offset += alignment, ++entry->buf_count) {
buf = &entry->buflist[entry->buf_count];
buf->idx = dma->buf_count + entry->buf_count;
buf->total = alignment;
buf->order = order;
buf->used = 0;
buf->offset = (dma->byte_count + byte_count + offset);
buf->address = (void *)(page + offset);
buf->next = NULL;
buf->waiting = 0;
buf->pending = 0;
init_waitqueue_head(&buf->dma_wait);
buf->pid = 0;
#if DRM_DMA_HISTOGRAM
buf->time_queued = 0;
buf->time_dispatched = 0;
buf->time_completed = 0;
buf->time_freed = 0;
#endif
DRM_DEBUG("buffer %d @ %p\n",
entry->buf_count, buf->address);
}
byte_count += PAGE_SIZE << page_order;
}
dma->buflist = drm_realloc(dma->buflist,
dma->buf_count * sizeof(*dma->buflist),
(dma->buf_count + entry->buf_count)
* sizeof(*dma->buflist),
DRM_MEM_BUFS);
for (i = dma->buf_count; i < dma->buf_count + entry->buf_count; i++)
dma->buflist[i] = &entry->buflist[i - dma->buf_count];
dma->buf_count += entry->buf_count;
dma->seg_count += entry->seg_count;
dma->page_count += entry->seg_count << page_order;
dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
drm_freelist_create(&entry->freelist, entry->buf_count);
for (i = 0; i < entry->buf_count; i++) {
drm_freelist_put(dev, &entry->freelist, &entry->buflist[i]);
}
up(&dev->struct_sem);
request.count = entry->buf_count;
request.size = size;
copy_to_user_ret((drm_buf_desc_t *)arg,
&request,
sizeof(request),
-EFAULT);
atomic_dec(&dev->buf_alloc);
return 0;
}
int drm_infobufs(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_device_dma_t *dma = dev->dma;
drm_buf_info_t request;
int i;
int count;
if (!dma) return -EINVAL;
spin_lock(&dev->count_lock);
if (atomic_read(&dev->buf_alloc)) {
spin_unlock(&dev->count_lock);
return -EBUSY;
}
++dev->buf_use; /* Can't allocate more after this call */
spin_unlock(&dev->count_lock);
copy_from_user_ret(&request,
(drm_buf_info_t *)arg,
sizeof(request),
-EFAULT);
for (i = 0, count = 0; i < DRM_MAX_ORDER+1; i++) {
if (dma->bufs[i].buf_count) ++count;
}
DRM_DEBUG("count = %d\n", count);
if (request.count >= count) {
for (i = 0, count = 0; i < DRM_MAX_ORDER+1; i++) {
if (dma->bufs[i].buf_count) {
copy_to_user_ret(&request.list[count].count,
&dma->bufs[i].buf_count,
sizeof(dma->bufs[0]
.buf_count),
-EFAULT);
copy_to_user_ret(&request.list[count].size,
&dma->bufs[i].buf_size,
sizeof(dma->bufs[0].buf_size),
-EFAULT);
copy_to_user_ret(&request.list[count].low_mark,
&dma->bufs[i]
.freelist.low_mark,
sizeof(dma->bufs[0]
.freelist.low_mark),
-EFAULT);
copy_to_user_ret(&request.list[count]
.high_mark,
&dma->bufs[i]
.freelist.high_mark,
sizeof(dma->bufs[0]
.freelist.high_mark),
-EFAULT);
DRM_DEBUG("%d %d %d %d %d\n",
i,
dma->bufs[i].buf_count,
dma->bufs[i].buf_size,
dma->bufs[i].freelist.low_mark,
dma->bufs[i].freelist.high_mark);
++count;
}
}
}
request.count = count;
copy_to_user_ret((drm_buf_info_t *)arg,
&request,
sizeof(request),
-EFAULT);
return 0;
}
int drm_markbufs(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_device_dma_t *dma = dev->dma;
drm_buf_desc_t request;
int order;
drm_buf_entry_t *entry;
if (!dma) return -EINVAL;
copy_from_user_ret(&request,
(drm_buf_desc_t *)arg,
sizeof(request),
-EFAULT);
DRM_DEBUG("%d, %d, %d\n",
request.size, request.low_mark, request.high_mark);
order = drm_order(request.size);
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL;
entry = &dma->bufs[order];
if (request.low_mark < 0 || request.low_mark > entry->buf_count)
return -EINVAL;
if (request.high_mark < 0 || request.high_mark > entry->buf_count)
return -EINVAL;
entry->freelist.low_mark = request.low_mark;
entry->freelist.high_mark = request.high_mark;
return 0;
}
int drm_freebufs(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_device_dma_t *dma = dev->dma;
drm_buf_free_t request;
int i;
int idx;
drm_buf_t *buf;
if (!dma) return -EINVAL;
copy_from_user_ret(&request,
(drm_buf_free_t *)arg,
sizeof(request),
-EFAULT);
DRM_DEBUG("%d\n", request.count);
for (i = 0; i < request.count; i++) {
copy_from_user_ret(&idx,
&request.list[i],
sizeof(idx),
-EFAULT);
if (idx < 0 || idx >= dma->buf_count) {
DRM_ERROR("Index %d (of %d max)\n",
idx, dma->buf_count - 1);
return -EINVAL;
}
buf = dma->buflist[idx];
if (buf->pid != current->pid) {
DRM_ERROR("Process %d freeing buffer owned by %d\n",
current->pid, buf->pid);
return -EINVAL;
}
drm_free_buffer(dev, buf);
}
return 0;
}
int drm_mapbufs(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_device_dma_t *dma = dev->dma;
int retcode = 0;
const int zero = 0;
unsigned long virtual;
unsigned long address;
drm_buf_map_t request;
int i;
if (!dma) return -EINVAL;
DRM_DEBUG("\n");
spin_lock(&dev->count_lock);
if (atomic_read(&dev->buf_alloc)) {
spin_unlock(&dev->count_lock);
return -EBUSY;
}
++dev->buf_use; /* Can't allocate more after this call */
spin_unlock(&dev->count_lock);
copy_from_user_ret(&request,
(drm_buf_map_t *)arg,
sizeof(request),
-EFAULT);
if (request.count >= dma->buf_count) {
virtual = do_mmap(filp, 0, dma->byte_count,
PROT_READ|PROT_WRITE, MAP_SHARED, 0);
if (virtual > -1024UL) {
/* Real error */
retcode = (signed long)virtual;
goto done;
}
request.virtual = (void *)virtual;
for (i = 0; i < dma->buf_count; i++) {
if (copy_to_user(&request.list[i].idx,
&dma->buflist[i]->idx,
sizeof(request.list[0].idx))) {
retcode = -EFAULT;
goto done;
}
if (copy_to_user(&request.list[i].total,
&dma->buflist[i]->total,
sizeof(request.list[0].total))) {
retcode = -EFAULT;
goto done;
}
if (copy_to_user(&request.list[i].used,
&zero,
sizeof(zero))) {
retcode = -EFAULT;
goto done;
}
address = virtual + dma->buflist[i]->offset;
if (copy_to_user(&request.list[i].address,
&address,
sizeof(address))) {
retcode = -EFAULT;
goto done;
}
}
}
done:
request.count = dma->buf_count;
DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode);
copy_to_user_ret((drm_buf_map_t *)arg,
&request,
sizeof(request),
-EFAULT);
return retcode;
}
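For reference, this is roughly what the user-space side of drm_addbufs() and drm_mapbufs() looks like when built only from the structures defined in drm.h later in this commit: a privileged client (in practice the X server) asks for a pool of same-sized DMA buffers, then maps the whole pool into its address space with a single ioctl. The buffer count and size are made-up example values, the function name is hypothetical, and the sketch assumes no buffers were added before this call; it is an illustration, not code from this change.
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include "drm.h"

static int setup_dma_buffers(int fd, drm_buf_map_t *map)
{
	drm_buf_desc_t desc;

	memset(&desc, 0, sizeof(desc));
	desc.count = 32;			/* ask for 32 buffers ...     */
	desc.size  = 65536;			/* ... of 64kB each           */
	desc.flags = DRM_PAGE_ALIGN;		/* page-align each buffer     */
	if (ioctl(fd, DRM_IOCTL_ADD_BUFS, &desc))	/* drm_addbufs()      */
		return -1;
	/* desc.count now reports how many buffers were actually allocated. */

	memset(map, 0, sizeof(*map));
	map->count = desc.count;	/* must cover every buffer the device has */
	map->list  = calloc(desc.count, sizeof(*map->list));
	if (!map->list) return -1;
	if (ioctl(fd, DRM_IOCTL_MAP_BUFS, map))		/* drm_mapbufs()      */
		return -1;
	/* The DMA area is now mmap'd; map->list[i].address and .idx identify
	   each buffer for later DMA requests. */
	return 0;
}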

308
linux/context.c Normal file
View File

@@ -0,0 +1,308 @@
/* context.c -- IOCTLs for contexts and DMA queues -*- linux-c -*-
* Created: Tue Feb 2 08:37:54 1999 by faith@precisioninsight.com
* Revised: Fri Aug 20 11:32:09 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/context.c,v 1.5 1999/08/30 13:05:00 faith Exp $
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/context.c,v 1.1 1999/09/25 14:37:58 dawes Exp $
*
*/
#define __NO_VERSION__
#include "drmP.h"
static int drm_init_queue(drm_device_t *dev, drm_queue_t *q, drm_ctx_t *ctx)
{
DRM_DEBUG("\n");
if (atomic_read(&q->use_count) != 1
|| atomic_read(&q->finalization)
|| atomic_read(&q->block_count)) {
DRM_ERROR("New queue is already in use: u%d f%d b%d\n",
atomic_read(&q->use_count),
atomic_read(&q->finalization),
atomic_read(&q->block_count));
}
atomic_set(&q->finalization, 0);
atomic_set(&q->block_count, 0);
atomic_set(&q->block_read, 0);
atomic_set(&q->block_write, 0);
atomic_set(&q->total_queued, 0);
atomic_set(&q->total_flushed, 0);
atomic_set(&q->total_locks, 0);
init_waitqueue_head(&q->write_queue);
init_waitqueue_head(&q->read_queue);
init_waitqueue_head(&q->flush_queue);
q->flags = ctx->flags;
drm_waitlist_create(&q->waitlist, dev->dma->buf_count);
return 0;
}
/* drm_alloc_queue:
PRE: 1) dev->queuelist[0..dev->queue_count] is allocated and will not
disappear (so all deallocation must be done after IOCTLs are off)
2) dev->queue_count < dev->queue_slots
3) dev->queuelist[i].use_count == 0 and
dev->queuelist[i].finalization == 0 if i not in use
POST: 1) dev->queuelist[i].use_count == 1
2) dev->queue_count < dev->queue_slots */
static int drm_alloc_queue(drm_device_t *dev)
{
int i;
drm_queue_t *queue;
int oldslots;
int newslots;
/* Check for a free queue */
for (i = 0; i < dev->queue_count; i++) {
atomic_inc(&dev->queuelist[i]->use_count);
if (atomic_read(&dev->queuelist[i]->use_count) == 1
&& !atomic_read(&dev->queuelist[i]->finalization)) {
DRM_DEBUG("%d (free)\n", i);
return i;
}
atomic_dec(&dev->queuelist[i]->use_count);
}
/* Allocate a new queue */
down(&dev->struct_sem);
queue = drm_alloc(sizeof(*queue), DRM_MEM_QUEUES);
memset(queue, 0, sizeof(*queue));
atomic_set(&queue->use_count, 1);
++dev->queue_count;
if (dev->queue_count >= dev->queue_slots) {
oldslots = dev->queue_slots * sizeof(*dev->queuelist);
if (!dev->queue_slots) dev->queue_slots = 1;
dev->queue_slots *= 2;
newslots = dev->queue_slots * sizeof(*dev->queuelist);
dev->queuelist = drm_realloc(dev->queuelist,
oldslots,
newslots,
DRM_MEM_QUEUES);
if (!dev->queuelist) {
up(&dev->struct_sem);
DRM_DEBUG("out of memory\n");
return -ENOMEM;
}
}
dev->queuelist[dev->queue_count-1] = queue;
up(&dev->struct_sem);
DRM_DEBUG("%d (new)\n", dev->queue_count - 1);
return dev->queue_count - 1;
}
int drm_resctx(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_ctx_res_t res;
drm_ctx_t ctx;
int i;
DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS);
copy_from_user_ret(&res, (drm_ctx_res_t *)arg, sizeof(res), -EFAULT);
if (res.count >= DRM_RESERVED_CONTEXTS) {
memset(&ctx, 0, sizeof(ctx));
for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
ctx.handle = i;
copy_to_user_ret(&res.contexts[i],
&i,
sizeof(i),
-EFAULT);
}
}
res.count = DRM_RESERVED_CONTEXTS;
copy_to_user_ret((drm_ctx_res_t *)arg, &res, sizeof(res), -EFAULT);
return 0;
}
int drm_addctx(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT);
if ((ctx.handle = drm_alloc_queue(dev)) == DRM_KERNEL_CONTEXT) {
/* Init kernel's context and get a new one. */
drm_init_queue(dev, dev->queuelist[ctx.handle], &ctx);
ctx.handle = drm_alloc_queue(dev);
}
drm_init_queue(dev, dev->queuelist[ctx.handle], &ctx);
DRM_DEBUG("%d\n", ctx.handle);
copy_to_user_ret((drm_ctx_t *)arg, &ctx, sizeof(ctx), -EFAULT);
return 0;
}
int drm_modctx(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
drm_queue_t *q;
copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT);
DRM_DEBUG("%d\n", ctx.handle);
if (ctx.handle < 0 || ctx.handle >= dev->queue_count) return -EINVAL;
q = dev->queuelist[ctx.handle];
atomic_inc(&q->use_count);
if (atomic_read(&q->use_count) == 1) {
/* No longer in use */
atomic_dec(&q->use_count);
return -EINVAL;
}
if (DRM_BUFCOUNT(&q->waitlist)) {
atomic_dec(&q->use_count);
return -EBUSY;
}
q->flags = ctx.flags;
atomic_dec(&q->use_count);
return 0;
}
int drm_getctx(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
drm_queue_t *q;
copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT);
DRM_DEBUG("%d\n", ctx.handle);
if (ctx.handle >= dev->queue_count) return -EINVAL;
q = dev->queuelist[ctx.handle];
atomic_inc(&q->use_count);
if (atomic_read(&q->use_count) == 1) {
/* No longer in use */
atomic_dec(&q->use_count);
return -EINVAL;
}
ctx.flags = q->flags;
atomic_dec(&q->use_count);
copy_to_user_ret((drm_ctx_t *)arg, &ctx, sizeof(ctx), -EFAULT);
return 0;
}
int drm_switchctx(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT);
DRM_DEBUG("%d\n", ctx.handle);
return drm_context_switch(dev, dev->last_context, ctx.handle);
}
int drm_newctx(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT);
DRM_DEBUG("%d\n", ctx.handle);
drm_context_switch_complete(dev, ctx.handle);
return 0;
}
int drm_rmctx(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
drm_queue_t *q;
drm_buf_t *buf;
copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT);
DRM_DEBUG("%d\n", ctx.handle);
if (ctx.handle >= dev->queue_count) return -EINVAL;
q = dev->queuelist[ctx.handle];
atomic_inc(&q->use_count);
if (atomic_read(&q->use_count) == 1) {
/* No longer in use */
atomic_dec(&q->use_count);
return -EINVAL;
}
atomic_inc(&q->finalization); /* Mark queue in finalization state */
atomic_sub(2, &q->use_count); /* Mark queue as unused (pending
finalization) */
while (test_and_set_bit(0, &dev->interrupt_flag)) {
schedule();
if (signal_pending(current)) {
clear_bit(0, &dev->interrupt_flag);
return -EINTR;
}
}
/* Remove queued buffers */
while ((buf = drm_waitlist_get(&q->waitlist))) {
drm_free_buffer(dev, buf);
}
clear_bit(0, &dev->interrupt_flag);
/* Wakeup blocked processes */
wake_up_interruptible(&q->read_queue);
wake_up_interruptible(&q->write_queue);
wake_up_interruptible(&q->flush_queue);
/* Finalization over. Queue is made
available when both use_count and
finalization become 0, which won't
happen until all the waiting processes
stop waiting. */
atomic_dec(&q->finalization);
return 0;
}
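The context ioctls above are driven from user space with nothing more than drm_ctx_t; the sketch below shows the corresponding calls, assuming an already-authenticated file descriptor. Handle 0 is DRM_KERNEL_CONTEXT and is never handed out, which is why drm_addctx() allocates a second queue whenever drm_alloc_queue() returns 0. The helper names are hypothetical.
#include <sys/ioctl.h>
#include "drm.h"

static int create_context(int fd, drm_context_t *handle)
{
	drm_ctx_t ctx;

	ctx.flags = (drm_ctx_flags_t)0;		/* no _DRM_CONTEXT_* flags */
	if (ioctl(fd, DRM_IOCTL_ADD_CTX, &ctx))	/* drm_addctx()            */
		return -1;
	*handle = ctx.handle;			/* a fresh DMA queue, never 0 */
	return 0;
}

static int destroy_context(int fd, drm_context_t handle)
{
	drm_ctx_t ctx;

	ctx.handle = handle;
	ctx.flags  = (drm_ctx_flags_t)0;
	return ioctl(fd, DRM_IOCTL_RM_CTX, &ctx);	/* drm_rmctx() */
}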

531
linux/dma.c Normal file
View File

@@ -0,0 +1,531 @@
/* dma.c -- DMA IOCTL and function support -*- linux-c -*-
* Created: Fri Mar 19 14:30:16 1999 by faith@precisioninsight.com
* Revised: Thu Sep 16 12:55:39 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/dma.c,v 1.7 1999/09/16 16:56:18 faith Exp $
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/dma.c,v 1.1 1999/09/25 14:37:58 dawes Exp $
*
*/
#define __NO_VERSION__
#include "drmP.h"
#include <linux/interrupt.h> /* For task queue support */
void drm_dma_setup(drm_device_t *dev)
{
int i;
dev->dma = drm_alloc(sizeof(*dev->dma), DRM_MEM_DRIVER);
memset(dev->dma, 0, sizeof(*dev->dma));
for (i = 0; i <= DRM_MAX_ORDER; i++)
memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0]));
}
void drm_dma_takedown(drm_device_t *dev)
{
drm_device_dma_t *dma = dev->dma;
int i, j;
if (!dma) return;
/* Clear dma buffers */
for (i = 0; i <= DRM_MAX_ORDER; i++) {
if (dma->bufs[i].seg_count) {
DRM_DEBUG("order %d: buf_count = %d,"
" seg_count = %d\n",
i,
dma->bufs[i].buf_count,
dma->bufs[i].seg_count);
for (j = 0; j < dma->bufs[i].seg_count; j++) {
drm_free_pages(dma->bufs[i].seglist[j],
dma->bufs[i].page_order,
DRM_MEM_DMA);
}
drm_free(dma->bufs[i].buflist,
dma->buf_count
* sizeof(*dma->bufs[0].buflist),
DRM_MEM_BUFS);
drm_free(dma->bufs[i].seglist,
dma->buf_count
* sizeof(*dma->bufs[0].seglist),
DRM_MEM_SEGS);
drm_freelist_destroy(&dma->bufs[i].freelist);
}
}
if (dma->buflist) {
drm_free(dma->buflist,
dma->buf_count * sizeof(*dma->buflist),
DRM_MEM_BUFS);
}
if (dma->pagelist) {
drm_free(dma->pagelist,
dma->page_count * sizeof(*dma->pagelist),
DRM_MEM_PAGES);
}
drm_free(dev->dma, sizeof(*dev->dma), DRM_MEM_DRIVER);
dev->dma = NULL;
}
#if DRM_DMA_HISTOGRAM
/* This is slow, but is useful for debugging.  Slot boundaries grow by a
   factor of 10 from DRM_DMA_HISTOGRAM_INITIAL, so slot 0 counts latencies
   under 10 cycles, slot 1 under 100, and so on. */
int drm_histogram_slot(unsigned long count)
{
int value = DRM_DMA_HISTOGRAM_INITIAL;
int slot;
for (slot = 0;
slot < DRM_DMA_HISTOGRAM_SLOTS;
++slot, value = DRM_DMA_HISTOGRAM_NEXT(value)) {
if (count < value) return slot;
}
return DRM_DMA_HISTOGRAM_SLOTS - 1;
}
void drm_histogram_compute(drm_device_t *dev, drm_buf_t *buf)
{
cycles_t queued_to_dispatched;
cycles_t dispatched_to_completed;
cycles_t completed_to_freed;
int q2d, d2c, c2f, q2c, q2f;
if (buf->time_queued) {
queued_to_dispatched = (buf->time_dispatched
- buf->time_queued);
dispatched_to_completed = (buf->time_completed
- buf->time_dispatched);
completed_to_freed = (buf->time_freed
- buf->time_completed);
q2d = drm_histogram_slot(queued_to_dispatched);
d2c = drm_histogram_slot(dispatched_to_completed);
c2f = drm_histogram_slot(completed_to_freed);
q2c = drm_histogram_slot(queued_to_dispatched
+ dispatched_to_completed);
q2f = drm_histogram_slot(queued_to_dispatched
+ dispatched_to_completed
+ completed_to_freed);
atomic_inc(&dev->histo.total);
atomic_inc(&dev->histo.queued_to_dispatched[q2d]);
atomic_inc(&dev->histo.dispatched_to_completed[d2c]);
atomic_inc(&dev->histo.completed_to_freed[c2f]);
atomic_inc(&dev->histo.queued_to_completed[q2c]);
atomic_inc(&dev->histo.queued_to_freed[q2f]);
}
buf->time_queued = 0;
buf->time_dispatched = 0;
buf->time_completed = 0;
buf->time_freed = 0;
}
#endif
void drm_free_buffer(drm_device_t *dev, drm_buf_t *buf)
{
drm_device_dma_t *dma = dev->dma;
if (!buf) return;
buf->waiting = 0;
buf->pending = 0;
buf->pid = 0;
buf->used = 0;
#if DRM_DMA_HISTOGRAM
buf->time_completed = get_cycles();
#endif
if (waitqueue_active(&buf->dma_wait)) {
wake_up_interruptible(&buf->dma_wait);
} else {
/* If processes are waiting, the last one
to wake will put the buffer on the free
list. If no processes are waiting, we
put the buffer on the freelist here. */
drm_freelist_put(dev, &dma->bufs[buf->order].freelist, buf);
}
}
void drm_reclaim_buffers(drm_device_t *dev, pid_t pid)
{
drm_device_dma_t *dma = dev->dma;
int i;
if (!dma) return;
for (i = 0; i < dma->buf_count; i++) {
if (dma->buflist[i]->pid == pid) {
switch (dma->buflist[i]->list) {
case DRM_LIST_NONE:
drm_free_buffer(dev, dma->buflist[i]);
break;
case DRM_LIST_WAIT:
dma->buflist[i]->list = DRM_LIST_RECLAIM;
break;
default:
/* Buffer already on hardware. */
break;
}
}
}
}
int drm_context_switch(drm_device_t *dev, int old, int new)
{
char buf[64];
drm_queue_t *q;
atomic_inc(&dev->total_ctx);
if (test_and_set_bit(0, &dev->context_flag)) {
DRM_ERROR("Reentering -- FIXME\n");
return -EBUSY;
}
#if DRM_DMA_HISTOGRAM
dev->ctx_start = get_cycles();
#endif
DRM_DEBUG("Context switch from %d to %d\n", old, new);
if (new >= dev->queue_count) {
clear_bit(0, &dev->context_flag);
return -EINVAL;
}
if (new == dev->last_context) {
clear_bit(0, &dev->context_flag);
return 0;
}
q = dev->queuelist[new];
atomic_inc(&q->use_count);
if (atomic_read(&q->use_count) == 1) {
atomic_dec(&q->use_count);
clear_bit(0, &dev->context_flag);
return -EINVAL;
}
if (drm_flags & DRM_FLAG_NOCTX) {
drm_context_switch_complete(dev, new);
} else {
sprintf(buf, "C %d %d\n", old, new);
drm_write_string(dev, buf);
}
atomic_dec(&q->use_count);
return 0;
}
int drm_context_switch_complete(drm_device_t *dev, int new)
{
drm_device_dma_t *dma = dev->dma;
dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
dev->last_switch = jiffies;
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
DRM_ERROR("Lock isn't held after context switch\n");
}
if (!dma || !(dma->next_buffer && dma->next_buffer->while_locked)) {
if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT)) {
DRM_ERROR("Cannot free lock\n");
}
}
#if DRM_DMA_HISTOGRAM
atomic_inc(&dev->histo.ctx[drm_histogram_slot(get_cycles()
- dev->ctx_start)]);
#endif
clear_bit(0, &dev->context_flag);
wake_up_interruptible(&dev->context_wait);
return 0;
}
void drm_clear_next_buffer(drm_device_t *dev)
{
drm_device_dma_t *dma = dev->dma;
dma->next_buffer = NULL;
if (dma->next_queue && !DRM_BUFCOUNT(&dma->next_queue->waitlist)) {
wake_up_interruptible(&dma->next_queue->flush_queue);
}
dma->next_queue = NULL;
}
int drm_select_queue(drm_device_t *dev, void (*wrapper)(unsigned long))
{
int i;
int candidate = -1;
int j = jiffies;
if (!dev) {
DRM_ERROR("No device\n");
return -1;
}
if (!dev->queuelist || !dev->queuelist[DRM_KERNEL_CONTEXT]) {
/* This only happens between the time the
interrupt is initialized and the time
the queues are initialized. */
return -1;
}
/* Doing "while locked" DMA? */
if (DRM_WAITCOUNT(dev, DRM_KERNEL_CONTEXT)) {
return DRM_KERNEL_CONTEXT;
}
/* If there are buffers on the last_context
queue, and we have not been executing
this context very long, continue to
execute this context. */
if (dev->last_switch <= j
&& dev->last_switch + DRM_TIME_SLICE > j
&& DRM_WAITCOUNT(dev, dev->last_context)) {
return dev->last_context;
}
/* Otherwise, find a candidate */
for (i = dev->last_checked + 1; i < dev->queue_count; i++) {
if (DRM_WAITCOUNT(dev, i)) {
candidate = dev->last_checked = i;
break;
}
}
if (candidate < 0) {
for (i = 0; i < dev->queue_count; i++) {
if (DRM_WAITCOUNT(dev, i)) {
candidate = dev->last_checked = i;
break;
}
}
}
if (wrapper
&& candidate >= 0
&& candidate != dev->last_context
&& dev->last_switch <= j
&& dev->last_switch + DRM_TIME_SLICE > j) {
if (dev->timer.expires != dev->last_switch + DRM_TIME_SLICE) {
del_timer(&dev->timer);
dev->timer.function = wrapper;
dev->timer.data = (unsigned long)dev;
dev->timer.expires = dev->last_switch+DRM_TIME_SLICE;
add_timer(&dev->timer);
}
return -1;
}
return candidate;
}
int drm_dma_enqueue(drm_device_t *dev, drm_dma_t *d)
{
int i;
drm_queue_t *q;
drm_buf_t *buf;
int idx;
int while_locked = 0;
drm_device_dma_t *dma = dev->dma;
DECLARE_WAITQUEUE(entry, current);
DRM_DEBUG("%d\n", d->send_count);
if (d->flags & _DRM_DMA_WHILE_LOCKED) {
int context = dev->lock.hw_lock->lock;
if (!_DRM_LOCK_IS_HELD(context)) {
DRM_ERROR("No lock held during \"while locked\""
" request\n");
return -EINVAL;
}
if (d->context != _DRM_LOCKING_CONTEXT(context)
&& _DRM_LOCKING_CONTEXT(context) != DRM_KERNEL_CONTEXT) {
DRM_ERROR("Lock held by %d while %d makes"
" \"while locked\" request\n",
_DRM_LOCKING_CONTEXT(context),
d->context);
return -EINVAL;
}
q = dev->queuelist[DRM_KERNEL_CONTEXT];
while_locked = 1;
} else {
q = dev->queuelist[d->context];
}
atomic_inc(&q->use_count);
if (atomic_read(&q->block_write)) {
current->state = TASK_INTERRUPTIBLE;
add_wait_queue(&q->write_queue, &entry);
atomic_inc(&q->block_count);
for (;;) {
if (!atomic_read(&q->block_write)) break;
schedule();
if (signal_pending(current)) {
atomic_dec(&q->use_count);
return -EINTR;
}
}
atomic_dec(&q->block_count);
current->state = TASK_RUNNING;
remove_wait_queue(&q->write_queue, &entry);
}
for (i = 0; i < d->send_count; i++) {
idx = d->send_indices[i];
if (idx < 0 || idx >= dma->buf_count) {
atomic_dec(&q->use_count);
DRM_ERROR("Index %d (of %d max)\n",
d->send_indices[i], dma->buf_count - 1);
return -EINVAL;
}
buf = dma->buflist[ idx ];
if (buf->pid != current->pid) {
atomic_dec(&q->use_count);
DRM_ERROR("Process %d using buffer owned by %d\n",
current->pid, buf->pid);
return -EINVAL;
}
if (buf->list != DRM_LIST_NONE) {
atomic_dec(&q->use_count);
DRM_ERROR("Process %d using buffer %d on list %d\n",
current->pid, buf->idx, buf->list);
}
buf->used = d->send_sizes[i];
buf->while_locked = while_locked;
buf->context = d->context;
if (!buf->used) {
DRM_ERROR("Queueing 0 length buffer\n");
}
if (buf->pending) {
atomic_dec(&q->use_count);
DRM_ERROR("Queueing pending buffer:"
" buffer %d, offset %d\n",
d->send_indices[i], i);
return -EINVAL;
}
if (buf->waiting) {
atomic_dec(&q->use_count);
DRM_ERROR("Queueing waiting buffer:"
" buffer %d, offset %d\n",
d->send_indices[i], i);
return -EINVAL;
}
buf->waiting = 1;
if (atomic_read(&q->use_count) == 1
|| atomic_read(&q->finalization)) {
drm_free_buffer(dev, buf);
} else {
drm_waitlist_put(&q->waitlist, buf);
atomic_inc(&q->total_queued);
}
}
atomic_dec(&q->use_count);
return 0;
}
static int drm_dma_get_buffers_of_order(drm_device_t *dev, drm_dma_t *d,
int order)
{
int i;
drm_buf_t *buf;
drm_device_dma_t *dma = dev->dma;
for (i = d->granted_count; i < d->request_count; i++) {
buf = drm_freelist_get(&dma->bufs[order].freelist,
d->flags & _DRM_DMA_WAIT);
if (!buf) break;
if (buf->pending || buf->waiting) {
DRM_ERROR("Free buffer %d in use by %d (w%d, p%d)\n",
buf->idx,
buf->pid,
buf->waiting,
buf->pending);
}
buf->pid = current->pid;
copy_to_user_ret(&d->request_indices[i],
&buf->idx,
sizeof(buf->idx),
-EFAULT);
copy_to_user_ret(&d->request_sizes[i],
&buf->total,
sizeof(buf->total),
-EFAULT);
++d->granted_count;
}
return 0;
}
int drm_dma_get_buffers(drm_device_t *dev, drm_dma_t *dma)
{
int order;
int retcode = 0;
int tmp_order;
order = drm_order(dma->request_size);
dma->granted_count = 0;
retcode = drm_dma_get_buffers_of_order(dev, dma, order);
if (dma->granted_count < dma->request_count
&& (dma->flags & _DRM_DMA_SMALLER_OK)) {
for (tmp_order = order - 1;
!retcode
&& dma->granted_count < dma->request_count
&& tmp_order >= DRM_MIN_ORDER;
--tmp_order) {
retcode = drm_dma_get_buffers_of_order(dev, dma,
tmp_order);
}
}
if (dma->granted_count < dma->request_count
&& (dma->flags & _DRM_DMA_LARGER_OK)) {
for (tmp_order = order + 1;
!retcode
&& dma->granted_count < dma->request_count
&& tmp_order <= DRM_MAX_ORDER;
++tmp_order) {
retcode = drm_dma_get_buffers_of_order(dev, dma,
tmp_order);
}
}
return 0;
}
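drm_dma_enqueue() and drm_dma_get_buffers() service the two halves of a single DMA request: dispatching buffers the client has filled, and handing back fresh ones from the freelists. The sketch below shows the user-space view of drm_dma_t from drm.h; the DRM_IOCTL_DMA ioctl itself is wired up by the device-specific driver (the gamma module in this release), so whether the structure is copied back with granted_count filled in depends on that driver. The sizes and the function name here are illustrative assumptions only.
#include <sys/ioctl.h>
#include "drm.h"

/* Dispatch nsend filled buffers on `context` and ask for nwant fresh ones. */
static int dispatch_and_refill(int fd, int context,
			       int *send_idx, int *send_len, int nsend,
			       int *got_idx, int *got_len, int nwant)
{
	drm_dma_t d;

	d.context         = context;		/* which DMA queue            */
	d.send_count      = nsend;		/* buffers ready for hardware */
	d.send_indices    = send_idx;
	d.send_sizes      = send_len;
	d.flags           = _DRM_DMA_WAIT;	/* sleep until buffers free   */
	d.request_count   = nwant;		/* buffers wanted back        */
	d.request_size    = 65536;		/* preferred buffer size      */
	d.request_indices = got_idx;
	d.request_sizes   = got_len;
	d.granted_count   = 0;

	if (ioctl(fd, DRM_IOCTL_DMA, &d))
		return -1;
	return d.granted_count;			/* buffers actually granted   */
}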

50
linux/drawable.c Normal file
View File

@@ -0,0 +1,50 @@
/* drawable.c -- IOCTLs for drawables -*- linux-c -*-
* Created: Tue Feb 2 08:37:54 1999 by faith@precisioninsight.com
* Revised: Fri Aug 20 09:27:03 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drawable.c,v 1.3 1999/08/30 13:05:00 faith Exp $
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drawable.c,v 1.1 1999/09/25 14:37:58 dawes Exp $
*
*/
#define __NO_VERSION__
#include "drmP.h"
int drm_adddraw(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_draw_t draw;
draw.handle = 0; /* NOOP */
DRM_DEBUG("%d\n", draw.handle);
copy_to_user_ret((drm_draw_t *)arg, &draw, sizeof(draw), -EFAULT);
return 0;
}
int drm_rmdraw(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
return 0; /* NOOP */
}

277
linux/drm.h Normal file
View File

@@ -0,0 +1,277 @@
/* drm.h -- Header for Direct Rendering Manager -*- linux-c -*-
* Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com
* Revised: Fri Aug 20 13:08:18 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drm.h,v 1.46 1999/08/20 20:00:53 faith Exp $
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drm.h,v 1.1 1999/09/25 14:37:58 dawes Exp $
*
*/
#ifndef _DRM_H_
#define _DRM_H_
#include <asm/ioctl.h> /* For _IO* macros */
#define DRM_PROC_DEVICES "/proc/devices"
#define DRM_PROC_MISC "/proc/misc"
#define DRM_PROC_DRM "/proc/drm"
#define DRM_DEV_DRM "/dev/drm"
#define DRM_DEV_MODE (S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP)
#define DRM_DEV_UID 0
#define DRM_DEV_GID 0
#define DRM_NAME "drm" /* Name in kernel, /dev, and /proc */
#define DRM_MIN_ORDER 5 /* At least 2^5 bytes = 32 bytes */
#define DRM_MAX_ORDER 22 /* Up to 2^22 bytes = 4MB */
#define DRM_RAM_PERCENT 10 /* How much system ram can we lock? */
#define _DRM_LOCK_HELD 0x80000000 /* Hardware lock is held */
#define _DRM_LOCK_CONT 0x40000000 /* Hardware lock is contended */
#define _DRM_LOCK_IS_HELD(lock) ((lock) & _DRM_LOCK_HELD)
#define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT)
#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
typedef unsigned long drm_handle_t;
typedef unsigned int drm_context_t;
typedef unsigned int drm_drawable_t;
typedef unsigned int drm_magic_t;
typedef struct drm_version {
int version_major; /* Major version */
int version_minor; /* Minor version */
int version_patchlevel;/* Patch level */
size_t name_len; /* Length of name buffer */
char *name; /* Name of driver */
size_t date_len; /* Length of date buffer */
char *date; /* User-space buffer to hold date */
size_t desc_len; /* Length of desc buffer */
char *desc; /* User-space buffer to hold desc */
} drm_version_t;
typedef struct drm_unique {
size_t unique_len; /* Length of unique */
char *unique; /* Unique name for driver instantiation */
} drm_unique_t;
typedef struct drm_list {
int count; /* Length of user-space structures */
drm_version_t *version;
} drm_list_t;
typedef struct drm_block {
int unused;
} drm_block_t;
typedef struct drm_control {
enum {
DRM_ADD_COMMAND,
DRM_RM_COMMAND,
DRM_INST_HANDLER,
DRM_UNINST_HANDLER
} func;
int irq;
} drm_control_t;
typedef enum drm_map_type {
_DRM_FRAME_BUFFER = 0, /* WC (no caching), no core dump */
_DRM_REGISTERS = 1, /* no caching, no core dump */
_DRM_SHM = 2 /* shared, cached */
} drm_map_type_t;
typedef enum drm_map_flags {
_DRM_RESTRICTED = 0x01, /* Cannot be mapped to user-virtual */
_DRM_READ_ONLY = 0x02,
_DRM_LOCKED = 0x04, /* shared, cached, locked */
_DRM_KERNEL = 0x08, /* kernel requires access */
_DRM_WRITE_COMBINING = 0x10, /* use write-combining if available */
_DRM_CONTAINS_LOCK = 0x20 /* SHM page that contains lock */
} drm_map_flags_t;
typedef struct drm_map {
unsigned long offset; /* Requested physical address (0 for SAREA)*/
unsigned long size; /* Requested physical size (bytes) */
drm_map_type_t type; /* Type of memory to map */
drm_map_flags_t flags; /* Flags */
void *handle; /* User-space: "Handle" to pass to mmap */
/* Kernel-space: kernel-virtual address */
int mtrr; /* MTRR slot used */
/* Private data */
} drm_map_t;
typedef enum drm_lock_flags {
_DRM_LOCK_READY = 0x01, /* Wait until hardware is ready for DMA */
_DRM_LOCK_QUIESCENT = 0x02, /* Wait until hardware quiescent */
_DRM_LOCK_FLUSH = 0x04, /* Flush this context's DMA queue first */
_DRM_LOCK_FLUSH_ALL = 0x08, /* Flush all DMA queues first */
/* These *HALT* flags aren't supported yet
-- they will be used to support the
full-screen DGA-like mode. */
_DRM_HALT_ALL_QUEUES = 0x10, /* Halt all current and future queues */
_DRM_HALT_CUR_QUEUES = 0x20 /* Halt all current queues */
} drm_lock_flags_t;
typedef struct drm_lock {
int context;
drm_lock_flags_t flags;
} drm_lock_t;
typedef enum drm_dma_flags { /* These values *MUST* match xf86drm.h */
/* Flags for DMA buffer dispatch */
_DRM_DMA_BLOCK = 0x01, /* Block until buffer dispatched.
Note, the buffer may not yet have
been processed by the hardware --
getting a hardware lock with the
hardware quiescent will ensure
that the buffer has been
processed. */
_DRM_DMA_WHILE_LOCKED = 0x02, /* Dispatch while lock held */
_DRM_DMA_PRIORITY = 0x04, /* High priority dispatch */
/* Flags for DMA buffer request */
_DRM_DMA_WAIT = 0x10, /* Wait for free buffers */
_DRM_DMA_SMALLER_OK = 0x20, /* Smaller-than-requested buffers ok */
_DRM_DMA_LARGER_OK = 0x40 /* Larger-than-requested buffers ok */
} drm_dma_flags_t;
typedef struct drm_buf_desc {
int count; /* Number of buffers of this size */
int size; /* Size in bytes */
int low_mark; /* Low water mark */
int high_mark; /* High water mark */
enum {
DRM_PAGE_ALIGN = 0x01 /* Align on page boundaries for DMA */
} flags;
} drm_buf_desc_t;
typedef struct drm_buf_info {
int count; /* Entries in list */
drm_buf_desc_t *list;
} drm_buf_info_t;
typedef struct drm_buf_free {
int count;
int *list;
} drm_buf_free_t;
typedef struct drm_buf_pub {
int idx; /* Index into master buflist */
int total; /* Buffer size */
int used; /* Amount of buffer in use (for DMA) */
void *address; /* Address of buffer */
} drm_buf_pub_t;
typedef struct drm_buf_map {
int count; /* Length of buflist */
void *virtual; /* Mmaped area in user-virtual */
drm_buf_pub_t *list; /* Buffer information */
} drm_buf_map_t;
typedef struct drm_dma {
/* Indices here refer to the offset into
buflist in drm_buf_get_t. */
int context; /* Context handle */
int send_count; /* Number of buffers to send */
int *send_indices; /* List of handles to buffers */
int *send_sizes; /* Lengths of data to send */
drm_dma_flags_t flags; /* Flags */
int request_count; /* Number of buffers requested */
int request_size; /* Desired size for buffers */
int *request_indices; /* Buffer information */
int *request_sizes;
int granted_count; /* Number of buffers granted */
} drm_dma_t;
typedef enum {
_DRM_CONTEXT_PRESERVED = 0x01,
_DRM_CONTEXT_2DONLY = 0x02
} drm_ctx_flags_t;
typedef struct drm_ctx {
drm_context_t handle;
drm_ctx_flags_t flags;
} drm_ctx_t;
typedef struct drm_ctx_res {
int count;
drm_ctx_t *contexts;
} drm_ctx_res_t;
typedef struct drm_draw {
drm_drawable_t handle;
} drm_draw_t;
typedef struct drm_auth {
drm_magic_t magic;
} drm_auth_t;
typedef struct drm_irq_busid {
int irq;
int busnum;
int devnum;
int funcnum;
} drm_irq_busid_t;
#define DRM_IOCTL_BASE 'd'
#define DRM_IOCTL_NR(n) _IOC_NR(n)
#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
#define DRM_IOR(nr,size) _IOR(DRM_IOCTL_BASE,nr,size)
#define DRM_IOW(nr,size) _IOW(DRM_IOCTL_BASE,nr,size)
#define DRM_IOWR(nr,size) _IOWR(DRM_IOCTL_BASE,nr,size)
#define DRM_IOCTL_VERSION DRM_IOWR(0x00, drm_version_t)
#define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, drm_unique_t)
#define DRM_IOCTL_GET_MAGIC DRM_IOW( 0x02, drm_auth_t)
#define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, drm_irq_busid_t)
#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, drm_unique_t)
#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, drm_auth_t)
#define DRM_IOCTL_BLOCK DRM_IOWR(0x12, drm_block_t)
#define DRM_IOCTL_UNBLOCK DRM_IOWR(0x13, drm_block_t)
#define DRM_IOCTL_CONTROL DRM_IOW( 0x14, drm_control_t)
#define DRM_IOCTL_ADD_MAP DRM_IOWR(0x15, drm_map_t)
#define DRM_IOCTL_ADD_BUFS DRM_IOWR(0x16, drm_buf_desc_t)
#define DRM_IOCTL_MARK_BUFS DRM_IOW( 0x17, drm_buf_desc_t)
#define DRM_IOCTL_INFO_BUFS DRM_IOWR(0x18, drm_buf_info_t)
#define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, drm_buf_map_t)
#define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, drm_buf_free_t)
#define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, drm_ctx_t)
#define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, drm_ctx_t)
#define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, drm_ctx_t)
#define DRM_IOCTL_GET_CTX DRM_IOWR(0x23, drm_ctx_t)
#define DRM_IOCTL_SWITCH_CTX DRM_IOW( 0x24, drm_ctx_t)
#define DRM_IOCTL_NEW_CTX DRM_IOW( 0x25, drm_ctx_t)
#define DRM_IOCTL_RES_CTX DRM_IOWR(0x26, drm_ctx_res_t)
#define DRM_IOCTL_ADD_DRAW DRM_IOWR(0x27, drm_draw_t)
#define DRM_IOCTL_RM_DRAW DRM_IOWR(0x28, drm_draw_t)
#define DRM_IOCTL_DMA DRM_IOWR(0x29, drm_dma_t)
#define DRM_IOCTL_LOCK DRM_IOW( 0x2a, drm_lock_t)
#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, drm_lock_t)
#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, drm_lock_t)
#endif
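The _DRM_LOCK_* bits and the DRM_IOCTL_LOCK/DRM_IOCTL_UNLOCK ioctls above are the visible half of the two-tiered hardware lock that README.drm's first point refers to. The lock word lives in the shared-memory map created with _DRM_CONTAINS_LOCK; a client first tries to take it with a single atomic compare-and-swap entirely in user space and only enters the kernel on contention, using the same cmpxchg idea as the _DRM_CAS macro in drmP.h below. The fast path succeeds only when this context was also the previous holder, which is the common case for a busy 3D client. The sketch below is an illustration of that scheme, not this release's client code; the GCC __sync builtin stands in for the inline-assembly compare-and-swap.
#include <sys/ioctl.h>
#include "drm.h"

static int take_hw_lock(int fd, volatile unsigned int *lock_word,
			 int context, drm_lock_flags_t flags)
{
	unsigned int held = (unsigned int)context | _DRM_LOCK_HELD;
	drm_lock_t   req;

	/* Fast path: the word still holds our context number without the
	   HELD bit (we were the last holder), so set the HELD bit without
	   a system call. */
	if (__sync_val_compare_and_swap(lock_word, (unsigned int)context,
					held) == (unsigned int)context)
		return 0;

	/* Slow path: someone else holds or held the lock; let the kernel
	   block us and hand the lock over. */
	req.context = context;
	req.flags   = flags;
	return ioctl(fd, DRM_IOCTL_LOCK, &req);
}

static int drop_hw_lock(int fd, volatile unsigned int *lock_word, int context)
{
	unsigned int held = (unsigned int)context | _DRM_LOCK_HELD;
	drm_lock_t   req;

	/* Fast path: clear the HELD bit if nobody marked the lock contended. */
	if (__sync_val_compare_and_swap(lock_word, held,
					(unsigned int)context) == held)
		return 0;

	/* Contended (_DRM_LOCK_CONT set): the kernel must wake the waiters. */
	req.context = context;
	req.flags   = (drm_lock_flags_t)0;
	return ioctl(fd, DRM_IOCTL_UNLOCK, &req);
}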

585
linux/drmP.h Normal file
View File

@@ -0,0 +1,585 @@
/* drmP.h -- Private header for Direct Rendering Manager -*- linux-c -*-
* Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com
* Revised: Tue Oct 12 08:51:07 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drmP.h,v 1.58 1999/08/30 13:05:00 faith Exp $
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drmP.h,v 1.1 1999/09/25 14:37:59 dawes Exp $
*
*/
#ifndef _DRM_P_H_
#define _DRM_P_H_
#ifdef __KERNEL__
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/pci.h>
#include <linux/wrapper.h>
#include <linux/version.h>
#include <asm/io.h>
#include <asm/mman.h>
#include <asm/uaccess.h>
#ifdef CONFIG_MTRR
#include <asm/mtrr.h>
#endif
#include "drm.h"
#define DRM_DEBUG_CODE 2 /* Include debugging code (if > 1, then
also include looping detection.) */
#define DRM_DMA_HISTOGRAM 1 /* Make histogram of DMA latency. */
#define DRM_HASH_SIZE 16 /* Size of key hash table */
#define DRM_KERNEL_CONTEXT 0 /* Change drm_resctx if changed */
#define DRM_RESERVED_CONTEXTS 1 /* Change drm_resctx if changed */
#define DRM_LOOPING_LIMIT 5000000
#define DRM_BSZ 1024 /* Buffer size for /dev/drm? output */
#define DRM_TIME_SLICE (HZ/20) /* Time slice for GLXContexts */
#define DRM_LOCK_SLICE 1 /* Time slice for lock, in jiffies */
#define DRM_FLAG_DEBUG 0x01
#define DRM_FLAG_NOCTX 0x02
#define DRM_MEM_DMA 0
#define DRM_MEM_SAREA 1
#define DRM_MEM_DRIVER 2
#define DRM_MEM_MAGIC 3
#define DRM_MEM_IOCTLS 4
#define DRM_MEM_MAPS 5
#define DRM_MEM_VMAS 6
#define DRM_MEM_BUFS 7
#define DRM_MEM_SEGS 8
#define DRM_MEM_PAGES 9
#define DRM_MEM_FILES 10
#define DRM_MEM_QUEUES 11
#define DRM_MEM_CMDS 12
#define DRM_MEM_MAPPINGS 13
#define DRM_MEM_BUFLISTS 14
/* Backward compatibility section */
#ifndef _PAGE_PWT
/* The name of _PAGE_WT was changed to
_PAGE_PWT in Linux 2.2.6 */
#define _PAGE_PWT _PAGE_WT
#endif
/* Wait queue declarations changes in 2.3.1 */
#ifndef DECLARE_WAITQUEUE
#define DECLARE_WAITQUEUE(w,c) struct wait_queue w = { c, NULL }
typedef struct wait_queue *wait_queue_head_t;
#define init_waitqueue_head(q) *q = NULL;
#endif
#define __drm_dummy_lock(lock) (*(__volatile__ unsigned int *)lock)
#define _DRM_CAS(lock,old,new,__ret) \
do { \
int __dummy; /* Can't mark eax as clobbered */ \
__asm__ __volatile__( \
"lock ; cmpxchg %4,%1\n\t" \
"setnz %0" \
: "=d" (__ret), \
"=m" (__drm_dummy_lock(lock)), \
"=a" (__dummy) \
: "2" (old), \
"r" (new)); \
} while (0)
/* Macros to make printk easier */
#define DRM_ERROR(fmt, arg...) \
printk(KERN_ERR "[" DRM_NAME ":" __FUNCTION__ "] *ERROR* " fmt , ##arg)
#define DRM_MEM_ERROR(area, fmt, arg...) \
printk(KERN_ERR "[" DRM_NAME ":" __FUNCTION__ ":%s] *ERROR* " fmt , \
drm_mem_stats[area].name , ##arg)
#define DRM_INFO(fmt, arg...) printk(KERN_INFO "[" DRM_NAME "] " fmt , ##arg)
#if DRM_DEBUG_CODE
#define DRM_DEBUG(fmt, arg...) \
do { \
if (drm_flags&DRM_FLAG_DEBUG) \
printk(KERN_DEBUG \
"[" DRM_NAME ":" __FUNCTION__ "] " fmt , \
##arg); \
} while (0)
#else
#define DRM_DEBUG(fmt, arg...) do { } while (0)
#endif
#define DRM_PROC_LIMIT (PAGE_SIZE-80)
#define DRM_PROC_PRINT(fmt, arg...) \
len += sprintf(&buf[len], fmt , ##arg); \
if (len > DRM_PROC_LIMIT) return len;
#define DRM_PROC_PRINT_RET(ret, fmt, arg...) \
len += sprintf(&buf[len], fmt , ##arg); \
if (len > DRM_PROC_LIMIT) { ret; return len; }
/* Internal types and structures */
#define DRM_ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0]))
#define DRM_MIN(a,b) ((a)<(b)?(a):(b))
#define DRM_MAX(a,b) ((a)>(b)?(a):(b))
#define DRM_LEFTCOUNT(x) (((x)->rp + (x)->count - (x)->wp) % ((x)->count + 1))
#define DRM_BUFCOUNT(x) ((x)->count - DRM_LEFTCOUNT(x))
#define DRM_WAITCOUNT(dev,idx) DRM_BUFCOUNT(&dev->queuelist[idx]->waitlist)
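/* Worked example (editorial): rp, wp and count describe the circular
   waitlist defined below.  With count = 4 the list allocates count + 1 = 5
   slots; if wp has advanced 3 slots past rp, DRM_LEFTCOUNT is
   (rp + 4 - wp) % 5 = 1 free slot and DRM_BUFCOUNT is 4 - 1 = 3 queued
   buffers. */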
typedef int drm_ioctl_t(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
typedef struct drm_ioctl_desc {
drm_ioctl_t *func;
int auth_needed;
int root_only;
} drm_ioctl_desc_t;
typedef struct drm_devstate {
pid_t owner; /* X server pid holding x_lock */
} drm_devstate_t;
typedef struct drm_magic_entry {
drm_magic_t magic;
struct drm_file *priv;
struct drm_magic_entry *next;
} drm_magic_entry_t;
typedef struct drm_magic_head {
struct drm_magic_entry *head;
struct drm_magic_entry *tail;
} drm_magic_head_t;
typedef struct drm_vma_entry {
struct vm_area_struct *vma;
struct drm_vma_entry *next;
pid_t pid;
} drm_vma_entry_t;
typedef struct drm_buf {
int idx; /* Index into master buflist */
int total; /* Buffer size */
int order; /* log-base-2(total) */
int used; /* Amount of buffer in use (for DMA) */
unsigned long offset; /* Byte offset (used internally) */
void *address; /* Address of buffer */
struct drm_buf *next; /* Kernel-only: used for free list */
__volatile__ int waiting; /* On kernel DMA queue */
__volatile__ int pending; /* On hardware DMA queue */
wait_queue_head_t dma_wait; /* Processes waiting */
pid_t pid; /* PID of holding process */
int context; /* Kernel queue for this buffer */
int while_locked;/* Dispatch this buffer while locked */
enum {
DRM_LIST_NONE = 0,
DRM_LIST_FREE = 1,
DRM_LIST_WAIT = 2,
DRM_LIST_PEND = 3,
DRM_LIST_PRIO = 4,
DRM_LIST_RECLAIM = 5
} list; /* Which list we're on */
#if DRM_DMA_HISTOGRAM
cycles_t time_queued; /* Queued to kernel DMA queue */
cycles_t time_dispatched; /* Dispatched to hardware */
cycles_t time_completed; /* Completed by hardware */
cycles_t time_freed; /* Back on freelist */
#endif
} drm_buf_t;
#if DRM_DMA_HISTOGRAM
#define DRM_DMA_HISTOGRAM_SLOTS 9
#define DRM_DMA_HISTOGRAM_INITIAL 10
#define DRM_DMA_HISTOGRAM_NEXT(current) ((current)*10)
typedef struct drm_histogram {
atomic_t total;
atomic_t queued_to_dispatched[DRM_DMA_HISTOGRAM_SLOTS];
atomic_t dispatched_to_completed[DRM_DMA_HISTOGRAM_SLOTS];
atomic_t completed_to_freed[DRM_DMA_HISTOGRAM_SLOTS];
atomic_t queued_to_completed[DRM_DMA_HISTOGRAM_SLOTS];
atomic_t queued_to_freed[DRM_DMA_HISTOGRAM_SLOTS];
atomic_t dma[DRM_DMA_HISTOGRAM_SLOTS];
atomic_t schedule[DRM_DMA_HISTOGRAM_SLOTS];
atomic_t ctx[DRM_DMA_HISTOGRAM_SLOTS];
atomic_t lacq[DRM_DMA_HISTOGRAM_SLOTS];
atomic_t lhld[DRM_DMA_HISTOGRAM_SLOTS];
} drm_histogram_t;
#endif
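/* Editorial note: with DRM_DMA_HISTOGRAM_INITIAL = 10 and each following
   threshold multiplied by 10, the nine slots presumably bucket cycle counts
   as <10, <100, <1000 and so on, with the last slot catching everything
   larger; see drm_histogram_slot() in gen_dma.c for the actual binning. */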
/* bufs is one longer than it has to be */
typedef struct drm_waitlist {
int count; /* Number of possible buffers */
drm_buf_t **bufs; /* List of pointers to buffers */
drm_buf_t **rp; /* Read pointer */
drm_buf_t **wp; /* Write pointer */
drm_buf_t **end; /* End pointer */
spinlock_t read_lock;
spinlock_t write_lock;
} drm_waitlist_t;
typedef struct drm_freelist {
int initialized; /* Freelist in use */
atomic_t count; /* Number of free buffers */
drm_buf_t *next; /* Head of free list */
wait_queue_head_t waiting; /* Processes waiting on free bufs */
int low_mark; /* Low water mark */
int high_mark; /* High water mark */
atomic_t wfh; /* If waiting for high mark */
} drm_freelist_t;
typedef struct drm_buf_entry {
int buf_size;
int buf_count;
drm_buf_t *buflist;
int seg_count;
int page_order;
unsigned long *seglist;
drm_freelist_t freelist;
} drm_buf_entry_t;
typedef struct drm_hw_lock {
__volatile__ unsigned int lock;
char padding[60]; /* Pad to cache line */
} drm_hw_lock_t;
typedef struct drm_file {
int authenticated;
int minor;
pid_t pid;
uid_t uid;
drm_magic_t magic;
unsigned long ioctl_count;
struct drm_file *next;
struct drm_file *prev;
struct drm_device *dev;
} drm_file_t;
typedef struct drm_queue {
atomic_t use_count; /* Outstanding uses (+1) */
atomic_t finalization; /* Finalization in progress */
atomic_t block_count; /* Count of processes waiting */
atomic_t block_read; /* Queue blocked for reads */
wait_queue_head_t read_queue; /* Processes waiting on block_read */
atomic_t block_write; /* Queue blocked for writes */
wait_queue_head_t write_queue; /* Processes waiting on block_write */
atomic_t total_queued; /* Total queued statistic */
atomic_t total_flushed;/* Total flushes statistic */
atomic_t total_locks; /* Total locks statistics */
drm_ctx_flags_t flags; /* Context preserving and 2D-only */
drm_waitlist_t waitlist; /* Pending buffers */
wait_queue_head_t flush_queue; /* Processes waiting until flush */
} drm_queue_t;
typedef struct drm_lock_data {
drm_hw_lock_t *hw_lock; /* Hardware lock */
pid_t pid; /* PID of lock holder (0=kernel) */
wait_queue_head_t lock_queue; /* Queue of blocked processes */
unsigned long lock_time; /* Time of last lock in jiffies */
} drm_lock_data_t;
typedef struct drm_device_dma {
/* Performance Counters */
atomic_t total_prio; /* Total DRM_DMA_PRIORITY */
atomic_t total_bytes; /* Total bytes DMA'd */
atomic_t total_dmas; /* Total DMA buffers dispatched */
atomic_t total_missed_dma; /* Missed drm_do_dma */
atomic_t total_missed_lock; /* Missed lock in drm_do_dma */
atomic_t total_missed_free; /* Missed drm_free_this_buffer */
atomic_t total_missed_sched;/* Missed drm_dma_schedule */
atomic_t total_tried; /* Tried next_buffer */
atomic_t total_hit; /* Sent next_buffer */
atomic_t total_lost; /* Lost interrupt */
drm_buf_entry_t bufs[DRM_MAX_ORDER+1];
int buf_count;
drm_buf_t **buflist; /* Vector of pointers into bufs */
int seg_count;
int page_count;
unsigned long *pagelist;
unsigned long byte_count;
/* DMA support */
drm_buf_t *this_buffer; /* Buffer being sent */
drm_buf_t *next_buffer; /* Selected buffer to send */
drm_queue_t *next_queue; /* Queue from which buffer selected*/
wait_queue_head_t waiting; /* Processes waiting on free bufs */
} drm_device_dma_t;
typedef struct drm_device {
const char *name; /* Simple driver name */
char *unique; /* Unique identifier: e.g., busid */
int unique_len; /* Length of unique field */
dev_t device; /* Device number for mknod */
char *devname; /* For /proc/interrupts */
int blocked; /* Blocked due to VC switch? */
struct proc_dir_entry *root; /* Root for this device's entries */
/* Locks */
spinlock_t count_lock; /* For inuse, open_count, buf_use */
struct semaphore struct_sem; /* For others */
/* Usage Counters */
int open_count; /* Outstanding files open */
atomic_t ioctl_count; /* Outstanding IOCTLs pending */
atomic_t vma_count; /* Outstanding vma areas open */
int buf_use; /* Buffers in use -- cannot alloc */
atomic_t buf_alloc; /* Buffer allocation in progress */
/* Performance Counters */
atomic_t total_open;
atomic_t total_close;
atomic_t total_ioctl;
atomic_t total_irq; /* Total interruptions */
atomic_t total_ctx; /* Total context switches */
atomic_t total_locks;
atomic_t total_unlocks;
atomic_t total_contends;
atomic_t total_sleeps;
/* Authentication */
drm_file_t *file_first;
drm_file_t *file_last;
drm_magic_head_t magiclist[DRM_HASH_SIZE];
/* Memory management */
drm_map_t **maplist; /* Vector of pointers to regions */
int map_count; /* Number of mappable regions */
drm_vma_entry_t *vmalist; /* List of vmas (for debugging) */
drm_lock_data_t lock; /* Information on hardware lock */
/* DMA queues (contexts) */
int queue_count; /* Number of active DMA queues */
int queue_reserved; /* Number of reserved DMA queues */
int queue_slots; /* Actual length of queuelist */
drm_queue_t **queuelist; /* Vector of pointers to DMA queues */
drm_device_dma_t *dma; /* Optional pointer for DMA support */
/* Context support */
int irq; /* Interrupt used by board */
__volatile__ int context_flag; /* Context swapping flag */
__volatile__ int interrupt_flag;/* Interruption handler flag */
__volatile__ int dma_flag; /* DMA dispatch flag */
struct timer_list timer; /* Timer for delaying ctx switch */
wait_queue_head_t context_wait; /* Processes waiting on ctx switch */
int last_checked; /* Last context checked for DMA */
int last_context; /* Last current context */
unsigned long last_switch; /* jiffies at last context switch */
struct tq_struct tq;
cycles_t ctx_start;
cycles_t lck_start;
#if DRM_DMA_HISTOGRAM
drm_histogram_t histo;
#endif
/* Callback to X server for context switch
and for heavy-handed reset. */
char buf[DRM_BSZ]; /* Output buffer */
char *buf_rp; /* Read pointer */
char *buf_wp; /* Write pointer */
char *buf_end; /* End pointer */
struct fasync_struct *buf_async;/* Processes waiting for SIGIO */
wait_queue_head_t buf_readers; /* Processes waiting to read */
wait_queue_head_t buf_writers; /* Processes waiting to ctx switch */
} drm_device_t;
/* Internal function definitions */
/* Misc. support (init.c) */
extern int drm_flags;
extern void drm_parse_options(char *s);
/* Device support (fops.c) */
extern int drm_open_helper(struct inode *inode, struct file *filp,
drm_device_t *dev);
extern int drm_flush(struct file *filp);
extern int drm_release(struct inode *inode, struct file *filp);
extern int drm_fasync(int fd, struct file *filp, int on);
extern ssize_t drm_read(struct file *filp, char *buf, size_t count,
loff_t *off);
extern int drm_write_string(drm_device_t *dev, const char *s);
/* Mapping support (vm.c) */
extern unsigned long drm_vm_nopage(struct vm_area_struct *vma,
unsigned long address,
int write_access);
extern unsigned long drm_vm_shm_nopage(struct vm_area_struct *vma,
unsigned long address,
int write_access);
extern unsigned long drm_vm_dma_nopage(struct vm_area_struct *vma,
unsigned long address,
int write_access);
extern void drm_vm_open(struct vm_area_struct *vma);
extern void drm_vm_close(struct vm_area_struct *vma);
extern int drm_mmap_dma(struct file *filp,
struct vm_area_struct *vma);
extern int drm_mmap(struct file *filp, struct vm_area_struct *vma);
/* Proc support (proc.c) */
extern int drm_proc_init(drm_device_t *dev);
extern int drm_proc_cleanup(void);
/* Memory management support (memory.c) */
extern void drm_mem_init(void);
extern int drm_mem_info(char *buf, char **start, off_t offset,
int len, int *eof, void *data);
extern void *drm_alloc(size_t size, int area);
extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size,
int area);
extern char *drm_strdup(const char *s, int area);
extern void drm_strfree(const char *s, int area);
extern void drm_free(void *pt, size_t size, int area);
extern unsigned long drm_alloc_pages(int order, int area);
extern void drm_free_pages(unsigned long address, int order,
int area);
extern void *drm_ioremap(unsigned long offset, unsigned long size);
extern void drm_ioremapfree(void *pt, unsigned long size);
/* Buffer management support (bufs.c) */
extern int drm_order(unsigned long size);
extern int drm_addmap(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_addbufs(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_infobufs(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_markbufs(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_freebufs(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_mapbufs(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
/* Buffer list management support (lists.c) */
extern int drm_waitlist_create(drm_waitlist_t *bl, int count);
extern int drm_waitlist_destroy(drm_waitlist_t *bl);
extern int drm_waitlist_put(drm_waitlist_t *bl, drm_buf_t *buf);
extern drm_buf_t *drm_waitlist_get(drm_waitlist_t *bl);
extern int drm_freelist_create(drm_freelist_t *bl, int count);
extern int drm_freelist_destroy(drm_freelist_t *bl);
extern int drm_freelist_put(drm_device_t *dev, drm_freelist_t *bl,
drm_buf_t *buf);
extern drm_buf_t *drm_freelist_get(drm_freelist_t *bl, int block);
/* DMA support (gen_dma.c) */
extern void drm_dma_setup(drm_device_t *dev);
extern void drm_dma_takedown(drm_device_t *dev);
extern void drm_free_buffer(drm_device_t *dev, drm_buf_t *buf);
extern void drm_reclaim_buffers(drm_device_t *dev, pid_t pid);
extern int drm_context_switch(drm_device_t *dev, int old, int new);
extern int drm_context_switch_complete(drm_device_t *dev, int new);
extern void drm_wakeup(drm_device_t *dev, drm_buf_t *buf);
extern void drm_clear_next_buffer(drm_device_t *dev);
extern int drm_select_queue(drm_device_t *dev,
void (*wrapper)(unsigned long));
extern int drm_dma_enqueue(drm_device_t *dev, drm_dma_t *dma);
extern int drm_dma_get_buffers(drm_device_t *dev, drm_dma_t *dma);
#if DRM_DMA_HISTOGRAM
extern int drm_histogram_slot(unsigned long count);
extern void drm_histogram_compute(drm_device_t *dev, drm_buf_t *buf);
#endif
/* Misc. IOCTL support (ioctl.c) */
extern int drm_irq_busid(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_getunique(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_setunique(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
/* Context IOCTL support (context.c) */
extern int drm_resctx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_addctx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_modctx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_getctx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_switchctx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_newctx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_rmctx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
/* Drawable IOCTL support (drawable.c) */
extern int drm_adddraw(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_rmdraw(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
/* Authentication IOCTL support (auth.c) */
extern int drm_add_magic(drm_device_t *dev, drm_file_t *priv,
drm_magic_t magic);
extern int drm_remove_magic(drm_device_t *dev, drm_magic_t magic);
extern int drm_getmagic(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_authmagic(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
/* Locking IOCTL support (lock.c) */
extern int drm_block(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_unblock(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_lock_take(__volatile__ unsigned int *lock,
unsigned int context);
extern int drm_lock_transfer(drm_device_t *dev,
__volatile__ unsigned int *lock,
unsigned int context);
extern int drm_lock_free(drm_device_t *dev,
__volatile__ unsigned int *lock,
unsigned int context);
extern int drm_finish(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_flush_unblock(drm_device_t *dev, int context,
drm_lock_flags_t flags);
extern int drm_flush_block_and_flush(drm_device_t *dev, int context,
drm_lock_flags_t flags);
#endif
#endif

linux/fops.c Normal file
View File

@@ -0,0 +1,218 @@
/* fops.c -- File operations for DRM -*- linux-c -*-
* Created: Mon Jan 4 08:58:31 1999 by faith@precisioninsight.com
* Revised: Tue Oct 12 08:48:59 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/fops.c,v 1.3 1999/08/20 15:36:45 faith Exp $
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/fops.c,v 1.1 1999/09/25 14:37:59 dawes Exp $
*
*/
#define __NO_VERSION__
#include "drmP.h"
/* drm_open is called whenever a process opens /dev/drm. */
int drm_open_helper(struct inode *inode, struct file *filp, drm_device_t *dev)
{
kdev_t minor = MINOR(inode->i_rdev);
drm_file_t *priv;
if (filp->f_flags & O_EXCL) return -EBUSY; /* No exclusive opens */
DRM_DEBUG("pid = %d, minor = %d\n", current->pid, minor);
priv = drm_alloc(sizeof(*priv), DRM_MEM_FILES);
memset(priv, 0, sizeof(*priv));
filp->private_data = priv;
priv->uid = current->euid;
priv->pid = current->pid;
priv->minor = minor;
priv->dev = dev;
priv->ioctl_count = 0;
priv->authenticated = capable(CAP_SYS_ADMIN);
down(&dev->struct_sem);
if (!dev->file_last) {
priv->next = NULL;
priv->prev = NULL;
dev->file_first = priv;
dev->file_last = priv;
} else {
priv->next = NULL;
priv->prev = dev->file_last;
dev->file_last->next = priv;
dev->file_last = priv;
}
up(&dev->struct_sem);
return 0;
}
int drm_flush(struct file *filp)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
DRM_DEBUG("pid = %d, device = 0x%x, open_count = %d, f_count = %d\n",
current->pid, dev->device, dev->open_count, filp->f_count);
return 0;
}
/* drm_release is called whenever a process closes /dev/drm*. Linux calls
this only if any mappings have been closed. */
int drm_release(struct inode *inode, struct file *filp)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
DRM_DEBUG("pid = %d, device = 0x%x, open_count = %d\n",
current->pid, dev->device, dev->open_count);
if (_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)
&& dev->lock.pid == current->pid) {
DRM_ERROR("Process %d dead, freeing lock for context %d\n",
current->pid,
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
drm_lock_free(dev,
&dev->lock.hw_lock->lock,
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
/* FIXME: may require heavy-handed reset of
hardware at this point, possibly
processed via a callback to the X
server. */
}
drm_reclaim_buffers(dev, priv->pid);
drm_fasync(-1, filp, 0);
down(&dev->struct_sem);
if (priv->prev) priv->prev->next = priv->next;
else dev->file_first = priv->next;
if (priv->next) priv->next->prev = priv->prev;
else dev->file_last = priv->prev;
up(&dev->struct_sem);
drm_free(priv, sizeof(*priv), DRM_MEM_FILES);
return 0;
}
int drm_fasync(int fd, struct file *filp, int on)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
int retcode;
DRM_DEBUG("fd = %d, device = 0x%x\n", fd, dev->device);
retcode = fasync_helper(fd, filp, on, &dev->buf_async);
if (retcode < 0) return retcode;
return 0;
}
/* The drm_read and drm_write_string code (especially that which manages
the circular buffer), is based on Alessandro Rubini's LINUX DEVICE
DRIVERS (Cambridge: O'Reilly, 1998), pages 111-113. */
ssize_t drm_read(struct file *filp, char *buf, size_t count, loff_t *off)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
int left;
int avail;
int send;
int cur;
DRM_DEBUG("%p, %p\n", dev->buf_rp, dev->buf_wp);
while (dev->buf_rp == dev->buf_wp) {
DRM_DEBUG(" sleeping\n");
if (filp->f_flags & O_NONBLOCK) {
return -EAGAIN;
}
interruptible_sleep_on(&dev->buf_readers);
if (signal_pending(current)) {
DRM_DEBUG(" interrupted\n");
return -ERESTARTSYS;
}
DRM_DEBUG(" awake\n");
}
left = (dev->buf_rp + DRM_BSZ - dev->buf_wp) % DRM_BSZ;
avail = DRM_BSZ - left;
send = DRM_MIN(avail, count);
while (send) {
if (dev->buf_wp > dev->buf_rp) {
cur = DRM_MIN(send, dev->buf_wp - dev->buf_rp);
} else {
cur = DRM_MIN(send, dev->buf_end - dev->buf_rp);
}
copy_to_user_ret(buf, dev->buf_rp, cur, -EINVAL);
dev->buf_rp += cur;
if (dev->buf_rp == dev->buf_end) dev->buf_rp = dev->buf;
send -= cur;
}
wake_up_interruptible(&dev->buf_writers);
return DRM_MIN(avail, count);
}
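/* Worked example (editorial): with DRM_BSZ = 1024 and buf_wp 10 bytes ahead
   of buf_rp, drm_read above computes left = (rp + 1024 - wp) % 1024 = 1014
   free bytes and avail = 1024 - 1014 = 10 readable bytes.  drm_write_string
   below uses the same "left" figure and always leaves one byte unused, so
   rp == wp can only mean "empty", never "full". */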
int drm_write_string(drm_device_t *dev, const char *s)
{
int left = (dev->buf_rp + DRM_BSZ - dev->buf_wp) % DRM_BSZ;
int send = strlen(s);
int count;
DRM_DEBUG("%d left, %d to send (%p, %p)\n",
left, send, dev->buf_rp, dev->buf_wp);
if (left == 1 || dev->buf_wp != dev->buf_rp) {
DRM_ERROR("Buffer not empty (%d left, wp = %p, rp = %p)\n",
left,
dev->buf_wp,
dev->buf_rp);
}
while (send) {
if (dev->buf_wp >= dev->buf_rp) {
count = DRM_MIN(send, dev->buf_end - dev->buf_wp);
if (count == left) --count; /* Leave a hole */
} else {
count = DRM_MIN(send, dev->buf_rp - dev->buf_wp - 1);
}
strncpy(dev->buf_wp, s, count);
dev->buf_wp += count;
if (dev->buf_wp == dev->buf_end) dev->buf_wp = dev->buf;
send -= count;
}
if (dev->buf_async) kill_fasync(dev->buf_async, SIGIO);
DRM_DEBUG("waking\n");
wake_up_interruptible(&dev->buf_readers);
return 0;
}
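/* Editorial sketch, not part of the original commit: drm_write_string is the
   kernel-to-X-server message channel.  A caller queues a line and lets SIGIO
   plus drm_read() deliver it; the message text below is illustrative only
   (drm_context_switch in gen_dma.c, not shown here, is a likely caller). */
#if 0
{
        char msg[64];

        sprintf(msg, "C %d %d\n", old_context, new_context);
        drm_write_string(dev, msg);     /* wakes readers, raises SIGIO */
}
#endif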

linux/gamma_dma.c Normal file
View File

@@ -0,0 +1,802 @@
/* gamma_dma.c -- DMA support for GMX 2000 -*- linux-c -*-
* Created: Fri Mar 19 14:30:16 1999 by faith@precisioninsight.com
* Revised: Thu Sep 16 12:55:37 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/gamma_dma.c,v 1.9 1999/09/16 16:56:18 faith Exp $
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/gamma_dma.c,v 1.1 1999/09/25 14:38:00 dawes Exp $
*
*/
#define __NO_VERSION__
#include "drmP.h"
#include "gamma_drv.h"
#include <linux/interrupt.h> /* For task queue support */
/* WARNING!!! MAGIC NUMBER!!! The number of regions already added to the
kernel must be specified here. Currently, the number is 2. This must
match the order the X server uses for instantiating register regions,
or must be passed in a new ioctl. */
#define GAMMA_REG(reg) \
(2 \
+ ((reg < 0x1000) \
? 0 \
: ((reg < 0x10000) ? 1 : ((reg < 0x11000) ? 2 : 3))))
#define GAMMA_OFF(reg) \
((reg < 0x1000) \
? reg \
: ((reg < 0x10000) \
? (reg - 0x1000) \
: ((reg < 0x11000) \
? (reg - 0x10000) \
: (reg - 0x11000))))
#define GAMMA_BASE(reg) ((unsigned long)dev->maplist[GAMMA_REG(reg)]->handle)
#define GAMMA_ADDR(reg) (GAMMA_BASE(reg) + GAMMA_OFF(reg))
#define GAMMA_DEREF(reg) *(__volatile__ int *)GAMMA_ADDR(reg)
#define GAMMA_READ(reg) GAMMA_DEREF(reg)
#define GAMMA_WRITE(reg,val) do { GAMMA_DEREF(reg) = val; } while (0)
#define GAMMA_BROADCASTMASK 0x9378
#define GAMMA_COMMANDINTENABLE 0x0c48
#define GAMMA_DMAADDRESS 0x0028
#define GAMMA_DMACOUNT 0x0030
#define GAMMA_FILTERMODE 0x8c00
#define GAMMA_GCOMMANDINTFLAGS 0x0c50
#define GAMMA_GCOMMANDMODE 0x0c40
#define GAMMA_GCOMMANDSTATUS 0x0c60
#define GAMMA_GDELAYTIMER 0x0c38
#define GAMMA_GDMACONTROL 0x0060
#define GAMMA_GINTENABLE 0x0808
#define GAMMA_GINTFLAGS 0x0810
#define GAMMA_INFIFOSPACE 0x0018
#define GAMMA_OUTFIFOWORDS 0x0020
#define GAMMA_OUTPUTFIFO 0x2000
#define GAMMA_SYNC 0x8c40
#define GAMMA_SYNC_TAG 0x0188
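/* Worked example (editorial): GAMMA_REG/GAMMA_OFF split a register address
   into a maplist index and an offset within that mapping.  GAMMA_DMACOUNT
   (0x0030) lands in region 2 + 0 at offset 0x0030; GAMMA_SYNC (0x8c40) in
   region 3 at offset 0x7c40; and GAMMA_OUTFIFOWORDS + 0x10000 (0x10020, the
   second MX) in region 4 at offset 0x0020. */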
static inline void gamma_dma_dispatch(drm_device_t *dev, unsigned long address,
unsigned long length)
{
GAMMA_WRITE(GAMMA_DMAADDRESS, virt_to_phys((void *)address));
while (GAMMA_READ(GAMMA_GCOMMANDSTATUS) != 4)
;
GAMMA_WRITE(GAMMA_DMACOUNT, length / 4);
}
static inline void gamma_dma_quiescent(drm_device_t *dev)
{
while (GAMMA_READ(GAMMA_DMACOUNT))
;
while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
;
GAMMA_WRITE(GAMMA_BROADCASTMASK, 3);
GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10);
GAMMA_WRITE(GAMMA_SYNC, 0);
/* Read from first MX */
do {
while (!GAMMA_READ(GAMMA_OUTFIFOWORDS))
;
} while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG);
/* Read from second MX */
do {
while (!GAMMA_READ(GAMMA_OUTFIFOWORDS + 0x10000))
;
} while (GAMMA_READ(GAMMA_OUTPUTFIFO + 0x10000) != GAMMA_SYNC_TAG);
}
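/* Editorial note: gamma_dma_quiescent waits for the DMA count to reach zero,
   broadcasts to both pipes, enables sync-tag filtering, writes the Sync
   register, and then polls each MX's output FIFO (the second is addressed
   0x10000 higher) until GAMMA_SYNC_TAG comes back, that is, until both units
   have drained their pipelines. */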
static inline void gamma_dma_ready(drm_device_t *dev)
{
while (GAMMA_READ(GAMMA_DMACOUNT))
;
}
static inline int gamma_dma_is_ready(drm_device_t *dev)
{
return !GAMMA_READ(GAMMA_DMACOUNT);
}
static void gamma_dma_service(int irq, void *device, struct pt_regs *regs)
{
drm_device_t *dev = (drm_device_t *)device;
drm_device_dma_t *dma = dev->dma;
atomic_inc(&dev->total_irq);
GAMMA_WRITE(GAMMA_GDELAYTIMER, 0xc350/2); /* 0x05S */
GAMMA_WRITE(GAMMA_GCOMMANDINTFLAGS, 8);
GAMMA_WRITE(GAMMA_GINTFLAGS, 0x2001);
if (gamma_dma_is_ready(dev)) {
/* Free previous buffer */
if (test_and_set_bit(0, &dev->dma_flag)) {
atomic_inc(&dma->total_missed_free);
return;
}
if (dma->this_buffer) {
drm_free_buffer(dev, dma->this_buffer);
dma->this_buffer = NULL;
}
clear_bit(0, &dev->dma_flag);
/* Dispatch new buffer */
queue_task(&dev->tq, &tq_immediate);
mark_bh(IMMEDIATE_BH);
}
}
/* Only called by gamma_dma_schedule. */
static int gamma_do_dma(drm_device_t *dev, int locked)
{
unsigned long address;
unsigned long length;
drm_buf_t *buf;
int retcode = 0;
drm_device_dma_t *dma = dev->dma;
#if DRM_DMA_HISTOGRAM
cycles_t dma_start, dma_stop;
#endif
if (test_and_set_bit(0, &dev->dma_flag)) {
atomic_inc(&dma->total_missed_dma);
return -EBUSY;
}
#if DRM_DMA_HISTOGRAM
dma_start = get_cycles();
#endif
if (!dma->next_buffer) {
DRM_ERROR("No next_buffer\n");
clear_bit(0, &dev->dma_flag);
return -EINVAL;
}
buf = dma->next_buffer;
address = (unsigned long)buf->address;
length = buf->used;
DRM_DEBUG("context %d, buffer %d (%ld bytes)\n",
buf->context, buf->idx, length);
if (buf->list == DRM_LIST_RECLAIM) {
drm_clear_next_buffer(dev);
drm_free_buffer(dev, buf);
clear_bit(0, &dev->dma_flag);
return -EINVAL;
}
if (!length) {
DRM_ERROR("0 length buffer\n");
drm_clear_next_buffer(dev);
drm_free_buffer(dev, buf);
clear_bit(0, &dev->dma_flag);
return 0;
}
if (!gamma_dma_is_ready(dev)) {
clear_bit(0, &dev->dma_flag);
return -EBUSY;
}
if (buf->while_locked) {
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
DRM_ERROR("Dispatching buffer %d from pid %d"
" \"while locked\", but no lock held\n",
buf->idx, buf->pid);
}
} else {
if (!locked && !drm_lock_take(&dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT)) {
atomic_inc(&dma->total_missed_lock);
clear_bit(0, &dev->dma_flag);
return -EBUSY;
}
}
if (dev->last_context != buf->context
&& !(dev->queuelist[buf->context]->flags
& _DRM_CONTEXT_PRESERVED)) {
/* PRE: dev->last_context != buf->context */
if (drm_context_switch(dev, dev->last_context, buf->context)) {
drm_clear_next_buffer(dev);
drm_free_buffer(dev, buf);
}
retcode = -EBUSY;
goto cleanup;
/* POST: we will wait for the context
switch and will dispatch on a later call
when dev->last_context == buf->context.
NOTE WE HOLD THE LOCK THROUGHOUT THIS
TIME! */
}
drm_clear_next_buffer(dev);
buf->pending = 1;
buf->waiting = 0;
buf->list = DRM_LIST_PEND;
#if DRM_DMA_HISTOGRAM
buf->time_dispatched = get_cycles();
#endif
gamma_dma_dispatch(dev, address, length);
drm_free_buffer(dev, dma->this_buffer);
dma->this_buffer = buf;
atomic_add(length, &dma->total_bytes);
atomic_inc(&dma->total_dmas);
if (!buf->while_locked && !dev->context_flag && !locked) {
if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT)) {
DRM_ERROR("\n");
}
}
cleanup:
clear_bit(0, &dev->dma_flag);
#if DRM_DMA_HISTOGRAM
dma_stop = get_cycles();
atomic_inc(&dev->histo.dma[drm_histogram_slot(dma_stop - dma_start)]);
#endif
return retcode;
}
static void gamma_dma_schedule_timer_wrapper(unsigned long dev)
{
gamma_dma_schedule((drm_device_t *)dev, 0);
}
static void gamma_dma_schedule_tq_wrapper(void *dev)
{
gamma_dma_schedule(dev, 0);
}
int gamma_dma_schedule(drm_device_t *dev, int locked)
{
int next;
drm_queue_t *q;
drm_buf_t *buf;
int retcode = 0;
int processed = 0;
int missed;
int expire = 20;
drm_device_dma_t *dma = dev->dma;
#if DRM_DMA_HISTOGRAM
cycles_t schedule_start;
#endif
if (test_and_set_bit(0, &dev->interrupt_flag)) {
/* Not reentrant */
atomic_inc(&dma->total_missed_sched);
return -EBUSY;
}
missed = atomic_read(&dma->total_missed_sched);
#if DRM_DMA_HISTOGRAM
schedule_start = get_cycles();
#endif
again:
if (dev->context_flag) {
clear_bit(0, &dev->interrupt_flag);
return -EBUSY;
}
if (dma->next_buffer) {
/* Unsent buffer that was previously
selected, but that couldn't be sent
because the lock could not be obtained
or the DMA engine wasn't ready. Try
again. */
atomic_inc(&dma->total_tried);
if (!(retcode = gamma_do_dma(dev, locked))) {
atomic_inc(&dma->total_hit);
++processed;
}
} else {
do {
next = drm_select_queue(dev,
gamma_dma_schedule_timer_wrapper);
if (next >= 0) {
q = dev->queuelist[next];
buf = drm_waitlist_get(&q->waitlist);
dma->next_buffer = buf;
dma->next_queue = q;
if (buf && buf->list == DRM_LIST_RECLAIM) {
drm_clear_next_buffer(dev);
drm_free_buffer(dev, buf);
}
}
} while (next >= 0 && !dma->next_buffer);
if (dma->next_buffer) {
if (!(retcode = gamma_do_dma(dev, locked))) {
++processed;
}
}
}
if (--expire) {
if (missed != atomic_read(&dma->total_missed_sched)) {
atomic_inc(&dma->total_lost);
if (gamma_dma_is_ready(dev)) goto again;
}
if (processed && gamma_dma_is_ready(dev)) {
atomic_inc(&dma->total_lost);
processed = 0;
goto again;
}
}
clear_bit(0, &dev->interrupt_flag);
#if DRM_DMA_HISTOGRAM
atomic_inc(&dev->histo.schedule[drm_histogram_slot(get_cycles()
- schedule_start)]);
#endif
return retcode;
}
static int gamma_dma_priority(drm_device_t *dev, drm_dma_t *d)
{
unsigned long address;
unsigned long length;
int must_free = 0;
int retcode = 0;
int i;
int idx;
drm_buf_t *buf;
drm_buf_t *last_buf = NULL;
drm_device_dma_t *dma = dev->dma;
DECLARE_WAITQUEUE(entry, current);
/* Turn off interrupt handling */
while (test_and_set_bit(0, &dev->interrupt_flag)) {
schedule();
if (signal_pending(current)) return -EINTR;
}
if (!(d->flags & _DRM_DMA_WHILE_LOCKED)) {
while (!drm_lock_take(&dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT)) {
schedule();
if (signal_pending(current)) {
clear_bit(0, &dev->interrupt_flag);
return -EINTR;
}
}
++must_free;
}
atomic_inc(&dma->total_prio);
for (i = 0; i < d->send_count; i++) {
idx = d->send_indices[i];
if (idx < 0 || idx >= dma->buf_count) {
DRM_ERROR("Index %d (of %d max)\n",
d->send_indices[i], dma->buf_count - 1);
continue;
}
buf = dma->buflist[ idx ];
if (buf->pid != current->pid) {
DRM_ERROR("Process %d using buffer owned by %d\n",
current->pid, buf->pid);
retcode = -EINVAL;
goto cleanup;
}
if (buf->list != DRM_LIST_NONE) {
DRM_ERROR("Process %d using %d's buffer on list %d\n",
current->pid, buf->pid, buf->list);
retcode = -EINVAL;
goto cleanup;
}
/* This isn't a race condition on
buf->list, since our concern is the
buffer reclaim during the time the
process closes the /dev/drm? handle, so
it can't also be doing DMA. */
buf->list = DRM_LIST_PRIO;
buf->used = d->send_sizes[i];
buf->context = d->context;
buf->while_locked = d->flags & _DRM_DMA_WHILE_LOCKED;
address = (unsigned long)buf->address;
length = buf->used;
if (!length) {
DRM_ERROR("0 length buffer\n");
}
if (buf->pending) {
DRM_ERROR("Sending pending buffer:"
" buffer %d, offset %d\n",
d->send_indices[i], i);
retcode = -EINVAL;
goto cleanup;
}
if (buf->waiting) {
DRM_ERROR("Sending waiting buffer:"
" buffer %d, offset %d\n",
d->send_indices[i], i);
retcode = -EINVAL;
goto cleanup;
}
buf->pending = 1;
if (dev->last_context != buf->context
&& !(dev->queuelist[buf->context]->flags
& _DRM_CONTEXT_PRESERVED)) {
add_wait_queue(&dev->context_wait, &entry);
current->state = TASK_INTERRUPTIBLE;
/* PRE: dev->last_context != buf->context */
drm_context_switch(dev, dev->last_context,
buf->context);
/* POST: we will wait for the context
switch and will dispatch on a later call
when dev->last_context == buf->context.
NOTE WE HOLD THE LOCK THROUGHOUT THIS
TIME! */
schedule();
current->state = TASK_RUNNING;
remove_wait_queue(&dev->context_wait, &entry);
if (signal_pending(current)) {
retcode = -EINTR;
goto cleanup;
}
if (dev->last_context != buf->context) {
DRM_ERROR("Context mismatch: %d %d\n",
dev->last_context,
buf->context);
}
}
#if DRM_DMA_HISTOGRAM
buf->time_queued = get_cycles();
buf->time_dispatched = buf->time_queued;
#endif
gamma_dma_dispatch(dev, address, length);
atomic_add(length, &dma->total_bytes);
atomic_inc(&dma->total_dmas);
if (last_buf) {
drm_free_buffer(dev, last_buf);
}
last_buf = buf;
}
cleanup:
if (last_buf) {
gamma_dma_ready(dev);
drm_free_buffer(dev, last_buf);
}
if (must_free && !dev->context_flag) {
if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT)) {
DRM_ERROR("\n");
}
}
clear_bit(0, &dev->interrupt_flag);
return retcode;
}
static int gamma_dma_send_buffers(drm_device_t *dev, drm_dma_t *d)
{
DECLARE_WAITQUEUE(entry, current);
drm_buf_t *last_buf = NULL;
int retcode = 0;
drm_device_dma_t *dma = dev->dma;
if (d->flags & _DRM_DMA_BLOCK) {
last_buf = dma->buflist[d->send_indices[d->send_count-1]];
add_wait_queue(&last_buf->dma_wait, &entry);
}
if ((retcode = drm_dma_enqueue(dev, d))) {
if (d->flags & _DRM_DMA_BLOCK)
remove_wait_queue(&last_buf->dma_wait, &entry);
return retcode;
}
gamma_dma_schedule(dev, 0);
if (d->flags & _DRM_DMA_BLOCK) {
DRM_DEBUG("%d waiting\n", current->pid);
current->state = TASK_INTERRUPTIBLE;
for (;;) {
if (!last_buf->waiting
&& !last_buf->pending)
break; /* finished */
schedule();
if (signal_pending(current)) {
retcode = -EINTR; /* Can't restart */
break;
}
}
current->state = TASK_RUNNING;
DRM_DEBUG("%d running\n", current->pid);
remove_wait_queue(&last_buf->dma_wait, &entry);
if (!retcode
|| (last_buf->list==DRM_LIST_PEND && !last_buf->pending)) {
if (!waitqueue_active(&last_buf->dma_wait)) {
drm_free_buffer(dev, last_buf);
}
}
if (retcode) {
DRM_ERROR("ctx%d w%d p%d c%d i%d l%d %d/%d\n",
d->context,
last_buf->waiting,
last_buf->pending,
DRM_WAITCOUNT(dev, d->context),
last_buf->idx,
last_buf->list,
last_buf->pid,
current->pid);
}
}
return retcode;
}
int gamma_dma(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_device_dma_t *dma = dev->dma;
int retcode = 0;
drm_dma_t d;
copy_from_user_ret(&d, (drm_dma_t *)arg, sizeof(d), -EFAULT);
DRM_DEBUG("%d %d: %d send, %d req\n",
current->pid, d.context, d.send_count, d.request_count);
if (d.context == DRM_KERNEL_CONTEXT || d.context >= dev->queue_slots) {
DRM_ERROR("Process %d using context %d\n",
current->pid, d.context);
return -EINVAL;
}
if (d.send_count < 0 || d.send_count > dma->buf_count) {
DRM_ERROR("Process %d trying to send %d buffers (of %d max)\n",
current->pid, d.send_count, dma->buf_count);
return -EINVAL;
}
if (d.request_count < 0 || d.request_count > dma->buf_count) {
DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
current->pid, d.request_count, dma->buf_count);
return -EINVAL;
}
if (d.send_count) {
if (d.flags & _DRM_DMA_PRIORITY)
retcode = gamma_dma_priority(dev, &d);
else
retcode = gamma_dma_send_buffers(dev, &d);
}
d.granted_count = 0;
if (!retcode && d.request_count) {
retcode = drm_dma_get_buffers(dev, &d);
}
DRM_DEBUG("%d returning, granted = %d\n",
current->pid, d.granted_count);
copy_to_user_ret((drm_dma_t *)arg, &d, sizeof(d), -EFAULT);
return retcode;
}
int gamma_irq_install(drm_device_t *dev, int irq)
{
int retcode;
if (!irq) return -EINVAL;
down(&dev->struct_sem);
if (dev->irq) {
up(&dev->struct_sem);
return -EBUSY;
}
dev->irq = irq;
up(&dev->struct_sem);
DRM_DEBUG("%d\n", irq);
dev->context_flag = 0;
dev->interrupt_flag = 0;
dev->dma_flag = 0;
dev->dma->next_buffer = NULL;
dev->dma->next_queue = NULL;
dev->dma->this_buffer = NULL;
dev->tq.next = NULL;
dev->tq.sync = 0;
dev->tq.routine = gamma_dma_schedule_tq_wrapper;
dev->tq.data = dev;
/* Before installing handler */
GAMMA_WRITE(GAMMA_GCOMMANDMODE, 0);
GAMMA_WRITE(GAMMA_GDMACONTROL, 0);
/* Install handler */
if ((retcode = request_irq(dev->irq,
gamma_dma_service,
0,
dev->devname,
dev))) {
down(&dev->struct_sem);
dev->irq = 0;
up(&dev->struct_sem);
return retcode;
}
/* After installing handler */
GAMMA_WRITE(GAMMA_GINTENABLE, 0x2001);
GAMMA_WRITE(GAMMA_COMMANDINTENABLE, 0x0008);
GAMMA_WRITE(GAMMA_GDELAYTIMER, 0x39090);
return 0;
}
int gamma_irq_uninstall(drm_device_t *dev)
{
int irq;
down(&dev->struct_sem);
irq = dev->irq;
dev->irq = 0;
up(&dev->struct_sem);
if (!irq) return -EINVAL;
DRM_DEBUG("%d\n", irq);
GAMMA_WRITE(GAMMA_GDELAYTIMER, 0);
GAMMA_WRITE(GAMMA_COMMANDINTENABLE, 0);
GAMMA_WRITE(GAMMA_GINTENABLE, 0);
free_irq(irq, dev);
return 0;
}
int gamma_control(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_control_t ctl;
int retcode;
copy_from_user_ret(&ctl, (drm_control_t *)arg, sizeof(ctl), -EFAULT);
switch (ctl.func) {
case DRM_INST_HANDLER:
if ((retcode = gamma_irq_install(dev, ctl.irq)))
return retcode;
break;
case DRM_UNINST_HANDLER:
if ((retcode = gamma_irq_uninstall(dev)))
return retcode;
break;
default:
return -EINVAL;
}
return 0;
}
int gamma_lock(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
DECLARE_WAITQUEUE(entry, current);
int ret = 0;
drm_lock_t lock;
drm_queue_t *q;
#if DRM_DMA_HISTOGRAM
cycles_t start;
dev->lck_start = start = get_cycles();
#endif
copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);
if (lock.context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
current->pid, lock.context);
return -EINVAL;
}
DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
lock.context, current->pid, dev->lock.hw_lock->lock,
lock.flags);
if (lock.context < 0 || lock.context >= dev->queue_count)
return -EINVAL;
q = dev->queuelist[lock.context];
ret = drm_flush_block_and_flush(dev, lock.context, lock.flags);
if (!ret) {
if (_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)
!= lock.context) {
long j = jiffies - dev->lock.lock_time;
if (j > 0 && j <= DRM_LOCK_SLICE) {
/* Can't take lock if we just had it and
there is contention. */
current->state = TASK_INTERRUPTIBLE;
schedule_timeout(j);
}
}
add_wait_queue(&dev->lock.lock_queue, &entry);
for (;;) {
if (!dev->lock.hw_lock) {
/* Device has been unregistered */
ret = -EINTR;
break;
}
if (drm_lock_take(&dev->lock.hw_lock->lock,
lock.context)) {
dev->lock.pid = current->pid;
dev->lock.lock_time = jiffies;
atomic_inc(&dev->total_locks);
atomic_inc(&q->total_locks);
break; /* Got lock */
}
/* Contention */
atomic_inc(&dev->total_sleeps);
current->state = TASK_INTERRUPTIBLE;
schedule();
if (signal_pending(current)) {
ret = -ERESTARTSYS;
break;
}
}
current->state = TASK_RUNNING;
remove_wait_queue(&dev->lock.lock_queue, &entry);
}
drm_flush_unblock(dev, lock.context, lock.flags); /* cleanup phase */
if (!ret) {
if (lock.flags & _DRM_LOCK_READY)
gamma_dma_ready(dev);
if (lock.flags & _DRM_LOCK_QUIESCENT)
gamma_dma_quiescent(dev);
}
DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");
#if DRM_DMA_HISTOGRAM
atomic_inc(&dev->histo.lacq[drm_histogram_slot(get_cycles() - start)]);
#endif
return ret;
}

linux/gamma_drv.c Normal file
View File

@@ -0,0 +1,525 @@
/* gamma_drv.c -- 3dlabs GMX 2000 driver -*- linux-c -*-
* Created: Mon Jan 4 08:58:31 1999 by faith@precisioninsight.com
* Revised: Tue Oct 12 08:51:36 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/gamma_drv.c,v 1.17 1999/08/30 13:05:00 faith Exp $
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/gamma_drv.c,v 1.1 1999/09/25 14:38:00 dawes Exp $
*
*/
#define EXPORT_SYMTAB
#include "drmP.h"
#include "gamma_drv.h"
EXPORT_SYMBOL(gamma_init);
EXPORT_SYMBOL(gamma_cleanup);
#define GAMMA_NAME "gamma"
#define GAMMA_DESC "3dlabs GMX 2000"
#define GAMMA_DATE "19990830"
#define GAMMA_MAJOR 0
#define GAMMA_MINOR 0
#define GAMMA_PATCHLEVEL 5
static drm_device_t gamma_device;
static struct file_operations gamma_fops = {
open: gamma_open,
flush: drm_flush,
release: gamma_release,
ioctl: gamma_ioctl,
mmap: drm_mmap,
read: drm_read,
fasync: drm_fasync,
};
static struct miscdevice gamma_misc = {
minor: MISC_DYNAMIC_MINOR,
name: GAMMA_NAME,
fops: &gamma_fops,
};
static drm_ioctl_desc_t gamma_ioctls[] = {
[DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { gamma_version, 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique, 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { drm_getmagic, 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { drm_irq_busid, 0, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { drm_block, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_unblock, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = { gamma_control, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { drm_addbufs, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { drm_markbufs, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { drm_infobufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { drm_mapbufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { drm_freebufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { drm_addctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { drm_rmctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { drm_modctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { drm_getctx, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { drm_switchctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { drm_newctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { drm_resctx, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { drm_rmdraw, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { gamma_dma, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { gamma_lock, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { gamma_unlock, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_finish, 1, 0 },
};
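/* Editorial note: each entry above is a drm_ioctl_desc_t of the form
   { handler, auth_needed, root_only }.  gamma_ioctl() below returns -EACCES
   when root_only is set and the caller lacks CAP_SYS_ADMIN, or when
   auth_needed is set and the file has not been authenticated via the
   magic-cookie handshake (drm_getmagic/drm_authmagic). */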
#define GAMMA_IOCTL_COUNT DRM_ARRAY_SIZE(gamma_ioctls)
#ifdef MODULE
int init_module(void);
void cleanup_module(void);
static char *gamma = NULL;
MODULE_AUTHOR("Precision Insight, Inc., Cedar Park, Texas.");
MODULE_DESCRIPTION("3dlabs GMX 2000");
MODULE_PARM(gamma, "s");
/* init_module is called when insmod is used to load the module */
int init_module(void)
{
return gamma_init();
}
/* cleanup_module is called when rmmod is used to unload the module */
void cleanup_module(void)
{
gamma_cleanup();
}
#endif
#ifndef MODULE
/* gamma_setup is called by the kernel to parse command-line options passed
* via the boot-loader (e.g., LILO). It calls the insmod option routine,
* drm_parse_options.
*
* This is not currently supported, since it requires changes to
* linux/init/main.c. */
void __init gamma_setup(char *str, int *ints)
{
if (ints[0] != 0) {
DRM_ERROR("Illegal command line format, ignored\n");
return;
}
drm_parse_options(str);
}
#endif
static int gamma_setup(drm_device_t *dev)
{
int i;
atomic_set(&dev->ioctl_count, 0);
atomic_set(&dev->vma_count, 0);
dev->buf_use = 0;
atomic_set(&dev->buf_alloc, 0);
drm_dma_setup(dev);
atomic_set(&dev->total_open, 0);
atomic_set(&dev->total_close, 0);
atomic_set(&dev->total_ioctl, 0);
atomic_set(&dev->total_irq, 0);
atomic_set(&dev->total_ctx, 0);
atomic_set(&dev->total_locks, 0);
atomic_set(&dev->total_unlocks, 0);
atomic_set(&dev->total_contends, 0);
atomic_set(&dev->total_sleeps, 0);
for (i = 0; i < DRM_HASH_SIZE; i++) {
dev->magiclist[i].head = NULL;
dev->magiclist[i].tail = NULL;
}
dev->maplist = NULL;
dev->map_count = 0;
dev->vmalist = NULL;
dev->lock.hw_lock = NULL;
init_waitqueue_head(&dev->lock.lock_queue);
dev->queue_count = 0;
dev->queue_reserved = 0;
dev->queue_slots = 0;
dev->queuelist = NULL;
dev->irq = 0;
dev->context_flag = 0;
dev->interrupt_flag = 0;
dev->dma_flag = 0;
dev->last_context = 0;
dev->last_switch = 0;
dev->last_checked = 0;
init_timer(&dev->timer);
init_waitqueue_head(&dev->context_wait);
#if DRM_DMA_HISTOGRAM
memset(&dev->histo, 0, sizeof(dev->histo));
#endif
dev->ctx_start = 0;
dev->lck_start = 0;
dev->buf_rp = dev->buf;
dev->buf_wp = dev->buf;
dev->buf_end = dev->buf + DRM_BSZ;
dev->buf_async = NULL;
init_waitqueue_head(&dev->buf_readers);
init_waitqueue_head(&dev->buf_writers);
DRM_DEBUG("\n");
/* The kernel's context could be created here, but is now created
in drm_dma_enqueue. This is more resource-efficient for
hardware that does not do DMA, but may mean that
drm_select_queue fails between the time the interrupt is
initialized and the time the queues are initialized. */
return 0;
}
static int gamma_takedown(drm_device_t *dev)
{
int i;
drm_magic_entry_t *pt, *next;
drm_map_t *map;
drm_vma_entry_t *vma, *vma_next;
DRM_DEBUG("\n");
if (dev->irq) gamma_irq_uninstall(dev);
down(&dev->struct_sem);
del_timer(&dev->timer);
if (dev->devname) {
drm_free(dev->devname, strlen(dev->devname)+1, DRM_MEM_DRIVER);
dev->devname = NULL;
}
if (dev->unique) {
drm_free(dev->unique, strlen(dev->unique)+1, DRM_MEM_DRIVER);
dev->unique = NULL;
dev->unique_len = 0;
}
/* Clear pid list */
for (i = 0; i < DRM_HASH_SIZE; i++) {
for (pt = dev->magiclist[i].head; pt; pt = next) {
next = pt->next;
drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
}
dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
}
/* Clear vma list (only built for debugging) */
if (dev->vmalist) {
for (vma = dev->vmalist; vma; vma = vma_next) {
vma_next = vma->next;
drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
}
dev->vmalist = NULL;
}
/* Clear map area and mtrr information */
if (dev->maplist) {
for (i = 0; i < dev->map_count; i++) {
map = dev->maplist[i];
switch (map->type) {
case _DRM_REGISTERS:
case _DRM_FRAME_BUFFER:
#ifdef CONFIG_MTRR
if (map->mtrr >= 0) {
int retcode;
retcode = mtrr_del(map->mtrr,
map->offset,
map->size);
DRM_DEBUG("mtrr_del = %d\n", retcode);
}
#endif
drm_ioremapfree(map->handle, map->size);
break;
case _DRM_SHM:
drm_free_pages((unsigned long)map->handle,
drm_order(map->size)
- PAGE_SHIFT,
DRM_MEM_SAREA);
break;
}
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
}
drm_free(dev->maplist,
dev->map_count * sizeof(*dev->maplist),
DRM_MEM_MAPS);
dev->maplist = NULL;
dev->map_count = 0;
}
if (dev->queuelist) {
for (i = 0; i < dev->queue_count; i++) {
drm_waitlist_destroy(&dev->queuelist[i]->waitlist);
if (dev->queuelist[i]) {
drm_free(dev->queuelist[i],
sizeof(*dev->queuelist[0]),
DRM_MEM_QUEUES);
dev->queuelist[i] = NULL;
}
}
drm_free(dev->queuelist,
dev->queue_slots * sizeof(*dev->queuelist),
DRM_MEM_QUEUES);
dev->queuelist = NULL;
}
drm_dma_takedown(dev);
dev->queue_count = 0;
if (dev->lock.hw_lock) {
dev->lock.hw_lock = NULL; /* SHM removed */
dev->lock.pid = 0;
wake_up_interruptible(&dev->lock.lock_queue);
}
up(&dev->struct_sem);
return 0;
}
/* gamma_init is called via init_module at module load time, or via
* linux/init/main.c (this is not currently supported). */
int gamma_init(void)
{
int retcode;
drm_device_t *dev = &gamma_device;
DRM_DEBUG("\n");
memset((void *)dev, 0, sizeof(*dev));
dev->count_lock = SPIN_LOCK_UNLOCKED;
sema_init(&dev->struct_sem, 1);
#ifdef MODULE
drm_parse_options(gamma);
#endif
if ((retcode = misc_register(&gamma_misc))) {
DRM_ERROR("Cannot register \"%s\"\n", GAMMA_NAME);
return retcode;
}
dev->device = MKDEV(MISC_MAJOR, gamma_misc.minor);
dev->name = GAMMA_NAME;
drm_mem_init();
drm_proc_init(dev);
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
GAMMA_NAME,
GAMMA_MAJOR,
GAMMA_MINOR,
GAMMA_PATCHLEVEL,
GAMMA_DATE,
gamma_misc.minor);
return 0;
}
/* gamma_cleanup is called via cleanup_module at module unload time. */
void gamma_cleanup(void)
{
drm_device_t *dev = &gamma_device;
DRM_DEBUG("\n");
drm_proc_cleanup();
if (misc_deregister(&gamma_misc)) {
DRM_ERROR("Cannot unload module\n");
} else {
DRM_INFO("Module unloaded\n");
}
gamma_takedown(dev);
}
int gamma_version(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_version_t version;
int len;
copy_from_user_ret(&version,
(drm_version_t *)arg,
sizeof(version),
-EFAULT);
#define DRM_COPY(name,value) \
len = strlen(value); \
if (len > name##_len) len = name##_len; \
name##_len = strlen(value); \
if (len && name) { \
copy_to_user_ret(name, value, len, -EFAULT); \
}
version.version_major = GAMMA_MAJOR;
version.version_minor = GAMMA_MINOR;
version.version_patchlevel = GAMMA_PATCHLEVEL;
DRM_COPY(version.name, GAMMA_NAME);
DRM_COPY(version.date, GAMMA_DATE);
DRM_COPY(version.desc, GAMMA_DESC);
copy_to_user_ret((drm_version_t *)arg,
&version,
sizeof(version),
-EFAULT);
return 0;
}
int gamma_open(struct inode *inode, struct file *filp)
{
drm_device_t *dev = &gamma_device;
int retcode = 0;
DRM_DEBUG("open_count = %d\n", dev->open_count);
if (!(retcode = drm_open_helper(inode, filp, dev))) {
MOD_INC_USE_COUNT;
atomic_inc(&dev->total_open);
spin_lock(&dev->count_lock);
if (!dev->open_count++) {
spin_unlock(&dev->count_lock);
return gamma_setup(dev);
}
spin_unlock(&dev->count_lock);
}
return retcode;
}
int gamma_release(struct inode *inode, struct file *filp)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
int retcode = 0;
DRM_DEBUG("open_count = %d\n", dev->open_count);
if (!(retcode = drm_release(inode, filp))) {
MOD_DEC_USE_COUNT;
atomic_inc(&dev->total_close);
spin_lock(&dev->count_lock);
if (!--dev->open_count) {
if (atomic_read(&dev->ioctl_count) || dev->blocked) {
DRM_ERROR("Device busy: %d %d\n",
atomic_read(&dev->ioctl_count),
dev->blocked);
spin_unlock(&dev->count_lock);
return -EBUSY;
}
spin_unlock(&dev->count_lock);
return gamma_takedown(dev);
}
spin_unlock(&dev->count_lock);
}
return retcode;
}
/* drm_ioctl is called whenever a process performs an ioctl on /dev/drm. */
int gamma_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
int nr = DRM_IOCTL_NR(cmd);
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
int retcode = 0;
drm_ioctl_desc_t *ioctl;
drm_ioctl_t *func;
atomic_inc(&dev->ioctl_count);
atomic_inc(&dev->total_ioctl);
++priv->ioctl_count;
DRM_DEBUG("pid = %d, cmd = 0x%02x, nr = 0x%02x, dev 0x%x, auth = %d\n",
current->pid, cmd, nr, dev->device, priv->authenticated);
if (nr >= GAMMA_IOCTL_COUNT) {
retcode = -EINVAL;
} else {
ioctl = &gamma_ioctls[nr];
func = ioctl->func;
if (!func) {
DRM_DEBUG("no function\n");
retcode = -EINVAL;
} else if ((ioctl->root_only && !capable(CAP_SYS_ADMIN))
|| (ioctl->auth_needed && !priv->authenticated)) {
retcode = -EACCES;
} else {
retcode = (func)(inode, filp, cmd, arg);
}
}
atomic_dec(&dev->ioctl_count);
return retcode;
}
int gamma_unlock(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_lock_t lock;
copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);
if (lock.context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
current->pid, lock.context);
return -EINVAL;
}
DRM_DEBUG("%d frees lock (%d holds)\n",
lock.context,
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
atomic_inc(&dev->total_unlocks);
if (_DRM_LOCK_IS_CONT(dev->lock.hw_lock->lock))
atomic_inc(&dev->total_contends);
drm_lock_transfer(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT);
gamma_dma_schedule(dev, 1);
if (!dev->context_flag) {
if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT)) {
DRM_ERROR("\n");
}
}
#if DRM_DMA_HISTOGRAM
atomic_inc(&dev->histo.lhld[drm_histogram_slot(get_cycles()
- dev->lck_start)]);
#endif
return 0;
}

linux/gamma_drv.h Normal file
View File

@@ -0,0 +1,58 @@
/* gamma_drv.h -- Private header for 3dlabs GMX 2000 driver -*- linux-c -*-
* Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com
* Revised: Fri Aug 20 09:24:27 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/gamma_drv.h,v 1.4 1999/08/30 13:05:00 faith Exp $
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/gamma_drv.h,v 1.1 1999/09/25 14:38:00 dawes Exp $
*
*/
#ifndef _GAMMA_DRV_H_
#define _GAMMA_DRV_H_
/* gamma_drv.c */
extern int gamma_init(void);
extern void gamma_cleanup(void);
extern int gamma_version(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int gamma_open(struct inode *inode, struct file *filp);
extern int gamma_release(struct inode *inode, struct file *filp);
extern int gamma_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int gamma_lock(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int gamma_unlock(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
/* gamma_dma.c */
extern int gamma_dma_schedule(drm_device_t *dev, int locked);
extern int gamma_dma(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int gamma_irq_install(drm_device_t *dev, int irq);
extern int gamma_irq_uninstall(drm_device_t *dev);
extern int gamma_control(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
#endif

linux/init.c Normal file
View File

@@ -0,0 +1,99 @@
/* init.c -- Setup/Cleanup for DRM -*- linux-c -*-
* Created: Mon Jan 4 08:58:31 1999 by faith@precisioninsight.com
* Revised: Fri Aug 20 09:27:02 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/init.c,v 1.3 1999/08/20 15:07:01 faith Exp $
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/init.c,v 1.1 1999/09/25 14:38:01 dawes Exp $
*
*/
#define __NO_VERSION__
#include "drmP.h"
int drm_flags = 0;
/* drm_parse_option parses a single option. See the description of
drm_parse_options for details. */
static void drm_parse_option(char *s)
{
char *c, *r;
DRM_DEBUG("\"%s\"\n", s);
if (!s || !*s) return;
for (c = s; *c && *c != ':'; c++); /* find : or \0 */
if (*c) r = c + 1; else r = NULL; /* remember remainder */
*c = '\0'; /* terminate */
if (!strcmp(s, "noctx")) {
drm_flags |= DRM_FLAG_NOCTX;
DRM_INFO("Server-mediated context switching OFF\n");
return;
}
if (!strcmp(s, "debug")) {
drm_flags |= DRM_FLAG_DEBUG;
DRM_INFO("Debug messages ON\n");
return;
}
DRM_ERROR("\"%s\" is not a valid option\n", s);
return;
}
/* drm_parse_options parses the insmod "drm=" options, or the command-line
* options passed to the kernel via LILO. The grammar of the format is as
* follows:
*
* drm ::= 'drm=' option_list
* option_list ::= option [ ';' option_list ]
* option ::= 'device:' major
* | 'debug'
* | 'noctx'
* major ::= INTEGER
*
* Note that 's' contains option_list without the 'drm=' part.
*
* device=major,minor specifies the device number used for /dev/drm
* if major == 0 then the misc device is used
* if major == 0 and minor == 0 then dynamic misc allocation is used
* debug=on specifies that debugging messages will be printk'd
* debug=trace specifies that each function call will be logged via printk
* debug=off turns off all debugging options
*
*/
void drm_parse_options(char *s)
{
char *h, *t, *n;
DRM_DEBUG("\"%s\"\n", s ?: "");
if (!s || !*s) return;
for (h = t = n = s; h && *h; h = n, t = h) { /* restart scan at next option */
for (; *t && *t != ';'; t++); /* find ; or \0 */
if (*t) n = t + 1; else n = NULL; /* remember next */
*t = '\0'; /* terminate */
drm_parse_option(h); /* parse */
}
}
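/* Editor's sketch (not part of this change): a user-space approximation of
 * the split-on-';' parse above, so the option grammar is easy to try out.
 * The handle_option stand-in is hypothetical; the real code calls
 * drm_parse_option and sets drm_flags.
 */
#include <stdio.h>
#include <string.h>
static void handle_option(const char *opt)
{
        printf("option: \"%s\"\n", opt);
}
int main(void)
{
        char s[] = "debug;noctx";       /* what would follow "drm=" */
        char *opt;
        for (opt = strtok(s, ";"); opt; opt = strtok(NULL, ";"))
                handle_option(opt);
        return 0;
}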

91
linux/ioctl.c Normal file
View File

@@ -0,0 +1,91 @@
/* ioctl.c -- IOCTL processing for DRM -*- linux-c -*-
* Created: Fri Jan 8 09:01:26 1999 by faith@precisioninsight.com
* Revised: Fri Aug 20 09:27:02 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/ioctl.c,v 1.3 1999/08/30 13:05:00 faith Exp $
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/ioctl.c,v 1.1 1999/09/25 14:38:01 dawes Exp $
*
*/
#define __NO_VERSION__
#include "drmP.h"
int drm_irq_busid(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_irq_busid_t p;
struct pci_dev *dev;
copy_from_user_ret(&p, (drm_irq_busid_t *)arg, sizeof(p), -EFAULT);
dev = pci_find_slot(p.busnum, PCI_DEVFN(p.devnum, p.funcnum));
if (dev) p.irq = dev->irq;
else p.irq = 0;
DRM_DEBUG("%d:%d:%d => IRQ %d\n",
p.busnum, p.devnum, p.funcnum, p.irq);
copy_to_user_ret((drm_irq_busid_t *)arg, &p, sizeof(p), -EFAULT);
return 0;
}
int drm_getunique(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_unique_t u;
copy_from_user_ret(&u, (drm_unique_t *)arg, sizeof(u), -EFAULT);
if (u.unique_len >= dev->unique_len) {
copy_to_user_ret(u.unique, dev->unique, dev->unique_len,
-EFAULT);
}
u.unique_len = dev->unique_len;
copy_to_user_ret((drm_unique_t *)arg, &u, sizeof(u), -EFAULT);
return 0;
}
int drm_setunique(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_unique_t u;
if (dev->unique_len || dev->unique) return -EBUSY;
copy_from_user_ret(&u, (drm_unique_t *)arg, sizeof(u), -EFAULT);
if (!u.unique_len) return -EINVAL;
dev->unique_len = u.unique_len;
dev->unique = drm_alloc(u.unique_len + 1, DRM_MEM_DRIVER);
copy_from_user_ret(dev->unique, u.unique, dev->unique_len,
-EFAULT);
dev->unique[dev->unique_len] = '\0';
dev->devname = drm_alloc(strlen(dev->name) + strlen(dev->unique) + 2,
DRM_MEM_DRIVER);
sprintf(dev->devname, "%s@%s", dev->name, dev->unique);
return 0;
}
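/* Editor's sketch (not part of this change): how a user-space client might
 * drive the set-unique ioctl above.  Assumes an already-open DRM file
 * descriptor and that the drm.h shipped with this release declares
 * drm_unique_t and DRM_IOCTL_SET_UNIQUE.
 */
#include <string.h>
#include <sys/ioctl.h>
#include "drm.h"
static int example_set_unique(int fd, const char *busid)
{
        drm_unique_t u;
        u.unique_len = strlen(busid);
        u.unique     = (char *)busid;
        /* On success the kernel stores the string and builds "<name>@<busid>". */
        return ioctl(fd, DRM_IOCTL_SET_UNIQUE, &u);
}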

252
linux/lists.c Normal file
View File

@@ -0,0 +1,252 @@
/* lists.c -- Buffer list handling routines -*- linux-c -*-
* Created: Mon Apr 19 20:54:22 1999 by faith@precisioninsight.com
* Revised: Fri Aug 20 09:27:01 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/lists.c,v 1.3 1999/08/20 15:07:02 faith Exp $
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/lists.c,v 1.1 1999/09/25 14:38:01 dawes Exp $
*
*/
#define __NO_VERSION__
#include "drmP.h"
int drm_waitlist_create(drm_waitlist_t *bl, int count)
{
DRM_DEBUG("%d\n", count);
if (bl->count) return -EINVAL;
bl->count = count;
bl->bufs = drm_alloc((bl->count + 2) * sizeof(*bl->bufs),
DRM_MEM_BUFLISTS);
bl->rp = bl->bufs;
bl->wp = bl->bufs;
bl->end = &bl->bufs[bl->count+1];
bl->write_lock = SPIN_LOCK_UNLOCKED;
bl->read_lock = SPIN_LOCK_UNLOCKED;
return 0;
}
int drm_waitlist_destroy(drm_waitlist_t *bl)
{
DRM_DEBUG("\n");
if (bl->rp != bl->wp) return -EINVAL;
if (bl->bufs) drm_free(bl->bufs,
(bl->count + 2) * sizeof(*bl->bufs),
DRM_MEM_BUFLISTS);
bl->count = 0;
bl->bufs = NULL;
bl->rp = NULL;
bl->wp = NULL;
bl->end = NULL;
return 0;
}
int drm_waitlist_put(drm_waitlist_t *bl, drm_buf_t *buf)
{
int left;
unsigned long flags;
left = DRM_LEFTCOUNT(bl);
DRM_DEBUG("put %d (%d left, rp = %p, wp = %p)\n",
buf->idx, left, bl->rp, bl->wp);
if (!left) {
DRM_ERROR("Overflow while adding buffer %d from pid %d\n",
buf->idx, buf->pid);
return -EINVAL;
}
#if DRM_DMA_HISTOGRAM
buf->time_queued = get_cycles();
#endif
buf->list = DRM_LIST_WAIT;
spin_lock_irqsave(&bl->write_lock, flags);
*bl->wp = buf;
if (++bl->wp >= bl->end) bl->wp = bl->bufs;
spin_unlock_irqrestore(&bl->write_lock, flags);
return 0;
}
drm_buf_t *drm_waitlist_get(drm_waitlist_t *bl)
{
drm_buf_t *buf;
unsigned long flags;
spin_lock_irqsave(&bl->read_lock, flags);
buf = *bl->rp;
if (bl->rp == bl->wp) {
spin_unlock_irqrestore(&bl->read_lock, flags);
return NULL;
}
if (++bl->rp >= bl->end) bl->rp = bl->bufs;
spin_unlock_irqrestore(&bl->read_lock, flags);
DRM_DEBUG("get %d\n", buf->idx);
return buf;
}
int drm_freelist_create(drm_freelist_t *bl, int count)
{
DRM_DEBUG("\n");
atomic_set(&bl->count, 0);
bl->next = NULL;
init_waitqueue_head(&bl->waiting);
bl->low_mark = 0;
bl->high_mark = 0;
atomic_set(&bl->wfh, 0);
++bl->initialized;
return 0;
}
int drm_freelist_destroy(drm_freelist_t *bl)
{
DRM_DEBUG("\n");
atomic_set(&bl->count, 0);
bl->next = NULL;
return 0;
}
int drm_freelist_put(drm_device_t *dev, drm_freelist_t *bl, drm_buf_t *buf)
{
unsigned int old;
unsigned int new;
char failed;
int count = 0;
drm_device_dma_t *dma = dev->dma;
if (!dma) {
DRM_ERROR("No DMA support\n");
return 1;
}
if (buf->waiting || buf->pending || buf->list == DRM_LIST_FREE) {
DRM_ERROR("Freed buffer %d: w%d, p%d, l%d\n",
buf->idx, buf->waiting, buf->pending, buf->list);
}
DRM_DEBUG("%d, count = %d, wfh = %d, w%d, p%d\n",
buf->idx, atomic_read(&bl->count), atomic_read(&bl->wfh),
buf->waiting, buf->pending);
if (!bl) return 1;
#if DRM_DMA_HISTOGRAM
buf->time_freed = get_cycles();
drm_histogram_compute(dev, buf);
#endif
buf->list = DRM_LIST_FREE;
do {
old = (unsigned long)bl->next;
buf->next = (void *)old;
new = (unsigned long)buf;
_DRM_CAS(&bl->next, old, new, failed);
if (++count > DRM_LOOPING_LIMIT) {
DRM_ERROR("Looping\n");
return 1;
}
} while (failed);
atomic_inc(&bl->count);
if (atomic_read(&bl->count) > dma->buf_count) {
DRM_ERROR("%d of %d buffers free after addition of %d\n",
atomic_read(&bl->count), dma->buf_count, buf->idx);
return 1;
}
/* Check for high water mark */
if (atomic_read(&bl->wfh) && atomic_read(&bl->count)>=bl->high_mark) {
atomic_set(&bl->wfh, 0);
wake_up_interruptible(&bl->waiting);
}
return 0;
}
static drm_buf_t *drm_freelist_try(drm_freelist_t *bl)
{
unsigned int old;
unsigned int new;
char failed;
drm_buf_t *buf;
int count = 0;
if (!bl) return NULL;
/* Get buffer */
do {
old = (unsigned int)bl->next;
if (!old) {
return NULL;
}
new = (unsigned long)bl->next->next;
_DRM_CAS(&bl->next, old, new, failed);
if (++count > DRM_LOOPING_LIMIT) {
DRM_ERROR("Looping\n");
return NULL;
}
} while (failed);
atomic_dec(&bl->count);
buf = (drm_buf_t *)old;
buf->next = NULL;
buf->list = DRM_LIST_NONE;
DRM_DEBUG("%d, count = %d, wfh = %d, w%d, p%d\n",
buf->idx, atomic_read(&bl->count), atomic_read(&bl->wfh),
buf->waiting, buf->pending);
if (buf->waiting || buf->pending) {
DRM_ERROR("Free buffer %d: w%d, p%d, l%d\n",
buf->idx, buf->waiting, buf->pending, buf->list);
}
return buf;
}
drm_buf_t *drm_freelist_get(drm_freelist_t *bl, int block)
{
drm_buf_t *buf = NULL;
DECLARE_WAITQUEUE(entry, current);
if (!bl || !bl->initialized) return NULL;
/* Check for low water mark */
if (atomic_read(&bl->count) <= bl->low_mark) /* Became low */
atomic_set(&bl->wfh, 1);
if (atomic_read(&bl->wfh)) {
DRM_DEBUG("Block = %d, count = %d, wfh = %d\n",
block, atomic_read(&bl->count),
atomic_read(&bl->wfh));
if (block) {
add_wait_queue(&bl->waiting, &entry);
current->state = TASK_INTERRUPTIBLE;
for (;;) {
if (!atomic_read(&bl->wfh)
&& (buf = drm_freelist_try(bl))) break;
schedule();
if (signal_pending(current)) break;
}
current->state = TASK_RUNNING;
remove_wait_queue(&bl->waiting, &entry);
}
return buf;
}
DRM_DEBUG("Count = %d, wfh = %d\n",
atomic_read(&bl->count), atomic_read(&bl->wfh));
return drm_freelist_try(bl);
}
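/* Editor's sketch (not part of this change): the compare-and-swap retry
 * pattern used by drm_freelist_put/drm_freelist_try above, rewritten as a
 * tiny stand-alone LIFO with a modern GCC builtin in place of _DRM_CAS.
 */
#include <stddef.h>
struct node { struct node *next; };
static struct node *top;                        /* head of the free LIFO */
static void lifo_push(struct node *n)
{
        struct node *old;
        do {                                    /* retry until the CAS wins */
                old = top;
                n->next = old;
        } while (!__sync_bool_compare_and_swap(&top, old, n));
}
static struct node *lifo_pop(void)
{
        struct node *old;
        do {
                old = top;
                if (!old) return NULL;          /* list is empty */
        } while (!__sync_bool_compare_and_swap(&top, old, old->next));
        old->next = NULL;
        return old;
}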

231
linux/lock.c Normal file
View File

@@ -0,0 +1,231 @@
/* lock.c -- IOCTLs for locking -*- linux-c -*-
* Created: Tue Feb 2 08:37:54 1999 by faith@precisioninsight.com
* Revised: Tue Oct 12 08:51:06 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/lock.c,v 1.5 1999/08/30 13:05:00 faith Exp $
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/lock.c,v 1.1 1999/09/25 14:38:01 dawes Exp $
*
*/
#define __NO_VERSION__
#include "drmP.h"
int drm_block(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
DRM_DEBUG("\n");
return 0;
}
int drm_unblock(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
DRM_DEBUG("\n");
return 0;
}
int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
{
unsigned int old;
unsigned int new;
char failed;
DRM_DEBUG("%d attempts\n", context);
do {
old = *lock;
if (old & _DRM_LOCK_HELD) new = old | _DRM_LOCK_CONT;
else new = context | _DRM_LOCK_HELD;
_DRM_CAS(lock, old, new, failed);
} while (failed);
if (_DRM_LOCKING_CONTEXT(old) == context) {
if (old & _DRM_LOCK_HELD) {
if (context != DRM_KERNEL_CONTEXT) {
DRM_ERROR("%d holds heavyweight lock\n",
context);
}
return 0;
}
}
if (new == (context | _DRM_LOCK_HELD)) {
/* Have lock */
DRM_DEBUG("%d\n", context);
return 1;
}
DRM_DEBUG("%d unable to get lock held by %d\n",
context, _DRM_LOCKING_CONTEXT(old));
return 0;
}
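/* Editor's note (illustrative, not in the original): a successful take
 * turns a free lock word into (context | _DRM_LOCK_HELD); losing the race
 * leaves the word held by the other context with _DRM_LOCK_CONT set, which
 * is what the unlock paths use to count contended locks.
 */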
/* This takes a lock forcibly and hands it to context. Should ONLY be used
inside *_unlock to give lock to kernel before calling *_dma_schedule. */
int drm_lock_transfer(drm_device_t *dev,
__volatile__ unsigned int *lock, unsigned int context)
{
unsigned int old;
unsigned int new;
char failed;
dev->lock.pid = 0;
do {
old = *lock;
new = context | _DRM_LOCK_HELD;
_DRM_CAS(lock, old, new, failed);
} while (failed);
DRM_DEBUG("%d => %d\n", _DRM_LOCKING_CONTEXT(old), context);
return 1;
}
int drm_lock_free(drm_device_t *dev,
__volatile__ unsigned int *lock, unsigned int context)
{
unsigned int old;
unsigned int new;
char failed;
pid_t pid = dev->lock.pid;
DRM_DEBUG("%d\n", context);
dev->lock.pid = 0;
do {
old = *lock;
new = 0;
_DRM_CAS(lock, old, new, failed);
} while (failed);
if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
DRM_ERROR("%d freed heavyweight lock held by %d (pid %d)\n",
context,
_DRM_LOCKING_CONTEXT(old),
pid);
return 1;
}
wake_up_interruptible(&dev->lock.lock_queue);
return 0;
}
static int drm_flush_queue(drm_device_t *dev, int context)
{
DECLARE_WAITQUEUE(entry, current);
int ret = 0;
drm_queue_t *q = dev->queuelist[context];
DRM_DEBUG("\n");
atomic_inc(&q->use_count);
if (atomic_read(&q->use_count) > 1) {
atomic_inc(&q->block_write);
current->state = TASK_INTERRUPTIBLE;
add_wait_queue(&q->flush_queue, &entry);
atomic_inc(&q->block_count);
for (;;) {
if (!DRM_BUFCOUNT(&q->waitlist)) break;
schedule();
if (signal_pending(current)) {
ret = -EINTR; /* Can't restart */
break;
}
}
atomic_dec(&q->block_count);
current->state = TASK_RUNNING;
remove_wait_queue(&q->flush_queue, &entry);
}
atomic_dec(&q->use_count);
atomic_inc(&q->total_flushed);
/* NOTE: block_write is still incremented!
Use drm_flush_unblock_queue to decrement. */
return ret;
}
static int drm_flush_unblock_queue(drm_device_t *dev, int context)
{
drm_queue_t *q = dev->queuelist[context];
DRM_DEBUG("\n");
atomic_inc(&q->use_count);
if (atomic_read(&q->use_count) > 1) {
if (atomic_read(&q->block_write)) {
atomic_dec(&q->block_write);
wake_up_interruptible(&q->write_queue);
}
}
atomic_dec(&q->use_count);
return 0;
}
int drm_flush_block_and_flush(drm_device_t *dev, int context,
drm_lock_flags_t flags)
{
int ret = 0;
int i;
DRM_DEBUG("\n");
if (flags & _DRM_LOCK_FLUSH) {
ret = drm_flush_queue(dev, DRM_KERNEL_CONTEXT);
if (!ret) ret = drm_flush_queue(dev, context);
}
if (flags & _DRM_LOCK_FLUSH_ALL) {
for (i = 0; !ret && i < dev->queue_count; i++) {
ret = drm_flush_queue(dev, i);
}
}
return ret;
}
int drm_flush_unblock(drm_device_t *dev, int context, drm_lock_flags_t flags)
{
int ret = 0;
int i;
DRM_DEBUG("\n");
if (flags & _DRM_LOCK_FLUSH) {
ret = drm_flush_unblock_queue(dev, DRM_KERNEL_CONTEXT);
if (!ret) ret = drm_flush_unblock_queue(dev, context);
}
if (flags & _DRM_LOCK_FLUSH_ALL) {
for (i = 0; !ret && i < dev->queue_count; i++) {
ret = drm_flush_unblock_queue(dev, i);
}
}
return ret;
}
int drm_finish(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
int ret = 0;
drm_lock_t lock;
DRM_DEBUG("\n");
copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);
ret = drm_flush_block_and_flush(dev, lock.context, lock.flags);
drm_flush_unblock(dev, lock.context, lock.flags);
return ret;
}

320
linux/memory.c Normal file
View File

@@ -0,0 +1,320 @@
/* memory.c -- Memory management wrappers for DRM -*- linux-c -*-
* Created: Thu Feb 4 14:00:34 1999 by faith@precisioninsight.com
* Revised: Fri Aug 20 13:04:33 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/memory.c,v 1.4 1999/08/20 20:00:53 faith Exp $
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/memory.c,v 1.1 1999/09/25 14:38:02 dawes Exp $
*
*/
#define __NO_VERSION__
#include "drmP.h"
typedef struct drm_mem_stats {
const char *name;
int succeed_count;
int free_count;
int fail_count;
unsigned long bytes_allocated;
unsigned long bytes_freed;
} drm_mem_stats_t;
static spinlock_t drm_mem_lock = SPIN_LOCK_UNLOCKED;
static unsigned long drm_ram_available = 0;
static unsigned long drm_ram_used = 0;
static drm_mem_stats_t drm_mem_stats[] = {
[DRM_MEM_DMA] = { "dmabufs" },
[DRM_MEM_SAREA] = { "sareas" },
[DRM_MEM_DRIVER] = { "driver" },
[DRM_MEM_MAGIC] = { "magic" },
[DRM_MEM_IOCTLS] = { "ioctltab" },
[DRM_MEM_MAPS] = { "maplist" },
[DRM_MEM_VMAS] = { "vmalist" },
[DRM_MEM_BUFS] = { "buflist" },
[DRM_MEM_SEGS] = { "seglist" },
[DRM_MEM_PAGES] = { "pagelist" },
[DRM_MEM_FILES] = { "files" },
[DRM_MEM_QUEUES] = { "queues" },
[DRM_MEM_CMDS] = { "commands" },
[DRM_MEM_MAPPINGS] = { "mappings" },
[DRM_MEM_BUFLISTS] = { "buflists" },
{ NULL, 0, } /* Last entry must be null */
};
void drm_mem_init(void)
{
drm_mem_stats_t *mem;
struct sysinfo si;
for (mem = drm_mem_stats; mem->name; ++mem) {
mem->succeed_count = 0;
mem->free_count = 0;
mem->fail_count = 0;
mem->bytes_allocated = 0;
mem->bytes_freed = 0;
}
si_meminfo(&si);
drm_ram_available = si.totalram;
drm_ram_used = 0;
}
/* drm_mem_info is called whenever a process reads /dev/drm/mem. */
static int _drm_mem_info(char *buf, char **start, off_t offset, int len,
int *eof, void *data)
{
drm_mem_stats_t *pt;
if (offset > 0) return 0; /* no partial requests */
len = 0;
*eof = 1;
DRM_PROC_PRINT(" total counts "
" | outstanding \n");
DRM_PROC_PRINT("type alloc freed fail bytes freed"
" | allocs bytes\n\n");
DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu |\n",
"system", 0, 0, 0, drm_ram_available);
DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu |\n",
"locked", 0, 0, 0, drm_ram_used);
DRM_PROC_PRINT("\n");
for (pt = drm_mem_stats; pt->name; pt++) {
DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu %10lu | %6d %10ld\n",
pt->name,
pt->succeed_count,
pt->free_count,
pt->fail_count,
pt->bytes_allocated,
pt->bytes_freed,
pt->succeed_count - pt->free_count,
(long)pt->bytes_allocated
- (long)pt->bytes_freed);
}
return len;
}
int drm_mem_info(char *buf, char **start, off_t offset, int len,
int *eof, void *data)
{
int ret;
spin_lock(&drm_mem_lock);
ret = _drm_mem_info(buf, start, offset, len, eof, data);
spin_unlock(&drm_mem_lock);
return ret;
}
void *drm_alloc(size_t size, int area)
{
void *pt;
if (!size) {
DRM_MEM_ERROR(area, "Allocating 0 bytes\n");
return NULL;
}
if (!(pt = kmalloc(size, GFP_KERNEL))) {
spin_lock(&drm_mem_lock);
++drm_mem_stats[area].fail_count;
spin_unlock(&drm_mem_lock);
return NULL;
}
spin_lock(&drm_mem_lock);
++drm_mem_stats[area].succeed_count;
drm_mem_stats[area].bytes_allocated += size;
spin_unlock(&drm_mem_lock);
return pt;
}
void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area)
{
void *pt;
if (!(pt = drm_alloc(size, area))) return NULL;
if (oldpt && oldsize) {
memcpy(pt, oldpt, oldsize);
drm_free(oldpt, oldsize, area);
}
return pt;
}
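/* Editor's sketch (hypothetical caller, not in the original): the intended
 * drm_realloc pattern -- grow a table by one slot while preserving its
 * contents; the old size must be passed so the accounting above stays
 * balanced.
 */
static int example_grow_table(int **table, int count)
{
        int *bigger = drm_realloc(*table,
                                  count * sizeof(**table),
                                  (count + 1) * sizeof(**table),
                                  DRM_MEM_DRIVER);
        if (!bigger) return -ENOMEM;
        *table = bigger;
        return 0;
}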
char *drm_strdup(const char *s, int area)
{
char *pt;
int length = s ? strlen(s) : 0;
if (!(pt = drm_alloc(length+1, area))) return NULL;
if (s) strcpy(pt, s);
else pt[0] = '\0';
return pt;
}
void drm_strfree(const char *s, int area)
{
unsigned int size;
if (!s) return;
size = 1 + (s ? strlen(s) : 0);
drm_free((void *)s, size, area);
}
void drm_free(void *pt, size_t size, int area)
{
int alloc_count;
int free_count;
if (!pt) DRM_MEM_ERROR(area, "Attempt to free NULL pointer\n");
else kfree_s(pt, size);
spin_lock(&drm_mem_lock);
drm_mem_stats[area].bytes_freed += size;
free_count = ++drm_mem_stats[area].free_count;
alloc_count = drm_mem_stats[area].succeed_count;
spin_unlock(&drm_mem_lock);
if (free_count > alloc_count) {
DRM_MEM_ERROR(area, "Excess frees: %d frees, %d allocs\n",
free_count, alloc_count);
}
}
unsigned long drm_alloc_pages(int order, int area)
{
unsigned long address;
unsigned long bytes = PAGE_SIZE << order;
unsigned long addr;
unsigned int sz;
spin_lock(&drm_mem_lock);
if (drm_ram_used > (DRM_RAM_PERCENT * drm_ram_available) / 100) {
spin_unlock(&drm_mem_lock);
return 0;
}
spin_unlock(&drm_mem_lock);
address = __get_free_pages(GFP_KERNEL, order);
if (!address) {
spin_lock(&drm_mem_lock);
++drm_mem_stats[area].fail_count;
spin_unlock(&drm_mem_lock);
return 0;
}
spin_lock(&drm_mem_lock);
++drm_mem_stats[area].succeed_count;
drm_mem_stats[area].bytes_allocated += bytes;
drm_ram_used += bytes;
spin_unlock(&drm_mem_lock);
/* Zero outside the lock */
memset((void *)address, 0, bytes);
/* Reserve */
for (addr = address, sz = bytes;
sz > 0;
addr += PAGE_SIZE, sz -= PAGE_SIZE) {
mem_map_reserve(MAP_NR(addr));
}
return address;
}
void drm_free_pages(unsigned long address, int order, int area)
{
unsigned long bytes = PAGE_SIZE << order;
int alloc_count;
int free_count;
unsigned long addr;
unsigned int sz;
if (!address) {
DRM_MEM_ERROR(area, "Attempt to free address 0\n");
} else {
/* Unreserve */
for (addr = address, sz = bytes;
sz > 0;
addr += PAGE_SIZE, sz -= PAGE_SIZE) {
mem_map_unreserve(MAP_NR(addr));
}
free_pages(address, order);
}
spin_lock(&drm_mem_lock);
free_count = ++drm_mem_stats[area].free_count;
alloc_count = drm_mem_stats[area].succeed_count;
drm_mem_stats[area].bytes_freed += bytes;
drm_ram_used -= bytes;
spin_unlock(&drm_mem_lock);
if (free_count > alloc_count) {
DRM_MEM_ERROR(area,
"Excess frees: %d frees, %d allocs\n",
free_count, alloc_count);
}
}
void *drm_ioremap(unsigned long offset, unsigned long size)
{
void *pt;
if (!size) {
DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
"Mapping 0 bytes at 0x%08lx\n", offset);
return NULL;
}
if (!(pt = ioremap(offset, size))) {
spin_lock(&drm_mem_lock);
++drm_mem_stats[DRM_MEM_MAPPINGS].fail_count;
spin_unlock(&drm_mem_lock);
return NULL;
}
spin_lock(&drm_mem_lock);
++drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
drm_mem_stats[DRM_MEM_MAPPINGS].bytes_allocated += size;
spin_unlock(&drm_mem_lock);
return pt;
}
void drm_ioremapfree(void *pt, unsigned long size)
{
int alloc_count;
int free_count;
if (!pt)
DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
"Attempt to free NULL pointer\n");
else
iounmap(pt);
spin_lock(&drm_mem_lock);
drm_mem_stats[DRM_MEM_MAPPINGS].bytes_freed += size;
free_count = ++drm_mem_stats[DRM_MEM_MAPPINGS].free_count;
alloc_count = drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
spin_unlock(&drm_mem_lock);
if (free_count > alloc_count) {
DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
"Excess frees: %d frees, %d allocs\n",
free_count, alloc_count);
}
}

568
linux/proc.c Normal file
View File

@@ -0,0 +1,568 @@
/* proc.c -- /proc support for DRM -*- linux-c -*-
* Created: Mon Jan 11 09:48:47 1999 by faith@precisioninsight.com
* Revised: Fri Aug 20 11:31:48 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/proc.c,v 1.4 1999/08/20 15:36:46 faith Exp $
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/proc.c,v 1.1 1999/09/25 14:38:02 dawes Exp $
*
*/
#define __NO_VERSION__
#include "drmP.h"
static struct proc_dir_entry *drm_root = NULL;
static struct proc_dir_entry *drm_dev_root = NULL;
static char drm_slot_name[64];
static int drm_name_info(char *buf, char **start, off_t offset,
int len, int *eof, void *data);
static int drm_vm_info(char *buf, char **start, off_t offset,
int len, int *eof, void *data);
static int drm_clients_info(char *buf, char **start, off_t offset,
int len, int *eof, void *data);
static int drm_queues_info(char *buf, char **start, off_t offset,
int len, int *eof, void *data);
static int drm_bufs_info(char *buf, char **start, off_t offset,
int len, int *eof, void *data);
#if DRM_DEBUG_CODE
static int drm_vma_info(char *buf, char **start, off_t offset,
int len, int *eof, void *data);
#endif
#if DRM_DMA_HISTOGRAM
static int drm_histo_info(char *buf, char **start, off_t offset,
int len, int *eof, void *data);
#endif
struct drm_proc_list {
const char *name;
int (*f)(char *, char **, off_t, int, int *, void *);
} drm_proc_list[] = {
{ "name", drm_name_info },
{ "mem", drm_mem_info },
{ "vm", drm_vm_info },
{ "clients", drm_clients_info },
{ "queues", drm_queues_info },
{ "bufs", drm_bufs_info },
#if DRM_DEBUG_CODE
{ "vma", drm_vma_info },
#endif
#if DRM_DMA_HISTOGRAM
{ "histo", drm_histo_info },
#endif
};
#define DRM_PROC_ENTRIES (sizeof(drm_proc_list)/sizeof(drm_proc_list[0]))
int drm_proc_init(drm_device_t *dev)
{
struct proc_dir_entry *ent;
int i, j;
drm_root = create_proc_entry("graphics", S_IFDIR, NULL);
if (!drm_root) {
DRM_ERROR("Cannot create /proc/graphics\n");
return -1;
}
/* Instead of doing this search, we should
add some global support for /proc/graphics. */
for (i = 0; i < 8; i++) {
sprintf(drm_slot_name, "graphics/%d", i);
drm_dev_root = create_proc_entry(drm_slot_name, S_IFDIR, NULL);
if (!drm_dev_root) {
DRM_ERROR("Cannot create /proc/%s\n", drm_slot_name);
remove_proc_entry("graphics", NULL);
return -1;
}
if (drm_dev_root->nlink == 2) break;
drm_dev_root = NULL;
}
if (!drm_dev_root) {
DRM_ERROR("Cannot find slot in /proc/graphics\n");
return -1;
}
for (i = 0; i < DRM_PROC_ENTRIES; i++) {
ent = create_proc_entry(drm_proc_list[i].name,
S_IFREG|S_IRUGO, drm_dev_root);
if (!ent) {
DRM_ERROR("Cannot create /proc/%s/%s\n",
drm_slot_name, drm_proc_list[i].name);
for (j = 0; j < i; j++)
remove_proc_entry(drm_proc_list[j].name,
drm_dev_root);
remove_proc_entry(drm_slot_name, NULL);
remove_proc_entry("graphics", NULL);
return -1;
}
ent->read_proc = drm_proc_list[i].f;
ent->data = dev;
}
return 0;
}
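/* Editor's sketch (not part of this change): reading back one of the
 * entries drm_proc_init creates, assuming the driver landed in slot 0.
 * The line format comes from drm_name_info below.
 */
#include <stdio.h>
int main(void)
{
        char line[128];
        FILE *f = fopen("/proc/graphics/0/name", "r");
        if (!f) { perror("/proc/graphics/0/name"); return 1; }
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);            /* "<name> 0x<device> <unique>" */
        fclose(f);
        return 0;
}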
int drm_proc_cleanup(void)
{
int i;
if (drm_root) {
if (drm_dev_root) {
for (i = 0; i < DRM_PROC_ENTRIES; i++) {
remove_proc_entry(drm_proc_list[i].name,
drm_dev_root);
}
remove_proc_entry(drm_slot_name, NULL);
}
remove_proc_entry("graphics", NULL);
remove_proc_entry(DRM_NAME, NULL);
}
drm_root = drm_dev_root = NULL;
return 0;
}
static int drm_name_info(char *buf, char **start, off_t offset, int len,
int *eof, void *data)
{
drm_device_t *dev = (drm_device_t *)data;
if (offset > 0) return 0; /* no partial requests */
len = 0;
*eof = 1;
if (dev->unique) {
DRM_PROC_PRINT("%s 0x%x %s\n",
dev->name, dev->device, dev->unique);
} else {
DRM_PROC_PRINT("%s 0x%x\n", dev->name, dev->device);
}
return len;
}
static int _drm_vm_info(char *buf, char **start, off_t offset, int len,
int *eof, void *data)
{
drm_device_t *dev = (drm_device_t *)data;
drm_map_t *map;
const char *types[] = { "FB", "REG", "SHM" };
const char *type;
int i;
if (offset > 0) return 0; /* no partial requests */
len = 0;
*eof = 1;
DRM_PROC_PRINT("slot offset size type flags "
"address mtrr\n\n");
for (i = 0; i < dev->map_count; i++) {
map = dev->maplist[i];
if (map->type < 0 || map->type > 2) type = "??";
else type = types[map->type];
DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ",
i,
map->offset,
map->size,
type,
map->flags,
(unsigned long)map->handle);
if (map->mtrr < 0) {
DRM_PROC_PRINT("none\n");
} else {
DRM_PROC_PRINT("%4d\n", map->mtrr);
}
}
return len;
}
static int drm_vm_info(char *buf, char **start, off_t offset, int len,
int *eof, void *data)
{
drm_device_t *dev = (drm_device_t *)data;
int ret;
down(&dev->struct_sem);
ret = _drm_vm_info(buf, start, offset, len, eof, data);
up(&dev->struct_sem);
return ret;
}
static int _drm_queues_info(char *buf, char **start, off_t offset, int len,
int *eof, void *data)
{
drm_device_t *dev = (drm_device_t *)data;
int i;
drm_queue_t *q;
if (offset > 0) return 0; /* no partial requests */
len = 0;
*eof = 1;
DRM_PROC_PRINT(" ctx/flags use fin"
" blk/rw/rwf wait flushed queued"
" locks\n\n");
for (i = 0; i < dev->queue_count; i++) {
q = dev->queuelist[i];
atomic_inc(&q->use_count);
DRM_PROC_PRINT_RET(atomic_dec(&q->use_count),
"%5d/0x%03x %5d %5d"
" %5d/%c%c/%c%c%c %5d %10d %10d %10d\n",
i,
q->flags,
atomic_read(&q->use_count),
atomic_read(&q->finalization),
atomic_read(&q->block_count),
atomic_read(&q->block_read) ? 'r' : '-',
atomic_read(&q->block_write) ? 'w' : '-',
waitqueue_active(&q->read_queue) ? 'r':'-',
waitqueue_active(&q->write_queue) ? 'w':'-',
waitqueue_active(&q->flush_queue) ? 'f':'-',
DRM_BUFCOUNT(&q->waitlist),
atomic_read(&q->total_flushed),
atomic_read(&q->total_queued),
atomic_read(&q->total_locks));
atomic_dec(&q->use_count);
}
return len;
}
static int drm_queues_info(char *buf, char **start, off_t offset, int len,
int *eof, void *data)
{
drm_device_t *dev = (drm_device_t *)data;
int ret;
down(&dev->struct_sem);
ret = _drm_queues_info(buf, start, offset, len, eof, data);
up(&dev->struct_sem);
return ret;
}
/* drm_bufs_info is called whenever a process reads
/dev/drm/<dev>/bufs. */
static int _drm_bufs_info(char *buf, char **start, off_t offset, int len,
int *eof, void *data)
{
drm_device_t *dev = (drm_device_t *)data;
drm_device_dma_t *dma = dev->dma;
int i;
if (!dma) return 0;
if (offset > 0) return 0; /* no partial requests */
len = 0;
*eof = 1;
DRM_PROC_PRINT(" o size count free segs pages kB\n\n");
for (i = 0; i <= DRM_MAX_ORDER; i++) {
if (dma->bufs[i].buf_count)
DRM_PROC_PRINT("%2d %8d %5d %5d %5d %5d %5ld\n",
i,
dma->bufs[i].buf_size,
dma->bufs[i].buf_count,
atomic_read(&dma->bufs[i]
.freelist.count),
dma->bufs[i].seg_count,
dma->bufs[i].seg_count
*(1 << dma->bufs[i].page_order),
(dma->bufs[i].seg_count
* (1 << dma->bufs[i].page_order))
* PAGE_SIZE / 1024);
}
DRM_PROC_PRINT("\n");
for (i = 0; i < dma->buf_count; i++) {
if (i && !(i%32)) DRM_PROC_PRINT("\n");
DRM_PROC_PRINT(" %d", dma->buflist[i]->list);
}
DRM_PROC_PRINT("\n");
return len;
}
static int drm_bufs_info(char *buf, char **start, off_t offset, int len,
int *eof, void *data)
{
drm_device_t *dev = (drm_device_t *)data;
int ret;
down(&dev->struct_sem);
ret = _drm_bufs_info(buf, start, offset, len, eof, data);
up(&dev->struct_sem);
return ret;
}
static int _drm_clients_info(char *buf, char **start, off_t offset, int len,
int *eof, void *data)
{
drm_device_t *dev = (drm_device_t *)data;
drm_file_t *priv;
if (offset > 0) return 0; /* no partial requests */
len = 0;
*eof = 1;
DRM_PROC_PRINT("a dev pid uid magic ioctls\n\n");
for (priv = dev->file_first; priv; priv = priv->next) {
DRM_PROC_PRINT("%c %3d %5d %5d %10u %10lu\n",
priv->authenticated ? 'y' : 'n',
priv->minor,
priv->pid,
priv->uid,
priv->magic,
priv->ioctl_count);
}
return len;
}
static int drm_clients_info(char *buf, char **start, off_t offset, int len,
int *eof, void *data)
{
drm_device_t *dev = (drm_device_t *)data;
int ret;
down(&dev->struct_sem);
ret = _drm_clients_info(buf, start, offset, len, eof, data);
up(&dev->struct_sem);
return ret;
}
#if DRM_DEBUG_CODE
static int _drm_vma_info(char *buf, char **start, off_t offset, int len,
int *eof, void *data)
{
drm_device_t *dev = (drm_device_t *)data;
drm_vma_entry_t *pt;
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte;
unsigned long i;
struct vm_area_struct *vma;
unsigned long address;
#if defined(__i386__)
unsigned int pgprot;
#endif
if (offset > 0) return 0; /* no partial requests */
len = 0;
*eof = 1;
DRM_PROC_PRINT("vma use count: %d, high_memory = %p, 0x%08lx\n",
atomic_read(&dev->vma_count),
high_memory, virt_to_phys(high_memory));
for (pt = dev->vmalist; pt; pt = pt->next) {
if (!(vma = pt->vma)) continue;
DRM_PROC_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx",
pt->pid,
vma->vm_start,
vma->vm_end,
vma->vm_flags & VM_READ ? 'r' : '-',
vma->vm_flags & VM_WRITE ? 'w' : '-',
vma->vm_flags & VM_EXEC ? 'x' : '-',
vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
vma->vm_flags & VM_LOCKED ? 'l' : '-',
vma->vm_flags & VM_IO ? 'i' : '-',
vma->vm_offset );
#if defined(__i386__)
pgprot = pgprot_val(vma->vm_page_prot);
DRM_PROC_PRINT(" %c%c%c%c%c%c%c%c%c",
pgprot & _PAGE_PRESENT ? 'p' : '-',
pgprot & _PAGE_RW ? 'w' : 'r',
pgprot & _PAGE_USER ? 'u' : 's',
pgprot & _PAGE_PWT ? 't' : 'b',
pgprot & _PAGE_PCD ? 'u' : 'c',
pgprot & _PAGE_ACCESSED ? 'a' : '-',
pgprot & _PAGE_DIRTY ? 'd' : '-',
pgprot & _PAGE_4M ? 'm' : 'k',
pgprot & _PAGE_GLOBAL ? 'g' : 'l' );
#endif
DRM_PROC_PRINT("\n");
for (i = vma->vm_start; i < vma->vm_end; i += PAGE_SIZE) {
pgd = pgd_offset(vma->vm_mm, i);
pmd = pmd_offset(pgd, i);
pte = pte_offset(pmd, i);
if (pte_present(*pte)) {
address = __pa(pte_page(*pte))
+ (i & (PAGE_SIZE-1));
DRM_PROC_PRINT(" 0x%08lx -> 0x%08lx"
" %c%c%c%c%c\n",
i,
address,
pte_read(*pte) ? 'r' : '-',
pte_write(*pte) ? 'w' : '-',
pte_exec(*pte) ? 'x' : '-',
pte_dirty(*pte) ? 'd' : '-',
pte_young(*pte) ? 'a' : '-' );
} else {
DRM_PROC_PRINT(" 0x%08lx\n", i);
}
}
}
return len;
}
static int drm_vma_info(char *buf, char **start, off_t offset, int len,
int *eof, void *data)
{
drm_device_t *dev = (drm_device_t *)data;
int ret;
down(&dev->struct_sem);
ret = _drm_vma_info(buf, start, offset, len, eof, data);
up(&dev->struct_sem);
return ret;
}
#endif
#if DRM_DMA_HISTOGRAM
static int _drm_histo_info(char *buf, char **start, off_t offset, int len,
int *eof, void *data)
{
drm_device_t *dev = (drm_device_t *)data;
drm_device_dma_t *dma = dev->dma;
int i;
unsigned long slot_value = DRM_DMA_HISTOGRAM_INITIAL;
unsigned long prev_value = 0;
drm_buf_t *buffer;
if (offset > 0) return 0; /* no partial requests */
len = 0;
*eof = 1;
DRM_PROC_PRINT("general statistics:\n");
DRM_PROC_PRINT("total %10u\n", atomic_read(&dev->histo.total));
DRM_PROC_PRINT("open %10u\n", atomic_read(&dev->total_open));
DRM_PROC_PRINT("close %10u\n", atomic_read(&dev->total_close));
DRM_PROC_PRINT("ioctl %10u\n", atomic_read(&dev->total_ioctl));
DRM_PROC_PRINT("irq %10u\n", atomic_read(&dev->total_irq));
DRM_PROC_PRINT("ctx %10u\n", atomic_read(&dev->total_ctx));
DRM_PROC_PRINT("\nlock statistics:\n");
DRM_PROC_PRINT("locks %10u\n", atomic_read(&dev->total_locks));
DRM_PROC_PRINT("unlocks %10u\n", atomic_read(&dev->total_unlocks));
DRM_PROC_PRINT("contends %10u\n", atomic_read(&dev->total_contends));
DRM_PROC_PRINT("sleeps %10u\n", atomic_read(&dev->total_sleeps));
if (dma) {
DRM_PROC_PRINT("\ndma statistics:\n");
DRM_PROC_PRINT("prio %10u\n",
atomic_read(&dma->total_prio));
DRM_PROC_PRINT("bytes %10u\n",
atomic_read(&dma->total_bytes));
DRM_PROC_PRINT("dmas %10u\n",
atomic_read(&dma->total_dmas));
DRM_PROC_PRINT("missed:\n");
DRM_PROC_PRINT(" dma %10u\n",
atomic_read(&dma->total_missed_dma));
DRM_PROC_PRINT(" lock %10u\n",
atomic_read(&dma->total_missed_lock));
DRM_PROC_PRINT(" free %10u\n",
atomic_read(&dma->total_missed_free));
DRM_PROC_PRINT(" sched %10u\n",
atomic_read(&dma->total_missed_sched));
DRM_PROC_PRINT("tried %10u\n",
atomic_read(&dma->total_tried));
DRM_PROC_PRINT("hit %10u\n",
atomic_read(&dma->total_hit));
DRM_PROC_PRINT("lost %10u\n",
atomic_read(&dma->total_lost));
buffer = dma->next_buffer;
if (buffer) {
DRM_PROC_PRINT("next_buffer %7d\n", buffer->idx);
} else {
DRM_PROC_PRINT("next_buffer none\n");
}
buffer = dma->this_buffer;
if (buffer) {
DRM_PROC_PRINT("this_buffer %7d\n", buffer->idx);
} else {
DRM_PROC_PRINT("this_buffer none\n");
}
}
DRM_PROC_PRINT("\nvalues:\n");
if (dev->lock.hw_lock) {
DRM_PROC_PRINT("lock 0x%08x\n",
dev->lock.hw_lock->lock);
} else {
DRM_PROC_PRINT("lock none\n");
}
DRM_PROC_PRINT("context_flag 0x%08x\n", dev->context_flag);
DRM_PROC_PRINT("interrupt_flag 0x%08x\n", dev->interrupt_flag);
DRM_PROC_PRINT("dma_flag 0x%08x\n", dev->dma_flag);
DRM_PROC_PRINT("queue_count %10d\n", dev->queue_count);
DRM_PROC_PRINT("last_context %10d\n", dev->last_context);
DRM_PROC_PRINT("last_switch %10lu\n", dev->last_switch);
DRM_PROC_PRINT("last_checked %10d\n", dev->last_checked);
DRM_PROC_PRINT("\n q2d d2c c2f"
" q2c q2f dma sch"
" ctx lacq lhld\n\n");
for (i = 0; i < DRM_DMA_HISTOGRAM_SLOTS; i++) {
DRM_PROC_PRINT("%s %10lu %10u %10u %10u %10u %10u"
" %10u %10u %10u %10u %10u\n",
i == DRM_DMA_HISTOGRAM_SLOTS - 1 ? ">=" : "< ",
i == DRM_DMA_HISTOGRAM_SLOTS - 1
? prev_value : slot_value ,
atomic_read(&dev->histo
.queued_to_dispatched[i]),
atomic_read(&dev->histo
.dispatched_to_completed[i]),
atomic_read(&dev->histo
.completed_to_freed[i]),
atomic_read(&dev->histo
.queued_to_completed[i]),
atomic_read(&dev->histo
.queued_to_freed[i]),
atomic_read(&dev->histo.dma[i]),
atomic_read(&dev->histo.schedule[i]),
atomic_read(&dev->histo.ctx[i]),
atomic_read(&dev->histo.lacq[i]),
atomic_read(&dev->histo.lhld[i]));
prev_value = slot_value;
slot_value = DRM_DMA_HISTOGRAM_NEXT(slot_value);
}
return len;
}
static int drm_histo_info(char *buf, char **start, off_t offset, int len,
int *eof, void *data)
{
drm_device_t *dev = (drm_device_t *)data;
int ret;
down(&dev->struct_sem);
ret = _drm_histo_info(buf, start, offset, len, eof, data);
up(&dev->struct_sem);
return ret;
}
#endif

206
linux/tdfx_context.c Normal file
View File

@@ -0,0 +1,206 @@
/* tdfx_context.c -- IOCTLs for tdfx contexts -*- linux-c -*-
* Created: Thu Oct 7 10:50:22 1999 by faith@precisioninsight.com
* Revised: Sat Oct 9 23:39:56 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* $PI$
* $XFree86$
*
*/
#include <linux/sched.h>
#define __NO_VERSION__
#include "drmP.h"
#include "tdfx_drv.h"
extern drm_ctx_t tdfx_res_ctx;
static int tdfx_alloc_queue(drm_device_t *dev)
{
static int context = 0;
return ++context; /* Should this reuse contexts in the future? */
}
int tdfx_context_switch(drm_device_t *dev, int old, int new)
{
char buf[64];
atomic_inc(&dev->total_ctx);
if (test_and_set_bit(0, &dev->context_flag)) {
DRM_ERROR("Reentering -- FIXME\n");
return -EBUSY;
}
#if DRM_DMA_HISTOGRAM
dev->ctx_start = get_cycles();
#endif
DRM_DEBUG("Context switch from %d to %d\n", old, new);
if (new == dev->last_context) {
clear_bit(0, &dev->context_flag);
return 0;
}
if (drm_flags & DRM_FLAG_NOCTX) {
tdfx_context_switch_complete(dev, new);
} else {
sprintf(buf, "C %d %d\n", old, new);
drm_write_string(dev, buf);
}
return 0;
}
int tdfx_context_switch_complete(drm_device_t *dev, int new)
{
dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
dev->last_switch = jiffies;
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
DRM_ERROR("Lock isn't held after context switch\n");
}
/* If a context switch is ever initiated
when the kernel holds the lock, release
that lock here. */
#if DRM_DMA_HISTOGRAM
atomic_inc(&dev->histo.ctx[drm_histogram_slot(get_cycles()
- dev->ctx_start)]);
#endif
clear_bit(0, &dev->context_flag);
wake_up(&dev->context_wait);
return 0;
}
int tdfx_resctx(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_ctx_res_t res;
drm_ctx_t ctx;
int i;
DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS);
copy_from_user_ret(&res, (drm_ctx_res_t *)arg, sizeof(res), -EFAULT);
if (res.count >= DRM_RESERVED_CONTEXTS) {
memset(&ctx, 0, sizeof(ctx));
for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
ctx.handle = i;
copy_to_user_ret(&res.contexts[i],
&ctx,
sizeof(ctx),
-EFAULT);
}
}
res.count = DRM_RESERVED_CONTEXTS;
copy_to_user_ret((drm_ctx_res_t *)arg, &res, sizeof(res), -EFAULT);
return 0;
}
int tdfx_addctx(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT);
if ((ctx.handle = tdfx_alloc_queue(dev)) == DRM_KERNEL_CONTEXT) {
/* Skip kernel's context and get a new one. */
ctx.handle = tdfx_alloc_queue(dev);
}
DRM_DEBUG("%d\n", ctx.handle);
copy_to_user_ret((drm_ctx_t *)arg, &ctx, sizeof(ctx), -EFAULT);
return 0;
}
int tdfx_modctx(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_ctx_t ctx;
copy_from_user_ret(&ctx, (drm_ctx_t*)arg, sizeof(ctx), -EFAULT);
if (ctx.flags==_DRM_CONTEXT_PRESERVED)
tdfx_res_ctx.handle=ctx.handle;
return 0;
}
int tdfx_getctx(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_ctx_t ctx;
copy_from_user_ret(&ctx, (drm_ctx_t*)arg, sizeof(ctx), -EFAULT);
/* This is 0, because we don't handle any context flags */
ctx.flags = 0;
copy_to_user_ret((drm_ctx_t*)arg, &ctx, sizeof(ctx), -EFAULT);
return 0;
}
int tdfx_switchctx(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT);
DRM_DEBUG("%d\n", ctx.handle);
return tdfx_context_switch(dev, dev->last_context, ctx.handle);
}
int tdfx_newctx(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT);
DRM_DEBUG("%d\n", ctx.handle);
tdfx_context_switch_complete(dev, ctx.handle);
return 0;
}
int tdfx_rmctx(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_ctx_t ctx;
copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT);
DRM_DEBUG("%d\n", ctx.handle);
/* This is currently a noop because we
don't reuse context values. Perhaps we
should? */
return 0;
}
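/* Editor's sketch (not part of this change): the user-space side of the
 * context-allocation round trip handled by tdfx_addctx above.  Assumes the
 * drm.h from this release declares drm_ctx_t and DRM_IOCTL_ADD_CTX and that
 * fd is an open, authenticated DRM file descriptor.
 */
#include <sys/ioctl.h>
#include "drm.h"
static int example_add_context(int fd)
{
        drm_ctx_t ctx;
        ctx.flags = 0;
        if (ioctl(fd, DRM_IOCTL_ADD_CTX, &ctx) < 0) return -1;
        return ctx.handle;      /* never DRM_KERNEL_CONTEXT; see tdfx_addctx */
}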

653
linux/tdfx_drv.c Normal file
View File

@@ -0,0 +1,653 @@
/* tdfx_drv.c -- tdfx driver -*- linux-c -*-
* Created: Thu Oct 7 10:38:32 1999 by faith@precisioninsight.com
* Revised: Tue Oct 12 08:51:35 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* $PI$
* $XFree86$
*
*/
#define EXPORT_SYMTAB
#include "drmP.h"
#include "tdfx_drv.h"
EXPORT_SYMBOL(tdfx_init);
EXPORT_SYMBOL(tdfx_cleanup);
#define TDFX_NAME "tdfx"
#define TDFX_DESC "tdfx"
#define TDFX_DATE "19991009"
#define TDFX_MAJOR 0
#define TDFX_MINOR 0
#define TDFX_PATCHLEVEL 1
static drm_device_t tdfx_device;
drm_ctx_t tdfx_res_ctx;
static struct file_operations tdfx_fops = {
open: tdfx_open,
flush: drm_flush,
release: tdfx_release,
ioctl: tdfx_ioctl,
mmap: drm_mmap,
read: drm_read,
fasync: drm_fasync,
};
static struct miscdevice tdfx_misc = {
minor: MISC_DYNAMIC_MINOR,
name: TDFX_NAME,
fops: &tdfx_fops,
};
static drm_ioctl_desc_t tdfx_ioctls[] = {
[DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { tdfx_version, 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique, 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { drm_getmagic, 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { drm_irq_busid, 0, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { drm_block, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_unblock, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { tdfx_addctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { tdfx_rmctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { tdfx_modctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { tdfx_getctx, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { tdfx_switchctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { tdfx_newctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { tdfx_resctx, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { drm_rmdraw, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { tdfx_lock, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { tdfx_unlock, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_finish, 1, 0 },
};
#define TDFX_IOCTL_COUNT DRM_ARRAY_SIZE(tdfx_ioctls)
#ifdef MODULE
int init_module(void);
void cleanup_module(void);
static char *tdfx = NULL;
MODULE_AUTHOR("Precision Insight, Inc., Cedar Park, Texas.");
MODULE_DESCRIPTION("tdfx");
MODULE_PARM(tdfx, "s");
/* init_module is called when insmod is used to load the module */
int init_module(void)
{
return tdfx_init();
}
/* cleanup_module is called when rmmod is used to unload the module */
void cleanup_module(void)
{
tdfx_cleanup();
}
#endif
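/* Editor's note (illustrative usage, not in the original): with the module
 * parameter declared above, the option string is handed to
 * drm_parse_options at load time, e.g. (assuming the module is built as
 * tdfx.o):
 *
 *      insmod tdfx.o tdfx="debug;noctx"
 */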
#ifndef MODULE
/* tdfx_setup is called by the kernel to parse command-line options passed
* via the boot-loader (e.g., LILO). It calls the insmod option routine,
* drm_parse_options.
*
* This is not currently supported, since it requires changes to
* linux/init/main.c. */
void __init tdfx_setup(char *str, int *ints)
{
if (ints[0] != 0) {
DRM_ERROR("Illegal command line format, ignored\n");
return;
}
drm_parse_options(str);
}
#endif
static int tdfx_setup(drm_device_t *dev)
{
int i;
atomic_set(&dev->ioctl_count, 0);
atomic_set(&dev->vma_count, 0);
dev->buf_use = 0;
atomic_set(&dev->buf_alloc, 0);
atomic_set(&dev->total_open, 0);
atomic_set(&dev->total_close, 0);
atomic_set(&dev->total_ioctl, 0);
atomic_set(&dev->total_irq, 0);
atomic_set(&dev->total_ctx, 0);
atomic_set(&dev->total_locks, 0);
atomic_set(&dev->total_unlocks, 0);
atomic_set(&dev->total_contends, 0);
atomic_set(&dev->total_sleeps, 0);
for (i = 0; i < DRM_HASH_SIZE; i++) {
dev->magiclist[i].head = NULL;
dev->magiclist[i].tail = NULL;
}
dev->maplist = NULL;
dev->map_count = 0;
dev->vmalist = NULL;
dev->lock.hw_lock = NULL;
init_waitqueue_head(&dev->lock.lock_queue);
dev->queue_count = 0;
dev->queue_reserved = 0;
dev->queue_slots = 0;
dev->queuelist = NULL;
dev->irq = 0;
dev->context_flag = 0;
dev->interrupt_flag = 0;
dev->dma = 0;
dev->dma_flag = 0;
dev->last_context = 0;
dev->last_switch = 0;
dev->last_checked = 0;
init_timer(&dev->timer);
init_waitqueue_head(&dev->context_wait);
dev->ctx_start = 0;
dev->lck_start = 0;
dev->buf_rp = dev->buf;
dev->buf_wp = dev->buf;
dev->buf_end = dev->buf + DRM_BSZ;
dev->buf_async = NULL;
init_waitqueue_head(&dev->buf_readers);
init_waitqueue_head(&dev->buf_writers);
tdfx_res_ctx.handle=-1;
DRM_DEBUG("\n");
/* The kernel's context could be created here, but is now created
in drm_dma_enqueue. This is more resource-efficient for
hardware that does not do DMA, but may mean that
drm_select_queue fails between the time the interrupt is
initialized and the time the queues are initialized. */
return 0;
}
static int tdfx_takedown(drm_device_t *dev)
{
int i;
drm_magic_entry_t *pt, *next;
drm_map_t *map;
drm_vma_entry_t *vma, *vma_next;
DRM_DEBUG("\n");
down(&dev->struct_sem);
del_timer(&dev->timer);
if (dev->devname) {
drm_free(dev->devname, strlen(dev->devname)+1, DRM_MEM_DRIVER);
dev->devname = NULL;
}
if (dev->unique) {
drm_free(dev->unique, strlen(dev->unique)+1, DRM_MEM_DRIVER);
dev->unique = NULL;
dev->unique_len = 0;
}
/* Clear pid list */
for (i = 0; i < DRM_HASH_SIZE; i++) {
for (pt = dev->magiclist[i].head; pt; pt = next) {
next = pt->next;
drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
}
dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
}
/* Clear vma list (only built for debugging) */
if (dev->vmalist) {
for (vma = dev->vmalist; vma; vma = vma_next) {
vma_next = vma->next;
drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
}
dev->vmalist = NULL;
}
/* Clear map area and mtrr information */
if (dev->maplist) {
for (i = 0; i < dev->map_count; i++) {
map = dev->maplist[i];
switch (map->type) {
case _DRM_REGISTERS:
case _DRM_FRAME_BUFFER:
#ifdef CONFIG_MTRR
if (map->mtrr >= 0) {
int retcode;
retcode = mtrr_del(map->mtrr,
map->offset,
map->size);
DRM_DEBUG("mtrr_del = %d\n", retcode);
}
#endif
drm_ioremapfree(map->handle, map->size);
break;
case _DRM_SHM:
drm_free_pages((unsigned long)map->handle,
drm_order(map->size)
- PAGE_SHIFT,
DRM_MEM_SAREA);
break;
}
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
}
drm_free(dev->maplist,
dev->map_count * sizeof(*dev->maplist),
DRM_MEM_MAPS);
dev->maplist = NULL;
dev->map_count = 0;
}
if (dev->lock.hw_lock) {
dev->lock.hw_lock = NULL; /* SHM removed */
dev->lock.pid = 0;
wake_up_interruptible(&dev->lock.lock_queue);
}
up(&dev->struct_sem);
return 0;
}
/* tdfx_init is called via init_module at module load time, or via
* linux/init/main.c (this is not currently supported). */
int tdfx_init(void)
{
int retcode;
drm_device_t *dev = &tdfx_device;
DRM_DEBUG("\n");
memset((void *)dev, 0, sizeof(*dev));
dev->count_lock = SPIN_LOCK_UNLOCKED;
sema_init(&dev->struct_sem, 1);
#ifdef MODULE
drm_parse_options(tdfx);
#endif
if ((retcode = misc_register(&tdfx_misc))) {
DRM_ERROR("Cannot register \"%s\"\n", TDFX_NAME);
return retcode;
}
dev->device = MKDEV(MISC_MAJOR, tdfx_misc.minor);
dev->name = TDFX_NAME;
drm_mem_init();
drm_proc_init(dev);
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
TDFX_NAME,
TDFX_MAJOR,
TDFX_MINOR,
TDFX_PATCHLEVEL,
TDFX_DATE,
tdfx_misc.minor);
return 0;
}
/* tdfx_cleanup is called via cleanup_module at module unload time. */
void tdfx_cleanup(void)
{
drm_device_t *dev = &tdfx_device;
DRM_DEBUG("\n");
drm_proc_cleanup();
if (misc_deregister(&tdfx_misc)) {
DRM_ERROR("Cannot unload module\n");
} else {
DRM_INFO("Module unloaded\n");
}
tdfx_takedown(dev);
}
int tdfx_version(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_version_t version;
int len;
copy_from_user_ret(&version,
(drm_version_t *)arg,
sizeof(version),
-EFAULT);
#define DRM_COPY(name,value) \
len = strlen(value); \
if (len > name##_len) len = name##_len; \
name##_len = strlen(value); \
if (len && name) { \
copy_to_user_ret(name, value, len, -EFAULT); \
}
version.version_major = TDFX_MAJOR;
version.version_minor = TDFX_MINOR;
version.version_patchlevel = TDFX_PATCHLEVEL;
DRM_COPY(version.name, TDFX_NAME);
DRM_COPY(version.date, TDFX_DATE);
DRM_COPY(version.desc, TDFX_DESC);
copy_to_user_ret((drm_version_t *)arg,
&version,
sizeof(version),
-EFAULT);
return 0;
}
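/* Editor's sketch (not part of this change): querying the version numbers
 * that tdfx_version above reports.  The string buffers are left empty
 * (lengths zero), so only the numeric fields and the returned lengths are
 * filled in.  Assumes the drm.h from this release declares drm_version_t
 * and DRM_IOCTL_VERSION.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include "drm.h"
static int example_print_version(int fd)
{
        drm_version_t v;
        memset(&v, 0, sizeof(v));       /* name_len = date_len = desc_len = 0 */
        if (ioctl(fd, DRM_IOCTL_VERSION, &v) < 0) return -1;
        printf("%d.%d.%d\n", v.version_major, v.version_minor,
               v.version_patchlevel);
        return 0;
}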
int tdfx_open(struct inode *inode, struct file *filp)
{
drm_device_t *dev = &tdfx_device;
int retcode = 0;
DRM_DEBUG("open_count = %d\n", dev->open_count);
if (!(retcode = drm_open_helper(inode, filp, dev))) {
MOD_INC_USE_COUNT;
atomic_inc(&dev->total_open);
spin_lock(&dev->count_lock);
if (!dev->open_count++) {
spin_unlock(&dev->count_lock);
return tdfx_setup(dev);
}
spin_unlock(&dev->count_lock);
}
return retcode;
}
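/* Descriptive note (added): tdfx_open() above runs tdfx_setup() only for the
first opener; tdfx_release() below undoes that bookkeeping and calls
tdfx_takedown() when the last open file handle goes away. */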
int tdfx_release(struct inode *inode, struct file *filp)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
int retcode = 0;
DRM_DEBUG("open_count = %d\n", dev->open_count);
if (!(retcode = drm_release(inode, filp))) {
MOD_DEC_USE_COUNT;
atomic_inc(&dev->total_close);
spin_lock(&dev->count_lock);
if (!--dev->open_count) {
if (atomic_read(&dev->ioctl_count) || dev->blocked) {
DRM_ERROR("Device busy: %d %d\n",
atomic_read(&dev->ioctl_count),
dev->blocked);
spin_unlock(&dev->count_lock);
return -EBUSY;
}
spin_unlock(&dev->count_lock);
return tdfx_takedown(dev);
}
spin_unlock(&dev->count_lock);
}
return retcode;
}
/* tdfx_ioctl is called whenever a process performs an ioctl on /dev/drm. */
int tdfx_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
int nr = DRM_IOCTL_NR(cmd);
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
int retcode = 0;
drm_ioctl_desc_t *ioctl;
drm_ioctl_t *func;
atomic_inc(&dev->ioctl_count);
atomic_inc(&dev->total_ioctl);
++priv->ioctl_count;
DRM_DEBUG("pid = %d, cmd = 0x%02x, nr = 0x%02x, dev 0x%x, auth = %d\n",
current->pid, cmd, nr, dev->device, priv->authenticated);
if (nr >= TDFX_IOCTL_COUNT) {
retcode = -EINVAL;
} else {
ioctl = &tdfx_ioctls[nr];
func = ioctl->func;
if (!func) {
DRM_DEBUG("no function\n");
retcode = -EINVAL;
} else if ((ioctl->root_only && !capable(CAP_SYS_ADMIN))
|| (ioctl->auth_needed && !priv->authenticated)) {
retcode = -EACCES;
} else {
retcode = (func)(inode, filp, cmd, arg);
}
}
atomic_dec(&dev->ioctl_count);
return retcode;
}
int tdfx_lock(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
DECLARE_WAITQUEUE(entry, current);
int ret = 0;
drm_lock_t lock;
#if DRM_DMA_HISTOGRAM
cycles_t start;
dev->lck_start = start = get_cycles();
#endif
copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);
if (lock.context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
current->pid, lock.context);
return -EINVAL;
}
DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
lock.context, current->pid, dev->lock.hw_lock->lock,
lock.flags);
#if 0
/* dev->queue_count == 0 right now for
tdfx. FIXME? */
if (lock.context < 0 || lock.context >= dev->queue_count)
return -EINVAL;
#endif
if (!ret) {
#if 0
if (_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)
!= lock.context) {
long j = jiffies - dev->lock.lock_time;
if (lock.context == tdfx_res_ctx.handle &&
j >= 0 && j < DRM_LOCK_SLICE) {
/* Can't take lock if we just had it and
there is contention. */
DRM_DEBUG("%d (pid %d) delayed j=%d dev=%d jiffies=%d\n",
lock.context, current->pid, j,
dev->lock.lock_time, jiffies);
current->state = TASK_INTERRUPTIBLE;
current->policy |= SCHED_YIELD;
schedule_timeout(DRM_LOCK_SLICE-j);
DRM_DEBUG("jiffies=%d\n", jiffies);
}
}
#endif
add_wait_queue(&dev->lock.lock_queue, &entry);
for (;;) {
if (!dev->lock.hw_lock) {
/* Device has been unregistered */
ret = -EINTR;
break;
}
if (drm_lock_take(&dev->lock.hw_lock->lock,
lock.context)) {
dev->lock.pid = current->pid;
dev->lock.lock_time = jiffies;
atomic_inc(&dev->total_locks);
break; /* Got lock */
}
/* Contention */
atomic_inc(&dev->total_sleeps);
current->state = TASK_INTERRUPTIBLE;
#if 1
current->policy |= SCHED_YIELD;
#endif
schedule();
if (signal_pending(current)) {
ret = -ERESTARTSYS;
break;
}
}
current->state = TASK_RUNNING;
remove_wait_queue(&dev->lock.lock_queue, &entry);
}
#if 0
if (!ret && dev->last_context != lock.context &&
lock.context != tdfx_res_ctx.handle &&
dev->last_context != tdfx_res_ctx.handle) {
add_wait_queue(&dev->context_wait, &entry);
current->state = TASK_INTERRUPTIBLE;
/* PRE: dev->last_context != lock.context */
tdfx_context_switch(dev, dev->last_context, lock.context);
/* POST: we will wait for the context
switch and will dispatch on a later call
when dev->last_context == lock.context
NOTE WE HOLD THE LOCK THROUGHOUT THIS
TIME! */
current->policy |= SCHED_YIELD;
schedule();
current->state = TASK_RUNNING;
remove_wait_queue(&dev->context_wait, &entry);
if (signal_pending(current)) {
ret = -EINTR;
} else if (dev->last_context != lock.context) {
DRM_ERROR("Context mismatch: %d %d\n",
dev->last_context, lock.context);
}
}
#endif
if (!ret) {
if (lock.flags & _DRM_LOCK_READY) {
/* Wait for space in DMA/FIFO */
}
if (lock.flags & _DRM_LOCK_QUIESCENT) {
/* Make hardware quiescent */
#if 0
tdfx_quiescent(dev);
#endif
}
}
#if 0
DRM_ERROR("pid = %5d, old counter = %5ld\n",
current->pid, current->counter);
#endif
if (lock.context != tdfx_res_ctx.handle) {
current->counter = 5;
current->priority = DEF_PRIORITY/4;
}
#if 0
while (current->counter > 25)
current->counter >>= 1; /* decrease time slice */
DRM_ERROR("pid = %5d, new counter = %5ld\n",
current->pid, current->counter);
#endif
DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");
#if DRM_DMA_HISTOGRAM
atomic_inc(&dev->histo.lacq[drm_histogram_slot(get_cycles() - start)]);
#endif
return ret;
}
int tdfx_unlock(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_lock_t lock;
copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);
if (lock.context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
current->pid, lock.context);
return -EINVAL;
}
DRM_DEBUG("%d frees lock (%d holds)\n",
lock.context,
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
atomic_inc(&dev->total_unlocks);
if (_DRM_LOCK_IS_CONT(dev->lock.hw_lock->lock))
atomic_inc(&dev->total_contends);
drm_lock_transfer(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT);
/* FIXME: Try to send data to card here */
if (!dev->context_flag) {
if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT)) {
DRM_ERROR("\n");
}
}
#if 0
current->policy |= SCHED_YIELD;
current->state = TASK_INTERRUPTIBLE;
schedule_timeout(1000);
#endif
if (lock.context != tdfx_res_ctx.handle) {
current->counter = 5;
current->priority = DEF_PRIORITY;
}
#if 0
current->state = TASK_INTERRUPTIBLE;
schedule_timeout(10);
#endif
return 0;
}
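/* Illustrative user-space sketch (added; not part of this driver): a client
normally brackets its hardware access with the two ioctls above.  The
context handle would come from DRM_IOCTL_ADD_CTX; `fd' is assumed to be an
open DRM device.

	drm_lock_t req;
	req.context = ctx;
	req.flags   = _DRM_LOCK_READY | _DRM_LOCK_QUIESCENT;
	ioctl(fd, DRM_IOCTL_LOCK, &req);	-- may sleep until the lock is free
	...program the 3Dfx hardware...
	ioctl(fd, DRM_IOCTL_UNLOCK, &req);
*/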

68
linux/tdfx_drv.h Normal file
View File

@ -0,0 +1,68 @@
/* tdfx_drv.h -- Private header for tdfx driver -*- linux-c -*-
* Created: Thu Oct 7 10:40:04 1999 by faith@precisioninsight.com
* Revised: Sat Oct 9 23:38:19 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* $PI$
* $XFree86$
*
*/
#ifndef _TDFX_DRV_H_
#define _TDFX_DRV_H_
/* tdfx_drv.c */
extern int tdfx_init(void);
extern void tdfx_cleanup(void);
extern int tdfx_version(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int tdfx_open(struct inode *inode, struct file *filp);
extern int tdfx_release(struct inode *inode, struct file *filp);
extern int tdfx_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int tdfx_lock(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int tdfx_unlock(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
/* tdfx_context.c */
extern int tdfx_resctx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int tdfx_addctx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int tdfx_modctx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int tdfx_getctx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int tdfx_switchctx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int tdfx_newctx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int tdfx_rmctx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int tdfx_context_switch(drm_device_t *dev, int old, int new);
extern int tdfx_context_switch_complete(drm_device_t *dev, int new);
#endif

264
linux/vm.c Normal file
View File

@ -0,0 +1,264 @@
/* vm.c -- Memory mapping for DRM -*- linux-c -*-
* Created: Mon Jan 4 08:58:31 1999 by faith@precisioninsight.com
* Revised: Fri Aug 20 22:48:11 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/vm.c,v 1.7 1999/08/21 02:48:34 faith Exp $
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/vm.c,v 1.1 1999/09/25 14:38:02 dawes Exp $
*
*/
#define __NO_VERSION__
#include "drmP.h"
struct vm_operations_struct drm_vm_ops = {
nopage: drm_vm_nopage,
open: drm_vm_open,
close: drm_vm_close,
};
struct vm_operations_struct drm_vm_shm_ops = {
nopage: drm_vm_shm_nopage,
open: drm_vm_open,
close: drm_vm_close,
};
struct vm_operations_struct drm_vm_dma_ops = {
nopage: drm_vm_dma_nopage,
open: drm_vm_open,
close: drm_vm_close,
};
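/* Descriptive note (added): drm_mmap() below installs drm_vm_ops for
register and frame-buffer maps and drm_vm_shm_ops for the shared-memory
SAREA; drm_mmap_dma() installs drm_vm_dma_ops for DMA buffer mappings. */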
unsigned long drm_vm_nopage(struct vm_area_struct *vma,
unsigned long address,
int write_access)
{
DRM_DEBUG("0x%08lx, %d\n", address, write_access);
return 0; /* Disallow mremap */
}
unsigned long drm_vm_shm_nopage(struct vm_area_struct *vma,
unsigned long address,
int write_access)
{
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->dev;
unsigned long physical;
unsigned long offset;
unsigned long page;
if (address > vma->vm_end) return 0; /* Disallow mremap */
if (!dev->lock.hw_lock) return 0; /* Nothing allocated */
offset = address - vma->vm_start;
page = offset >> PAGE_SHIFT;
physical = (unsigned long)dev->lock.hw_lock + (offset & (~PAGE_MASK));
atomic_inc(&mem_map[MAP_NR(physical)].count); /* Dec. by kernel */
DRM_DEBUG("0x%08lx (page %lu) => 0x%08lx\n", address, page, physical);
return physical;
}
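/* Worked example (added, illustrative): with 4K pages, a fault at
vma->vm_start + 0x234 gives offset = 0x234 and page = 0, and the function
returns the kernel-virtual address of the lock page plus 0x234; the fault
handler maps that page into the faulting process and later drops the extra
reference taken above. */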
unsigned long drm_vm_dma_nopage(struct vm_area_struct *vma,
unsigned long address,
int write_access)
{
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->dev;
drm_device_dma_t *dma = dev->dma;
unsigned long physical;
unsigned long offset;
unsigned long page;
if (!dma) return 0; /* Error */
if (address > vma->vm_end) return 0; /* Disallow mremap */
if (!dma->pagelist) return 0; /* Nothing allocated */
offset = address - vma->vm_start; /* vm_offset should be 0 */
page = offset >> PAGE_SHIFT;
physical = dma->pagelist[page] + (offset & (~PAGE_MASK));
atomic_inc(&mem_map[MAP_NR(physical)].count); /* Dec. by kernel */
DRM_DEBUG("0x%08lx (page %lu) => 0x%08lx\n", address, page, physical);
return physical;
}
void drm_vm_open(struct vm_area_struct *vma)
{
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->dev;
#if DRM_DEBUG_CODE
drm_vma_entry_t *vma_entry;
#endif
DRM_DEBUG("0x%08lx,0x%08lx\n",
vma->vm_start, vma->vm_end - vma->vm_start);
atomic_inc(&dev->vma_count);
MOD_INC_USE_COUNT;
#if DRM_DEBUG_CODE
vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
if (vma_entry) {
down(&dev->struct_sem);
vma_entry->vma = vma;
vma_entry->next = dev->vmalist;
vma_entry->pid = current->pid;
dev->vmalist = vma_entry;
up(&dev->struct_sem);
}
#endif
}
void drm_vm_close(struct vm_area_struct *vma)
{
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->dev;
#if DRM_DEBUG_CODE
drm_vma_entry_t *pt, *prev;
#endif
DRM_DEBUG("0x%08lx,0x%08lx\n",
vma->vm_start, vma->vm_end - vma->vm_start);
MOD_DEC_USE_COUNT;
atomic_dec(&dev->vma_count);
#if DRM_DEBUG_CODE
down(&dev->struct_sem);
for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
if (pt->vma == vma) {
if (prev) {
prev->next = pt->next;
} else {
dev->vmalist = pt->next;
}
drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
break;
}
}
up(&dev->struct_sem);
#endif
}
int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_device_dma_t *dma = dev->dma;
unsigned long length = vma->vm_end - vma->vm_start;
DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
vma->vm_start, vma->vm_end, vma->vm_offset);
/* Length must match exact page count */
if ((length >> PAGE_SHIFT) != dma->page_count) return -EINVAL;
vma->vm_ops = &drm_vm_dma_ops;
vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
#if LINUX_VERSION_CODE < 0x020203 /* KERNEL_VERSION(2,2,3) */
/* In Linux 2.2.3 and above, this is
handled in do_mmap() in mm/mmap.c. */
++filp->f_count;
#endif
vma->vm_file = filp; /* Needed for drm_vm_open() */
drm_vm_open(vma);
return 0;
}
int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_map_t *map = NULL;
int i;
DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
vma->vm_start, vma->vm_end, vma->vm_offset);
if (!vma->vm_offset) return drm_mmap_dma(filp, vma);
/* A sequential search of the map list is
fine here because: 1) there will only be
about 5-10 entries in the list, and 2) a
DRI client only has to do this mapping
once, so it doesn't have to be optimized
for performance, even if the list were a
bit longer. */
for (i = 0; i < dev->map_count; i++) {
map = dev->maplist[i];
if (map->offset == vma->vm_offset) break;
}
if (i >= dev->map_count) return -EINVAL;
if (!map || ((map->flags&_DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
return -EPERM;
/* Check for valid size. */
if (map->size != vma->vm_end - vma->vm_start) return -EINVAL;
switch (map->type) {
case _DRM_FRAME_BUFFER:
case _DRM_REGISTERS:
if (vma->vm_offset >= __pa(high_memory)) {
#if defined(__i386__)
if (boot_cpu_data.x86 > 3) {
pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
}
#endif
vma->vm_flags |= VM_IO; /* not in core dump */
}
if (remap_page_range(vma->vm_start,
vma->vm_offset,
vma->vm_end - vma->vm_start,
vma->vm_page_prot))
return -EAGAIN;
vma->vm_ops = &drm_vm_ops;
break;
case _DRM_SHM:
vma->vm_ops = &drm_vm_shm_ops;
/* Don't let this area swap. Change when
DRM_KERNEL advisory is supported. */
vma->vm_flags |= VM_LOCKED;
break;
default:
return -EINVAL; /* This should never happen. */
}
vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
if (map->flags & _DRM_READ_ONLY) {
pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
}
#if LINUX_VERSION_CODE < 0x020203 /* KERNEL_VERSION(2,2,3) */
/* In Linux 2.2.3 and above, this is
handled in do_mmap() in mm/mmap.c. */
++filp->f_count;
#endif
vma->vm_file = filp; /* Needed for drm_vm_open() */
drm_vm_open(vma);
return 0;
}
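/* Illustrative user-space sketch (added; not part of this file): the offset
passed to mmap(2) selects the map entry by matching its `offset' field
(offset 0 selects the DMA buffer mapping handled by drm_mmap_dma() above),
and the requested length must equal the map size exactly.  `map_offset' and
`map_size' are placeholders for values obtained via DRM_IOCTL_ADD_MAP.

	void *p = mmap(NULL, map_size, PROT_READ | PROT_WRITE,
		       MAP_SHARED, fd, map_offset);
*/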

277
shared-core/drm.h Normal file
View File

@ -0,0 +1,277 @@
/* drm.h -- Header for Direct Rendering Manager -*- linux-c -*-
* Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com
* Revised: Fri Aug 20 13:08:18 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drm.h,v 1.46 1999/08/20 20:00:53 faith Exp $
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drm.h,v 1.1 1999/09/25 14:37:58 dawes Exp $
*
*/
#ifndef _DRM_H_
#define _DRM_H_
#include <asm/ioctl.h> /* For _IO* macros */
#define DRM_PROC_DEVICES "/proc/devices"
#define DRM_PROC_MISC "/proc/misc"
#define DRM_PROC_DRM "/proc/drm"
#define DRM_DEV_DRM "/dev/drm"
#define DRM_DEV_MODE (S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP)
#define DRM_DEV_UID 0
#define DRM_DEV_GID 0
#define DRM_NAME "drm" /* Name in kernel, /dev, and /proc */
#define DRM_MIN_ORDER 5 /* At least 2^5 bytes = 32 bytes */
#define DRM_MAX_ORDER 22 /* Up to 2^22 bytes = 4MB */
#define DRM_RAM_PERCENT 10 /* How much system ram can we lock? */
#define _DRM_LOCK_HELD 0x80000000 /* Hardware lock is held */
#define _DRM_LOCK_CONT 0x40000000 /* Hardware lock is contended */
#define _DRM_LOCK_IS_HELD(lock) ((lock) & _DRM_LOCK_HELD)
#define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT)
#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
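/* Example (added, illustrative): a lock value of 0xC0000003 has both
_DRM_LOCK_HELD and _DRM_LOCK_CONT set and _DRM_LOCKING_CONTEXT() == 3,
i.e. the hardware lock is held by context 3 and another context is
waiting for it. */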
typedef unsigned long drm_handle_t;
typedef unsigned int drm_context_t;
typedef unsigned int drm_drawable_t;
typedef unsigned int drm_magic_t;
typedef struct drm_version {
int version_major; /* Major version */
int version_minor; /* Minor version */
int version_patchlevel; /* Patch level */
size_t name_len; /* Length of name buffer */
char *name; /* Name of driver */
size_t date_len; /* Length of date buffer */
char *date; /* User-space buffer to hold date */
size_t desc_len; /* Length of desc buffer */
char *desc; /* User-space buffer to hold desc */
} drm_version_t;
typedef struct drm_unique {
size_t unique_len; /* Length of unique */
char *unique; /* Unique name for driver instantiation */
} drm_unique_t;
typedef struct drm_list {
int count; /* Length of user-space structures */
drm_version_t *version;
} drm_list_t;
typedef struct drm_block {
int unused;
} drm_block_t;
typedef struct drm_control {
enum {
DRM_ADD_COMMAND,
DRM_RM_COMMAND,
DRM_INST_HANDLER,
DRM_UNINST_HANDLER
} func;
int irq;
} drm_control_t;
typedef enum drm_map_type {
_DRM_FRAME_BUFFER = 0, /* WC (no caching), no core dump */
_DRM_REGISTERS = 1, /* no caching, no core dump */
_DRM_SHM = 2 /* shared, cached */
} drm_map_type_t;
typedef enum drm_map_flags {
_DRM_RESTRICTED = 0x01, /* Cannot be mapped to user-virtual */
_DRM_READ_ONLY = 0x02,
_DRM_LOCKED = 0x04, /* shared, cached, locked */
_DRM_KERNEL = 0x08, /* kernel requires access */
_DRM_WRITE_COMBINING = 0x10, /* use write-combining if available */
_DRM_CONTAINS_LOCK = 0x20 /* SHM page that contains lock */
} drm_map_flags_t;
typedef struct drm_map {
unsigned long offset; /* Requested physical address (0 for SAREA)*/
unsigned long size; /* Requested physical size (bytes) */
drm_map_type_t type; /* Type of memory to map */
drm_map_flags_t flags; /* Flags */
void *handle; /* User-space: "Handle" to pass to mmap */
/* Kernel-space: kernel-virtual address */
int mtrr; /* MTRR slot used */
/* Private data */
} drm_map_t;
typedef enum drm_lock_flags {
_DRM_LOCK_READY = 0x01, /* Wait until hardware is ready for DMA */
_DRM_LOCK_QUIESCENT = 0x02, /* Wait until hardware quiescent */
_DRM_LOCK_FLUSH = 0x04, /* Flush this context's DMA queue first */
_DRM_LOCK_FLUSH_ALL = 0x08, /* Flush all DMA queues first */
/* These *HALT* flags aren't supported yet
-- they will be used to support the
full-screen DGA-like mode. */
_DRM_HALT_ALL_QUEUES = 0x10, /* Halt all current and future queues */
_DRM_HALT_CUR_QUEUES = 0x20 /* Halt all current queues */
} drm_lock_flags_t;
typedef struct drm_lock {
int context;
drm_lock_flags_t flags;
} drm_lock_t;
typedef enum drm_dma_flags { /* These values *MUST* match xf86drm.h */
/* Flags for DMA buffer dispatch */
_DRM_DMA_BLOCK = 0x01, /* Block until buffer dispatched.
Note, the buffer may not yet have
been processed by the hardware --
getting a hardware lock with the
hardware quiescent will ensure
that the buffer has been
processed. */
_DRM_DMA_WHILE_LOCKED = 0x02, /* Dispatch while lock held */
_DRM_DMA_PRIORITY = 0x04, /* High priority dispatch */
/* Flags for DMA buffer request */
_DRM_DMA_WAIT = 0x10, /* Wait for free buffers */
_DRM_DMA_SMALLER_OK = 0x20, /* Smaller-than-requested buffers ok */
_DRM_DMA_LARGER_OK = 0x40 /* Larger-than-requested buffers ok */
} drm_dma_flags_t;
typedef struct drm_buf_desc {
int count; /* Number of buffers of this size */
int size; /* Size in bytes */
int low_mark; /* Low water mark */
int high_mark; /* High water mark */
enum {
DRM_PAGE_ALIGN = 0x01 /* Align on page boundaries for DMA */
} flags;
} drm_buf_desc_t;
typedef struct drm_buf_info {
int count; /* Entries in list */
drm_buf_desc_t *list;
} drm_buf_info_t;
typedef struct drm_buf_free {
int count;
int *list;
} drm_buf_free_t;
typedef struct drm_buf_pub {
int idx; /* Index into master buflist */
int total; /* Buffer size */
int used; /* Amount of buffer in use (for DMA) */
void *address; /* Address of buffer */
} drm_buf_pub_t;
typedef struct drm_buf_map {
int count; /* Length of buflist */
void *virtual; /* Mmaped area in user-virtual */
drm_buf_pub_t *list; /* Buffer information */
} drm_buf_map_t;
typedef struct drm_dma {
/* Indices here refer to the offset into
buflist in drm_buf_get_t. */
int context; /* Context handle */
int send_count; /* Number of buffers to send */
int *send_indices; /* List of handles to buffers */
int *send_sizes; /* Lengths of data to send */
drm_dma_flags_t flags; /* Flags */
int request_count; /* Number of buffers requested */
int request_size; /* Desired size for buffers */
int *request_indices; /* Buffer information */
int *request_sizes;
int granted_count; /* Number of buffers granted */
} drm_dma_t;
typedef enum {
_DRM_CONTEXT_PRESERVED = 0x01,
_DRM_CONTEXT_2DONLY = 0x02
} drm_ctx_flags_t;
typedef struct drm_ctx {
drm_context_t handle;
drm_ctx_flags_t flags;
} drm_ctx_t;
typedef struct drm_ctx_res {
int count;
drm_ctx_t *contexts;
} drm_ctx_res_t;
typedef struct drm_draw {
drm_drawable_t handle;
} drm_draw_t;
typedef struct drm_auth {
drm_magic_t magic;
} drm_auth_t;
typedef struct drm_irq_busid {
int irq;
int busnum;
int devnum;
int funcnum;
} drm_irq_busid_t;
#define DRM_IOCTL_BASE 'd'
#define DRM_IOCTL_NR(n) _IOC_NR(n)
#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
#define DRM_IOR(nr,size) _IOR(DRM_IOCTL_BASE,nr,size)
#define DRM_IOW(nr,size) _IOW(DRM_IOCTL_BASE,nr,size)
#define DRM_IOWR(nr,size) _IOWR(DRM_IOCTL_BASE,nr,size)
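/* Descriptive note (added): each request below is an _IO*('d', nr, type)
encoding; DRM_IOCTL_NR() recovers the nr field so a driver can use it as
an index into its ioctl table (see tdfx_ioctl() in tdfx_drv.c). */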
#define DRM_IOCTL_VERSION DRM_IOWR(0x00, drm_version_t)
#define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, drm_unique_t)
#define DRM_IOCTL_GET_MAGIC DRM_IOW( 0x02, drm_auth_t)
#define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, drm_irq_busid_t)
#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, drm_unique_t)
#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, drm_auth_t)
#define DRM_IOCTL_BLOCK DRM_IOWR(0x12, drm_block_t)
#define DRM_IOCTL_UNBLOCK DRM_IOWR(0x13, drm_block_t)
#define DRM_IOCTL_CONTROL DRM_IOW( 0x14, drm_control_t)
#define DRM_IOCTL_ADD_MAP DRM_IOWR(0x15, drm_map_t)
#define DRM_IOCTL_ADD_BUFS DRM_IOWR(0x16, drm_buf_desc_t)
#define DRM_IOCTL_MARK_BUFS DRM_IOW( 0x17, drm_buf_desc_t)
#define DRM_IOCTL_INFO_BUFS DRM_IOWR(0x18, drm_buf_info_t)
#define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, drm_buf_map_t)
#define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, drm_buf_free_t)
#define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, drm_ctx_t)
#define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, drm_ctx_t)
#define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, drm_ctx_t)
#define DRM_IOCTL_GET_CTX DRM_IOWR(0x23, drm_ctx_t)
#define DRM_IOCTL_SWITCH_CTX DRM_IOW( 0x24, drm_ctx_t)
#define DRM_IOCTL_NEW_CTX DRM_IOW( 0x25, drm_ctx_t)
#define DRM_IOCTL_RES_CTX DRM_IOWR(0x26, drm_ctx_res_t)
#define DRM_IOCTL_ADD_DRAW DRM_IOWR(0x27, drm_draw_t)
#define DRM_IOCTL_RM_DRAW DRM_IOWR(0x28, drm_draw_t)
#define DRM_IOCTL_DMA DRM_IOWR(0x29, drm_dma_t)
#define DRM_IOCTL_LOCK DRM_IOW( 0x2a, drm_lock_t)
#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, drm_lock_t)
#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, drm_lock_t)
#endif

277
shared/drm.h Normal file
View File

@ -0,0 +1,277 @@
/* drm.h -- Header for Direct Rendering Manager -*- linux-c -*-
* Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com
* Revised: Fri Aug 20 13:08:18 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drm.h,v 1.46 1999/08/20 20:00:53 faith Exp $
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drm.h,v 1.1 1999/09/25 14:37:58 dawes Exp $
*
*/
#ifndef _DRM_H_
#define _DRM_H_
#include <asm/ioctl.h> /* For _IO* macros */
#define DRM_PROC_DEVICES "/proc/devices"
#define DRM_PROC_MISC "/proc/misc"
#define DRM_PROC_DRM "/proc/drm"
#define DRM_DEV_DRM "/dev/drm"
#define DRM_DEV_MODE (S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP)
#define DRM_DEV_UID 0
#define DRM_DEV_GID 0
#define DRM_NAME "drm" /* Name in kernel, /dev, and /proc */
#define DRM_MIN_ORDER 5 /* At least 2^5 bytes = 32 bytes */
#define DRM_MAX_ORDER 22 /* Up to 2^22 bytes = 4MB */
#define DRM_RAM_PERCENT 10 /* How much system ram can we lock? */
#define _DRM_LOCK_HELD 0x80000000 /* Hardware lock is held */
#define _DRM_LOCK_CONT 0x40000000 /* Hardware lock is contended */
#define _DRM_LOCK_IS_HELD(lock) ((lock) & _DRM_LOCK_HELD)
#define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT)
#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
typedef unsigned long drm_handle_t;
typedef unsigned int drm_context_t;
typedef unsigned int drm_drawable_t;
typedef unsigned int drm_magic_t;
typedef struct drm_version {
int version_major; /* Major version */
int version_minor; /* Minor version */
int version_patchlevel; /* Patch level */
size_t name_len; /* Length of name buffer */
char *name; /* Name of driver */
size_t date_len; /* Length of date buffer */
char *date; /* User-space buffer to hold date */
size_t desc_len; /* Length of desc buffer */
char *desc; /* User-space buffer to hold desc */
} drm_version_t;
typedef struct drm_unique {
size_t unique_len; /* Length of unique */
char *unique; /* Unique name for driver instantiation */
} drm_unique_t;
typedef struct drm_list {
int count; /* Length of user-space structures */
drm_version_t *version;
} drm_list_t;
typedef struct drm_block {
int unused;
} drm_block_t;
typedef struct drm_control {
enum {
DRM_ADD_COMMAND,
DRM_RM_COMMAND,
DRM_INST_HANDLER,
DRM_UNINST_HANDLER
} func;
int irq;
} drm_control_t;
typedef enum drm_map_type {
_DRM_FRAME_BUFFER = 0, /* WC (no caching), no core dump */
_DRM_REGISTERS = 1, /* no caching, no core dump */
_DRM_SHM = 2 /* shared, cached */
} drm_map_type_t;
typedef enum drm_map_flags {
_DRM_RESTRICTED = 0x01, /* Cannot be mapped to user-virtual */
_DRM_READ_ONLY = 0x02,
_DRM_LOCKED = 0x04, /* shared, cached, locked */
_DRM_KERNEL = 0x08, /* kernel requires access */
_DRM_WRITE_COMBINING = 0x10, /* use write-combining if available */
_DRM_CONTAINS_LOCK = 0x20 /* SHM page that contains lock */
} drm_map_flags_t;
typedef struct drm_map {
unsigned long offset; /* Requested physical address (0 for SAREA)*/
unsigned long size; /* Requested physical size (bytes) */
drm_map_type_t type; /* Type of memory to map */
drm_map_flags_t flags; /* Flags */
void *handle; /* User-space: "Handle" to pass to mmap */
/* Kernel-space: kernel-virtual address */
int mtrr; /* MTRR slot used */
/* Private data */
} drm_map_t;
typedef enum drm_lock_flags {
_DRM_LOCK_READY = 0x01, /* Wait until hardware is ready for DMA */
_DRM_LOCK_QUIESCENT = 0x02, /* Wait until hardware quiescent */
_DRM_LOCK_FLUSH = 0x04, /* Flush this context's DMA queue first */
_DRM_LOCK_FLUSH_ALL = 0x08, /* Flush all DMA queues first */
/* These *HALT* flags aren't supported yet
-- they will be used to support the
full-screen DGA-like mode. */
_DRM_HALT_ALL_QUEUES = 0x10, /* Halt all current and future queues */
_DRM_HALT_CUR_QUEUES = 0x20 /* Halt all current queues */
} drm_lock_flags_t;
typedef struct drm_lock {
int context;
drm_lock_flags_t flags;
} drm_lock_t;
typedef enum drm_dma_flags { /* These values *MUST* match xf86drm.h */
/* Flags for DMA buffer dispatch */
_DRM_DMA_BLOCK = 0x01, /* Block until buffer dispatched.
Note, the buffer may not yet have
been processed by the hardware --
getting a hardware lock with the
hardware quiescent will ensure
that the buffer has been
processed. */
_DRM_DMA_WHILE_LOCKED = 0x02, /* Dispatch while lock held */
_DRM_DMA_PRIORITY = 0x04, /* High priority dispatch */
/* Flags for DMA buffer request */
_DRM_DMA_WAIT = 0x10, /* Wait for free buffers */
_DRM_DMA_SMALLER_OK = 0x20, /* Smaller-than-requested buffers ok */
_DRM_DMA_LARGER_OK = 0x40 /* Larger-than-requested buffers ok */
} drm_dma_flags_t;
typedef struct drm_buf_desc {
int count; /* Number of buffers of this size */
int size; /* Size in bytes */
int low_mark; /* Low water mark */
int high_mark; /* High water mark */
enum {
DRM_PAGE_ALIGN = 0x01 /* Align on page boundaries for DMA */
} flags;
} drm_buf_desc_t;
typedef struct drm_buf_info {
int count; /* Entries in list */
drm_buf_desc_t *list;
} drm_buf_info_t;
typedef struct drm_buf_free {
int count;
int *list;
} drm_buf_free_t;
typedef struct drm_buf_pub {
int idx; /* Index into master buflist */
int total; /* Buffer size */
int used; /* Amount of buffer in use (for DMA) */
void *address; /* Address of buffer */
} drm_buf_pub_t;
typedef struct drm_buf_map {
int count; /* Length of buflist */
void *virtual; /* Mmaped area in user-virtual */
drm_buf_pub_t *list; /* Buffer information */
} drm_buf_map_t;
typedef struct drm_dma {
/* Indices here refer to the offset into
buflist in drm_buf_get_t. */
int context; /* Context handle */
int send_count; /* Number of buffers to send */
int *send_indices; /* List of handles to buffers */
int *send_sizes; /* Lengths of data to send */
drm_dma_flags_t flags; /* Flags */
int request_count; /* Number of buffers requested */
int request_size; /* Desired size for buffers */
int *request_indices; /* Buffer information */
int *request_sizes;
int granted_count; /* Number of buffers granted */
} drm_dma_t;
typedef enum {
_DRM_CONTEXT_PRESERVED = 0x01,
_DRM_CONTEXT_2DONLY = 0x02
} drm_ctx_flags_t;
typedef struct drm_ctx {
drm_context_t handle;
drm_ctx_flags_t flags;
} drm_ctx_t;
typedef struct drm_ctx_res {
int count;
drm_ctx_t *contexts;
} drm_ctx_res_t;
typedef struct drm_draw {
drm_drawable_t handle;
} drm_draw_t;
typedef struct drm_auth {
drm_magic_t magic;
} drm_auth_t;
typedef struct drm_irq_busid {
int irq;
int busnum;
int devnum;
int funcnum;
} drm_irq_busid_t;
#define DRM_IOCTL_BASE 'd'
#define DRM_IOCTL_NR(n) _IOC_NR(n)
#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
#define DRM_IOR(nr,size) _IOR(DRM_IOCTL_BASE,nr,size)
#define DRM_IOW(nr,size) _IOW(DRM_IOCTL_BASE,nr,size)
#define DRM_IOWR(nr,size) _IOWR(DRM_IOCTL_BASE,nr,size)
#define DRM_IOCTL_VERSION DRM_IOWR(0x00, drm_version_t)
#define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, drm_unique_t)
#define DRM_IOCTL_GET_MAGIC DRM_IOW( 0x02, drm_auth_t)
#define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, drm_irq_busid_t)
#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, drm_unique_t)
#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, drm_auth_t)
#define DRM_IOCTL_BLOCK DRM_IOWR(0x12, drm_block_t)
#define DRM_IOCTL_UNBLOCK DRM_IOWR(0x13, drm_block_t)
#define DRM_IOCTL_CONTROL DRM_IOW( 0x14, drm_control_t)
#define DRM_IOCTL_ADD_MAP DRM_IOWR(0x15, drm_map_t)
#define DRM_IOCTL_ADD_BUFS DRM_IOWR(0x16, drm_buf_desc_t)
#define DRM_IOCTL_MARK_BUFS DRM_IOW( 0x17, drm_buf_desc_t)
#define DRM_IOCTL_INFO_BUFS DRM_IOWR(0x18, drm_buf_info_t)
#define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, drm_buf_map_t)
#define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, drm_buf_free_t)
#define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, drm_ctx_t)
#define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, drm_ctx_t)
#define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, drm_ctx_t)
#define DRM_IOCTL_GET_CTX DRM_IOWR(0x23, drm_ctx_t)
#define DRM_IOCTL_SWITCH_CTX DRM_IOW( 0x24, drm_ctx_t)
#define DRM_IOCTL_NEW_CTX DRM_IOW( 0x25, drm_ctx_t)
#define DRM_IOCTL_RES_CTX DRM_IOWR(0x26, drm_ctx_res_t)
#define DRM_IOCTL_ADD_DRAW DRM_IOWR(0x27, drm_draw_t)
#define DRM_IOCTL_RM_DRAW DRM_IOWR(0x28, drm_draw_t)
#define DRM_IOCTL_DMA DRM_IOWR(0x29, drm_dma_t)
#define DRM_IOCTL_LOCK DRM_IOW( 0x2a, drm_lock_t)
#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, drm_lock_t)
#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, drm_lock_t)
#endif

417
tests/drmstat.c Normal file
View File

@ -0,0 +1,417 @@
/* drmstat.c -- DRM device status and testing program
* Created: Tue Jan 5 08:19:24 1999 by faith@precisioninsight.com
* Revised: Sun Aug 1 11:02:00 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drmstat.c,v 1.28 1999/08/04 18:12:11 faith Exp $
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drmstat.c,v 1.1 1999/09/25 14:37:59 dawes Exp $
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/mman.h>
#include <getopt.h>
#include <strings.h>
#include <errno.h>
#include <signal.h>
#include <fcntl.h>
#include "xf86drm.h"
int sigio_fd;
static double usec(struct timeval *end, struct timeval *start)
{
double e = end->tv_sec * 1000000 + end->tv_usec;
double s = start->tv_sec * 1000000 + start->tv_usec;
return e - s;
}
static void getversion(int fd)
{
drmVersionPtr version;
version = drmGetVersion(fd);
if (version) {
printf( "Name: %s\n", version->name ? version->name : "?" );
printf( " Version: %d.%d.%d\n",
version->version_major,
version->version_minor,
version->version_patchlevel );
printf( " Date: %s\n", version->date ? version->date : "?" );
printf( " Desc: %s\n", version->desc ? version->desc : "?" );
drmFreeVersion(version);
} else {
printf( "No driver available\n" );
}
}
void handler(int fd, void *oldctx, void *newctx)
{
printf("Got fd %d\n", fd);
}
void process_sigio(char *device)
{
int fd;
if ((fd = open(device, 0)) < 0) {
drmError(-errno, __FUNCTION__);
exit(1);
}
sigio_fd = fd;
drmInstallSIGIOHandler(fd, handler);
for (;;) sleep(60);
}
int main(int argc, char **argv)
{
int c;
int r = 0;
int fd = -1;
drmHandle handle;
void *address;
char *pt;
unsigned long count;
unsigned long offset;
unsigned long size;
drmContext context;
int loops;
char buf[1024];
int i;
drmBufInfoPtr info;
drmBufMapPtr bufs;
drmLockPtr lock;
int secs;
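/* Descriptive note (added): each option below exercises one DRM entry point,
and options are handled in command-line order, so the device must be opened
with -o or -O before any option that needs the file descriptor. */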
while ((c = getopt(argc, argv,
"lc:vo:O:f:s:w:W:b:r:R:P:L:C:XS:B:F:")) != EOF)
switch (c) {
case 'F':
count = strtoul(optarg, NULL, 0);
if (!fork()) {
dup(fd);
sleep(count);
}
close(fd);
break;
case 'v': getversion(fd); break;
case 'X':
if ((r = drmCreateContext(fd, &context))) {
drmError(r, argv[0]);
return 1;
}
printf( "Got %d\n", context);
break;
case 'S':
process_sigio(optarg);
break;
case 'C':
if ((r = drmSwitchToContext(fd, strtoul(optarg, NULL, 0)))) {
drmError(r, argv[0]);
return 1;
}
break;
case 'c':
if ((r = drmSetBusid(fd,optarg))) {
drmError(r, argv[0]);
return 1;
}
break;
case 'o':
if ((fd = drmOpen(optarg, NULL)) < 0) {
drmError(fd, argv[0]);
return 1;
}
break;
case 'O':
if ((fd = drmOpen(NULL, optarg)) < 0) {
drmError(fd, argv[0]);
return 1;
}
break;
case 'B': /* Test buffer allocation */
count = strtoul(optarg, &pt, 0);
size = strtoul(pt+1, &pt, 0);
secs = strtoul(pt+1, NULL, 0);
{
drmDMAReq dma;
int *indices, *sizes;
indices = alloca(sizeof(*indices) * count);
sizes = alloca(sizeof(*sizes) * count);
dma.context = context;
dma.send_count = 0;
dma.request_count = count;
dma.request_size = size;
dma.request_list = indices;
dma.request_sizes = sizes;
dma.flags = DRM_DMA_WAIT;
if ((r = drmDMA(fd, &dma))) {
drmError(r, argv[0]);
return 1;
}
for (i = 0; i < dma.granted_count; i++) {
printf("%5d: index = %d, size = %d\n",
i, dma.request_list[i], dma.request_sizes[i]);
}
sleep(secs);
drmFreeBufs(fd, dma.granted_count, indices);
}
break;
case 'b':
count = strtoul(optarg, &pt, 0);
size = strtoul(pt+1, NULL, 0);
if ((r = drmAddBufs(fd, count, size, 0)) < 0) {
drmError(r, argv[0]);
return 1;
}
if (!(info = drmGetBufInfo(fd))) {
drmError(0, argv[0]);
return 1;
}
for (i = 0; i < info->count; i++) {
printf("%5d buffers of size %6d (low = %d, high = %d)\n",
info->list[i].count,
info->list[i].size,
info->list[i].low_mark,
info->list[i].high_mark);
}
if ((r = drmMarkBufs(fd, 0.50, 0.80))) {
drmError(r, argv[0]);
return 1;
}
if (!(info = drmGetBufInfo(fd))) {
drmError(0, argv[0]);
return 1;
}
for (i = 0; i < info->count; i++) {
printf("%5d buffers of size %6d (low = %d, high = %d)\n",
info->list[i].count,
info->list[i].size,
info->list[i].low_mark,
info->list[i].high_mark);
}
printf("===== /proc/drm/1/meminfo =====\n");
sprintf(buf, "cat /proc/drm/1/meminfo");
system(buf);
#if 1
if (!(bufs = drmMapBufs(fd))) {
drmError(0, argv[0]);
return 1;
}
printf("===============================\n");
printf( "%d bufs\n", bufs->count);
for (i = 0; i < bufs->count; i++) {
printf( " %4d: %8d bytes at %p\n",
i,
bufs->list[i].total,
bufs->list[i].address);
}
printf("===== /proc/drm/1/vmainfo =====\n");
sprintf(buf, "cat /proc/drm/1/vmainfo");
system(buf);
#endif
break;
case 'f':
offset = strtoul(optarg, &pt, 0);
size = strtoul(pt+1, NULL, 0);
handle = 0;
if ((r = drmAddMap(fd, offset, size,
DRM_FRAME_BUFFER, 0, &handle))) {
drmError(r, argv[0]);
return 1;
}
printf("0x%08lx:0x%04lx added\n", offset, size);
printf("===== /proc/drm/1/meminfo =====\n");
sprintf(buf, "cat /proc/drm/1/meminfo");
system(buf);
break;
case 'r':
case 'R':
offset = strtoul(optarg, &pt, 0);
size = strtoul(pt+1, NULL, 0);
handle = 0;
if ((r = drmAddMap(fd, offset, size,
DRM_REGISTERS,
c == 'R' ? DRM_READ_ONLY : 0,
&handle))) {
drmError(r, argv[0]);
return 1;
}
printf("0x%08lx:0x%04lx added\n", offset, size);
printf("===== /proc/drm/1/meminfo =====\n");
sprintf(buf, "cat /proc/drm/1/meminfo");
system(buf);
break;
case 's':
size = strtoul(optarg, &pt, 0);
handle = 0;
if ((r = drmAddMap(fd, 0, size,
DRM_SHM, DRM_CONTAINS_LOCK,
&handle))) {
drmError(r, argv[0]);
return 1;
}
printf("0x%04lx byte shm added at 0x%08lx\n", size, handle);
sprintf(buf, "cat /proc/graphics/0/vm");
system(buf);
break;
case 'P':
offset = strtoul(optarg, &pt, 0);
size = strtoul(pt+1, NULL, 0);
address = NULL;
if ((r = drmMap(fd, offset, size, &address))) {
drmError(r, argv[0]);
return 1;
}
printf("0x%08lx:0x%04lx mapped at %p for pid %d\n",
offset, size, address, getpid());
printf("===== /proc/graphics/0/vma =====\n");
sprintf(buf, "cat /proc/graphics/0/vma");
system(buf);
mprotect((void *)offset, size, PROT_READ);
printf("===== /proc/graphics/0/vma =====\n");
sprintf(buf, "cat /proc/graphics/0/vma");
system(buf);
break;
case 'w':
case 'W':
offset = strtoul(optarg, &pt, 0);
size = strtoul(pt+1, NULL, 0);
address = NULL;
if ((r = drmMap(fd, offset, size, &address))) {
drmError(r, argv[0]);
return 1;
}
printf("0x%08lx:0x%04lx mapped at %p for pid %d\n",
offset, size, address, getpid());
printf("===== /proc/%d/maps =====\n", getpid());
sprintf(buf, "cat /proc/%d/maps", getpid());
system(buf);
printf("===== /proc/drm/1/meminfo =====\n");
sprintf(buf, "cat /proc/drm/1/meminfo");
system(buf);
printf("===== /proc/drm/1/vmainfo =====\n");
sprintf(buf, "cat /proc/drm/1/vmainfo");
system(buf);
printf("===== READING =====\n");
for (i = 0; i < 0x10; i++)
printf("%02x ", (unsigned int)((unsigned char *)address)[i]);
printf("\n");
if (c == 'w') {
printf("===== WRITING =====\n");
for (i = 0; i < size; i+=2) {
((char *)address)[i] = i & 0xff;
((char *)address)[i+1] = i & 0xff;
}
}
printf("===== READING =====\n");
for (i = 0; i < 0x10; i++)
printf("%02x ", (unsigned int)((unsigned char *)address)[i]);
printf("\n");
printf("===== /proc/drm/1/vmainfo =====\n");
sprintf(buf, "cat /proc/drm/1/vmainfo");
system(buf);
break;
case 'L':
context = strtoul(optarg, &pt, 0);
offset = strtoul(pt+1, &pt, 0);
size = strtoul(pt+1, &pt, 0);
loops = strtoul(pt+1, NULL, 0);
address = NULL;
if ((r = drmMap(fd, offset, size, &address))) {
drmError(r, argv[0]);
return 1;
}
lock = address;
#if 1
{
int counter = 0;
struct timeval loop_start, loop_end;
struct timeval lock_start, lock_end;
double wt;
#define HISTOSIZE 9
int histo[HISTOSIZE];
int output = 0;
int fast = 0;
if (loops < 0) {
loops = -loops;
++output;
}
for (i = 0; i < HISTOSIZE; i++) histo[i] = 0;
gettimeofday(&loop_start, NULL);
for (i = 0; i < loops; i++) {
gettimeofday(&lock_start, NULL);
DRM_LIGHT_LOCK_COUNT(fd,lock,context,fast);
gettimeofday(&lock_end, NULL);
DRM_UNLOCK(fd,lock,context);
++counter;
wt = usec(&lock_end, &lock_start);
if (wt <= 2.5) ++histo[8];
if (wt < 5.0) ++histo[0];
else if (wt < 50.0) ++histo[1];
else if (wt < 500.0) ++histo[2];
else if (wt < 5000.0) ++histo[3];
else if (wt < 50000.0) ++histo[4];
else if (wt < 500000.0) ++histo[5];
else if (wt < 5000000.0) ++histo[6];
else ++histo[7];
if (output) printf( "%.2f uSec, %d fast\n", wt, fast);
}
gettimeofday(&loop_end, NULL);
printf( "Average wait time = %.2f usec, %d fast\n",
usec(&loop_end, &loop_start) / counter, fast);
printf( "%9d <= 2.5 uS\n", histo[8]);
printf( "%9d < 5 uS\n", histo[0]);
printf( "%9d < 50 uS\n", histo[1]);
printf( "%9d < 500 uS\n", histo[2]);
printf( "%9d < 5000 uS\n", histo[3]);
printf( "%9d < 50000 uS\n", histo[4]);
printf( "%9d < 500000 uS\n", histo[5]);
printf( "%9d < 5000000 uS\n", histo[6]);
printf( "%9d >= 5000000 uS\n", histo[7]);
}
#else
printf( "before lock: 0x%08x\n", lock->lock);
printf( "lock: 0x%08x\n", lock->lock);
sleep(5);
printf( "unlock: 0x%08x\n", lock->lock);
#endif
break;
default:
fprintf( stderr, "Usage: drmstat [options]\n" );
return 1;
}
return r;
}