Merge branch 'master' into nouveau-1

Conflicts:

	linux-core/Makefile.kernel
Dave Airlie 2006-11-06 08:03:18 +11:00
commit 1e90b7ee8c
57 changed files with 7379 additions and 238 deletions


@ -1,4 +1,4 @@
/* i915_drv.c -- ATI Radeon driver -*- linux-c -*-
/* i915_drv.c -- Intel i915 driver -*- linux-c -*-
* Created: Wed Feb 14 17:10:04 2001 by gareth@valinux.com
*/
/*-


@ -19,7 +19,7 @@
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
AC_PREREQ(2.57)
AC_INIT([libdrm], 2.0.2, [dri-devel@lists.sourceforge.net], libdrm)
AC_INIT([libdrm], 2.2.0, [dri-devel@lists.sourceforge.net], libdrm)
AC_CONFIG_SRCDIR([Makefile.am])
AM_INIT_AUTOMAKE([dist-bzip2])
@ -30,6 +30,7 @@ AC_PROG_LIBTOOL
AC_PROG_CC
AC_HEADER_STDC
AC_SYS_LARGEFILE
pkgconfigdir=${libdir}/pkgconfig
AC_SUBST(pkgconfigdir)


@ -26,6 +26,6 @@ AM_CFLAGS = -I$(top_srcdir)/shared-core
libdrm_la_SOURCES = xf86drm.c xf86drmHash.c xf86drmRandom.c xf86drmSL.c
libdrmincludedir = ${includedir}
libdrminclude_HEADERS = xf86drm.h
libdrminclude_HEADERS = xf86drm.h xf86mm.h
EXTRA_DIST = ChangeLog TODO


@ -46,6 +46,9 @@
# include <sys/mman.h>
# endif
#else
# ifdef HAVE_CONFIG_H
# include <config.h>
# endif
# include <stdio.h>
# include <stdlib.h>
# include <unistd.h>
@ -66,6 +69,7 @@
# include "drm.h"
#endif
/* Not all systems have MAP_FAILED defined */
#ifndef MAP_FAILED
#define MAP_FAILED ((void *)-1)
@ -713,7 +717,7 @@ drmVersionPtr drmGetLibVersion(int fd)
* revision 1.2.x = added drmSetInterfaceVersion
* modified drmOpen to handle both busid and name
*/
version->version_major = 1;
version->version_major = 2;
version->version_minor = 2;
version->version_patchlevel = 0;
@ -2252,3 +2256,960 @@ int drmCommandWriteRead(int fd, unsigned long drmCommandIndex, void *data,
}
return 0;
}
/*
* Valid flags are
* DRM_FENCE_FLAG_EMIT
* DRM_FENCE_FLAG_SHAREABLE
* DRM_FENCE_MASK_DRIVER
*/
int drmFenceCreate(int fd, unsigned flags, int class,unsigned type,
drmFence *fence)
{
drm_fence_arg_t arg;
memset(&arg, 0, sizeof(arg));
arg.type = type;
arg.class = class;
arg.op = drm_fence_create;
if (ioctl(fd, DRM_IOCTL_FENCE, &arg))
return -errno;
fence->handle = arg.handle;
fence->class = arg.class;
fence->type = arg.type;
fence->flags = arg.flags;
fence->signaled = 0;
return 0;
}
/*
* Valid flags are
* DRM_FENCE_FLAG_SHAREABLE
* DRM_FENCE_MASK_DRIVER
*/
int drmFenceBuffers(int fd, unsigned flags, drmFence *fence)
{
drm_fence_arg_t arg;
memset(&arg, 0, sizeof(arg));
arg.flags = flags;
arg.op = drm_fence_buffers;
if (ioctl(fd, DRM_IOCTL_FENCE, &arg))
return -errno;
fence->handle = arg.handle;
fence->class = arg.class;
fence->type = arg.type;
fence->flags = arg.flags;
fence->signaled = 0;
return 0;
}
int drmFenceDestroy(int fd, const drmFence *fence)
{
drm_fence_arg_t arg;
memset(&arg, 0, sizeof(arg));
arg.handle = fence->handle;
arg.op = drm_fence_destroy;
if (ioctl(fd, DRM_IOCTL_FENCE, &arg))
return -errno;
return 0;
}
int drmFenceReference(int fd, unsigned handle, drmFence *fence)
{
drm_fence_arg_t arg;
memset(&arg, 0, sizeof(arg));
arg.handle = handle;
arg.op = drm_fence_reference;
if (ioctl(fd, DRM_IOCTL_FENCE, &arg))
return -errno;
fence->handle = arg.handle;
fence->class = arg.class;
fence->type = arg.type;
fence->flags = arg.flags;
fence->signaled = arg.signaled;
return 0;
}
int drmFenceUnreference(int fd, const drmFence *fence)
{
drm_fence_arg_t arg;
memset(&arg, 0, sizeof(arg));
arg.handle = fence->handle;
arg.op = drm_fence_unreference;
if (ioctl(fd, DRM_IOCTL_FENCE, &arg))
return -errno;
return 0;
}
int drmFenceFlush(int fd, drmFence *fence, unsigned flush_type)
{
drm_fence_arg_t arg;
memset(&arg, 0, sizeof(arg));
arg.handle = fence->handle;
arg.type = flush_type;
arg.op = drm_fence_flush;
if (ioctl(fd, DRM_IOCTL_FENCE, &arg))
return -errno;
fence->class = arg.class;
fence->type = arg.type;
fence->signaled = arg.signaled;
return 0;
}
int drmFenceUpdate(int fd, drmFence *fence)
{
drm_fence_arg_t arg;
memset(&arg, 0, sizeof(arg));
arg.handle = fence->handle;
arg.op = drm_fence_signaled;
if (ioctl(fd, DRM_IOCTL_FENCE, &arg))
return -errno;
fence->class = arg.class;
fence->type = arg.type;
fence->signaled = arg.signaled;
return 0;
}
int drmFenceSignaled(int fd, drmFence *fence, unsigned fenceType,
int *signaled)
{
int ret;
if ((fence->flags & DRM_FENCE_FLAG_SHAREABLE) ||
((fenceType & fence->signaled) != fenceType)) {
ret = drmFenceFlush(fd, fence, fenceType);
if (ret)
return ret;
}
*signaled = ((fenceType & fence->signaled) == fenceType);
return 0;
}
/*
* Valid flags are
* DRM_FENCE_FLAG_SHAREABLE
* DRM_FENCE_MASK_DRIVER
*/
int drmFenceEmit(int fd, unsigned flags, drmFence *fence, unsigned emit_type)
{
drm_fence_arg_t arg;
memset(&arg, 0, sizeof(arg));
arg.flags = flags;
arg.handle = fence->handle;
arg.type = emit_type;
arg.op = drm_fence_emit;
if (ioctl(fd, DRM_IOCTL_FENCE, &arg))
return -errno;
fence->class = arg.class;
fence->type = arg.type;
fence->signaled = arg.signaled;
return 0;
}
/*
* Valid flags are
* DRM_FENCE_FLAG_WAIT_LAZY
* DRM_FENCE_FLAG_WAIT_IGNORE_SIGNALS
*/
int drmFenceWait(int fd, unsigned flags, drmFence *fence, unsigned flush_type)
{
drm_fence_arg_t arg;
int ret;
if (flush_type == 0) {
flush_type = fence->type;
}
if (!(fence->flags & DRM_FENCE_FLAG_SHAREABLE)) {
if ((flush_type & fence->signaled) == flush_type) {
return 0;
}
}
memset(&arg, 0, sizeof(arg));
arg.handle = fence->handle;
arg.type = flush_type;
arg.flags = flags;
arg.op = drm_fence_wait;
do {
ret = ioctl(fd, DRM_IOCTL_FENCE, &arg);
} while (ret != 0 && errno == EAGAIN);
if (ret)
return -errno;
fence->class = arg.class;
fence->type = arg.type;
fence->signaled = arg.signaled;
return 0;
}
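/*
 * Illustrative example (not part of this commit): a minimal fence round-trip
 * built from the calls above.  The fence type bit passed in is a placeholder;
 * the valid type and flag values are driver-specific.
 */
static int example_fence_roundtrip(int fd, unsigned fence_type)
{
    drmFence fence;
    int ret;

    /* Create the fence and emit it into the command stream right away. */
    ret = drmFenceCreate(fd, DRM_FENCE_FLAG_EMIT, 0, fence_type, &fence);
    if (ret)
        return ret;

    /* Lazily wait until the requested type bits have signaled. */
    ret = drmFenceWait(fd, DRM_FENCE_FLAG_WAIT_LAZY, &fence, fence_type);

    (void) drmFenceDestroy(fd, &fence);
    return ret;
}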
static int drmAdjustListNodes(drmBOList *list)
{
drmBONode *node;
drmMMListHead *l;
int ret = 0;
while(list->numCurrent < list->numTarget) {
node = (drmBONode *) malloc(sizeof(*node));
if (!node) {
ret = -ENOMEM;
break;
}
list->numCurrent++;
DRMLISTADD(&node->head, &list->free);
}
while(list->numCurrent > list->numTarget) {
l = list->free.next;
if (l == &list->free)
break;
DRMLISTDEL(l);
node = DRMLISTENTRY(drmBONode, l, head);
free(node);
list->numCurrent--;
}
return ret;
}
void drmBOFreeList(drmBOList *list)
{
drmBONode *node;
drmMMListHead *l;
l = list->list.next;
while(l != &list->list) {
DRMLISTDEL(l);
node = DRMLISTENTRY(drmBONode, l, head);
free(node);
l = list->free.next;
list->numCurrent--;
list->numOnList--;
}
l = list->free.next;
while(l != &list->free) {
DRMLISTDEL(l);
node = DRMLISTENTRY(drmBONode, l, head);
free(node);
l = list->free.next;
list->numCurrent--;
}
}
int drmBOResetList(drmBOList *list) {
drmMMListHead *l;
int ret;
ret = drmAdjustListNodes(list);
if (ret)
return ret;
l = list->list.next;
while(l != &list->list) {
DRMLISTDEL(l);
DRMLISTADD(l, &list->free);
list->numOnList--;
l = list->list.next;
}
return drmAdjustListNodes(list);
}
static drmBONode *drmAddListItem(drmBOList *list, drmBO *item,
unsigned long arg0,
unsigned long arg1)
{
drmBONode *node;
drmMMListHead *l;
l = list->free.next;
if (l == &list->free) {
node = (drmBONode *) malloc(sizeof(*node));
if (!node) {
return NULL;
}
list->numCurrent++;
} else {
DRMLISTDEL(l);
node = DRMLISTENTRY(drmBONode, l, head);
}
node->buf = item;
node->arg0 = arg0;
node->arg1 = arg1;
DRMLISTADD(&node->head, &list->list);
list->numOnList++;
return node;
}
void *drmBOListIterator(drmBOList *list)
{
void *ret = list->list.next;
if (ret == &list->list)
return NULL;
return ret;
}
void *drmBOListNext(drmBOList *list, void *iterator)
{
void *ret;
drmMMListHead *l = (drmMMListHead *) iterator;
ret = l->next;
if (ret == &list->list)
return NULL;
return ret;
}
drmBO *drmBOListBuf(void *iterator)
{
drmBONode *node;
drmMMListHead *l = (drmMMListHead *) iterator;
node = DRMLISTENTRY(drmBONode, l, head);
return node->buf;
}
int drmBOCreateList(int numTarget, drmBOList *list)
{
DRMINITLISTHEAD(&list->list);
DRMINITLISTHEAD(&list->free);
list->numTarget = numTarget;
list->numCurrent = 0;
list->numOnList = 0;
return drmAdjustListNodes(list);
}
static void drmBOCopyReply(const drm_bo_arg_reply_t *rep,
drmBO *buf)
{
buf->handle = rep->handle;
buf->flags = rep->flags;
buf->size = rep->size;
buf->offset = rep->offset;
buf->mapHandle = rep->arg_handle;
buf->mask = rep->mask;
buf->start = rep->buffer_start;
buf->fenceFlags = rep->fence_flags;
buf->replyFlags = rep->rep_flags;
buf->pageAlignment = rep->page_alignment;
}
int drmBOCreate(int fd, unsigned long start, unsigned long size,
unsigned pageAlignment, void *user_buffer, drm_bo_type_t type,
unsigned mask,
unsigned hint, drmBO *buf)
{
drm_bo_arg_t arg;
drm_bo_arg_request_t *req = &arg.d.req;
drm_bo_arg_reply_t *rep = &arg.d.rep;
int ret;
memset(buf, 0, sizeof(*buf));
memset(&arg, 0, sizeof(arg));
req->mask = mask;
req->hint = hint;
req->size = size;
req->type = type;
req->page_alignment = pageAlignment;
buf->virtual = NULL;
switch(type) {
case drm_bo_type_dc:
req->buffer_start = start;
break;
case drm_bo_type_user:
req->buffer_start = (unsigned long) user_buffer;
buf->virtual = user_buffer;
break;
case drm_bo_type_fake:
req->buffer_start = start;
break;
default:
return -EINVAL;
}
req->op = drm_bo_create;
do {
ret = ioctl(fd, DRM_IOCTL_BUFOBJ, &arg);
} while (ret != 0 && errno == EAGAIN);
if (ret)
return -errno;
if (!arg.handled) {
return -EFAULT;
}
if (rep->ret) {
fprintf(stderr, "Error %d\n", rep->ret);
return rep->ret;
}
drmBOCopyReply(rep, buf);
buf->mapVirtual = NULL;
buf->mapCount = 0;
return 0;
}
int drmBODestroy(int fd, drmBO *buf)
{
drm_bo_arg_t arg;
drm_bo_arg_request_t *req = &arg.d.req;
drm_bo_arg_reply_t *rep = &arg.d.rep;
if (buf->mapVirtual && (buf->type != drm_bo_type_fake)) {
(void) drmUnmap(buf->mapVirtual, buf->start + buf->size);
buf->mapVirtual = NULL;
buf->virtual = NULL;
}
memset(&arg, 0, sizeof(arg));
req->handle = buf->handle;
req->op = drm_bo_destroy;
if (ioctl(fd, DRM_IOCTL_BUFOBJ, &arg))
return -errno;
if (!arg.handled) {
return -EFAULT;
}
if (rep->ret) {
return rep->ret;
}
buf->handle = 0;
return 0;
}
int drmBOReference(int fd, unsigned handle, drmBO *buf)
{
drm_bo_arg_t arg;
drm_bo_arg_request_t *req = &arg.d.req;
drm_bo_arg_reply_t *rep = &arg.d.rep;
memset(&arg, 0, sizeof(arg));
req->handle = handle;
req->op = drm_bo_reference;
if (ioctl(fd, DRM_IOCTL_BUFOBJ, &arg))
return -errno;
if (!arg.handled) {
return -EFAULT;
}
if (rep->ret) {
return rep->ret;
}
drmBOCopyReply(rep, buf);
buf->type = drm_bo_type_dc;
buf->mapVirtual = NULL;
buf->mapCount = 0;
buf->virtual = NULL;
return 0;
}
int drmBOUnReference(int fd, drmBO *buf)
{
drm_bo_arg_t arg;
drm_bo_arg_request_t *req = &arg.d.req;
drm_bo_arg_reply_t *rep = &arg.d.rep;
if (buf->mapVirtual && (buf->type != drm_bo_type_fake)) {
(void) munmap(buf->mapVirtual, buf->start + buf->size);
buf->mapVirtual = NULL;
buf->virtual = NULL;
}
memset(&arg, 0, sizeof(arg));
req->handle = buf->handle;
req->op = drm_bo_unreference;
if (ioctl(fd, DRM_IOCTL_BUFOBJ, &arg))
return -errno;
if (!arg.handled) {
return -EFAULT;
}
if (rep->ret) {
return rep->ret;
}
buf->handle = 0;
return 0;
}
/*
* Flags can be DRM_BO_FLAG_READ and DRM_BO_FLAG_WRITE or'ed together.
* The hint can currently be DRM_BO_HINT_DONT_BLOCK, which makes the
* call return -EBUSY if it can't immediately honor the mapping request.
*/
int drmBOMap(int fd, drmBO *buf, unsigned mapFlags, unsigned mapHint,
void **address)
{
drm_bo_arg_t arg;
drm_bo_arg_request_t *req = &arg.d.req;
drm_bo_arg_reply_t *rep = &arg.d.rep;
int ret = 0;
/*
* Make sure we have a virtual address of the buffer.
*/
if (!buf->virtual && buf->type != drm_bo_type_fake) {
drmAddress virtual;
virtual = mmap(0, buf->size + buf->start,
PROT_READ | PROT_WRITE, MAP_SHARED,
fd, buf->mapHandle);
if (virtual == MAP_FAILED) {
ret = -errno;
}
if (ret)
return ret;
buf->mapVirtual = virtual;
buf->virtual = ((char *) virtual) + buf->start;
}
memset(&arg, 0, sizeof(arg));
req->handle = buf->handle;
req->mask = mapFlags;
req->hint = mapHint;
req->op = drm_bo_map;
/*
* May hang if the buffer object is busy.
* This IOCTL synchronizes the buffer.
*/
do {
ret = ioctl(fd, DRM_IOCTL_BUFOBJ, &arg);
} while (ret != 0 && errno == EAGAIN);
if (ret)
return ret;
if (!arg.handled)
return -EFAULT;
if (rep->ret)
return rep->ret;
drmBOCopyReply(rep, buf);
buf->mapFlags = mapFlags;
++buf->mapCount;
*address = buf->virtual;
return 0;
}
int drmBOUnmap(int fd, drmBO *buf)
{
drm_bo_arg_t arg;
drm_bo_arg_request_t *req = &arg.d.req;
drm_bo_arg_reply_t *rep = &arg.d.rep;
memset(&arg, 0, sizeof(arg));
req->handle = buf->handle;
req->op = drm_bo_unmap;
if (ioctl(fd, DRM_IOCTL_BUFOBJ, &arg)) {
return -errno;
}
if (!arg.handled)
return -EFAULT;
if (rep->ret)
return rep->ret;
return 0;
}
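/*
 * Illustrative example (not part of this commit): create a buffer object,
 * map it for CPU access, fill it, and unmap it again.  The mask passed in
 * is a placeholder; it would normally contain DRM_BO_FLAG_READ,
 * DRM_BO_FLAG_WRITE and the driver's memory placement flags.
 */
static int example_bo_fill(int fd, unsigned long size, unsigned mask,
                           const void *data)
{
    drmBO buf;
    void *virtual;
    int ret;

    ret = drmBOCreate(fd, 0, size, 0, NULL, drm_bo_type_dc, mask, 0, &buf);
    if (ret)
        return ret;

    /* The map call synchronizes against the GPU; see the comment in drmBOMap(). */
    ret = drmBOMap(fd, &buf, DRM_BO_FLAG_WRITE, 0, &virtual);
    if (ret)
        goto out;

    memcpy(virtual, data, size);
    ret = drmBOUnmap(fd, &buf);
out:
    (void) drmBODestroy(fd, &buf);
    return ret;
}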
int drmBOValidate(int fd, drmBO *buf, unsigned flags, unsigned mask,
unsigned hint)
{
drm_bo_arg_t arg;
drm_bo_arg_request_t *req = &arg.d.req;
drm_bo_arg_reply_t *rep = &arg.d.rep;
int ret = 0;
memset(&arg, 0, sizeof(arg));
req->handle = buf->handle;
req->mask = flags;
req->hint = hint;
req->arg_handle = mask; /* Encode mask in the arg_handle field :/ */
req->op = drm_bo_validate;
do{
ret = ioctl(fd, DRM_IOCTL_BUFOBJ, &arg);
} while (ret && errno == EAGAIN);
if (ret)
return ret;
if (!arg.handled)
return -EFAULT;
if (rep->ret)
return rep->ret;
drmBOCopyReply(rep, buf);
return 0;
}
int drmBOFence(int fd, drmBO *buf, unsigned flags, unsigned fenceHandle)
{
drm_bo_arg_t arg;
drm_bo_arg_request_t *req = &arg.d.req;
drm_bo_arg_reply_t *rep = &arg.d.rep;
int ret = 0;
memset(&arg, 0, sizeof(arg));
req->handle = buf->handle;
req->mask = flags;
req->arg_handle = fenceHandle;
req->op = drm_bo_validate;
ret = ioctl(fd, DRM_IOCTL_BUFOBJ, &arg);
if (ret)
return ret;
if (!arg.handled)
return -EFAULT;
if (rep->ret)
return rep->ret;
return 0;
}
int drmBOInfo(int fd, drmBO *buf)
{
drm_bo_arg_t arg;
drm_bo_arg_request_t *req = &arg.d.req;
drm_bo_arg_reply_t *rep = &arg.d.rep;
int ret = 0;
memset(&arg, 0, sizeof(arg));
req->handle = buf->handle;
req->op = drm_bo_info;
ret = ioctl(fd, DRM_IOCTL_BUFOBJ, &arg);
if (ret)
return ret;
if (!arg.handled)
return -EFAULT;
if (rep->ret)
return rep->ret;
drmBOCopyReply(rep, buf);
return 0;
}
int drmBOWaitIdle(int fd, drmBO *buf, unsigned hint)
{
drm_bo_arg_t arg;
drm_bo_arg_request_t *req = &arg.d.req;
drm_bo_arg_reply_t *rep = &arg.d.rep;
int ret = 0;
if ((buf->flags & DRM_BO_FLAG_SHAREABLE) ||
(buf->replyFlags & DRM_BO_REP_BUSY)) {
memset(&arg, 0, sizeof(arg));
req->handle = buf->handle;
req->op = drm_bo_wait_idle;
req->hint = hint;
do {
ret = ioctl(fd, DRM_IOCTL_BUFOBJ, &arg);
} while (ret && errno == EAGAIN);
if (ret)
return ret;
if (!arg.handled)
return -EFAULT;
if (rep->ret)
return rep->ret;
drmBOCopyReply(rep, buf);
}
return 0;
}
int drmBOBusy(int fd, drmBO *buf, int *busy)
{
if (!(buf->flags & DRM_BO_FLAG_SHAREABLE) &&
!(buf->replyFlags & DRM_BO_REP_BUSY)) {
*busy = 0;
return 0;
} else {
int ret = drmBOInfo(fd, buf);
if (ret)
return ret;
*busy = (buf->replyFlags & DRM_BO_REP_BUSY);
return 0;
}
}
int drmAddValidateItem(drmBOList *list, drmBO *buf, unsigned flags,
unsigned mask,
int *newItem)
{
drmBONode *node, *cur;
drmMMListHead *l;
*newItem = 0;
cur = NULL;
for (l = list->list.next; l != &list->list; l = l->next) {
node = DRMLISTENTRY(drmBONode, l, head);
if (node->buf == buf) {
cur = node;
break;
}
}
if (!cur) {
cur = drmAddListItem(list, buf, flags, mask);
if (!cur) {
drmMsg("Out of memory creating validate list node.\n");
return -ENOMEM;
}
*newItem = 1;
cur->arg0 = flags;
cur->arg1 = mask;
} else {
unsigned memMask = (cur->arg1 | mask) & DRM_BO_MASK_MEM;
unsigned memFlags = cur->arg0 & flags & memMask;
if (!memFlags) {
drmMsg("Incompatible memory location requests "
"on validate list.\n");
drmMsg("Previous flag: 0x%08lx, mask: 0x%08lx\n",
cur->arg0, cur->arg1);
drmMsg("Current flag: 0x%08lx, mask: 0x%08lx\n",
flags, mask);
return -EINVAL;
}
if (mask & cur->arg1 & ~DRM_BO_MASK_MEM & (cur->arg0 ^ flags)) {
drmMsg("Incompatible buffer flag requests "
"on validate list.\n");
drmMsg("Previous flag: 0x%08lx, mask: 0x%08lx\n",
cur->arg0, cur->arg1);
drmMsg("Current flag: 0x%08lx, mask: 0x%08lx\n",
flags, mask);
return -EINVAL;
}
cur->arg1 |= mask;
cur->arg0 = memFlags | ((cur->arg0 | flags) &
cur->arg1 & ~DRM_BO_MASK_MEM);
}
return 0;
}
int drmBOValidateList(int fd, drmBOList *list)
{
drmBONode *node;
drmMMListHead *l;
drm_bo_arg_t *arg, *first;
drm_bo_arg_request_t *req;
drm_bo_arg_reply_t *rep;
drm_u64_t *prevNext = NULL;
drmBO *buf;
int ret;
first = NULL;
for (l = list->list.next; l != &list->list; l = l->next) {
node = DRMLISTENTRY(drmBONode, l, head);
arg = &node->bo_arg;
req = &arg->d.req;
if (!first)
first = arg;
if (prevNext)
*prevNext = (unsigned long) arg;
memset(arg, 0, sizeof(*arg));
prevNext = &arg->next;
req->handle = node->buf->handle;
req->op = drm_bo_validate;
req->mask = node->arg0;
req->hint = 0;
req->arg_handle = node->arg1;
}
if (!first)
return 0;
do{
ret = ioctl(fd, DRM_IOCTL_BUFOBJ, first);
} while (ret && errno == EAGAIN);
if (ret)
return -errno;
for (l = list->list.next; l != &list->list; l = l->next) {
node = DRMLISTENTRY(drmBONode, l, head);
arg = &node->bo_arg;
rep = &arg->d.rep;
if (!arg->handled) {
drmMsg("Unhandled request\n");
return -EFAULT;
}
if (rep->ret)
return rep->ret;
buf = node->buf;
drmBOCopyReply(rep, buf);
}
return 0;
}
int drmBOFenceList(int fd, drmBOList *list, unsigned fenceHandle)
{
drmBONode *node;
drmMMListHead *l;
drm_bo_arg_t *arg, *first;
drm_bo_arg_request_t *req;
drm_bo_arg_reply_t *rep;
drm_u64_t *prevNext = NULL;
drmBO *buf;
unsigned fence_flags;
int ret;
first = NULL;
for (l = list->list.next; l != &list->list; l = l->next) {
node = DRMLISTENTRY(drmBONode, l, head);
arg = &node->bo_arg;
req = &arg->d.req;
if (!first)
first = arg;
if (prevNext)
*prevNext = (unsigned long) arg;
memset(arg, 0, sizeof(*arg));
prevNext = &arg->next;
req->handle = node->buf->handle;
req->op = drm_bo_fence;
req->mask = node->arg0;
req->arg_handle = fenceHandle;
}
if (!first)
return 0;
ret = ioctl(fd, DRM_IOCTL_BUFOBJ, first);
if (ret)
return -errno;
for (l = list->list.next; l != &list->list; l = l->next) {
node = DRMLISTENTRY(drmBONode, l, head);
arg = &node->bo_arg;
rep = &arg->d.rep;
if (!arg->handled)
return -EFAULT;
if (rep->ret)
return rep->ret;
drmBOCopyReply(rep, node->buf);
}
return 0;
}
int drmMMInit(int fd, unsigned long pOffset, unsigned long pSize,
unsigned memType)
{
drm_mm_init_arg_t arg;
memset(&arg, 0, sizeof(arg));
arg.req.op = mm_init;
arg.req.p_offset = pOffset;
arg.req.p_size = pSize;
arg.req.mem_type = memType;
if (ioctl(fd, DRM_IOCTL_MM_INIT, &arg))
return -errno;
return 0;
}
int drmMMTakedown(int fd, unsigned memType)
{
drm_mm_init_arg_t arg;
memset(&arg, 0, sizeof(arg));
arg.req.op = mm_takedown;
arg.req.mem_type = memType;
if (ioctl(fd, DRM_IOCTL_MM_INIT, &arg))
return -errno;
return 0;
}
int drmMMLock(int fd, unsigned memType)
{
drm_mm_init_arg_t arg;
int ret;
memset(&arg, 0, sizeof(arg));
arg.req.op = mm_lock;
arg.req.mem_type = memType;
do{
ret = ioctl(fd, DRM_IOCTL_MM_INIT, &arg);
} while (ret && errno == EAGAIN);
return ret;
}
int drmMMUnlock(int fd, unsigned memType)
{
drm_mm_init_arg_t arg;
int ret;
memset(&arg, 0, sizeof(arg));
arg.req.op = mm_unlock;
arg.req.mem_type = memType;
do{
ret = ioctl(fd, DRM_IOCTL_MM_INIT, &arg);
} while (ret && errno == EAGAIN);
return ret;
}
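/*
 * Illustrative example (not part of this commit): the validate/fence workflow
 * built from the list and fence helpers above.  The placement flags and mask
 * are placeholders supplied by the caller; fence cleanup is omitted for brevity.
 */
static int example_validate_and_fence(int fd, drmBO *buf,
                                      unsigned placementFlags,
                                      unsigned placementMask)
{
    drmBOList list;
    drmFence fence;
    int newItem, ret;

    ret = drmBOCreateList(8, &list);
    if (ret)
        return ret;

    ret = drmAddValidateItem(&list, buf, placementFlags, placementMask,
                             &newItem);
    if (ret)
        goto out;

    /* Ask the kernel to place every buffer on the list as requested. */
    ret = drmBOValidateList(fd, &list);
    if (ret)
        goto out;

    /* Create a fence for the outstanding buffers and attach it to the list. */
    ret = drmFenceBuffers(fd, DRM_FENCE_FLAG_SHAREABLE, &fence);
    if (ret)
        goto out;
    ret = drmBOFenceList(fd, &list, fence.handle);

out:
    drmBOFreeList(&list);
    return ret;
}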


@ -283,7 +283,6 @@ typedef struct _drmSetVersion {
int drm_dd_minor;
} drmSetVersion, *drmSetVersionPtr;
#define __drm_dummy_lock(lock) (*(__volatile__ unsigned int *)lock)
#define DRM_LOCK_HELD 0x80000000U /**< Hardware lock is held */
@ -487,6 +486,8 @@ do { register unsigned int __old __asm("o0"); \
} \
} while(0)
/* General user-level programmer's API: unprivileged */
extern int drmAvailable(void);
extern int drmOpen(const char *name, const char *busid);
@ -636,4 +637,6 @@ extern int drmSLLookupNeighbors(void *l, unsigned long key,
unsigned long *prev_key, void **prev_value,
unsigned long *next_key, void **next_value);
#include "xf86mm.h"
#endif

libdrm/xf86mm.h (new file, 209 lines)

@ -0,0 +1,209 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
#ifndef _XF86MM_H_
#define _XF86MM_H_
#include <stddef.h>
#include "drm.h"
/*
* Note on multithreaded applications using this interface.
* Libdrm is not threadsafe, so common buffer, TTM, and fence objects need to
* be protected using an external mutex.
*
* Note: Don't protect the following functions, as it may lead to deadlocks:
* drmBOUnmap(), drmFenceBuffers().
* The kernel is synchronizing and refcounting buffer maps.
* User space only needs to refcount object usage within the same application.
*/
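/*
 * For example (illustrative only, names made up), a caller could wrap the
 * buffer-object entry points in a single application mutex:
 *
 *      static pthread_mutex_t bo_mutex = PTHREAD_MUTEX_INITIALIZER;
 *
 *      int my_drmBOValidateList(int fd, drmBOList *list)
 *      {
 *              int ret;
 *              pthread_mutex_lock(&bo_mutex);
 *              ret = drmBOValidateList(fd, list);
 *              pthread_mutex_unlock(&bo_mutex);
 *              return ret;
 *      }
 */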
/*
* List macros heavily inspired by the Linux kernel
* list handling. No list looping yet.
*/
typedef struct _drmMMListHead
{
struct _drmMMListHead *prev;
struct _drmMMListHead *next;
} drmMMListHead;
#define DRMINITLISTHEAD(__item) \
do{ \
(__item)->prev = (__item); \
(__item)->next = (__item); \
} while (0)
#define DRMLISTADD(__item, __list) \
do { \
(__item)->prev = (__list); \
(__item)->next = (__list)->next; \
(__list)->next->prev = (__item); \
(__list)->next = (__item); \
} while (0)
#define DRMLISTADDTAIL(__item, __list) \
do { \
(__item)->next = (__list); \
(__item)->prev = (__list)->prev; \
(__list)->prev->next = (__item); \
(__list)->prev = (__item); \
} while(0)
#define DRMLISTDEL(__item) \
do { \
(__item)->prev->next = (__item)->next; \
(__item)->next->prev = (__item)->prev; \
} while(0)
#define DRMLISTDELINIT(__item) \
do { \
(__item)->prev->next = (__item)->next; \
(__item)->next->prev = (__item)->prev; \
(__item)->next = (__item); \
(__item)->prev = (__item); \
} while(0)
#define DRMLISTENTRY(__type, __item, __field) \
((__type *)(((char *) (__item)) - offsetof(__type, __field)))
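/*
 * Example (illustrative): since no looping macro is provided, lists are
 * walked by hand and entries recovered with DRMLISTENTRY, e.g.:
 *
 *      drmMMListHead *l;
 *      for (l = list.next; l != &list; l = l->next) {
 *              drmBONode *node = DRMLISTENTRY(drmBONode, l, head);
 *              ...
 *      }
 */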
typedef struct _drmFence{
unsigned handle;
int class;
unsigned type;
unsigned flags;
unsigned signaled;
unsigned pad[4]; /* for future expansion */
} drmFence;
typedef struct _drmBO{
drm_bo_type_t type;
unsigned handle;
drm_u64_t mapHandle;
unsigned flags;
unsigned mask;
unsigned mapFlags;
unsigned long size;
unsigned long offset;
unsigned long start;
unsigned replyFlags;
unsigned fenceFlags;
unsigned pageAlignment;
void *virtual;
void *mapVirtual;
int mapCount;
unsigned pad[8]; /* for future expansion */
} drmBO;
typedef struct _drmBONode {
drmMMListHead head;
drmBO *buf;
drm_bo_arg_t bo_arg;
unsigned long arg0;
unsigned long arg1;
} drmBONode;
typedef struct _drmBOList {
unsigned numTarget;
unsigned numCurrent;
unsigned numOnList;
drmMMListHead list;
drmMMListHead free;
} drmBOList;
/* Fencing */
extern int drmFenceCreate(int fd, unsigned flags, int class,
unsigned type,
drmFence *fence);
extern int drmFenceDestroy(int fd, const drmFence *fence);
extern int drmFenceReference(int fd, unsigned handle, drmFence *fence);
extern int drmFenceUnreference(int fd, const drmFence *fence);
extern int drmFenceFlush(int fd, drmFence *fence, unsigned flush_type);
extern int drmFenceSignaled(int fd, drmFence *fence,
unsigned fenceType, int *signaled);
extern int drmFenceWait(int fd, unsigned flags, drmFence *fence,
unsigned flush_type);
extern int drmFenceEmit(int fd, unsigned flags, drmFence *fence,
unsigned emit_type);
extern int drmFenceBuffers(int fd, unsigned flags, drmFence *fence);
/*
* Buffer object list functions.
*/
extern void drmBOFreeList(drmBOList *list);
extern int drmBOResetList(drmBOList *list);
extern void *drmBOListIterator(drmBOList *list);
extern void *drmBOListNext(drmBOList *list, void *iterator);
extern drmBO *drmBOListBuf(void *iterator);
extern int drmBOCreateList(int numTarget, drmBOList *list);
/*
* Buffer object functions.
*/
extern int drmBOCreate(int fd, unsigned long start, unsigned long size,
unsigned pageAlignment,void *user_buffer,
drm_bo_type_t type, unsigned mask,
unsigned hint, drmBO *buf);
extern int drmBODestroy(int fd, drmBO *buf);
extern int drmBOReference(int fd, unsigned handle, drmBO *buf);
extern int drmBOUnReference(int fd, drmBO *buf);
extern int drmBOMap(int fd, drmBO *buf, unsigned mapFlags, unsigned mapHint,
void **address);
extern int drmBOUnmap(int fd, drmBO *buf);
extern int drmBOValidate(int fd, drmBO *buf, unsigned flags, unsigned mask,
unsigned hint);
extern int drmBOFence(int fd, drmBO *buf, unsigned flags, unsigned fenceHandle);
extern int drmBOInfo(int fd, drmBO *buf);
extern int drmBOBusy(int fd, drmBO *buf, int *busy);
extern int drmAddValidateItem(drmBOList *list, drmBO *buf, unsigned flags,
unsigned mask,
int *newItem);
extern int drmBOValidateList(int fd, drmBOList *list);
extern int drmBOFenceList(int fd, drmBOList *list, unsigned fenceHandle);
extern int drmBOWaitIdle(int fd, drmBO *buf, unsigned hint);
/*
* Initialization functions.
*/
extern int drmMMInit(int fd, unsigned long pOffset, unsigned long pSize,
unsigned memType);
extern int drmMMTakedown(int fd, unsigned memType);
extern int drmMMLock(int fd, unsigned memType);
extern int drmMMUnlock(int fd, unsigned memType);
#endif


@ -12,13 +12,15 @@ drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
drm_sysfs.o drm_pci.o drm_agpsupport.o drm_scatter.o \
drm_memory_debug.o ati_pcigart.o drm_sman.o \
drm_hashtab.o drm_mm.o
drm_hashtab.o drm_mm.o drm_object.o drm_compat.o \
drm_fence.o drm_ttm.o drm_bo.o
tdfx-objs := tdfx_drv.o
r128-objs := r128_drv.o r128_cce.o r128_state.o r128_irq.o
mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
i810-objs := i810_drv.o i810_dma.o
i830-objs := i830_drv.o i830_dma.o i830_irq.o
i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o
i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \
i915_buffer.o
nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o nouveau_object.o nouveau_irq.o
radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o
sis-objs := sis_drv.o sis_mm.o


@ -41,7 +41,6 @@
* can build the DRM (part of PI DRI). 4/21/2000 S + B */
#include <asm/current.h>
#endif /* __alpha__ */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
@ -84,6 +83,7 @@
#include <linux/poll.h>
#include <asm/pgalloc.h>
#include "drm.h"
#include <linux/slab.h>
#define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
#define __OS_HAS_MTRR (defined(CONFIG_MTRR))
@ -155,9 +155,18 @@
#define DRM_MEM_CTXLIST 21
#define DRM_MEM_MM 22
#define DRM_MEM_HASHTAB 23
#define DRM_MEM_OBJECTS 24
#define DRM_MEM_FENCE 25
#define DRM_MEM_TTM 26
#define DRM_MEM_BUFOBJ 27
#define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
#define DRM_MAP_HASH_OFFSET 0x10000000
#define DRM_MAP_HASH_ORDER 12
#define DRM_OBJECT_HASH_ORDER 12
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#define DRM_MM_INIT_MAX_PAGES 256
/*@}*/
@ -388,6 +397,19 @@ typedef struct drm_buf_entry {
drm_freelist_t freelist;
} drm_buf_entry_t;
/*
* This should be small enough to allow the use of kmalloc for hash tables
* instead of vmalloc.
*/
#define DRM_FILE_HASH_ORDER 8
typedef enum{
_DRM_REF_USE=0,
_DRM_REF_TYPE1,
_DRM_NO_REF_TYPES
} drm_ref_t;
/** File private data */
typedef struct drm_file {
int authenticated;
@ -402,6 +424,18 @@ typedef struct drm_file {
struct drm_head *head;
int remove_auth_on_close;
unsigned long lock_count;
/*
* The user object hash table is global and resides in the
* drm_device structure. We protect the lists and hash tables with the
* device struct_mutex. A bit coarse-grained but probably the best
* option.
*/
struct list_head refd_objects;
struct list_head user_objects;
drm_open_hash_t refd_object_hash[_DRM_NO_REF_TYPES];
void *driver_priv;
} drm_file_t;
@ -503,6 +537,26 @@ typedef struct drm_sigdata {
drm_hw_lock_t *lock;
} drm_sigdata_t;
/*
* Generic memory manager structs
*/
typedef struct drm_mm_node {
struct list_head fl_entry;
struct list_head ml_entry;
int free;
unsigned long start;
unsigned long size;
struct drm_mm *mm;
void *private;
} drm_mm_node_t;
typedef struct drm_mm {
drm_mm_node_t root_node;
} drm_mm_t;
/**
* Mappings list
*/
@ -510,7 +564,8 @@ typedef struct drm_map_list {
struct list_head head; /**< list head */
drm_hash_item_t hash;
drm_map_t *map; /**< mapping */
unsigned int user_token;
drm_u64_t user_token;
drm_mm_node_t *file_offset_node;
} drm_map_list_t;
typedef drm_map_t drm_local_map_t;
@ -544,21 +599,76 @@ typedef struct ati_pcigart_info {
} drm_ati_pcigart_info;
/*
* Generic memory manager structs
* User space objects and their references.
*/
typedef struct drm_mm_node {
struct list_head fl_entry;
struct list_head ml_entry;
int free;
unsigned long start;
unsigned long size;
void *private;
} drm_mm_node_t;
#define drm_user_object_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
typedef struct drm_mm {
drm_mm_node_t root_node;
} drm_mm_t;
typedef enum {
drm_fence_type,
drm_buffer_type,
drm_ttm_type
/*
* Add other user space object types here.
*/
} drm_object_type_t;
/*
* A user object is a structure that helps the drm give out user handles
* to kernel internal objects and to keep track of these objects so that
* they can be destroyed, for example when the user space process exits.
* Designed to be accessible using a user space 32-bit handle.
*/
typedef struct drm_user_object{
drm_hash_item_t hash;
struct list_head list;
drm_object_type_t type;
atomic_t refcount;
int shareable;
drm_file_t *owner;
void (*ref_struct_locked) (drm_file_t *priv, struct drm_user_object *obj,
drm_ref_t ref_action);
void (*unref)(drm_file_t *priv, struct drm_user_object *obj,
drm_ref_t unref_action);
void (*remove)(drm_file_t *priv, struct drm_user_object *obj);
} drm_user_object_t;
/*
* A ref object is a structure used to keep track of references to user
* objects so that they can be destroyed, for example when the user space
* process exits. Designed to be accessible using a pointer to the _user_ object.
*/
typedef struct drm_ref_object {
drm_hash_item_t hash;
struct list_head list;
atomic_t refcount;
drm_ref_t unref_action;
} drm_ref_object_t;
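/*
 * Example (illustrative): a kernel object that needs a user-space handle
 * embeds a drm_user_object_t as its first member (see drm_fence_object_t
 * further down) and recovers itself from the base pointer:
 *
 *      drm_user_object_t *uo = drm_lookup_user_object(priv, handle);
 *      if (uo && uo->type == drm_fence_type) {
 *              drm_fence_object_t *fence =
 *                      drm_user_object_entry(uo, drm_fence_object_t, base);
 *              ...
 *      }
 */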
#include "drm_ttm.h"
/*
* buffer object driver
*/
typedef struct drm_bo_driver{
int cached[DRM_BO_MEM_TYPES];
drm_local_map_t *iomap[DRM_BO_MEM_TYPES];
drm_ttm_backend_t *(*create_ttm_backend_entry)
(struct drm_device *dev);
int (*fence_type)(uint32_t flags, uint32_t *class, uint32_t *type);
int (*invalidate_caches)(struct drm_device *dev, uint32_t flags);
} drm_bo_driver_t;
/**
@ -566,6 +676,7 @@ typedef struct drm_mm {
* a family of cards. There will one drm_device for each card present
* in this family
*/
struct drm_device;
struct drm_driver {
int (*load) (struct drm_device *, unsigned long flags);
@ -612,6 +723,9 @@ struct drm_driver {
unsigned long (*get_reg_ofs) (struct drm_device * dev);
void (*set_version) (struct drm_device * dev, drm_set_version_t * sv);
struct drm_fence_driver *fence_driver;
struct drm_bo_driver *bo_driver;
int major;
int minor;
int patchlevel;
@ -641,6 +755,71 @@ typedef struct drm_head {
struct class_device *dev_class;
} drm_head_t;
typedef struct drm_cache {
/*
* Memory caches
*/
kmem_cache_t *mm;
kmem_cache_t *fence_object;
} drm_cache_t;
typedef struct drm_fence_driver{
int no_types;
uint32_t wrap_diff;
uint32_t flush_diff;
uint32_t sequence_mask;
int lazy_capable;
int (*emit) (struct drm_device *dev, uint32_t flags,
uint32_t *breadcrumb,
uint32_t *native_type);
void (*poke_flush) (struct drm_device *dev);
} drm_fence_driver_t;
#define _DRM_FENCE_TYPE_EXE 0x00
typedef struct drm_fence_manager{
int initialized;
rwlock_t lock;
/*
* The list below should be maintained in sequence order and
* access is protected by the above spinlock.
*/
struct list_head ring;
struct list_head *fence_types[32];
volatile uint32_t pending_flush;
wait_queue_head_t fence_queue;
int pending_exe_flush;
uint32_t last_exe_flush;
uint32_t exe_flush_sequence;
atomic_t count;
} drm_fence_manager_t;
typedef struct drm_buffer_manager{
struct mutex init_mutex;
int nice_mode;
int initialized;
drm_file_t *last_to_validate;
int has_type[DRM_BO_MEM_TYPES];
int use_type[DRM_BO_MEM_TYPES];
drm_mm_t manager[DRM_BO_MEM_TYPES];
struct list_head lru[DRM_BO_MEM_TYPES];
struct list_head pinned[DRM_BO_MEM_TYPES];
struct list_head unfenced;
struct list_head ddestroy;
struct work_struct wq;
uint32_t fence_type;
unsigned long cur_pages;
atomic_t count;
} drm_buffer_manager_t;
/**
* DRM device structure. This structure represent a complete card that
* may contain multiple heads.
@ -688,6 +867,10 @@ typedef struct drm_device {
drm_map_list_t *maplist; /**< Linked list of regions */
int map_count; /**< Number of mappable regions */
drm_open_hash_t map_hash; /**< User token hash table for maps */
drm_mm_t offset_manager; /**< User token manager */
drm_open_hash_t object_hash; /**< User token hash table for objects */
struct address_space *dev_mapping; /**< For unmap_mapping_range() */
struct page *ttm_dummy_page;
/** \name Context handle management */
/*@{ */
@ -774,6 +957,9 @@ typedef struct drm_device {
unsigned int agp_buffer_token;
drm_head_t primary; /**< primary screen head */
drm_fence_manager_t fm;
drm_buffer_manager_t bm;
/** \name Drawable information */
/*@{ */
spinlock_t drw_lock;
@ -784,6 +970,75 @@ typedef struct drm_device {
/*@} */
} drm_device_t;
#if __OS_HAS_AGP
typedef struct drm_agp_ttm_priv {
DRM_AGP_MEM *mem;
struct agp_bridge_data *bridge;
unsigned alloc_type;
unsigned cached_type;
unsigned uncached_type;
int populated;
} drm_agp_ttm_priv;
#endif
typedef struct drm_fence_object{
drm_user_object_t base;
atomic_t usage;
/*
* The below three fields are protected by the fence manager spinlock.
*/
struct list_head ring;
int class;
uint32_t native_type;
uint32_t type;
uint32_t signaled;
uint32_t sequence;
uint32_t flush_mask;
uint32_t submitted_flush;
} drm_fence_object_t;
typedef struct drm_buffer_object{
drm_device_t *dev;
drm_user_object_t base;
/*
* If there is a possibility that the usage variable is zero,
* then dev->struct_mutext should be locked before incrementing it.
*/
atomic_t usage;
drm_ttm_object_t *ttm_object;
drm_ttm_t *ttm;
unsigned long num_pages;
unsigned long buffer_start;
drm_bo_type_t type;
unsigned long offset;
uint32_t page_alignment;
atomic_t mapped;
uint32_t flags;
uint32_t mask;
drm_mm_node_t *node_ttm; /* MM node for on-card RAM */
drm_mm_node_t *node_card; /* MM node for ttm*/
struct list_head lru_ttm; /* LRU for the ttm pages*/
struct list_head lru_card; /* For memory types with on-card RAM */
struct list_head ddestroy;
uint32_t fence_type;
uint32_t fence_class;
drm_fence_object_t *fence;
uint32_t priv_flags;
wait_queue_head_t event_queue;
struct mutex mutex;
} drm_buffer_object_t;
#define _DRM_BO_FLAG_UNFENCED 0x00000001
#define _DRM_BO_FLAG_EVICTED 0x00000002
static __inline__ int drm_core_check_feature(struct drm_device *dev,
int feature)
{
@ -841,6 +1096,7 @@ static inline int drm_mtrr_del(int handle, unsigned long offset,
#define drm_core_has_MTRR(dev) (0)
#endif
/******************************************************************/
/** \name Internal function definitions */
/*@{*/
@ -869,6 +1125,7 @@ unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
extern int drm_mmap(struct file *filp, struct vm_area_struct *vma);
extern unsigned long drm_core_get_map_ofs(drm_map_t * map);
extern unsigned long drm_core_get_reg_ofs(struct drm_device *dev);
extern pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma);
/* Memory management support (drm_memory.h) */
#include "drm_memory.h"
@ -884,6 +1141,14 @@ extern int drm_free_agp(DRM_AGP_MEM * handle, int pages);
extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
extern int drm_unbind_agp(DRM_AGP_MEM * handle);
extern void drm_free_memctl(size_t size);
extern int drm_alloc_memctl(size_t size);
extern void drm_query_memctl(drm_u64_t *cur_used,
drm_u64_t *low_threshold,
drm_u64_t *high_threshold);
extern void drm_init_memctl(size_t low_threshold,
size_t high_threshold);
/* Misc. IOCTL support (drm_ioctl.h) */
extern int drm_irq_by_busid(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
@ -951,6 +1216,13 @@ extern int drm_unlock(struct inode *inode, struct file *filp,
extern int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context);
extern int drm_lock_free(drm_device_t * dev,
__volatile__ unsigned int *lock, unsigned int context);
/*
* These are exported to drivers so that they can implement fencing using
* DMA quiescent + idle. DMA quiescent usually requires the hardware lock.
*/
extern int drm_i_have_hw_lock(struct file *filp);
extern int drm_kernel_take_hw_lock(struct file *filp);
/* Buffer management support (drm_bufs.h) */
extern int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request);
@ -1036,7 +1308,8 @@ extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, size
extern int drm_agp_free_memory(DRM_AGP_MEM * handle);
extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start);
extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle);
extern drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev,
drm_ttm_backend_t *backend);
/* Stub support (drm_stub.h) */
extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
struct drm_driver *driver);
@ -1045,6 +1318,7 @@ extern int drm_put_head(drm_head_t * head);
extern unsigned int drm_debug; /* 1 to enable debug output */
extern unsigned int drm_cards_limit;
extern drm_head_t **drm_heads;
extern drm_cache_t drm_cache;
extern struct drm_sysfs_class *drm_class;
extern struct proc_dir_entry *drm_proc_root;
@ -1088,11 +1362,121 @@ extern void drm_sysfs_device_remove(struct class_device *class_dev);
extern drm_mm_node_t * drm_mm_get_block(drm_mm_node_t * parent, unsigned long size,
unsigned alignment);
extern void drm_mm_put_block(drm_mm_t *mm, drm_mm_node_t *cur);
extern void drm_mm_put_block(drm_mm_node_t *cur);
extern drm_mm_node_t *drm_mm_search_free(const drm_mm_t *mm, unsigned long size,
unsigned alignment, int best_match);
extern int drm_mm_init(drm_mm_t *mm, unsigned long start, unsigned long size);
extern void drm_mm_takedown(drm_mm_t *mm);
extern int drm_mm_clean(drm_mm_t *mm);
extern unsigned long drm_mm_tail_space(drm_mm_t *mm);
extern int drm_mm_remove_space_from_tail(drm_mm_t *mm, unsigned long size);
extern int drm_mm_add_space_to_tail(drm_mm_t *mm, unsigned long size);
static inline drm_mm_t *drm_get_mm(drm_mm_node_t *block)
{
return block->mm;
}
/*
* User space object bookkeeping (drm_object.c)
*/
/*
* Must be called with the struct_mutex held.
*/
extern int drm_add_user_object(drm_file_t *priv, drm_user_object_t *item,
int shareable);
/*
* Must be called with the struct_mutex held.
*/
extern drm_user_object_t *drm_lookup_user_object(drm_file_t *priv, uint32_t key);
/*
* Must be called with the struct_mutex held.
* If "item" has been obtained by a call to drm_lookup_user_object, you may not
* release the struct_mutex before calling drm_remove_ref_object.
* This function may temporarily release the struct_mutex.
*/
extern int drm_remove_user_object(drm_file_t *priv, drm_user_object_t *item);
/*
* Must be called with the struct_mutex held. May temporarily release it.
*/
extern int drm_add_ref_object(drm_file_t *priv, drm_user_object_t *referenced_object,
drm_ref_t ref_action);
/*
* Must be called with the struct_mutex held.
*/
drm_ref_object_t *drm_lookup_ref_object(drm_file_t *priv,
drm_user_object_t *referenced_object,
drm_ref_t ref_action);
/*
* Must be called with the struct_mutex held.
* If "item" has been obtained by a call to drm_lookup_ref_object, you may not
* release the struct_mutex before calling drm_remove_ref_object.
* This function may temporarily release the struct_mutex.
*/
extern void drm_remove_ref_object(drm_file_t *priv, drm_ref_object_t *item);
extern int drm_user_object_ref(drm_file_t *priv, uint32_t user_token, drm_object_type_t type,
drm_user_object_t **object);
extern int drm_user_object_unref(drm_file_t *priv, uint32_t user_token, drm_object_type_t type);
/*
* fence objects (drm_fence.c)
*/
extern void drm_fence_handler(drm_device_t *dev, uint32_t breadcrumb, uint32_t type);
extern void drm_fence_manager_init(drm_device_t *dev);
extern void drm_fence_manager_takedown(drm_device_t *dev);
extern void drm_fence_flush_old(drm_device_t *dev, uint32_t sequence);
extern int drm_fence_object_flush(drm_device_t * dev,
volatile drm_fence_object_t * fence,
uint32_t type);
extern int drm_fence_object_signaled(volatile drm_fence_object_t * fence,
uint32_t type);
extern void drm_fence_usage_deref_locked(drm_device_t * dev,
drm_fence_object_t * fence);
extern void drm_fence_usage_deref_unlocked(drm_device_t * dev,
drm_fence_object_t * fence);
extern int drm_fence_object_wait(drm_device_t * dev,
volatile drm_fence_object_t * fence,
int lazy, int ignore_signals, uint32_t mask);
extern int drm_fence_object_create(drm_device_t *dev, uint32_t type,
uint32_t fence_flags,
drm_fence_object_t **c_fence);
extern int drm_fence_add_user_object(drm_file_t *priv,
drm_fence_object_t *fence,
int shareable);
extern int drm_fence_ioctl(DRM_IOCTL_ARGS);
/*
* buffer objects (drm_bo.c)
*/
extern int drm_bo_ioctl(DRM_IOCTL_ARGS);
extern int drm_mm_init_ioctl(DRM_IOCTL_ARGS);
extern int drm_bo_driver_finish(drm_device_t *dev);
extern int drm_bo_driver_init(drm_device_t *dev);
extern int drm_fence_buffer_objects(drm_file_t * priv,
struct list_head *list,
uint32_t fence_flags,
drm_fence_object_t *fence,
drm_fence_object_t **used_fence);
/* Inline replacements for DRM_IOREMAP macros */
@ -1164,6 +1548,58 @@ extern void *drm_alloc(size_t size, int area);
extern void drm_free(void *pt, size_t size, int area);
#endif
/*
* Accounting variants of standard calls.
*/
static inline void *drm_ctl_alloc(size_t size, int area)
{
void *ret;
if (drm_alloc_memctl(size))
return NULL;
ret = drm_alloc(size, area);
if (!ret)
drm_free_memctl(size);
return ret;
}
static inline void *drm_ctl_calloc(size_t nmemb, size_t size, int area)
{
void *ret;
if (drm_alloc_memctl(nmemb*size))
return NULL;
ret = drm_calloc(nmemb, size, area);
if (!ret)
drm_free_memctl(nmemb*size);
return ret;
}
static inline void drm_ctl_free(void *pt, size_t size, int area)
{
drm_free(pt, size, area);
drm_free_memctl(size);
}
static inline void *drm_ctl_cache_alloc(kmem_cache_t *cache, size_t size,
int flags)
{
void *ret;
if (drm_alloc_memctl(size))
return NULL;
ret = kmem_cache_alloc(cache, flags);
if (!ret)
drm_free_memctl(size);
return ret;
}
static inline void drm_ctl_cache_free(kmem_cache_t *cache, size_t size,
void *obj)
{
kmem_cache_free(cache, obj);
drm_free_memctl(size);
}
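/*
 * Example (illustrative): the accounting variants must be released with the
 * same size they were allocated with, e.g. in a driver:
 *
 *      obj = drm_ctl_alloc(sizeof(*obj), DRM_MEM_BUFOBJ);
 *      if (!obj)
 *              return -ENOMEM;
 *      ...
 *      drm_ctl_free(obj, sizeof(*obj), DRM_MEM_BUFOBJ);
 */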
/*@}*/
#endif /* __KERNEL__ */


@ -552,4 +552,162 @@ int drm_agp_unbind_memory(DRM_AGP_MEM * handle)
return agp_unbind_memory(handle);
}
/*
* AGP ttm backend interface.
*/
#ifndef AGP_USER_TYPES
#define AGP_USER_TYPES (1 << 16)
#define AGP_USER_MEMORY (AGP_USER_TYPES)
#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)
#endif
static int drm_agp_needs_unbind_cache_adjust(drm_ttm_backend_t *backend) {
return ((backend->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1);
}
static int drm_agp_populate(drm_ttm_backend_t *backend, unsigned long num_pages,
struct page **pages) {
drm_agp_ttm_priv *agp_priv = (drm_agp_ttm_priv *) backend->private;
struct page **cur_page, **last_page = pages + num_pages;
DRM_AGP_MEM *mem;
if (drm_alloc_memctl(num_pages * sizeof(void *)))
return -1;
DRM_DEBUG("drm_agp_populate_ttm\n");
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
mem = drm_agp_allocate_memory(num_pages, agp_priv->alloc_type);
#else
mem = drm_agp_allocate_memory(agp_priv->bridge, num_pages, agp_priv->alloc_type);
#endif
if (!mem) {
drm_free_memctl(num_pages *sizeof(void *));
return -1;
}
DRM_DEBUG("Current page count is %ld\n", (long) mem->page_count);
mem->page_count = 0;
for (cur_page = pages; cur_page < last_page; ++cur_page) {
mem->memory[mem->page_count++] = phys_to_gart(page_to_phys(*cur_page));
}
agp_priv->mem = mem;
return 0;
}
static int drm_agp_bind_ttm(drm_ttm_backend_t *backend,
unsigned long offset,
int cached)
{
drm_agp_ttm_priv *agp_priv = (drm_agp_ttm_priv *) backend->private;
DRM_AGP_MEM *mem = agp_priv->mem;
int ret;
DRM_DEBUG("drm_agp_bind_ttm\n");
DRM_MASK_VAL(backend->flags, DRM_BE_FLAG_BOUND_CACHED,
(cached) ? DRM_BE_FLAG_BOUND_CACHED : 0);
mem->is_flushed = TRUE;
mem->type = (cached) ? agp_priv->cached_type : agp_priv->uncached_type;
ret = drm_agp_bind_memory(mem, offset);
if (ret) {
DRM_ERROR("AGP Bind memory failed\n");
}
return ret;
}
static int drm_agp_unbind_ttm(drm_ttm_backend_t *backend) {
drm_agp_ttm_priv *agp_priv = (drm_agp_ttm_priv *) backend->private;
DRM_DEBUG("drm_agp_unbind_ttm\n");
if (agp_priv->mem->is_bound)
return drm_agp_unbind_memory(agp_priv->mem);
else
return 0;
}
static void drm_agp_clear_ttm(drm_ttm_backend_t *backend) {
drm_agp_ttm_priv *agp_priv = (drm_agp_ttm_priv *) backend->private;
DRM_AGP_MEM *mem = agp_priv->mem;
DRM_DEBUG("drm_agp_clear_ttm\n");
if (mem) {
unsigned long num_pages = mem->page_count;
backend->unbind(backend);
agp_free_memory(mem);
drm_free_memctl(num_pages *sizeof(void *));
}
agp_priv->mem = NULL;
}
static void drm_agp_destroy_ttm(drm_ttm_backend_t *backend) {
drm_agp_ttm_priv *agp_priv;
if (backend) {
DRM_DEBUG("drm_agp_destroy_ttm\n");
agp_priv = (drm_agp_ttm_priv *) backend->private;
if (agp_priv) {
if (agp_priv->mem) {
backend->clear(backend);
}
drm_ctl_free(agp_priv, sizeof(*agp_priv), DRM_MEM_MAPPINGS);
backend->private = NULL;
}
if (backend->flags & DRM_BE_FLAG_NEEDS_FREE) {
drm_ctl_free(backend, sizeof(*backend), DRM_MEM_MAPPINGS);
}
}
}
drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev,
drm_ttm_backend_t *backend)
{
drm_ttm_backend_t *agp_be;
drm_agp_ttm_priv *agp_priv;
agp_be = (backend != NULL) ? backend:
drm_ctl_calloc(1, sizeof(*agp_be), DRM_MEM_MAPPINGS);
if (!agp_be)
return NULL;
agp_priv = drm_ctl_calloc(1, sizeof(*agp_priv), DRM_MEM_MAPPINGS);
if (!agp_priv) {
drm_ctl_free(agp_be, sizeof(*agp_be), DRM_MEM_MAPPINGS);
return NULL;
}
agp_priv->mem = NULL;
agp_priv->alloc_type = AGP_USER_MEMORY;
agp_priv->cached_type = AGP_USER_CACHED_MEMORY;
agp_priv->uncached_type = AGP_USER_MEMORY;
agp_priv->bridge = dev->agp->bridge;
agp_priv->populated = FALSE;
agp_be->aperture_base = dev->agp->agp_info.aper_base;
agp_be->private = (void *) agp_priv;
agp_be->needs_ub_cache_adjust = drm_agp_needs_unbind_cache_adjust;
agp_be->populate = drm_agp_populate;
agp_be->clear = drm_agp_clear_ttm;
agp_be->bind = drm_agp_bind_ttm;
agp_be->unbind = drm_agp_unbind_ttm;
agp_be->destroy = drm_agp_destroy_ttm;
DRM_MASK_VAL(agp_be->flags, DRM_BE_FLAG_NEEDS_FREE,
(backend == NULL) ? DRM_BE_FLAG_NEEDS_FREE : 0);
DRM_MASK_VAL(agp_be->flags, DRM_BE_FLAG_CBA,
(dev->agp->cant_use_aperture) ? DRM_BE_FLAG_CBA : 0);
agp_be->drm_map_type = _DRM_AGP;
return agp_be;
}
EXPORT_SYMBOL(drm_agp_init_ttm);
#endif /* __OS_HAS_AGP */

linux-core/drm_bo.c (new file, 1998 lines): diff suppressed because it is too large


@ -80,14 +80,14 @@ static int drm_map_handle(drm_device_t *dev, drm_hash_item_t *hash,
if (!use_hashed_handle) {
int ret;
hash->key = user_token;
hash->key = user_token >> PAGE_SHIFT;
ret = drm_ht_insert_item(&dev->map_hash, hash);
if (ret != -EINVAL)
return ret;
}
return drm_ht_just_insert_please(&dev->map_hash, hash,
user_token, 32 - PAGE_SHIFT - 3,
PAGE_SHIFT, DRM_MAP_HASH_OFFSET);
0, DRM_MAP_HASH_OFFSET >> PAGE_SHIFT);
}
/**
@ -301,7 +301,7 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
return ret;
}
list->user_token = list->hash.key;
list->user_token = list->hash.key << PAGE_SHIFT;
mutex_unlock(&dev->struct_mutex);
*maplist = list;
@ -386,7 +386,8 @@ int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
if (r_list->map == map) {
list_del(list);
drm_ht_remove_key(&dev->map_hash, r_list->user_token);
drm_ht_remove_key(&dev->map_hash,
r_list->user_token >> PAGE_SHIFT);
drm_free(list, sizeof(*list), DRM_MEM_MAPS);
break;
}
@ -422,6 +423,8 @@ int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
dmah.size = map->size;
__drm_pci_free(dev, &dmah);
break;
case _DRM_TTM:
BUG_ON(1);
}
drm_free(map, sizeof(*map), DRM_MEM_MAPS);

linux-core/drm_compat.c (new file, 434 lines)

@ -0,0 +1,434 @@
/**************************************************************************
*
* This kernel module is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
**************************************************************************/
/*
* This code provides access to unexported mm kernel features. It is necessary
* to use the new DRM memory manager code with kernels that don't support it
* directly.
*
* Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
* Linux kernel mm subsystem authors.
* (Most code taken from there).
*/
#include "drmP.h"
#if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
/*
* These have bad performance in the AGP module for the indicated kernel versions.
*/
int drm_map_page_into_agp(struct page *page)
{
int i;
i = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
/* Caller's responsibility to call global_flush_tlb() for
* performance reasons */
return i;
}
int drm_unmap_page_from_agp(struct page *page)
{
int i;
i = change_page_attr(page, 1, PAGE_KERNEL);
/* Caller's responsibility to call global_flush_tlb() for
* performance reasons */
return i;
}
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
/*
* The protection map was exported in 2.6.19
*/
pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
#ifdef MODULE
static pgprot_t drm_protection_map[16] = {
__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};
return drm_protection_map[vm_flags & 0x0F];
#else
extern pgprot_t protection_map[];
return protection_map[vm_flags & 0x0F];
#endif
};
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
/*
* VM code for kernels below 2.6.15, in which a major VM rewrite
* occurred. This implements a simple, straightforward version similar
* to what is expected in kernel 2.6.20+.
*/
static int drm_pte_is_clear(struct vm_area_struct *vma,
unsigned long addr)
{
struct mm_struct *mm = vma->vm_mm;
int ret = 1;
pte_t *pte;
pmd_t *pmd;
pud_t *pud;
pgd_t *pgd;
spin_lock(&mm->page_table_lock);
pgd = pgd_offset(mm, addr);
if (pgd_none(*pgd))
goto unlock;
pud = pud_offset(pgd, addr);
if (pud_none(*pud))
goto unlock;
pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd))
goto unlock;
pte = pte_offset_map(pmd, addr);
if (!pte)
goto unlock;
ret = pte_none(*pte);
pte_unmap(pte);
unlock:
spin_unlock(&mm->page_table_lock);
return ret;
}
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, pgprot_t pgprot)
{
int ret;
if (!drm_pte_is_clear(vma, addr))
return -EBUSY;
ret = io_remap_pfn_range(vma, addr, pfn, PAGE_SIZE, pgprot);
return ret;
}
static struct {
spinlock_t lock;
struct page *dummy_page;
atomic_t present;
} drm_np_retry =
{SPIN_LOCK_UNLOCKED, NOPAGE_OOM, ATOMIC_INIT(0)};
struct page * get_nopage_retry(void)
{
if (atomic_read(&drm_np_retry.present) == 0) {
struct page *page = alloc_page(GFP_KERNEL);
if (!page)
return NOPAGE_OOM;
spin_lock(&drm_np_retry.lock);
drm_np_retry.dummy_page = page;
atomic_set(&drm_np_retry.present,1);
spin_unlock(&drm_np_retry.lock);
}
get_page(drm_np_retry.dummy_page);
return drm_np_retry.dummy_page;
}
void free_nopage_retry(void)
{
if (atomic_read(&drm_np_retry.present) == 1) {
spin_lock(&drm_np_retry.lock);
__free_page(drm_np_retry.dummy_page);
drm_np_retry.dummy_page = NULL;
atomic_set(&drm_np_retry.present, 0);
spin_unlock(&drm_np_retry.lock);
}
}
struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
unsigned long address,
int *type)
{
struct fault_data data;
if (type)
*type = VM_FAULT_MINOR;
data.address = address;
data.vma = vma;
drm_vm_ttm_fault(vma, &data);
switch (data.type) {
case VM_FAULT_OOM:
return NOPAGE_OOM;
case VM_FAULT_SIGBUS:
return NOPAGE_SIGBUS;
default:
break;
}
return NOPAGE_REFAULT;
}
#endif
#ifdef DRM_ODD_MM_COMPAT
/*
* VM compatibility code for 2.6.15-2.6.19(?). This code implements a complicated
* workaround for a single BUG statement in do_no_page in these versions. The
* tricky thing is that we need to take the mmap_sem in exclusive mode for _all_
* vmas mapping the ttm, before dev->struct_mutex is taken. The way we do this is
* to first take the dev->struct_mutex and then trylock all mmap_sems. If this
* fails for a single mmap_sem, we have to release all sems and the dev->struct_mutex,
* release the CPU, and retry. We also need to keep track of all vmas mapping the ttm.
* phew.
*/
typedef struct p_mm_entry {
struct list_head head;
struct mm_struct *mm;
atomic_t refcount;
int locked;
} p_mm_entry_t;
typedef struct vma_entry {
struct list_head head;
struct vm_area_struct *vma;
} vma_entry_t;
struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
unsigned long address,
int *type)
{
drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data;
unsigned long page_offset;
struct page *page;
drm_ttm_t *ttm;
drm_buffer_manager_t *bm;
drm_device_t *dev;
/*
* FIXME: Check can't map aperture flag.
*/
if (type)
*type = VM_FAULT_MINOR;
if (!map)
return NOPAGE_OOM;
if (address > vma->vm_end)
return NOPAGE_SIGBUS;
ttm = (drm_ttm_t *) map->offset;
dev = ttm->dev;
mutex_lock(&dev->struct_mutex);
drm_fixup_ttm_caching(ttm);
BUG_ON(ttm->page_flags & DRM_TTM_PAGE_UNCACHED);
bm = &dev->bm;
page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
page = ttm->pages[page_offset];
if (!page) {
if (drm_alloc_memctl(PAGE_SIZE)) {
page = NOPAGE_OOM;
goto out;
}
page = ttm->pages[page_offset] = drm_alloc_gatt_pages(0);
if (!page) {
drm_free_memctl(PAGE_SIZE);
page = NOPAGE_OOM;
goto out;
}
++bm->cur_pages;
SetPageLocked(page);
}
get_page(page);
out:
mutex_unlock(&dev->struct_mutex);
return page;
}
int drm_ttm_map_bound(struct vm_area_struct *vma)
{
drm_local_map_t *map = (drm_local_map_t *)vma->vm_private_data;
drm_ttm_t *ttm = (drm_ttm_t *) map->offset;
int ret = 0;
if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED) {
unsigned long pfn = ttm->aper_offset +
(ttm->be->aperture_base >> PAGE_SHIFT);
pgprot_t pgprot = drm_io_prot(ttm->be->drm_map_type, vma);
ret = io_remap_pfn_range(vma, vma->vm_start, pfn,
vma->vm_end - vma->vm_start,
pgprot);
}
return ret;
}
int drm_ttm_add_vma(drm_ttm_t * ttm, struct vm_area_struct *vma)
{
p_mm_entry_t *entry, *n_entry;
vma_entry_t *v_entry;
drm_local_map_t *map = (drm_local_map_t *)
vma->vm_private_data;
struct mm_struct *mm = vma->vm_mm;
v_entry = drm_ctl_alloc(sizeof(*v_entry), DRM_MEM_TTM);
if (!v_entry) {
DRM_ERROR("Allocation of vma pointer entry failed\n");
return -ENOMEM;
}
v_entry->vma = vma;
map->handle = (void *) v_entry;
list_add_tail(&v_entry->head, &ttm->vma_list);
list_for_each_entry(entry, &ttm->p_mm_list, head) {
if (mm == entry->mm) {
atomic_inc(&entry->refcount);
return 0;
} else if ((unsigned long)mm < (unsigned long)entry->mm) ;
}
n_entry = drm_ctl_alloc(sizeof(*n_entry), DRM_MEM_TTM);
if (!n_entry) {
DRM_ERROR("Allocation of process mm pointer entry failed\n");
return -ENOMEM;
}
INIT_LIST_HEAD(&n_entry->head);
n_entry->mm = mm;
n_entry->locked = 0;
atomic_set(&n_entry->refcount, 0);
list_add_tail(&n_entry->head, &entry->head);
return 0;
}
void drm_ttm_delete_vma(drm_ttm_t * ttm, struct vm_area_struct *vma)
{
p_mm_entry_t *entry, *n;
vma_entry_t *v_entry, *v_n;
int found = 0;
struct mm_struct *mm = vma->vm_mm;
list_for_each_entry_safe(v_entry, v_n, &ttm->vma_list, head) {
if (v_entry->vma == vma) {
found = 1;
list_del(&v_entry->head);
drm_ctl_free(v_entry, sizeof(*v_entry), DRM_MEM_TTM);
break;
}
}
BUG_ON(!found);
list_for_each_entry_safe(entry, n, &ttm->p_mm_list, head) {
if (mm == entry->mm) {
if (atomic_add_negative(-1, &entry->refcount)) {
list_del(&entry->head);
BUG_ON(entry->locked);
drm_ctl_free(entry, sizeof(*entry), DRM_MEM_TTM);
}
return;
}
}
BUG_ON(1);
}
int drm_ttm_lock_mm(drm_ttm_t * ttm)
{
p_mm_entry_t *entry;
int lock_ok = 1;
list_for_each_entry(entry, &ttm->p_mm_list, head) {
BUG_ON(entry->locked);
if (!down_write_trylock(&entry->mm->mmap_sem)) {
lock_ok = 0;
break;
}
entry->locked = 1;
}
if (lock_ok)
return 0;
list_for_each_entry(entry, &ttm->p_mm_list, head) {
if (!entry->locked)
break;
up_write(&entry->mm->mmap_sem);
entry->locked = 0;
}
/*
* Possible deadlock. Try again. Our callers should handle this
* and restart.
*/
return -EAGAIN;
}
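/*
 * Hedged example (not part of this commit): the retry pattern that the
 * drm_compat.h comment further down asks callers to follow when
 * drm_ttm_lock_mm() returns -EAGAIN -- release the mutex protecting the ttm,
 * schedule() and try again.  The helper name and the explicit mutex
 * parameter are illustrative only; real callers use whatever mutex guards
 * the ttm in their code path.
 */
static int drm_ttm_lock_mm_retry(drm_ttm_t * ttm, struct mutex *ttm_mutex)
{
	int ret;

	do {
		ret = drm_ttm_lock_mm(ttm);
		if (ret != -EAGAIN)
			break;

		/* Possible deadlock: back off, give up the CPU and retry. */
		mutex_unlock(ttm_mutex);
		schedule();
		mutex_lock(ttm_mutex);
	} while (1);

	return ret;
}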
void drm_ttm_unlock_mm(drm_ttm_t * ttm)
{
p_mm_entry_t *entry;
list_for_each_entry(entry, &ttm->p_mm_list, head) {
BUG_ON(!entry->locked);
up_write(&entry->mm->mmap_sem);
entry->locked = 0;
}
}
int drm_ttm_remap_bound(drm_ttm_t *ttm)
{
vma_entry_t *v_entry;
int ret = 0;
list_for_each_entry(v_entry, &ttm->vma_list, head) {
ret = drm_ttm_map_bound(v_entry->vma);
if (ret)
break;
}
drm_ttm_unlock_mm(ttm);
return ret;
}
void drm_ttm_finish_unmap(drm_ttm_t *ttm)
{
vma_entry_t *v_entry;
if (!(ttm->page_flags & DRM_TTM_PAGE_UNCACHED))
return;
list_for_each_entry(v_entry, &ttm->vma_list, head) {
v_entry->vma->vm_flags &= ~VM_PFNMAP;
}
drm_ttm_unlock_mm(ttm);
}
#endif

View File

@ -31,6 +31,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <asm/agp.h>
#ifndef _DRM_COMPAT_H_
#define _DRM_COMPAT_H_
@ -227,4 +228,152 @@ static inline int remap_pfn_range(struct vm_area_struct *vma, unsigned long from
}
#endif
#include <linux/mm.h>
#include <asm/page.h>
#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)) && \
(LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)))
#define DRM_ODD_MM_COMPAT
#endif
/*
* Flush relevant caches and clear a VMA structure so that page references
* will cause a page fault. Don't flush tlbs.
*/
extern void drm_clear_vma(struct vm_area_struct *vma,
unsigned long addr, unsigned long end);
/*
* Return the PTE protection map entries for the VMA flags given by
* flags. This is a functional interface to the kernel's protection map.
*/
extern pgprot_t vm_get_page_prot(unsigned long vm_flags);
/*
* These are similar to the current kernel gatt pages allocator, except that we
* want a struct page pointer instead of a virtual address. This allows for pages
* that are not in the kernel linear map.
*/
#define drm_alloc_gatt_pages(order) ({ \
void *_virt = alloc_gatt_pages(order); \
((_virt) ? virt_to_page(_virt) : NULL);})
#define drm_free_gatt_pages(pages, order) free_gatt_pages(page_address(pages), order)
#if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
/*
* These are too slow in earlier kernels.
*/
extern int drm_unmap_page_from_agp(struct page *page);
extern int drm_map_page_into_agp(struct page *page);
#define map_page_into_agp drm_map_page_into_agp
#define unmap_page_from_agp drm_unmap_page_from_agp
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
extern struct page *get_nopage_retry(void);
extern void free_nopage_retry(void);
struct fault_data;
extern struct page *drm_vm_ttm_fault(struct vm_area_struct *vma,
struct fault_data *data);
#define NOPAGE_REFAULT get_nopage_retry()
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
/*
* Hopefully, real NOPAGE_RETRY functionality will be in 2.6.19.
* For now, just return a dummy page that we've allocated out of
* static space. The page will be put by do_nopage() since we've already
* filled out the pte.
*/
struct fault_data {
struct vm_area_struct *vma;
unsigned long address;
pgoff_t pgoff;
unsigned int flags;
int type;
};
extern int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, pgprot_t pgprot);
extern struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
unsigned long address,
int *type);
#endif
#ifdef DRM_ODD_MM_COMPAT
struct drm_ttm;
/*
* Add a vma to the ttm vma list, and the
* process mm pointer to the ttm mm list. Needs the ttm mutex.
*/
extern int drm_ttm_add_vma(struct drm_ttm * ttm,
struct vm_area_struct *vma);
/*
* Delete a vma and the corresponding mm pointer from the
* ttm lists. Needs the ttm mutex.
*/
extern void drm_ttm_delete_vma(struct drm_ttm * ttm,
struct vm_area_struct *vma);
/*
* Attempts to lock all relevant mmap_sems for a ttm, while
* not releasing the ttm mutex. May return -EAGAIN to avoid
* deadlocks. In that case the caller shall release the ttm mutex,
* schedule() and try again.
*/
extern int drm_ttm_lock_mm(struct drm_ttm * ttm);
/*
* Unlock all relevant mmap_sems for a ttm.
*/
extern void drm_ttm_unlock_mm(struct drm_ttm * ttm);
/*
* If the ttm was bound to the aperture, this function shall be called
* with all relevant mmap sems held. It deletes the flag VM_PFNMAP from all
* vmas mapping this ttm. This is needed just after unmapping the ptes of
* the vma, otherwise the do_nopage() function will bug :(. The function
* releases the mmap_sems for this ttm.
*/
extern void drm_ttm_finish_unmap(struct drm_ttm *ttm);
/*
* Remap all vmas of this ttm using io_remap_pfn_range. We cannot
* fault these pfns in, because the first one will set the vma VM_PFNMAP
* flag, which will make the next fault bug in do_nopage(). The function
* releases the mmap_sems for this ttm.
*/
extern int drm_ttm_remap_bound(struct drm_ttm *ttm);
/*
* Remap a vma for a bound ttm. Call with the ttm mutex held and
* the relevant mmap_sem locked.
*/
extern int drm_ttm_map_bound(struct vm_area_struct *vma);
#endif
#endif

View File

@ -1 +0,0 @@
../shared-core/drm_drawable.c

View File

@ -119,12 +119,17 @@ static drm_ioctl_desc_t drm_ioctls[] = {
[DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = {drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
[DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = {drm_wait_vblank, 0},
[DRM_IOCTL_NR(DRM_IOCTL_FENCE)] = {drm_fence_ioctl, DRM_AUTH},
[DRM_IOCTL_NR(DRM_IOCTL_BUFOBJ)] = {drm_bo_ioctl, DRM_AUTH},
[DRM_IOCTL_NR(DRM_IOCTL_MM_INIT)] = {drm_mm_init_ioctl,
DRM_AUTH },
[DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW)] = {drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
};
#define DRIVER_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
/**
* Take down the DRM device.
*
@ -143,6 +148,11 @@ int drm_lastclose(drm_device_t * dev)
DRM_DEBUG("\n");
if (drm_bo_driver_finish(dev)) {
DRM_ERROR("DRM memory manager still busy. "
"System is unstable. Please reboot.\n");
}
if (dev->driver->lastclose)
dev->driver->lastclose(dev);
DRM_DEBUG("driver lastclose completed\n");
@ -218,7 +228,7 @@ int drm_lastclose(drm_device_t * dev)
if (dev->vmalist) {
for (vma = dev->vmalist; vma; vma = vma_next) {
vma_next = vma->next;
drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
drm_ctl_free(vma, sizeof(*vma), DRM_MEM_VMAS);
}
dev->vmalist = NULL;
}
@ -256,6 +266,7 @@ int drm_lastclose(drm_device_t * dev)
dev->lock.filp = NULL;
wake_up_interruptible(&dev->lock.lock_queue);
}
dev->dev_mapping = NULL;
mutex_unlock(&dev->struct_mutex);
DRM_DEBUG("lastclose completed\n");
@ -360,11 +371,14 @@ static void drm_cleanup(drm_device_t * dev)
}
drm_lastclose(dev);
drm_fence_manager_takedown(dev);
if (dev->maplist) {
drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
dev->maplist = NULL;
drm_ht_remove(&dev->map_hash);
drm_mm_takedown(&dev->offset_manager);
drm_ht_remove(&dev->object_hash);
}
if (!drm_fb_loaded)
@ -419,6 +433,9 @@ void drm_exit(struct drm_driver *driver)
}
} else
pci_unregister_driver(&driver->pci_driver);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
free_nopage_retry();
#endif
DRM_INFO("Module unloaded\n");
}
EXPORT_SYMBOL(drm_exit);
@ -429,10 +446,64 @@ static struct file_operations drm_stub_fops = {
.open = drm_stub_open
};
static int drm_create_memory_caches(void)
{
drm_cache.mm = kmem_cache_create("drm_mm_node_t",
sizeof(drm_mm_node_t),
0,
SLAB_HWCACHE_ALIGN,
NULL,NULL);
if (!drm_cache.mm)
return -ENOMEM;
drm_cache.fence_object= kmem_cache_create("drm_fence_object_t",
sizeof(drm_fence_object_t),
0,
SLAB_HWCACHE_ALIGN,
NULL,NULL);
if (!drm_cache.fence_object)
return -ENOMEM;
return 0;
}
static void drm_free_mem_cache(kmem_cache_t *cache,
const char *name)
{
if (!cache)
return;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
if (kmem_cache_destroy(cache)) {
DRM_ERROR("Warning! DRM is leaking %s memory.\n",
name);
}
#else
kmem_cache_destroy(cache);
#endif
}
static void drm_free_memory_caches(void )
{
drm_free_mem_cache(drm_cache.fence_object, "fence object");
drm_cache.fence_object = NULL;
drm_free_mem_cache(drm_cache.mm, "memory manager block");
drm_cache.mm = NULL;
}
static int __init drm_core_init(void)
{
int ret = -ENOMEM;
int ret;
struct sysinfo si;
si_meminfo(&si);
drm_init_memctl(si.totalram/2, si.totalram*3/4);
ret = drm_create_memory_caches();
if (ret)
goto err_p1;
ret = -ENOMEM;
drm_cards_limit =
(drm_cards_limit < DRM_MAX_MINOR + 1 ? drm_cards_limit : DRM_MAX_MINOR + 1);
drm_heads = drm_calloc(drm_cards_limit, sizeof(*drm_heads), DRM_MEM_STUB);
@ -468,11 +539,13 @@ err_p2:
unregister_chrdev(DRM_MAJOR, "drm");
drm_free(drm_heads, sizeof(*drm_heads) * drm_cards_limit, DRM_MEM_STUB);
err_p1:
drm_free_memory_caches();
return ret;
}
static void __exit drm_core_exit(void)
{
drm_free_memory_caches();
remove_proc_entry("dri", NULL);
drm_sysfs_destroy(drm_class);
@ -549,14 +622,19 @@ int drm_ioctl(struct inode *inode, struct file *filp,
current->pid, cmd, nr, (long)old_encode_dev(priv->head->device),
priv->authenticated);
if (nr < DRIVER_IOCTL_COUNT)
ioctl = &drm_ioctls[nr];
else if ((nr >= DRM_COMMAND_BASE)
if (nr >= DRIVER_IOCTL_COUNT &&
(nr < DRM_COMMAND_BASE || nr >= DRM_COMMAND_END))
goto err_i1;
if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
&& (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls))
ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
else if (nr >= DRM_COMMAND_END || nr < DRM_COMMAND_BASE)
ioctl = &drm_ioctls[nr];
else
goto err_i1;
func = ioctl->func;
if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl) /* Local override? */
func = dev->driver->dma_ioctl;

619
linux-core/drm_fence.c Normal file
View File

@ -0,0 +1,619 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include "drmP.h"
/*
* Typically called by the IRQ handler.
*/
void drm_fence_handler(drm_device_t * dev, uint32_t sequence, uint32_t type)
{
int wake = 0;
uint32_t diff;
uint32_t relevant;
drm_fence_manager_t *fm = &dev->fm;
drm_fence_driver_t *driver = dev->driver->fence_driver;
struct list_head *list, *prev;
drm_fence_object_t *fence;
int found = 0;
if (list_empty(&fm->ring))
return;
list_for_each_entry(fence, &fm->ring, ring) {
diff = (sequence - fence->sequence) & driver->sequence_mask;
if (diff > driver->wrap_diff) {
found = 1;
break;
}
}
list = (found) ? fence->ring.prev : fm->ring.prev;
prev = list->prev;
for (; list != &fm->ring; list = prev, prev = list->prev) {
fence = list_entry(list, drm_fence_object_t, ring);
type |= fence->native_type;
relevant = type & fence->type;
if ((fence->signaled | relevant) != fence->signaled) {
fence->signaled |= relevant;
DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n",
fence->base.hash.key, fence->signaled);
fence->submitted_flush |= relevant;
wake = 1;
}
relevant = fence->flush_mask &
~(fence->signaled | fence->submitted_flush);
if (relevant) {
fm->pending_flush |= relevant;
fence->submitted_flush = fence->flush_mask;
}
if (!(fence->type & ~fence->signaled)) {
DRM_DEBUG("Fence completely signaled 0x%08lx\n",
fence->base.hash.key);
list_del_init(&fence->ring);
}
}
if (wake) {
DRM_WAKEUP(&fm->fence_queue);
}
}
EXPORT_SYMBOL(drm_fence_handler);
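/*
 * Hedged example (not part of this commit): how a driver's breadcrumb IRQ
 * path might feed completed sequence numbers into drm_fence_handler().
 * drm_fence_handler() itself takes no locks, so the fence manager rwlock is
 * assumed to be write-held around it, following how the fence drivers of
 * this era wrap it.  example_read_breadcrumb() and the EXE-only type mask
 * are hypothetical.
 */
static void example_fence_irq(drm_device_t * dev)
{
	drm_fence_manager_t *fm = &dev->fm;
	unsigned long flags;
	uint32_t sequence;

	sequence = example_read_breadcrumb(dev);	/* hypothetical hw read */

	write_lock_irqsave(&fm->lock, flags);
	drm_fence_handler(dev, sequence, DRM_FENCE_TYPE_EXE);
	write_unlock_irqrestore(&fm->lock, flags);
}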
static void drm_fence_unring(drm_device_t * dev, struct list_head *ring)
{
drm_fence_manager_t *fm = &dev->fm;
unsigned long flags;
write_lock_irqsave(&fm->lock, flags);
list_del_init(ring);
write_unlock_irqrestore(&fm->lock, flags);
}
void drm_fence_usage_deref_locked(drm_device_t * dev,
drm_fence_object_t * fence)
{
drm_fence_manager_t *fm = &dev->fm;
if (atomic_dec_and_test(&fence->usage)) {
drm_fence_unring(dev, &fence->ring);
DRM_DEBUG("Destroyed a fence object 0x%08lx\n",
fence->base.hash.key);
atomic_dec(&fm->count);
drm_ctl_cache_free(drm_cache.fence_object, sizeof(*fence),
fence);
}
}
void drm_fence_usage_deref_unlocked(drm_device_t * dev,
drm_fence_object_t * fence)
{
drm_fence_manager_t *fm = &dev->fm;
if (atomic_dec_and_test(&fence->usage)) {
mutex_lock(&dev->struct_mutex);
if (atomic_read(&fence->usage) == 0) {
drm_fence_unring(dev, &fence->ring);
atomic_dec(&fm->count);
drm_ctl_cache_free(drm_cache.fence_object,
sizeof(*fence), fence);
}
mutex_unlock(&dev->struct_mutex);
}
}
static void drm_fence_object_destroy(drm_file_t * priv,
drm_user_object_t * base)
{
drm_device_t *dev = priv->head->dev;
drm_fence_object_t *fence =
drm_user_object_entry(base, drm_fence_object_t, base);
drm_fence_usage_deref_locked(dev, fence);
}
static int fence_signaled(drm_device_t * dev, volatile
drm_fence_object_t * fence,
uint32_t mask, int poke_flush)
{
unsigned long flags;
int signaled;
drm_fence_manager_t *fm = &dev->fm;
drm_fence_driver_t *driver = dev->driver->fence_driver;
if (poke_flush)
driver->poke_flush(dev);
read_lock_irqsave(&fm->lock, flags);
signaled =
(fence->type & mask & fence->signaled) == (fence->type & mask);
read_unlock_irqrestore(&fm->lock, flags);
return signaled;
}
static void drm_fence_flush_exe(drm_fence_manager_t * fm,
drm_fence_driver_t * driver, uint32_t sequence)
{
uint32_t diff;
if (!fm->pending_exe_flush) {
volatile struct list_head *list;
/*
* Last_exe_flush is invalid. Find oldest sequence.
*/
/* list = fm->fence_types[_DRM_FENCE_TYPE_EXE];*/
list = &fm->ring;
if (list->next == &fm->ring) {
return;
} else {
drm_fence_object_t *fence =
list_entry(list->next, drm_fence_object_t, ring);
fm->last_exe_flush = (fence->sequence - 1) &
driver->sequence_mask;
}
diff = (sequence - fm->last_exe_flush) & driver->sequence_mask;
if (diff >= driver->wrap_diff)
return;
fm->exe_flush_sequence = sequence;
fm->pending_exe_flush = 1;
} else {
diff =
(sequence - fm->exe_flush_sequence) & driver->sequence_mask;
if (diff < driver->wrap_diff) {
fm->exe_flush_sequence = sequence;
}
}
}
int drm_fence_object_signaled(volatile drm_fence_object_t * fence,
uint32_t type)
{
return ((fence->signaled & type) == type);
}
int drm_fence_object_flush(drm_device_t * dev,
volatile drm_fence_object_t * fence, uint32_t type)
{
drm_fence_manager_t *fm = &dev->fm;
drm_fence_driver_t *driver = dev->driver->fence_driver;
unsigned long flags;
if (type & ~fence->type) {
DRM_ERROR("Flush trying to extend fence type, "
"0x%x, 0x%x\n", type, fence->type);
return -EINVAL;
}
write_lock_irqsave(&fm->lock, flags);
fence->flush_mask |= type;
if (fence->submitted_flush == fence->signaled) {
if ((fence->type & DRM_FENCE_TYPE_EXE) &&
!(fence->submitted_flush & DRM_FENCE_TYPE_EXE)) {
drm_fence_flush_exe(fm, driver, fence->sequence);
fence->submitted_flush |= DRM_FENCE_TYPE_EXE;
} else {
fm->pending_flush |= (fence->flush_mask &
~fence->submitted_flush);
fence->submitted_flush = fence->flush_mask;
}
}
write_unlock_irqrestore(&fm->lock, flags);
driver->poke_flush(dev);
return 0;
}
/*
* Make sure old fence objects are signaled before their fence sequences are
* wrapped around and reused.
*/
void drm_fence_flush_old(drm_device_t * dev, uint32_t sequence)
{
drm_fence_manager_t *fm = &dev->fm;
drm_fence_driver_t *driver = dev->driver->fence_driver;
uint32_t old_sequence;
unsigned long flags;
drm_fence_object_t *fence;
uint32_t diff;
mutex_lock(&dev->struct_mutex);
read_lock_irqsave(&fm->lock, flags);
if (fm->ring.next == &fm->ring) {
read_unlock_irqrestore(&fm->lock, flags);
mutex_unlock(&dev->struct_mutex);
return;
}
old_sequence = (sequence - driver->flush_diff) & driver->sequence_mask;
fence = list_entry(fm->ring.next, drm_fence_object_t, ring);
atomic_inc(&fence->usage);
mutex_unlock(&dev->struct_mutex);
diff = (old_sequence - fence->sequence) & driver->sequence_mask;
read_unlock_irqrestore(&fm->lock, flags);
if (diff < driver->wrap_diff) {
drm_fence_object_flush(dev, fence, fence->type);
}
drm_fence_usage_deref_unlocked(dev, fence);
}
EXPORT_SYMBOL(drm_fence_flush_old);
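/*
 * Hedged example (not part of this commit): the comment above suggests that
 * drivers call drm_fence_flush_old() with each newly emitted sequence number,
 * so fences whose sequences are about to wrap get flushed and signaled first.
 * example_emit() and its breadcrumb emission are illustrative only.
 */
static void example_emit(drm_device_t * dev)
{
	uint32_t sequence;

	sequence = example_emit_breadcrumb(dev);	/* hypothetical hw emit */
	drm_fence_flush_old(dev, sequence);
}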
int drm_fence_object_wait(drm_device_t * dev,
volatile drm_fence_object_t * fence,
int lazy, int ignore_signals, uint32_t mask)
{
drm_fence_manager_t *fm = &dev->fm;
drm_fence_driver_t *driver = dev->driver->fence_driver;
int ret = 0;
unsigned long _end;
int signaled;
if (mask & ~fence->type) {
DRM_ERROR("Wait trying to extend fence type"
" 0x%08x 0x%08x\n", mask, fence->type);
return -EINVAL;
}
if (fence_signaled(dev, fence, mask, 0))
return 0;
_end = jiffies + 3 * DRM_HZ;
drm_fence_object_flush(dev, fence, mask);
if (lazy && driver->lazy_capable) {
do {
DRM_WAIT_ON(ret, fm->fence_queue, 3 * DRM_HZ,
fence_signaled(dev, fence, mask, 1));
if (time_after_eq(jiffies, _end))
break;
} while (ret == -EINTR && ignore_signals);
if (time_after_eq(jiffies, _end) && (ret != 0))
ret = -EBUSY;
if (ret) {
if (ret == -EBUSY) {
DRM_ERROR("Fence timeout. "
"GPU lockup or fence driver was "
"taken down.\n");
}
return ((ret == -EINTR) ? -EAGAIN : ret);
}
} else if ((fence->class == 0) && (mask & DRM_FENCE_TYPE_EXE) &&
driver->lazy_capable) {
/*
* We use an IRQ wait for the EXE fence type if available, to
* free up the CPU in some cases.
*/
do {
DRM_WAIT_ON(ret, fm->fence_queue, 3 * DRM_HZ,
fence_signaled(dev, fence,
DRM_FENCE_TYPE_EXE, 1));
if (time_after_eq(jiffies, _end))
break;
} while (ret == -EINTR && ignore_signals);
if (time_after_eq(jiffies, _end) && (ret != 0))
ret = -EBUSY;
if (ret)
return ((ret == -EINTR) ? -EAGAIN : ret);
}
if (fence_signaled(dev, fence, mask, 0))
return 0;
/*
* Avoid kernel-space busy-waits.
*/
#if 1
if (!ignore_signals)
return -EAGAIN;
#endif
do {
schedule();
signaled = fence_signaled(dev, fence, mask, 1);
} while (!signaled && !time_after_eq(jiffies, _end));
if (!signaled)
return -EBUSY;
return 0;
}
int drm_fence_object_emit(drm_device_t * dev, drm_fence_object_t * fence,
uint32_t fence_flags, uint32_t type)
{
drm_fence_manager_t *fm = &dev->fm;
drm_fence_driver_t *driver = dev->driver->fence_driver;
unsigned long flags;
uint32_t sequence;
uint32_t native_type;
int ret;
drm_fence_unring(dev, &fence->ring);
ret = driver->emit(dev, fence_flags, &sequence, &native_type);
if (ret)
return ret;
write_lock_irqsave(&fm->lock, flags);
fence->type = type;
fence->flush_mask = 0x00;
fence->submitted_flush = 0x00;
fence->signaled = 0x00;
fence->sequence = sequence;
fence->native_type = native_type;
list_add_tail(&fence->ring, &fm->ring);
write_unlock_irqrestore(&fm->lock, flags);
return 0;
}
static int drm_fence_object_init(drm_device_t * dev, uint32_t type,
uint32_t fence_flags,
drm_fence_object_t * fence)
{
int ret = 0;
unsigned long flags;
drm_fence_manager_t *fm = &dev->fm;
mutex_lock(&dev->struct_mutex);
atomic_set(&fence->usage, 1);
mutex_unlock(&dev->struct_mutex);
write_lock_irqsave(&fm->lock, flags);
INIT_LIST_HEAD(&fence->ring);
fence->class = 0;
fence->type = type;
fence->flush_mask = 0;
fence->submitted_flush = 0;
fence->signaled = 0;
fence->sequence = 0;
write_unlock_irqrestore(&fm->lock, flags);
if (fence_flags & DRM_FENCE_FLAG_EMIT) {
ret = drm_fence_object_emit(dev, fence, fence_flags, type);
}
return ret;
}
int drm_fence_add_user_object(drm_file_t * priv, drm_fence_object_t * fence,
int shareable)
{
drm_device_t *dev = priv->head->dev;
int ret;
mutex_lock(&dev->struct_mutex);
ret = drm_add_user_object(priv, &fence->base, shareable);
mutex_unlock(&dev->struct_mutex);
if (ret)
return ret;
fence->base.type = drm_fence_type;
fence->base.remove = &drm_fence_object_destroy;
DRM_DEBUG("Fence 0x%08lx created\n", fence->base.hash.key);
return 0;
}
EXPORT_SYMBOL(drm_fence_add_user_object);
int drm_fence_object_create(drm_device_t * dev, uint32_t type,
unsigned flags, drm_fence_object_t ** c_fence)
{
drm_fence_object_t *fence;
int ret;
drm_fence_manager_t *fm = &dev->fm;
fence = drm_ctl_cache_alloc(drm_cache.fence_object,
sizeof(*fence), GFP_KERNEL);
if (!fence)
return -ENOMEM;
ret = drm_fence_object_init(dev, type, flags, fence);
if (ret) {
drm_fence_usage_deref_unlocked(dev, fence);
return ret;
}
*c_fence = fence;
atomic_inc(&fm->count);
return 0;
}
EXPORT_SYMBOL(drm_fence_object_create);
void drm_fence_manager_init(drm_device_t * dev)
{
drm_fence_manager_t *fm = &dev->fm;
drm_fence_driver_t *fed = dev->driver->fence_driver;
int i;
fm->lock = RW_LOCK_UNLOCKED;
write_lock(&fm->lock);
INIT_LIST_HEAD(&fm->ring);
fm->pending_flush = 0;
DRM_INIT_WAITQUEUE(&fm->fence_queue);
fm->initialized = 0;
if (fed) {
fm->initialized = 1;
atomic_set(&fm->count, 0);
for (i = 0; i < fed->no_types; ++i) {
fm->fence_types[i] = &fm->ring;
}
}
write_unlock(&fm->lock);
}
void drm_fence_manager_takedown(drm_device_t * dev)
{
}
drm_fence_object_t *drm_lookup_fence_object(drm_file_t * priv, uint32_t handle)
{
drm_device_t *dev = priv->head->dev;
drm_user_object_t *uo;
drm_fence_object_t *fence;
mutex_lock(&dev->struct_mutex);
uo = drm_lookup_user_object(priv, handle);
if (!uo || (uo->type != drm_fence_type)) {
mutex_unlock(&dev->struct_mutex);
return NULL;
}
fence = drm_user_object_entry(uo, drm_fence_object_t, base);
atomic_inc(&fence->usage);
mutex_unlock(&dev->struct_mutex);
return fence;
}
int drm_fence_ioctl(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
int ret;
drm_fence_manager_t *fm = &dev->fm;
drm_fence_arg_t arg;
drm_fence_object_t *fence;
drm_user_object_t *uo;
unsigned long flags;
ret = 0;
if (!fm->initialized) {
DRM_ERROR("The DRM driver does not support fencing.\n");
return -EINVAL;
}
DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
switch (arg.op) {
case drm_fence_create:
if (arg.flags & DRM_FENCE_FLAG_EMIT)
LOCK_TEST_WITH_RETURN(dev, filp);
ret = drm_fence_object_create(dev, arg.type, arg.flags, &fence);
if (ret)
return ret;
ret = drm_fence_add_user_object(priv, fence,
arg.flags &
DRM_FENCE_FLAG_SHAREABLE);
if (ret) {
drm_fence_usage_deref_unlocked(dev, fence);
return ret;
}
/*
* usage > 0. No need to lock dev->struct_mutex.
*/
atomic_inc(&fence->usage);
arg.handle = fence->base.hash.key;
break;
case drm_fence_destroy:
mutex_lock(&dev->struct_mutex);
uo = drm_lookup_user_object(priv, arg.handle);
if (!uo || (uo->type != drm_fence_type) || uo->owner != priv) {
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
ret = drm_remove_user_object(priv, uo);
mutex_unlock(&dev->struct_mutex);
return ret;
case drm_fence_reference:
ret =
drm_user_object_ref(priv, arg.handle, drm_fence_type, &uo);
if (ret)
return ret;
fence = drm_lookup_fence_object(priv, arg.handle);
break;
case drm_fence_unreference:
ret = drm_user_object_unref(priv, arg.handle, drm_fence_type);
return ret;
case drm_fence_signaled:
fence = drm_lookup_fence_object(priv, arg.handle);
if (!fence)
return -EINVAL;
break;
case drm_fence_flush:
fence = drm_lookup_fence_object(priv, arg.handle);
if (!fence)
return -EINVAL;
ret = drm_fence_object_flush(dev, fence, arg.type);
break;
case drm_fence_wait:
fence = drm_lookup_fence_object(priv, arg.handle);
if (!fence)
return -EINVAL;
ret =
drm_fence_object_wait(dev, fence,
arg.flags & DRM_FENCE_FLAG_WAIT_LAZY,
0, arg.type);
break;
case drm_fence_emit:
LOCK_TEST_WITH_RETURN(dev, filp);
fence = drm_lookup_fence_object(priv, arg.handle);
if (!fence)
return -EINVAL;
ret = drm_fence_object_emit(dev, fence, arg.flags, arg.type);
break;
case drm_fence_buffers:
if (!dev->bm.initialized) {
DRM_ERROR("Buffer object manager is not initialized\n");
return -EINVAL;
}
LOCK_TEST_WITH_RETURN(dev, filp);
ret = drm_fence_buffer_objects(priv, NULL, arg.flags,
NULL, &fence);
if (ret)
return ret;
ret = drm_fence_add_user_object(priv, fence,
arg.flags &
DRM_FENCE_FLAG_SHAREABLE);
if (ret)
return ret;
atomic_inc(&fence->usage);
arg.handle = fence->base.hash.key;
break;
default:
return -EINVAL;
}
read_lock_irqsave(&fm->lock, flags);
arg.class = fence->class;
arg.type = fence->type;
arg.signaled = fence->signaled;
read_unlock_irqrestore(&fm->lock, flags);
drm_fence_usage_deref_unlocked(dev, fence);
DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
return ret;
}

View File

@ -47,6 +47,7 @@ static int drm_setup(drm_device_t * dev)
int i;
int ret;
if (dev->driver->firstopen) {
ret = dev->driver->firstopen(dev);
if (ret != 0)
@ -56,6 +57,7 @@ static int drm_setup(drm_device_t * dev)
dev->magicfree.next = NULL;
/* prebuild the SAREA */
i = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM, _DRM_CONTAINS_LOCK, &map);
if (i != 0)
return i;
@ -156,6 +158,12 @@ int drm_open(struct inode *inode, struct file *filp)
}
spin_unlock(&dev->count_lock);
}
mutex_lock(&dev->struct_mutex);
BUG_ON((dev->dev_mapping != NULL) &&
(dev->dev_mapping != inode->i_mapping));
if (dev->dev_mapping == NULL)
dev->dev_mapping = inode->i_mapping;
mutex_unlock(&dev->struct_mutex);
return retcode;
}
@ -233,6 +241,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
int minor = iminor(inode);
drm_file_t *priv;
int ret;
int i,j;
if (filp->f_flags & O_EXCL)
return -EBUSY; /* No exclusive opens */
@ -256,6 +265,22 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
priv->authenticated = capable(CAP_SYS_ADMIN);
priv->lock_count = 0;
INIT_LIST_HEAD(&priv->user_objects);
INIT_LIST_HEAD(&priv->refd_objects);
for (i=0; i<_DRM_NO_REF_TYPES; ++i) {
ret = drm_ht_create(&priv->refd_object_hash[i], DRM_FILE_HASH_ORDER);
if (ret)
break;
}
if (ret) {
for(j=0; j<i; ++j) {
drm_ht_remove(&priv->refd_object_hash[j]);
}
goto out_free;
}
if (dev->driver->open) {
ret = dev->driver->open(dev, priv);
if (ret < 0)
@ -320,6 +345,53 @@ int drm_fasync(int fd, struct file *filp, int on)
}
EXPORT_SYMBOL(drm_fasync);
static void drm_object_release(struct file *filp) {
drm_file_t *priv = filp->private_data;
struct list_head *head;
drm_user_object_t *user_object;
drm_ref_object_t *ref_object;
int i;
/*
* Free leftover ref objects created by this client. Note that we cannot use
* list_for_each() here, as the struct_mutex may be temporarily released
* by the remove_() functions, and thus the lists may be altered.
* Also, drm_remove_ref_object() will not remove the ref object
* from the list unless its refcount is 1.
*/
head = &priv->refd_objects;
while (head->next != head) {
ref_object = list_entry(head->next, drm_ref_object_t, list);
drm_remove_ref_object(priv, ref_object);
head = &priv->refd_objects;
}
/*
* Free leftover user objects created by me.
*/
head = &priv->user_objects;
while (head->next != head) {
user_object = list_entry(head->next, drm_user_object_t, list);
drm_remove_user_object(priv, user_object);
head = &priv->user_objects;
}
for(i=0; i<_DRM_NO_REF_TYPES; ++i) {
drm_ht_remove(&priv->refd_object_hash[i]);
}
}
/**
* Release file.
*
@ -354,58 +426,43 @@ int drm_release(struct inode *inode, struct file *filp)
current->pid, (long)old_encode_dev(priv->head->device),
dev->open_count);
if (priv->lock_count && dev->lock.hw_lock &&
_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
dev->lock.filp == filp) {
DRM_DEBUG("File %p released, freeing lock for context %d\n",
filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
if (dev->driver->reclaim_buffers_locked) {
unsigned long _end = jiffies + DRM_HZ*3;
if (dev->driver->reclaim_buffers_locked)
do {
retcode = drm_kernel_take_hw_lock(filp);
} while(retcode && !time_after_eq(jiffies,_end));
if (!retcode) {
dev->driver->reclaim_buffers_locked(dev, filp);
drm_lock_free(dev, &dev->lock.hw_lock->lock,
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
} else {
/* FIXME: may require heavy-handed reset of
hardware at this point, possibly
processed via a callback to the X
server. */
} else if (dev->driver->reclaim_buffers_locked && priv->lock_count
&& dev->lock.hw_lock) {
/* The lock is required to reclaim buffers */
DECLARE_WAITQUEUE(entry, current);
/*
* FIXME: This is not a good solution. We should perhaps associate the
* DRM lock with a process context, and check whether the current process
* holds the lock. Then we can run reclaim buffers locked anyway.
*/
add_wait_queue(&dev->lock.lock_queue, &entry);
for (;;) {
__set_current_state(TASK_INTERRUPTIBLE);
if (!dev->lock.hw_lock) {
/* Device has been unregistered */
retcode = -EINTR;
break;
DRM_ERROR("Reclaim buffers locked deadlock.\n");
DRM_ERROR("This is probably a single thread having multiple\n");
DRM_ERROR("DRM file descriptors open either dying or "
"closing file descriptors\n");
DRM_ERROR("while having the lock. I will not reclaim buffers.\n");
DRM_ERROR("Locking context is 0x%08x\n",
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
}
if (drm_lock_take(&dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT)) {
dev->lock.filp = filp;
dev->lock.lock_time = jiffies;
atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
break; /* Got lock */
}
/* Contention */
schedule();
if (signal_pending(current)) {
retcode = -ERESTARTSYS;
break;
}
}
__set_current_state(TASK_RUNNING);
remove_wait_queue(&dev->lock.lock_queue, &entry);
if (!retcode) {
dev->driver->reclaim_buffers_locked(dev, filp);
} else if (drm_i_have_hw_lock(filp)) {
DRM_DEBUG("File %p released, freeing lock for context %d\n",
filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
drm_lock_free(dev, &dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT);
}
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
}
if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
!dev->driver->reclaim_buffers_locked) {
dev->driver->reclaim_buffers(dev, filp);
@ -414,6 +471,7 @@ int drm_release(struct inode *inode, struct file *filp)
drm_fasync(-1, filp, 0);
mutex_lock(&dev->ctxlist_mutex);
if (dev->ctxlist && (!list_empty(&dev->ctxlist->head))) {
drm_ctx_list_t *pos, *n;
@ -435,6 +493,7 @@ int drm_release(struct inode *inode, struct file *filp)
mutex_unlock(&dev->ctxlist_mutex);
mutex_lock(&dev->struct_mutex);
drm_object_release(filp);
if (priv->remove_auth_on_close == 1) {
drm_file_t *temp = dev->file_first;
while (temp) {

View File

@ -36,25 +36,34 @@
#include "drm_hashtab.h"
#include <linux/hash.h>
int drm_ht_create(drm_open_hash_t *ht, unsigned int order)
int drm_ht_create(drm_open_hash_t * ht, unsigned int order)
{
unsigned int i;
ht->size = 1 << order;
ht->order = order;
ht->fill = 0;
ht->table = vmalloc(ht->size*sizeof(*ht->table));
ht->table = NULL;
ht->use_vmalloc = ((ht->size * sizeof(*ht->table)) > PAGE_SIZE);
if (!ht->use_vmalloc) {
ht->table = drm_calloc(ht->size, sizeof(*ht->table),
DRM_MEM_HASHTAB);
}
if (!ht->table) {
ht->use_vmalloc = 1;
ht->table = vmalloc(ht->size * sizeof(*ht->table));
}
if (!ht->table) {
DRM_ERROR("Out of memory for hash table\n");
return -ENOMEM;
}
for (i=0; i< ht->size; ++i) {
for (i = 0; i < ht->size; ++i) {
INIT_HLIST_HEAD(&ht->table[i]);
}
return 0;
}
void drm_ht_verbose_list(drm_open_hash_t *ht, unsigned long key)
void drm_ht_verbose_list(drm_open_hash_t * ht, unsigned long key)
{
drm_hash_item_t *entry;
struct hlist_head *h_list;
@ -71,7 +80,7 @@ void drm_ht_verbose_list(drm_open_hash_t *ht, unsigned long key)
}
}
static struct hlist_node *drm_ht_find_key(drm_open_hash_t *ht,
static struct hlist_node *drm_ht_find_key(drm_open_hash_t * ht,
unsigned long key)
{
drm_hash_item_t *entry;
@ -91,8 +100,7 @@ static struct hlist_node *drm_ht_find_key(drm_open_hash_t *ht,
return NULL;
}
int drm_ht_insert_item(drm_open_hash_t *ht, drm_hash_item_t *item)
int drm_ht_insert_item(drm_open_hash_t * ht, drm_hash_item_t * item)
{
drm_hash_item_t *entry;
struct hlist_head *h_list;
@ -123,7 +131,7 @@ int drm_ht_insert_item(drm_open_hash_t *ht, drm_hash_item_t *item)
* Just insert an item and return any "bits" bit key that hasn't been
* used before.
*/
int drm_ht_just_insert_please(drm_open_hash_t *ht, drm_hash_item_t *item,
int drm_ht_just_insert_please(drm_open_hash_t * ht, drm_hash_item_t * item,
unsigned long seed, int bits, int shift,
unsigned long add)
{
@ -138,7 +146,7 @@ int drm_ht_just_insert_please(drm_open_hash_t *ht, drm_hash_item_t *item,
ret = drm_ht_insert_item(ht, item);
if (ret)
unshifted_key = (unshifted_key + 1) & mask;
} while(ret && (unshifted_key != first));
} while (ret && (unshifted_key != first));
if (ret) {
DRM_ERROR("Available key bit space exhausted\n");
@ -147,8 +155,8 @@ int drm_ht_just_insert_please(drm_open_hash_t *ht, drm_hash_item_t *item,
return 0;
}
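/*
 * Hedged usage sketch (not part of this commit): handing out a 32-bit handle
 * for a new kernel object, in the same way drm_add_user_object() in
 * linux-core/drm_object.c (later in this diff) uses this helper.  The
 * example_new_handle() wrapper is illustrative only.
 */
static int example_new_handle(drm_device_t * dev, drm_user_object_t * item,
			      uint32_t * handle)
{
	int ret;

	ret = drm_ht_just_insert_please(&dev->object_hash, &item->hash,
					(unsigned long)item, 32, 0, 0);
	if (ret)
		return ret;	/* key space exhausted or allocation failure */

	*handle = item->hash.key;	/* 32-bit key usable as a user handle */
	return 0;
}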
int drm_ht_find_item(drm_open_hash_t *ht, unsigned long key,
drm_hash_item_t **item)
int drm_ht_find_item(drm_open_hash_t * ht, unsigned long key,
drm_hash_item_t ** item)
{
struct hlist_node *list;
@ -160,7 +168,7 @@ int drm_ht_find_item(drm_open_hash_t *ht, unsigned long key,
return 0;
}
int drm_ht_remove_key(drm_open_hash_t *ht, unsigned long key)
int drm_ht_remove_key(drm_open_hash_t * ht, unsigned long key)
{
struct hlist_node *list;
@ -173,18 +181,21 @@ int drm_ht_remove_key(drm_open_hash_t *ht, unsigned long key)
return -EINVAL;
}
int drm_ht_remove_item(drm_open_hash_t *ht, drm_hash_item_t *item)
int drm_ht_remove_item(drm_open_hash_t * ht, drm_hash_item_t * item)
{
hlist_del_init(&item->head);
ht->fill--;
return 0;
}
void drm_ht_remove(drm_open_hash_t *ht)
void drm_ht_remove(drm_open_hash_t * ht)
{
if (ht->table) {
if (ht->use_vmalloc)
vfree(ht->table);
else
drm_free(ht->table, ht->size * sizeof(*ht->table),
DRM_MEM_HASHTAB);
ht->table = NULL;
}
}

View File

@ -47,6 +47,7 @@ typedef struct drm_open_hash{
unsigned int order;
unsigned int fill;
struct hlist_head *table;
int use_vmalloc;
} drm_open_hash_t;

View File

@ -118,6 +118,7 @@ static int drm_irq_install(drm_device_t * dev)
init_waitqueue_head(&dev->vbl_queue);
spin_lock_init(&dev->vbl_lock);
spin_lock_init(&dev->tasklet_lock);
INIT_LIST_HEAD(&dev->vbl_sigs.head);
INIT_LIST_HEAD(&dev->vbl_sigs2.head);

View File

@ -35,9 +35,12 @@
#include "drmP.h"
#if 0
static int drm_lock_transfer(drm_device_t * dev,
__volatile__ unsigned int *lock,
unsigned int context);
#endif
static int drm_notifier(void *priv);
/**
@ -181,12 +184,9 @@ int drm_unlock(struct inode *inode, struct file *filp,
if (dev->driver->kernel_context_switch_unlock)
dev->driver->kernel_context_switch_unlock(dev);
else {
drm_lock_transfer(dev, &dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT);
if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT)) {
DRM_ERROR("\n");
lock.context)) {
/* FIXME: Should really bail out here. */
}
}
@ -212,7 +212,7 @@ int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
if (old & _DRM_LOCK_HELD)
new = old | _DRM_LOCK_CONT;
else
new = context | _DRM_LOCK_HELD;
new = context | _DRM_LOCK_HELD | _DRM_LOCK_CONT;
prev = cmpxchg(lock, old, new);
} while (prev != old);
if (_DRM_LOCKING_CONTEXT(old) == context) {
@ -224,13 +224,14 @@ int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
return 0;
}
}
if (new == (context | _DRM_LOCK_HELD)) {
if (new == (context | _DRM_LOCK_HELD | _DRM_LOCK_CONT)) {
/* Have lock */
return 1;
}
return 0;
}
#if 0
/**
* This takes a lock forcibly and hands it to context. Should ONLY be used
* inside *_unlock to give lock to kernel before calling *_dma_schedule.
@ -257,6 +258,7 @@ static int drm_lock_transfer(drm_device_t * dev,
} while (prev != old);
return 1;
}
#endif
/**
* Free lock.
@ -274,12 +276,12 @@ int drm_lock_free(drm_device_t * dev,
{
unsigned int old, new, prev;
dev->lock.filp = NULL;
do {
old = *lock;
new = 0;
new = _DRM_LOCKING_CONTEXT(old);
prev = cmpxchg(lock, old, new);
} while (prev != old);
if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
DRM_ERROR("%d freed heavyweight lock held by %d\n",
context, _DRM_LOCKING_CONTEXT(old));
@ -319,3 +321,66 @@ static int drm_notifier(void *priv)
} while (prev != old);
return 0;
}
/*
* Helpers that drivers can use to check for, or take, the hardware lock
* when necessary (e.g. waiting for idle before reclaiming buffers).
*/
int drm_i_have_hw_lock(struct file *filp)
{
DRM_DEVICE;
return (priv->lock_count && dev->lock.hw_lock &&
_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
dev->lock.filp == filp);
}
EXPORT_SYMBOL(drm_i_have_hw_lock);
int drm_kernel_take_hw_lock(struct file *filp)
{
DRM_DEVICE;
int ret = 0;
unsigned long _end = jiffies + 3*DRM_HZ;
if (!drm_i_have_hw_lock(filp)) {
DECLARE_WAITQUEUE(entry, current);
add_wait_queue(&dev->lock.lock_queue, &entry);
for (;;) {
__set_current_state(TASK_INTERRUPTIBLE);
if (!dev->lock.hw_lock) {
/* Device has been unregistered */
ret = -EINTR;
break;
}
if (drm_lock_take(&dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT)) {
dev->lock.filp = filp;
dev->lock.lock_time = jiffies;
atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
break; /* Got lock */
}
/* Contention */
if (time_after_eq(jiffies,_end)) {
ret = -EBUSY;
break;
}
schedule_timeout(1);
if (signal_pending(current)) {
ret = -ERESTARTSYS;
break;
}
}
__set_current_state(TASK_RUNNING);
remove_wait_queue(&dev->lock.lock_queue, &entry);
}
return ret;
}
EXPORT_SYMBOL(drm_kernel_take_hw_lock);
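/*
 * Hedged example (not part of this commit): how these two helpers are meant
 * to be combined, mirroring the drm_release() changes earlier in this diff --
 * keep retrying to take the hardware lock for a bounded time, and only then
 * run the driver's locked buffer reclaim.  example_reclaim_with_lock() is an
 * illustrative name; reclaim_buffers_locked is assumed to be non-NULL.
 */
static void example_reclaim_with_lock(drm_device_t * dev, struct file *filp)
{
	unsigned long _end = jiffies + 3 * DRM_HZ;
	int ret;

	do {
		ret = drm_kernel_take_hw_lock(filp);
	} while (ret && !time_after_eq(jiffies, _end));

	if (!ret) {
		dev->driver->reclaim_buffers_locked(dev, filp);
		drm_lock_free(dev, &dev->lock.hw_lock->lock,
			      _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
	}
}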

View File

@ -33,10 +33,78 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/config.h>
#include <linux/highmem.h>
#include "drmP.h"
static struct {
spinlock_t lock;
drm_u64_t cur_used;
drm_u64_t low_threshold;
drm_u64_t high_threshold;
} drm_memctl = {
.lock = SPIN_LOCK_UNLOCKED
};
static inline size_t drm_size_align(size_t size) {
register size_t tmpSize = 4;
if (size > PAGE_SIZE)
return PAGE_ALIGN(size);
while(tmpSize < size)
tmpSize <<= 1;
return (size_t) tmpSize;
}
int drm_alloc_memctl(size_t size)
{
int ret;
unsigned long a_size = drm_size_align(size);
spin_lock(&drm_memctl.lock);
ret = ((drm_memctl.cur_used + a_size) > drm_memctl.high_threshold) ?
-ENOMEM : 0;
if (!ret)
drm_memctl.cur_used += a_size;
spin_unlock(&drm_memctl.lock);
return ret;
}
EXPORT_SYMBOL(drm_alloc_memctl);
void drm_free_memctl(size_t size)
{
unsigned long a_size = drm_size_align(size);
spin_lock(&drm_memctl.lock);
drm_memctl.cur_used -= a_size;
spin_unlock(&drm_memctl.lock);
}
EXPORT_SYMBOL(drm_free_memctl);
void drm_query_memctl(drm_u64_t *cur_used,
drm_u64_t *low_threshold,
drm_u64_t *high_threshold)
{
spin_lock(&drm_memctl.lock);
*cur_used = drm_memctl.cur_used;
*low_threshold = drm_memctl.low_threshold;
*high_threshold = drm_memctl.high_threshold;
spin_unlock(&drm_memctl.lock);
}
EXPORT_SYMBOL(drm_query_memctl);
void drm_init_memctl(size_t p_low_threshold,
size_t p_high_threshold)
{
spin_lock(&drm_memctl.lock);
drm_memctl.cur_used = 0;
drm_memctl.low_threshold = p_low_threshold << PAGE_SHIFT;
drm_memctl.high_threshold = p_high_threshold << PAGE_SHIFT;
spin_unlock(&drm_memctl.lock);
}
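/*
 * Hedged example (not part of this commit): the intended pairing of the
 * accounting calls above with an actual allocation, as the drm_ctl_alloc()/
 * drm_ctl_free() helpers used elsewhere in this diff do.  The function name
 * and the DRM_MEM_TTM area are illustrative only.
 */
static void *example_alloc_tracked(size_t size)
{
	void *ptr;

	if (drm_alloc_memctl(size))	/* would exceed the hard threshold */
		return NULL;

	ptr = drm_alloc(size, DRM_MEM_TTM);
	if (!ptr)
		drm_free_memctl(size);	/* give the accounted space back */

	return ptr;
}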
#ifndef DEBUG_MEMORY
/** No-op. */

View File

@ -33,7 +33,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/config.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include "drmP.h"

View File

@ -31,7 +31,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/config.h>
#include "drmP.h"
#ifdef DEBUG_MEMORY

View File

@ -31,7 +31,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/config.h>
#include "drmP.h"
typedef struct drm_mem_stats {

View File

@ -42,36 +42,137 @@
*/
#include "drmP.h"
#include <linux/slab.h>
unsigned long drm_mm_tail_space(drm_mm_t *mm)
{
struct list_head *tail_node;
drm_mm_node_t *entry;
tail_node = mm->root_node.ml_entry.prev;
entry = list_entry(tail_node, drm_mm_node_t, ml_entry);
if (!entry->free)
return 0;
return entry->size;
}
int drm_mm_remove_space_from_tail(drm_mm_t *mm, unsigned long size)
{
struct list_head *tail_node;
drm_mm_node_t *entry;
tail_node = mm->root_node.ml_entry.prev;
entry = list_entry(tail_node, drm_mm_node_t, ml_entry);
if (!entry->free)
return -ENOMEM;
if (entry->size <= size)
return -ENOMEM;
entry->size -= size;
return 0;
}
static int drm_mm_create_tail_node(drm_mm_t *mm,
unsigned long start,
unsigned long size)
{
drm_mm_node_t *child;
child = (drm_mm_node_t *)
drm_ctl_cache_alloc(drm_cache.mm, sizeof(*child),
GFP_KERNEL);
if (!child)
return -ENOMEM;
child->free = 1;
child->size = size;
child->start = start;
child->mm = mm;
list_add_tail(&child->ml_entry, &mm->root_node.ml_entry);
list_add_tail(&child->fl_entry, &mm->root_node.fl_entry);
return 0;
}
int drm_mm_add_space_to_tail(drm_mm_t *mm, unsigned long size)
{
struct list_head *tail_node;
drm_mm_node_t *entry;
tail_node = mm->root_node.ml_entry.prev;
entry = list_entry(tail_node, drm_mm_node_t, ml_entry);
if (!entry->free) {
return drm_mm_create_tail_node(mm, entry->start + entry->size, size);
}
entry->size += size;
return 0;
}
static drm_mm_node_t *drm_mm_split_at_start(drm_mm_node_t *parent,
unsigned long size)
{
drm_mm_node_t *child;
child = (drm_mm_node_t *)
drm_ctl_cache_alloc(drm_cache.mm, sizeof(*child),
GFP_KERNEL);
if (!child)
return NULL;
INIT_LIST_HEAD(&child->fl_entry);
child->free = 0;
child->size = size;
child->start = parent->start;
child->mm = parent->mm;
list_add_tail(&child->ml_entry, &parent->ml_entry);
INIT_LIST_HEAD(&child->fl_entry);
parent->size -= size;
parent->start += size;
return child;
}
drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent,
unsigned long size, unsigned alignment)
{
drm_mm_node_t *align_splitoff = NULL;
drm_mm_node_t *child;
unsigned tmp = 0;
if (alignment)
size += alignment - 1;
tmp = size % alignment;
if (tmp) {
align_splitoff = drm_mm_split_at_start(parent, alignment - tmp);
if (!align_splitoff)
return NULL;
}
if (parent->size == size) {
list_del_init(&parent->fl_entry);
parent->free = 0;
return parent;
} else {
child = (drm_mm_node_t *) drm_alloc(sizeof(*child), DRM_MEM_MM);
if (!child)
child = drm_mm_split_at_start(parent, size);
if (!child) {
if (align_splitoff)
drm_mm_put_block(align_splitoff);
return NULL;
INIT_LIST_HEAD(&child->ml_entry);
INIT_LIST_HEAD(&child->fl_entry);
child->free = 0;
child->size = size;
child->start = parent->start;
list_add_tail(&child->ml_entry, &parent->ml_entry);
parent->size -= size;
parent->start += size;
}
}
if (align_splitoff)
drm_mm_put_block(align_splitoff);
return child;
}
@ -80,9 +181,10 @@ drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent,
* Otherwise add to the free stack.
*/
void drm_mm_put_block(drm_mm_t * mm, drm_mm_node_t * cur)
void drm_mm_put_block(drm_mm_node_t * cur)
{
drm_mm_t *mm = cur->mm;
drm_mm_node_t *list_root = &mm->root_node;
struct list_head *cur_head = &cur->ml_entry;
struct list_head *root_head = &list_root->ml_entry;
@ -105,8 +207,9 @@ void drm_mm_put_block(drm_mm_t * mm, drm_mm_node_t * cur)
prev_node->size += next_node->size;
list_del(&next_node->ml_entry);
list_del(&next_node->fl_entry);
drm_free(next_node, sizeof(*next_node),
DRM_MEM_MM);
drm_ctl_cache_free(drm_cache.mm,
sizeof(*next_node),
next_node);
} else {
next_node->size += cur->size;
next_node->start = cur->start;
@ -119,7 +222,7 @@ void drm_mm_put_block(drm_mm_t * mm, drm_mm_node_t * cur)
list_add(&cur->fl_entry, &list_root->fl_entry);
} else {
list_del(&cur->ml_entry);
drm_free(cur, sizeof(*cur), DRM_MEM_MM);
drm_ctl_cache_free(drm_cache.mm, sizeof(*cur), cur);
}
}
@ -132,16 +235,23 @@ drm_mm_node_t *drm_mm_search_free(const drm_mm_t * mm,
drm_mm_node_t *entry;
drm_mm_node_t *best;
unsigned long best_size;
unsigned wasted;
best = NULL;
best_size = ~0UL;
if (alignment)
size += alignment - 1;
list_for_each(list, free_stack) {
entry = list_entry(list, drm_mm_node_t, fl_entry);
if (entry->size >= size) {
wasted = 0;
if (alignment) {
register unsigned tmp = size % alignment;
if (tmp)
wasted += alignment - tmp;
}
if (entry->size >= size + wasted) {
if (!best_match)
return entry;
if (size < best_size) {
@ -154,27 +264,19 @@ drm_mm_node_t *drm_mm_search_free(const drm_mm_t * mm,
return best;
}
int drm_mm_clean(drm_mm_t * mm)
{
struct list_head *head = &mm->root_node.ml_entry;
return (head->next->next == head);
}
int drm_mm_init(drm_mm_t * mm, unsigned long start, unsigned long size)
{
drm_mm_node_t *child;
INIT_LIST_HEAD(&mm->root_node.ml_entry);
INIT_LIST_HEAD(&mm->root_node.fl_entry);
child = (drm_mm_node_t *) drm_alloc(sizeof(*child), DRM_MEM_MM);
if (!child)
return -ENOMEM;
INIT_LIST_HEAD(&child->ml_entry);
INIT_LIST_HEAD(&child->fl_entry);
child->start = start;
child->size = size;
child->free = 1;
list_add(&child->fl_entry, &mm->root_node.fl_entry);
list_add(&child->ml_entry, &mm->root_node.ml_entry);
return 0;
return drm_mm_create_tail_node(mm, start, size);
}
EXPORT_SYMBOL(drm_mm_init);
@ -194,8 +296,7 @@ void drm_mm_takedown(drm_mm_t * mm)
list_del(&entry->fl_entry);
list_del(&entry->ml_entry);
drm_free(entry, sizeof(*entry), DRM_MEM_MM);
drm_ctl_cache_free(drm_cache.mm, sizeof(*entry), entry);
}
EXPORT_SYMBOL(drm_mm_takedown);
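/*
 * Hedged usage sketch (not part of this commit): the basic life cycle of the
 * range manager -- initialize a managed range, find and carve out an aligned
 * block, and return it when done.  The 1 MB range, 4 kB request and
 * PAGE_SIZE alignment are illustrative values only.
 */
static int example_mm_usage(void)
{
	drm_mm_t mm;
	drm_mm_node_t *node;
	int ret;

	ret = drm_mm_init(&mm, 0, 1024 * 1024);	/* manage [0, 1M) */
	if (ret)
		return ret;

	node = drm_mm_search_free(&mm, 4096, PAGE_SIZE, 1);
	if (node)
		node = drm_mm_get_block(node, 4096, PAGE_SIZE);

	if (node)
		drm_mm_put_block(node);	/* merges back into the free list */

	drm_mm_takedown(&mm);
	return 0;
}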

287
linux-core/drm_object.c Normal file
View File

@ -0,0 +1,287 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
#include "drmP.h"
int drm_add_user_object(drm_file_t * priv, drm_user_object_t * item,
int shareable)
{
drm_device_t *dev = priv->head->dev;
int ret;
atomic_set(&item->refcount, 1);
item->shareable = shareable;
item->owner = priv;
ret = drm_ht_just_insert_please(&dev->object_hash, &item->hash,
(unsigned long)item, 32, 0, 0);
if (ret)
return ret;
list_add_tail(&item->list, &priv->user_objects);
return 0;
}
drm_user_object_t *drm_lookup_user_object(drm_file_t * priv, uint32_t key)
{
drm_device_t *dev = priv->head->dev;
drm_hash_item_t *hash;
int ret;
drm_user_object_t *item;
ret = drm_ht_find_item(&dev->object_hash, key, &hash);
if (ret) {
return NULL;
}
item = drm_hash_entry(hash, drm_user_object_t, hash);
if (priv != item->owner) {
drm_open_hash_t *ht = &priv->refd_object_hash[_DRM_REF_USE];
ret = drm_ht_find_item(ht, (unsigned long)item, &hash);
if (ret) {
DRM_ERROR("Object not registered for usage\n");
return NULL;
}
}
return item;
}
static void drm_deref_user_object(drm_file_t * priv, drm_user_object_t * item)
{
drm_device_t *dev = priv->head->dev;
int ret;
if (atomic_dec_and_test(&item->refcount)) {
ret = drm_ht_remove_item(&dev->object_hash, &item->hash);
BUG_ON(ret);
list_del_init(&item->list);
item->remove(priv, item);
}
}
int drm_remove_user_object(drm_file_t * priv, drm_user_object_t * item)
{
if (item->owner != priv) {
DRM_ERROR("Cannot destroy object not owned by you.\n");
return -EINVAL;
}
item->owner = 0;
item->shareable = 0;
list_del_init(&item->list);
drm_deref_user_object(priv, item);
return 0;
}
static int drm_object_ref_action(drm_file_t * priv, drm_user_object_t * ro,
drm_ref_t action)
{
int ret = 0;
switch (action) {
case _DRM_REF_USE:
atomic_inc(&ro->refcount);
break;
default:
if (!ro->ref_struct_locked) {
break;
} else {
ro->ref_struct_locked(priv, ro, action);
}
}
return ret;
}
int drm_add_ref_object(drm_file_t * priv, drm_user_object_t * referenced_object,
drm_ref_t ref_action)
{
int ret = 0;
drm_ref_object_t *item;
drm_open_hash_t *ht = &priv->refd_object_hash[ref_action];
if (!referenced_object->shareable && priv != referenced_object->owner) {
DRM_ERROR("Not allowed to reference this object\n");
return -EINVAL;
}
/*
* If this is not a usage reference, check that a usage reference has been
* registered first. Otherwise strange things may happen on destruction.
*/
if ((ref_action != _DRM_REF_USE) && priv != referenced_object->owner) {
item =
drm_lookup_ref_object(priv, referenced_object,
_DRM_REF_USE);
if (!item) {
DRM_ERROR
("Object not registered for usage by this client\n");
return -EINVAL;
}
}
if (NULL !=
(item =
drm_lookup_ref_object(priv, referenced_object, ref_action))) {
atomic_inc(&item->refcount);
return drm_object_ref_action(priv, referenced_object,
ref_action);
}
item = drm_ctl_calloc(1, sizeof(*item), DRM_MEM_OBJECTS);
if (item == NULL) {
DRM_ERROR("Could not allocate reference object\n");
return -ENOMEM;
}
atomic_set(&item->refcount, 1);
item->hash.key = (unsigned long)referenced_object;
ret = drm_ht_insert_item(ht, &item->hash);
item->unref_action = ref_action;
if (ret)
goto out;
list_add(&item->list, &priv->refd_objects);
ret = drm_object_ref_action(priv, referenced_object, ref_action);
out:
return ret;
}
drm_ref_object_t *drm_lookup_ref_object(drm_file_t * priv,
drm_user_object_t * referenced_object,
drm_ref_t ref_action)
{
drm_hash_item_t *hash;
int ret;
ret = drm_ht_find_item(&priv->refd_object_hash[ref_action],
(unsigned long)referenced_object, &hash);
if (ret)
return NULL;
return drm_hash_entry(hash, drm_ref_object_t, hash);
}
static void drm_remove_other_references(drm_file_t * priv,
drm_user_object_t * ro)
{
int i;
drm_open_hash_t *ht;
drm_hash_item_t *hash;
drm_ref_object_t *item;
for (i = _DRM_REF_USE + 1; i < _DRM_NO_REF_TYPES; ++i) {
ht = &priv->refd_object_hash[i];
while (!drm_ht_find_item(ht, (unsigned long)ro, &hash)) {
item = drm_hash_entry(hash, drm_ref_object_t, hash);
drm_remove_ref_object(priv, item);
}
}
}
void drm_remove_ref_object(drm_file_t * priv, drm_ref_object_t * item)
{
int ret;
drm_user_object_t *user_object = (drm_user_object_t *) item->hash.key;
drm_open_hash_t *ht = &priv->refd_object_hash[item->unref_action];
drm_ref_t unref_action;
unref_action = item->unref_action;
if (atomic_dec_and_test(&item->refcount)) {
ret = drm_ht_remove_item(ht, &item->hash);
BUG_ON(ret);
list_del_init(&item->list);
if (unref_action == _DRM_REF_USE)
drm_remove_other_references(priv, user_object);
drm_ctl_free(item, sizeof(*item), DRM_MEM_OBJECTS);
}
switch (unref_action) {
case _DRM_REF_USE:
drm_deref_user_object(priv, user_object);
break;
default:
BUG_ON(!user_object->unref);
user_object->unref(priv, user_object, unref_action);
break;
}
}
int drm_user_object_ref(drm_file_t * priv, uint32_t user_token,
drm_object_type_t type, drm_user_object_t ** object)
{
drm_device_t *dev = priv->head->dev;
drm_user_object_t *uo;
int ret;
mutex_lock(&dev->struct_mutex);
uo = drm_lookup_user_object(priv, user_token);
if (!uo || (uo->type != type)) {
ret = -EINVAL;
goto out_err;
}
ret = drm_add_ref_object(priv, uo, _DRM_REF_USE);
if (ret)
goto out_err;
mutex_unlock(&dev->struct_mutex);
*object = uo;
DRM_ERROR("Referenced an object\n");
return 0;
out_err:
mutex_unlock(&dev->struct_mutex);
return ret;
}
int drm_user_object_unref(drm_file_t * priv, uint32_t user_token,
drm_object_type_t type)
{
drm_device_t *dev = priv->head->dev;
drm_user_object_t *uo;
drm_ref_object_t *ro;
int ret;
mutex_lock(&dev->struct_mutex);
uo = drm_lookup_user_object(priv, user_token);
if (!uo || (uo->type != type)) {
ret = -EINVAL;
goto out_err;
}
ro = drm_lookup_ref_object(priv, uo, _DRM_REF_USE);
if (!ro) {
ret = -EINVAL;
goto out_err;
}
drm_remove_ref_object(priv, ro);
mutex_unlock(&dev->struct_mutex);
DRM_ERROR("Unreferenced an object\n");
return 0;
out_err:
mutex_unlock(&dev->struct_mutex);
return ret;
}
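/*
 * Hedged usage sketch (not part of this commit): the typical flow for an
 * object exposed to user space, as the fence code earlier in this diff does
 * it -- add the object under struct_mutex, hand its hash key to user space,
 * and let other clients take usage references on it by that handle.
 * example_export_object() is an illustrative name.
 */
static int example_export_object(drm_file_t * priv, drm_user_object_t * item,
				 uint32_t * user_handle)
{
	drm_device_t *dev = priv->head->dev;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_add_user_object(priv, item, 1 /* shareable */);
	mutex_unlock(&dev->struct_mutex);
	if (ret)
		return ret;

	/* What other clients pass to drm_user_object_ref(). */
	*user_handle = item->hash.key;
	return 0;
}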

View File

@ -49,6 +49,8 @@ static int drm_queues_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
static int drm_bufs_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
static int drm_objects_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
#if DRM_DEBUG_CODE
static int drm_vma_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
@ -67,6 +69,7 @@ static struct drm_proc_list {
{"clients", drm_clients_info},
{"queues", drm_queues_info},
{"bufs", drm_bufs_info},
{"objects", drm_objects_info},
#if DRM_DEBUG_CODE
{"vma", drm_vma_info},
#endif
@ -238,10 +241,11 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request,
type = "??";
else
type = types[map->type];
DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08x ",
DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ",
i,
map->offset,
map->size, type, map->flags, r_list->user_token);
map->size, type, map->flags,
(unsigned long) r_list->user_token);
if (map->mtrr < 0) {
DRM_PROC_PRINT("none\n");
@ -417,6 +421,89 @@ static int drm_bufs_info(char *buf, char **start, off_t offset, int request,
return ret;
}
/**
* Called when "/proc/dri/.../objects" is read.
*
* \param buf output buffer.
* \param start start of output data.
* \param offset requested start offset.
* \param request requested number of bytes.
* \param eof whether there is no more data to return.
* \param data private data.
* \return number of written bytes.
*/
static int drm__objects_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
{
drm_device_t *dev = (drm_device_t *) data;
int len = 0;
drm_buffer_manager_t *bm = &dev->bm;
drm_fence_manager_t *fm = &dev->fm;
drm_u64_t used_mem;
drm_u64_t low_mem;
drm_u64_t high_mem;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
return 0;
}
*start = &buf[offset];
*eof = 0;
if (fm->initialized) {
DRM_PROC_PRINT("Number of active fence objects: %d.\n\n",
atomic_read(&fm->count));
} else {
DRM_PROC_PRINT("Fence objects are not supported by this driver\n\n");
}
if (bm->initialized) {
DRM_PROC_PRINT("Number of active buffer objects: %d.\n\n",
atomic_read(&bm->count));
DRM_PROC_PRINT("Number of locked GATT pages: %lu.\n", bm->cur_pages);
} else {
DRM_PROC_PRINT("Buffer objects are not supported by this driver.\n\n");
}
drm_query_memctl(&used_mem, &low_mem, &high_mem);
if (used_mem > 16*PAGE_SIZE) {
DRM_PROC_PRINT("Used object memory is %lu pages.\n",
(unsigned long) (used_mem >> PAGE_SHIFT));
} else {
DRM_PROC_PRINT("Used object memory is %lu bytes.\n",
(unsigned long) used_mem);
}
DRM_PROC_PRINT("Soft object memory usage threshold is %lu pages.\n",
(unsigned long) (low_mem >> PAGE_SHIFT));
DRM_PROC_PRINT("Hard object memory usage threshold is %lu pages.\n",
(unsigned long) (high_mem >> PAGE_SHIFT));
DRM_PROC_PRINT("\n");
if (len > request + offset)
return request;
*eof = 1;
return len - offset;
}
/**
* Simply calls _objects_info() while holding the drm_device::struct_mutex lock.
*/
static int drm_objects_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
{
drm_device_t *dev = (drm_device_t *) data;
int ret;
mutex_lock(&dev->struct_mutex);
ret = drm__objects_info(buf, start, offset, request, eof, data);
mutex_unlock(&dev->struct_mutex);
return ret;
}
/**
* Called when "/proc/dri/.../clients" is read.
*
@ -500,7 +587,7 @@ static int drm__vma_info(char *buf, char **start, off_t offset, int request,
for (pt = dev->vmalist; pt; pt = pt->next) {
if (!(vma = pt->vma))
continue;
DRM_PROC_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx",
DRM_PROC_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
pt->pid,
vma->vm_start,
vma->vm_end,
@ -510,7 +597,7 @@ static int drm__vma_info(char *buf, char **start, off_t offset, int request,
vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
vma->vm_flags & VM_LOCKED ? 'l' : '-',
vma->vm_flags & VM_IO ? 'i' : '-',
VM_OFFSET(vma));
vma->vm_pgoff);
#if defined(__i386__)
pgprot = pgprot_val(vma->vm_page_prot);

View File

@ -31,7 +31,6 @@
* DEALINGS IN THE SOFTWARE.
*/
#include <linux/config.h>
#include <linux/vmalloc.h>
#include "drmP.h"

View File

@ -101,10 +101,9 @@ static void *drm_sman_mm_allocate(void *private, unsigned long size,
static void drm_sman_mm_free(void *private, void *ref)
{
drm_mm_t *mm = (drm_mm_t *) private;
drm_mm_node_t *node = (drm_mm_node_t *) ref;
drm_mm_put_block(mm, node);
drm_mm_put_block(node);
}
static void drm_sman_mm_destroy(void *private)

View File

@ -54,6 +54,11 @@ drm_head_t **drm_heads;
struct drm_sysfs_class *drm_class;
struct proc_dir_entry *drm_proc_root;
drm_cache_t drm_cache =
{ .mm = NULL,
.fence_object = NULL
};
static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
const struct pci_device_id *ent,
struct drm_driver *driver)
@ -66,6 +71,7 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
init_timer(&dev->timer);
mutex_init(&dev->struct_mutex);
mutex_init(&dev->ctxlist_mutex);
mutex_init(&dev->bm.init_mutex);
dev->pdev = pdev;
dev->pci_device = pdev->device;
@ -76,14 +82,28 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
#endif
dev->irq = pdev->irq;
if (drm_ht_create(&dev->map_hash, DRM_MAP_HASH_ORDER)) {
drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
return -ENOMEM;
}
if (drm_mm_init(&dev->offset_manager, DRM_FILE_PAGE_OFFSET_START,
DRM_FILE_PAGE_OFFSET_SIZE)) {
drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
drm_ht_remove(&dev->map_hash);
return -ENOMEM;
}
if (drm_ht_create(&dev->object_hash, DRM_OBJECT_HASH_ORDER)) {
drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
drm_ht_remove(&dev->map_hash);
drm_mm_takedown(&dev->offset_manager);
return -ENOMEM;
}
dev->maplist = drm_calloc(1, sizeof(*dev->maplist), DRM_MEM_MAPS);
if (dev->maplist == NULL)
return -ENOMEM;
INIT_LIST_HEAD(&dev->maplist->head);
if (drm_ht_create(&dev->map_hash, 12)) {
drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
return -ENOMEM;
}
/* the DRM has 6 counters */
dev->counters = 6;
@ -125,6 +145,7 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
goto error_out_unreg;
}
drm_fence_manager_init(dev);
return 0;
error_out_unreg:

View File

@ -11,7 +11,6 @@
*
*/
#include <linux/config.h>
#include <linux/device.h>
#include <linux/kdev_t.h>
#include <linux/err.h>

519
linux-core/drm_ttm.c Normal file
View File

@ -0,0 +1,519 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
#include "drmP.h"
static void drm_ttm_ipi_handler(void *null)
{
flush_agp_cache();
}
static void drm_ttm_cache_flush(void)
{
if (on_each_cpu(drm_ttm_ipi_handler, NULL, 1, 1) != 0)
DRM_ERROR("Timed out waiting for drm cache flush.\n");
}
/*
* Use kmalloc if possible. Otherwise fall back to vmalloc.
*/
static void *ttm_alloc(unsigned long size, int type)
{
void *ret = NULL;
if (drm_alloc_memctl(size))
return NULL;
if (size <= PAGE_SIZE) {
ret = drm_alloc(size, type);
}
if (!ret) {
ret = vmalloc(size);
}
if (!ret) {
drm_free_memctl(size);
}
return ret;
}
static void ttm_free(void *pointer, unsigned long size, int type)
{
if ((unsigned long)pointer >= VMALLOC_START &&
(unsigned long)pointer <= VMALLOC_END) {
vfree(pointer);
} else {
drm_free(pointer, size, type);
}
drm_free_memctl(size);
}
/*
* Unmap all vma pages from vmas mapping this ttm.
*/
static int unmap_vma_pages(drm_ttm_t * ttm)
{
drm_device_t *dev = ttm->dev;
loff_t offset = ((loff_t) ttm->mapping_offset) << PAGE_SHIFT;
loff_t holelen = ((loff_t) ttm->num_pages) << PAGE_SHIFT;
#ifdef DRM_ODD_MM_COMPAT
int ret;
ret = drm_ttm_lock_mm(ttm);
if (ret)
return ret;
#endif
unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
#ifdef DRM_ODD_MM_COMPAT
drm_ttm_finish_unmap(ttm);
#endif
return 0;
}
/*
* Change caching policy for the linear kernel map
* for range of pages in a ttm.
*/
static int drm_set_caching(drm_ttm_t * ttm, int noncached)
{
int i;
struct page **cur_page;
int do_tlbflush = 0;
if ((ttm->page_flags & DRM_TTM_PAGE_UNCACHED) == noncached)
return 0;
if (noncached)
drm_ttm_cache_flush();
for (i = 0; i < ttm->num_pages; ++i) {
cur_page = ttm->pages + i;
if (*cur_page) {
if (!PageHighMem(*cur_page)) {
if (noncached) {
map_page_into_agp(*cur_page);
} else {
unmap_page_from_agp(*cur_page);
}
do_tlbflush = 1;
}
}
}
if (do_tlbflush)
flush_agp_mappings();
DRM_MASK_VAL(ttm->page_flags, DRM_TTM_PAGE_UNCACHED, noncached);
return 0;
}
/*
* Free all resources associated with a ttm.
*/
int drm_destroy_ttm(drm_ttm_t * ttm)
{
int i;
struct page **cur_page;
drm_ttm_backend_t *be;
if (!ttm)
return 0;
if (atomic_read(&ttm->vma_count) > 0) {
ttm->destroy = 1;
DRM_ERROR("VMAs are still alive. Skipping destruction.\n");
return -EBUSY;
}
DRM_DEBUG("Destroying a ttm\n");
#ifdef DRM_TTM_ODD_COMPAT
BUG_ON(!list_empty(&ttm->vma_list));
BUG_ON(!list_empty(&ttm->p_mm_list));
#endif
be = ttm->be;
if (be) {
be->destroy(be);
ttm->be = NULL;
}
if (ttm->pages) {
drm_buffer_manager_t *bm = &ttm->dev->bm;
if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED)
drm_set_caching(ttm, 0);
for (i = 0; i < ttm->num_pages; ++i) {
cur_page = ttm->pages + i;
if (*cur_page) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
unlock_page(*cur_page);
#else
ClearPageReserved(*cur_page);
#endif
if (page_count(*cur_page) != 1) {
DRM_ERROR("Erroneous page count. "
"Leaking pages.\n");
}
if (page_mapped(*cur_page)) {
DRM_ERROR("Erroneous map count. "
"Leaking page mappings.\n");
}
/*
* End debugging.
*/
drm_free_gatt_pages(*cur_page, 0);
drm_free_memctl(PAGE_SIZE);
--bm->cur_pages;
}
}
ttm_free(ttm->pages, ttm->num_pages * sizeof(*ttm->pages),
DRM_MEM_TTM);
ttm->pages = NULL;
}
drm_ctl_free(ttm, sizeof(*ttm), DRM_MEM_TTM);
return 0;
}
static int drm_ttm_populate(drm_ttm_t * ttm)
{
struct page *page;
unsigned long i;
drm_buffer_manager_t *bm;
drm_ttm_backend_t *be;
if (ttm->state != ttm_unpopulated)
return 0;
bm = &ttm->dev->bm;
be = ttm->be;
for (i = 0; i < ttm->num_pages; ++i) {
page = ttm->pages[i];
if (!page) {
if (drm_alloc_memctl(PAGE_SIZE)) {
return -ENOMEM;
}
page = drm_alloc_gatt_pages(0);
if (!page) {
drm_free_memctl(PAGE_SIZE);
return -ENOMEM;
}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
SetPageLocked(page);
#else
SetPageReserved(page);
#endif
ttm->pages[i] = page;
++bm->cur_pages;
}
}
be->populate(be, ttm->num_pages, ttm->pages);
ttm->state = ttm_unbound;
return 0;
}
/*
* Initialize a ttm.
*/
static drm_ttm_t *drm_init_ttm(struct drm_device *dev, unsigned long size)
{
drm_bo_driver_t *bo_driver = dev->driver->bo_driver;
drm_ttm_t *ttm;
if (!bo_driver)
return NULL;
ttm = drm_ctl_calloc(1, sizeof(*ttm), DRM_MEM_TTM);
if (!ttm)
return NULL;
#ifdef DRM_ODD_MM_COMPAT
INIT_LIST_HEAD(&ttm->p_mm_list);
INIT_LIST_HEAD(&ttm->vma_list);
#endif
ttm->dev = dev;
atomic_set(&ttm->vma_count, 0);
ttm->destroy = 0;
ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
ttm->page_flags = 0;
/*
* Account also for AGP module memory usage.
*/
ttm->pages = ttm_alloc(ttm->num_pages * sizeof(*ttm->pages),
DRM_MEM_TTM);
if (!ttm->pages) {
drm_destroy_ttm(ttm);
DRM_ERROR("Failed allocating page table\n");
return NULL;
}
memset(ttm->pages, 0, ttm->num_pages * sizeof(*ttm->pages));
ttm->be = bo_driver->create_ttm_backend_entry(dev);
if (!ttm->be) {
drm_destroy_ttm(ttm);
DRM_ERROR("Failed creating ttm backend entry\n");
return NULL;
}
ttm->state = ttm_unpopulated;
return ttm;
}
/*
* Unbind a ttm region from the aperture.
*/
int drm_evict_ttm(drm_ttm_t * ttm)
{
drm_ttm_backend_t *be = ttm->be;
int ret;
switch (ttm->state) {
case ttm_bound:
if (be->needs_ub_cache_adjust(be)) {
ret = unmap_vma_pages(ttm);
if (ret) {
return ret;
}
}
be->unbind(be);
break;
default:
break;
}
ttm->state = ttm_evicted;
return 0;
}
void drm_fixup_ttm_caching(drm_ttm_t * ttm)
{
if (ttm->state == ttm_evicted) {
drm_ttm_backend_t *be = ttm->be;
if (be->needs_ub_cache_adjust(be)) {
drm_set_caching(ttm, 0);
}
ttm->state = ttm_unbound;
}
}
int drm_unbind_ttm(drm_ttm_t * ttm)
{
int ret = 0;
if (ttm->state == ttm_bound)
ret = drm_evict_ttm(ttm);
if (ret)
return ret;
drm_fixup_ttm_caching(ttm);
return 0;
}
int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset)
{
int ret = 0;
drm_ttm_backend_t *be;
if (!ttm)
return -EINVAL;
if (ttm->state == ttm_bound)
return 0;
be = ttm->be;
ret = drm_ttm_populate(ttm);
if (ret)
return ret;
if (ttm->state == ttm_unbound && !cached) {
ret = unmap_vma_pages(ttm);
if (ret)
return ret;
drm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED);
}
#ifdef DRM_ODD_MM_COMPAT
else if (ttm->state == ttm_evicted && !cached) {
ret = drm_ttm_lock_mm(ttm);
if (ret)
return ret;
}
#endif
if ((ret = be->bind(be, aper_offset, cached))) {
ttm->state = ttm_evicted;
#ifdef DRM_ODD_MM_COMPAT
if (be->needs_ub_cache_adjust(be))
drm_ttm_unlock_mm(ttm);
#endif
DRM_ERROR("Couldn't bind backend.\n");
return ret;
}
ttm->aper_offset = aper_offset;
ttm->state = ttm_bound;
#ifdef DRM_ODD_MM_COMPAT
if (be->needs_ub_cache_adjust(be)) {
ret = drm_ttm_remap_bound(ttm);
if (ret)
return ret;
}
#endif
return 0;
}
/*
* dev->struct_mutex locked.
*/
static void drm_ttm_object_remove(drm_device_t * dev, drm_ttm_object_t * object)
{
drm_map_list_t *list = &object->map_list;
drm_local_map_t *map;
if (list->user_token)
drm_ht_remove_item(&dev->map_hash, &list->hash);
if (list->file_offset_node) {
drm_mm_put_block(list->file_offset_node);
list->file_offset_node = NULL;
}
map = list->map;
if (map) {
drm_ttm_t *ttm = (drm_ttm_t *) map->offset;
if (ttm) {
if (drm_destroy_ttm(ttm) != -EBUSY) {
drm_ctl_free(map, sizeof(*map), DRM_MEM_TTM);
}
} else {
drm_ctl_free(map, sizeof(*map), DRM_MEM_TTM);
}
}
drm_ctl_free(object, sizeof(*object), DRM_MEM_TTM);
}
void drm_ttm_object_deref_locked(drm_device_t * dev, drm_ttm_object_t * to)
{
if (atomic_dec_and_test(&to->usage)) {
drm_ttm_object_remove(dev, to);
}
}
void drm_ttm_object_deref_unlocked(drm_device_t * dev, drm_ttm_object_t * to)
{
if (atomic_dec_and_test(&to->usage)) {
mutex_lock(&dev->struct_mutex);
if (atomic_read(&to->usage) == 0)
drm_ttm_object_remove(dev, to);
mutex_unlock(&dev->struct_mutex);
}
}
/*
* Create a ttm and add it to the drm book-keeping.
* dev->struct_mutex locked.
*/
int drm_ttm_object_create(drm_device_t * dev, unsigned long size,
uint32_t flags, drm_ttm_object_t ** ttm_object)
{
drm_ttm_object_t *object;
drm_map_list_t *list;
drm_local_map_t *map;
drm_ttm_t *ttm;
object = drm_ctl_calloc(1, sizeof(*object), DRM_MEM_TTM);
if (!object)
return -ENOMEM;
object->flags = flags;
list = &object->map_list;
list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_TTM);
if (!list->map) {
drm_ttm_object_remove(dev, object);
return -ENOMEM;
}
map = list->map;
ttm = drm_init_ttm(dev, size);
if (!ttm) {
DRM_ERROR("Could not create ttm\n");
drm_ttm_object_remove(dev, object);
return -ENOMEM;
}
map->offset = (unsigned long)ttm;
map->type = _DRM_TTM;
map->flags = _DRM_REMOVABLE;
map->size = ttm->num_pages * PAGE_SIZE;
map->handle = (void *)object;
/*
* Add a one-page "hole" to the block size to avoid the mm subsystem
* merging vmas.
* FIXME: Is this really needed?
*/
list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
ttm->num_pages + 1, 0, 0);
if (!list->file_offset_node) {
drm_ttm_object_remove(dev, object);
return -ENOMEM;
}
list->file_offset_node = drm_mm_get_block(list->file_offset_node,
ttm->num_pages + 1, 0);
list->hash.key = list->file_offset_node->start;
if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
drm_ttm_object_remove(dev, object);
return -ENOMEM;
}
list->user_token = ((drm_u64_t) list->hash.key) << PAGE_SHIFT;
ttm->mapping_offset = list->hash.key;
atomic_set(&object->usage, 1);
*ttm_object = object;
return 0;
}

145
linux-core/drm_ttm.h Normal file
View File

@ -0,0 +1,145 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#ifndef _DRM_TTM_H
#define _DRM_TTM_H
#define DRM_HAS_TTM
/*
* The backend GART interface (in our case AGP). Any similar type of device (PCIe?)
* needs only to implement these functions to be usable with the "TTM" interface.
* The AGP backend implementation lives in drm_agpsupport.c and basically maps
* these calls to the available agpgart functions. Each drm device driver gets an
* additional function pointer that creates these types,
* so that the device can choose the correct aperture.
* (Multiple AGP apertures, etc.)
* Most device drivers will let this point to the standard AGP implementation.
*/
#define DRM_BE_FLAG_NEEDS_FREE 0x00000001
#define DRM_BE_FLAG_BOUND_CACHED 0x00000002
#define DRM_BE_FLAG_CBA 0x00000004
typedef struct drm_ttm_backend {
unsigned long aperture_base;
void *private;
uint32_t flags;
uint32_t drm_map_type;
int (*needs_ub_cache_adjust) (struct drm_ttm_backend * backend);
int (*populate) (struct drm_ttm_backend * backend,
unsigned long num_pages, struct page ** pages);
void (*clear) (struct drm_ttm_backend * backend);
int (*bind) (struct drm_ttm_backend * backend,
unsigned long offset, int cached);
int (*unbind) (struct drm_ttm_backend * backend);
void (*destroy) (struct drm_ttm_backend * backend);
} drm_ttm_backend_t;
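To make the contract above concrete, here is a hedged sketch of the smallest backend a driver could hand to TTM. Every dummy_* name is invented for illustration; nothing is programmed into real aperture hardware, and a genuine backend (such as the AGP one referenced above) would also fill in aperture_base and drm_map_type and do real work in populate()/bind()/unbind().

/* Illustrative no-op backend; assumes the kernel DRM headers ("drmP.h",
 * "drm_ttm.h") are available. Not real driver code. */

static int dummy_needs_ub_cache_adjust(struct drm_ttm_backend *be)
{
	return 1;	/* ask TTM to uncache pages before binding */
}

static int dummy_populate(struct drm_ttm_backend *be,
			  unsigned long num_pages, struct page **pages)
{
	return 0;	/* a real backend records the pages for bind() here */
}

static void dummy_clear(struct drm_ttm_backend *be)
{
	/* forget whatever populate() recorded */
}

static int dummy_bind(struct drm_ttm_backend *be,
		      unsigned long offset, int cached)
{
	return 0;	/* a real backend inserts the pages into the GART here */
}

static int dummy_unbind(struct drm_ttm_backend *be)
{
	return 0;	/* and removes them again here */
}

static void dummy_destroy(struct drm_ttm_backend *be)
{
	drm_ctl_free(be, sizeof(*be), DRM_MEM_TTM);
}

static drm_ttm_backend_t *dummy_create_ttm_backend(drm_device_t *dev)
{
	drm_ttm_backend_t *be = drm_ctl_calloc(1, sizeof(*be), DRM_MEM_TTM);

	if (!be)
		return NULL;

	be->flags = DRM_BE_FLAG_NEEDS_FREE;
	be->needs_ub_cache_adjust = dummy_needs_ub_cache_adjust;
	be->populate = dummy_populate;
	be->clear = dummy_clear;
	be->bind = dummy_bind;
	be->unbind = dummy_unbind;
	be->destroy = dummy_destroy;
	return be;
}

A driver would let its drm_bo_driver_t::create_ttm_backend_entry hook return such an object, as i915_buffer.c does with the real AGP backend further down.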
typedef struct drm_ttm {
struct page **pages;
uint32_t page_flags;
unsigned long num_pages;
unsigned long aper_offset;
atomic_t vma_count;
struct drm_device *dev;
int destroy;
uint32_t mapping_offset;
drm_ttm_backend_t *be;
enum {
ttm_bound,
ttm_evicted,
ttm_unbound,
ttm_unpopulated,
} state;
#ifdef DRM_ODD_MM_COMPAT
struct list_head vma_list;
struct list_head p_mm_list;
#endif
} drm_ttm_t;
typedef struct drm_ttm_object {
atomic_t usage;
uint32_t flags;
drm_map_list_t map_list;
} drm_ttm_object_t;
extern int drm_ttm_object_create(struct drm_device *dev, unsigned long size,
uint32_t flags,
drm_ttm_object_t ** ttm_object);
extern void drm_ttm_object_deref_locked(struct drm_device *dev,
drm_ttm_object_t * to);
extern void drm_ttm_object_deref_unlocked(struct drm_device *dev,
drm_ttm_object_t * to);
extern drm_ttm_object_t *drm_lookup_ttm_object(drm_file_t * priv,
uint32_t handle,
int check_owner);
extern int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset);
extern int drm_unbind_ttm(drm_ttm_t * ttm);
/*
* Evict a ttm region. Keeps the aperture caching policy.
*/
extern int drm_evict_ttm(drm_ttm_t * ttm);
extern void drm_fixup_ttm_caching(drm_ttm_t * ttm);
/*
* Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do this,
* which destroys it immediately iff no vmas reference it any more. Otherwise
* destruction is deferred until the last vma exits.
*/
extern int drm_destroy_ttm(drm_ttm_t * ttm);
extern int drm_ttm_ioctl(DRM_IOCTL_ARGS);
static __inline__ drm_ttm_t *drm_ttm_from_object(drm_ttm_object_t * to)
{
return (drm_ttm_t *) to->map_list.map->offset;
}
#define DRM_MASK_VAL(dest, mask, val) \
(dest) = ((dest) & ~(mask)) | ((val) & (mask));
#define DRM_TTM_MASK_FLAGS ((1 << PAGE_SHIFT) - 1)
#define DRM_TTM_MASK_PFN (0xFFFFFFFFU - DRM_TTM_MASK_FLAGS)
/*
* Page flags.
*/
#define DRM_TTM_PAGE_UNCACHED 0x01
#define DRM_TTM_PAGE_USED 0x02
#define DRM_TTM_PAGE_BOUND 0x04
#define DRM_TTM_PAGE_PRESENT 0x08
#endif

View File

@ -34,12 +34,42 @@
*/
#include "drmP.h"
#if defined(__ia64__)
#include <linux/efi.h>
#endif
static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);
static void drm_vm_ttm_close(struct vm_area_struct *vma);
static int drm_vm_ttm_open(struct vm_area_struct *vma);
static void drm_vm_ttm_open_wrapper(struct vm_area_struct *vma);
pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
{
pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
#if defined(__i386__) || defined(__x86_64__)
if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
pgprot_val(tmp) |= _PAGE_PCD;
pgprot_val(tmp) &= ~_PAGE_PWT;
}
#elif defined(__powerpc__)
pgprot_val(tmp) |= _PAGE_NO_CACHE;
if (map_type == _DRM_REGISTERS)
pgprot_val(tmp) |= _PAGE_GUARDED;
#endif
#if defined(__ia64__)
if (efi_range_is_wc(vma->vm_start, vma->vm_end -
vma->vm_start))
tmp = pgprot_writecombine(tmp);
else
tmp = pgprot_noncached(tmp);
#endif
return tmp;
}
/**
* \c nopage method for AGP virtual memory.
@ -70,7 +100,7 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
if (!dev->agp || !dev->agp->cant_use_aperture)
goto vm_nopage_error;
if (drm_ht_find_item(&dev->map_hash, VM_OFFSET(vma), &hash))
if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
goto vm_nopage_error;
r_list = drm_hash_entry(hash, drm_map_list_t, hash);
@ -129,6 +159,95 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
}
#endif /* __OS_HAS_AGP */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) || \
LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
static
#endif
struct page *drm_vm_ttm_fault(struct vm_area_struct *vma,
struct fault_data *data)
{
unsigned long address = data->address;
drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data;
unsigned long page_offset;
struct page *page;
drm_ttm_t *ttm;
drm_buffer_manager_t *bm;
drm_device_t *dev;
unsigned long pfn;
int err;
pgprot_t pgprot;
if (!map) {
data->type = VM_FAULT_OOM;
return NULL;
}
if (address > vma->vm_end) {
data->type = VM_FAULT_SIGBUS;
return NULL;
}
ttm = (drm_ttm_t *) map->offset;
dev = ttm->dev;
/*
* Perhaps retry here?
*/
mutex_lock(&dev->struct_mutex);
drm_fixup_ttm_caching(ttm);
bm = &dev->bm;
page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
page = ttm->pages[page_offset];
if (!page) {
if (drm_alloc_memctl(PAGE_SIZE)) {
data->type = VM_FAULT_OOM;
goto out;
}
page = ttm->pages[page_offset] = drm_alloc_gatt_pages(0);
if (!page) {
drm_free_memctl(PAGE_SIZE);
data->type = VM_FAULT_OOM;
goto out;
}
++bm->cur_pages;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
SetPageLocked(page);
#else
SetPageReserved(page);
#endif
}
if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED) {
/*
* FIXME: Check can't map aperture flag.
*/
pfn = ttm->aper_offset + page_offset +
(ttm->be->aperture_base >> PAGE_SHIFT);
pgprot = drm_io_prot(ttm->be->drm_map_type, vma);
} else {
pfn = page_to_pfn(page);
pgprot = vma->vm_page_prot;
}
err = vm_insert_pfn(vma, address, pfn, pgprot);
if (!err || err == -EBUSY)
data->type = VM_FAULT_MINOR;
else
data->type = VM_FAULT_OOM;
out:
mutex_unlock(&dev->struct_mutex);
return NULL;
}
#endif
/**
* \c nopage method for shared virtual memory.
*
@ -198,7 +317,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
} else {
dev->vmalist = pt->next;
}
drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
drm_ctl_free(pt, sizeof(*pt), DRM_MEM_VMAS);
} else {
prev = pt;
}
@ -243,6 +362,9 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
dmah.size = map->size;
__drm_pci_free(dev, &dmah);
break;
case _DRM_TTM:
BUG_ON(1);
break;
}
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
}
@ -358,6 +480,7 @@ static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
return drm_do_vm_sg_nopage(vma, address);
}
#else /* LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,0) */
static struct page *drm_vm_nopage(struct vm_area_struct *vma,
@ -414,6 +537,20 @@ static struct vm_operations_struct drm_vm_sg_ops = {
.close = drm_vm_close,
};
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
static struct vm_operations_struct drm_vm_ttm_ops = {
.nopage = drm_vm_ttm_nopage,
.open = drm_vm_ttm_open_wrapper,
.close = drm_vm_ttm_close,
};
#else
static struct vm_operations_struct drm_vm_ttm_ops = {
.fault = drm_vm_ttm_fault,
.open = drm_vm_ttm_open_wrapper,
.close = drm_vm_ttm_close,
};
#endif
/**
* \c open method for shared virtual memory.
*
@ -432,7 +569,7 @@ static void drm_vm_open(struct vm_area_struct *vma)
vma->vm_start, vma->vm_end - vma->vm_start);
atomic_inc(&dev->vma_count);
vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
vma_entry = drm_ctl_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
if (vma_entry) {
mutex_lock(&dev->struct_mutex);
vma_entry->vma = vma;
@ -443,6 +580,29 @@ static void drm_vm_open(struct vm_area_struct *vma)
}
}
static int drm_vm_ttm_open(struct vm_area_struct *vma) {
drm_local_map_t *map = (drm_local_map_t *)vma->vm_private_data;
drm_ttm_t *ttm;
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->head->dev;
drm_vm_open(vma);
mutex_lock(&dev->struct_mutex);
ttm = (drm_ttm_t *) map->offset;
atomic_inc(&ttm->vma_count);
#ifdef DRM_ODD_MM_COMPAT
drm_ttm_add_vma(ttm, vma);
#endif
mutex_unlock(&dev->struct_mutex);
return 0;
}
static void drm_vm_ttm_open_wrapper(struct vm_area_struct *vma)
{
drm_vm_ttm_open(vma);
}
/**
* \c close method for all virtual memory types.
*
@ -469,13 +629,42 @@ static void drm_vm_close(struct vm_area_struct *vma)
} else {
dev->vmalist = pt->next;
}
drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
drm_ctl_free(pt, sizeof(*pt), DRM_MEM_VMAS);
break;
}
}
mutex_unlock(&dev->struct_mutex);
}
static void drm_vm_ttm_close(struct vm_area_struct *vma)
{
drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data;
drm_ttm_t *ttm;
drm_device_t *dev;
int ret;
drm_vm_close(vma);
if (map) {
ttm = (drm_ttm_t *) map->offset;
dev = ttm->dev;
mutex_lock(&dev->struct_mutex);
#ifdef DRM_ODD_MM_COMPAT
drm_ttm_delete_vma(ttm, vma);
#endif
if (atomic_dec_and_test(&ttm->vma_count)) {
if (ttm->destroy) {
ret = drm_destroy_ttm(ttm);
BUG_ON(ret);
drm_ctl_free(map, sizeof(*map), DRM_MEM_TTM);
}
}
mutex_unlock(&dev->struct_mutex);
}
return;
}
/**
* mmap DMA memory.
*
@ -496,8 +685,8 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
lock_kernel();
dev = priv->head->dev;
dma = dev->dma;
DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
vma->vm_start, vma->vm_end, VM_OFFSET(vma));
DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
vma->vm_start, vma->vm_end, vma->vm_pgoff);
/* Length must match exact page count */
if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
@ -572,8 +761,8 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
unsigned long offset = 0;
drm_hash_item_t *hash;
DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
vma->vm_start, vma->vm_end, VM_OFFSET(vma));
DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
vma->vm_start, vma->vm_end, vma->vm_pgoff);
if (!priv->authenticated)
return -EACCES;
@ -582,7 +771,7 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
* the AGP mapped at physical address 0
* --BenH.
*/
if (!VM_OFFSET(vma)
if (!vma->vm_pgoff
#if __OS_HAS_AGP
&& (!dev->agp
|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
@ -590,7 +779,7 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
)
return drm_mmap_dma(filp, vma);
if (drm_ht_find_item(&dev->map_hash, VM_OFFSET(vma), &hash)) {
if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff , &hash)) {
DRM_ERROR("Could not find map\n");
return -EINVAL;
}
@ -636,27 +825,9 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
/* fall through to _DRM_FRAME_BUFFER... */
case _DRM_FRAME_BUFFER:
case _DRM_REGISTERS:
#if defined(__i386__) || defined(__x86_64__)
if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) {
pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
}
#elif defined(__powerpc__)
pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
if (map->type == _DRM_REGISTERS)
pgprot_val(vma->vm_page_prot) |= _PAGE_GUARDED;
#endif
vma->vm_flags |= VM_IO; /* not in core dump */
#if defined(__ia64__)
if (efi_range_is_wc(vma->vm_start, vma->vm_end -
vma->vm_start))
vma->vm_page_prot =
pgprot_writecombine(vma->vm_page_prot);
else
vma->vm_page_prot =
pgprot_noncached(vma->vm_page_prot);
#endif
offset = dev->driver->get_reg_ofs(dev);
vma->vm_flags |= VM_IO; /* not in core dump */
vma->vm_page_prot = drm_io_prot(map->type, vma);
#ifdef __sparc__
if (io_remap_pfn_range(vma, vma->vm_start,
(map->offset + offset) >>PAGE_SHIFT,
@ -703,6 +874,20 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
vma->vm_flags |= VM_RESERVED;
#endif
break;
case _DRM_TTM: {
vma->vm_ops = &drm_vm_ttm_ops;
vma->vm_private_data = (void *) map;
vma->vm_file = filp;
vma->vm_flags |= VM_RESERVED | VM_IO;
#ifdef DRM_ODD_MM_COMPAT
mutex_lock(&dev->struct_mutex);
drm_ttm_map_bound(vma);
mutex_unlock(&dev->struct_mutex);
#endif
if (drm_vm_ttm_open(vma))
return -EAGAIN;
return 0;
}
default:
return -EINVAL; /* This should never happen. */
}

View File

@ -4,7 +4,6 @@
* Copyright (C) 2000 David S. Miller (davem@redhat.com)
*/
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <asm/shmparam.h>

View File

@ -30,7 +30,6 @@
* Gareth Hughes <gareth@valinux.com>
*/
#include <linux/config.h>
#include "drmP.h"
#include "drm.h"
#include "i810_drm.h"

View File

@ -32,8 +32,6 @@
* Keith Whitwell <keith@tungstengraphics.com>
*/
#include <linux/config.h>
#include "drmP.h"
#include "drm.h"
#include "i830_drm.h"

66
linux-core/i915_buffer.c Normal file
View File

@ -0,0 +1,66 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include "drmP.h"
#include "i915_drm.h"
#include "i915_drv.h"
drm_ttm_backend_t *i915_create_ttm_backend_entry(drm_device_t * dev)
{
return drm_agp_init_ttm(dev, NULL);
}
int i915_fence_types(uint32_t buffer_flags, uint32_t * class, uint32_t * type)
{
*class = 0;
if (buffer_flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
*type = 3;
else
*type = 1;
return 0;
}
int i915_invalidate_caches(drm_device_t * dev, uint32_t flags)
{
/*
* FIXME: Only emit once per batchbuffer submission.
*/
uint32_t flush_cmd = MI_NO_WRITE_FLUSH;
if (flags & DRM_BO_FLAG_READ)
flush_cmd |= MI_READ_FLUSH;
if (flags & DRM_BO_FLAG_EXE)
flush_cmd |= MI_EXE_FLUSH;
return i915_emit_mi_flush(dev, flush_cmd);
}

View File

@ -38,6 +38,27 @@ static struct pci_device_id pciidlist[] = {
i915_PCI_IDS
};
#ifdef I915_HAVE_FENCE
static drm_fence_driver_t i915_fence_driver = {
.no_types = 2,
.wrap_diff = (1 << 30),
.flush_diff = (1 << 29),
.sequence_mask = 0xffffffffU,
.lazy_capable = 1,
.emit = i915_fence_emit_sequence,
.poke_flush = i915_poke_flush,
};
#endif
#ifdef I915_HAVE_BUFFER
static drm_bo_driver_t i915_bo_driver = {
.iomap = {NULL, NULL},
.cached = {1, 1},
.create_ttm_backend_entry = i915_create_ttm_backend_entry,
.fence_type = i915_fence_types,
.invalidate_caches = i915_invalidate_caches
};
#endif
static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static struct drm_driver driver = {
/* don't use mtrr's here, the Xserver or user space app should
@ -79,7 +100,12 @@ static struct drm_driver driver = {
.probe = probe,
.remove = __devexit_p(drm_cleanup_pci),
},
#ifdef I915_HAVE_FENCE
.fence_driver = &i915_fence_driver,
#endif
#ifdef I915_HAVE_BUFFER
.bo_driver = &i915_bo_driver,
#endif
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,

146
linux-core/i915_fence.c Normal file
View File

@ -0,0 +1,146 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
/*
* Implements an Intel sync flush operation.
*/
static void i915_perform_flush(drm_device_t * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
drm_fence_manager_t *fm = &dev->fm;
drm_fence_driver_t *driver = dev->driver->fence_driver;
uint32_t flush_flags = 0;
uint32_t flush_sequence = 0;
uint32_t i_status;
uint32_t diff;
uint32_t sequence;
if (!dev_priv)
return;
if (fm->pending_exe_flush) {
sequence = READ_BREADCRUMB(dev_priv);
/*
* First update fences with the current breadcrumb.
*/
diff = sequence - fm->last_exe_flush;
if (diff < driver->wrap_diff && diff != 0) {
drm_fence_handler(dev, sequence, DRM_FENCE_TYPE_EXE);
}
diff = sequence - fm->exe_flush_sequence;
if (diff < driver->wrap_diff) {
fm->pending_exe_flush = 0;
if (dev_priv->fence_irq_on) {
i915_user_irq_off(dev_priv);
dev_priv->fence_irq_on = 0;
}
} else if (!dev_priv->fence_irq_on) {
i915_user_irq_on(dev_priv);
dev_priv->fence_irq_on = 1;
}
}
if (dev_priv->flush_pending) {
i_status = READ_HWSP(dev_priv, 0);
if ((i_status & (1 << 12)) !=
(dev_priv->saved_flush_status & (1 << 12))) {
flush_flags = dev_priv->flush_flags;
flush_sequence = dev_priv->flush_sequence;
dev_priv->flush_pending = 0;
drm_fence_handler(dev, flush_sequence, flush_flags);
}
}
if (fm->pending_flush && !dev_priv->flush_pending) {
dev_priv->flush_sequence = (uint32_t) READ_BREADCRUMB(dev_priv);
dev_priv->flush_flags = fm->pending_flush;
dev_priv->saved_flush_status = READ_HWSP(dev_priv, 0);
I915_WRITE(I915REG_INSTPM, (1 << 5) | (1 << 21));
dev_priv->flush_pending = 1;
fm->pending_flush = 0;
}
if (dev_priv->flush_pending) {
i_status = READ_HWSP(dev_priv, 0);
if ((i_status & (1 << 12)) !=
(dev_priv->saved_flush_status & (1 << 12))) {
flush_flags = dev_priv->flush_flags;
flush_sequence = dev_priv->flush_sequence;
dev_priv->flush_pending = 0;
drm_fence_handler(dev, flush_sequence, flush_flags);
}
}
}
void i915_poke_flush(drm_device_t * dev)
{
drm_fence_manager_t *fm = &dev->fm;
unsigned long flags;
write_lock_irqsave(&fm->lock, flags);
i915_perform_flush(dev);
write_unlock_irqrestore(&fm->lock, flags);
}
int i915_fence_emit_sequence(drm_device_t * dev, uint32_t flags,
uint32_t * sequence, uint32_t * native_type)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
if (!dev_priv)
return -EINVAL;
i915_emit_irq(dev);
*sequence = (uint32_t) dev_priv->counter;
*native_type = DRM_FENCE_TYPE_EXE;
if (flags & DRM_I915_FENCE_FLAG_FLUSHED)
*native_type |= DRM_I915_FENCE_TYPE_RW;
return 0;
}
void i915_fence_handler(drm_device_t * dev)
{
drm_fence_manager_t *fm = &dev->fm;
write_lock(&fm->lock);
i915_perform_flush(dev);
write_unlock(&fm->lock);
}

View File

@ -22,7 +22,6 @@
/* derived from tdfx_drv.c */
#include <linux/config.h>
#include "drmP.h"
#include "imagine_drv.h"

View File

@ -27,7 +27,6 @@
* Leif Delgass <ldelgass@retinalburn.net>
*/
#include <linux/config.h>
#include "drmP.h"
#include "drm.h"
#include "mach64_drm.h"

View File

@ -29,7 +29,6 @@
* Gareth Hughes <gareth@valinux.com>
*/
#include <linux/config.h>
#include "drmP.h"
#include "drm.h"
#include "mga_drm.h"
@ -49,6 +48,7 @@ static struct drm_driver driver = {
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA |
DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
DRIVER_IRQ_VBL,
.dev_priv_size = sizeof (drm_mga_buf_priv_t),
.load = mga_driver_load,
.unload = mga_driver_unload,
.lastclose = mga_driver_lastclose,

View File

@ -32,7 +32,6 @@
* Lars Knoll <lars@trolltech.com>
*/
#include <linux/config.h>
#include "drmP.h"
#include "nv_drv.h"

View File

@ -29,7 +29,6 @@
* Gareth Hughes <gareth@valinux.com>
*/
#include <linux/config.h>
#include "drmP.h"
#include "drm.h"
#include "r128_drm.h"

View File

@ -29,7 +29,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/config.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"

View File

@ -23,7 +23,6 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/config.h>
#include "drmP.h"
#include "savage_drm.h"
#include "savage_drv.h"

View File

@ -25,7 +25,6 @@
*
*/
#include <linux/config.h>
#include "drmP.h"
#include "sis_drm.h"
#include "sis_drv.h"

View File

@ -30,7 +30,6 @@
* Gareth Hughes <gareth@valinux.com>
*/
#include <linux/config.h>
#include "drmP.h"
#include "tdfx_drv.h"

View File

@ -69,9 +69,6 @@
#endif
#if defined(__linux__)
#if defined(__KERNEL__)
#include <linux/config.h>
#endif
#include <asm/ioctl.h> /* For _IO* macros */
#define DRM_IOCTL_NR(n) _IOC_NR(n)
#define DRM_IOC_VOID _IOC_NONE
@ -134,6 +131,12 @@
#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
#if defined(__linux__)
#if defined(__KERNEL__)
typedef __u64 drm_u64_t;
#else
typedef unsigned long long drm_u64_t;
#endif
typedef unsigned int drm_handle_t;
#else
typedef unsigned long drm_handle_t; /**< To mapped regions */
@ -267,7 +270,8 @@ typedef enum drm_map_type {
_DRM_SHM = 2, /**< shared, cached */
_DRM_AGP = 3, /**< AGP/GART */
_DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */
_DRM_CONSISTENT = 5 /**< Consistent memory for PCI DMA */
_DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */
_DRM_TTM = 6
} drm_map_type_t;
/**
@ -656,6 +660,190 @@ typedef struct drm_set_version {
int drm_dd_minor;
} drm_set_version_t;
#define DRM_FENCE_FLAG_EMIT 0x00000001
#define DRM_FENCE_FLAG_SHAREABLE 0x00000002
#define DRM_FENCE_FLAG_WAIT_LAZY 0x00000004
#define DRM_FENCE_FLAG_WAIT_IGNORE_SIGNALS 0x00000008
/* Reserved for driver use */
#define DRM_FENCE_MASK_DRIVER 0xFF000000
#define DRM_FENCE_TYPE_EXE 0x00000001
typedef struct drm_fence_arg {
unsigned handle;
int class;
unsigned type;
unsigned flags;
unsigned signaled;
unsigned expand_pad[4]; /*Future expansion */
enum {
drm_fence_create,
drm_fence_destroy,
drm_fence_reference,
drm_fence_unreference,
drm_fence_signaled,
drm_fence_flush,
drm_fence_wait,
drm_fence_emit,
drm_fence_buffers
} op;
} drm_fence_arg_t;
/* Buffer permissions, referring to how the GPU uses the buffers.
These translate to the fence types used for the buffers.
Typically a texture buffer is read, a destination buffer is write, and
a command (batch) buffer is exe. The flags can be or-ed together. */
#define DRM_BO_FLAG_READ 0x00000001
#define DRM_BO_FLAG_WRITE 0x00000002
#define DRM_BO_FLAG_EXE 0x00000004
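As a small illustration (flag values only, assuming this header is included): a batch buffer the GPU fetches and executes combines read and exe, while a render target asks for write.

unsigned int batch_perms  = DRM_BO_FLAG_READ | DRM_BO_FLAG_EXE;	/* command buffer */
unsigned int target_perms = DRM_BO_FLAG_WRITE;				/* render target */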
/*
* Status flags. Can be read to determine the actual state of a buffer.
*/
/*
* Cannot evict this buffer. Not even with force. This type of buffer should
* only be available for root, and must be manually removed before buffer
* manager shutdown or swapout.
*/
#define DRM_BO_FLAG_NO_EVICT 0x00000010
/* Always keep a system memory shadow of a vram buffer */
#define DRM_BO_FLAG_SHADOW_VRAM 0x00000020
/* The buffer is shareable with other processes */
#define DRM_BO_FLAG_SHAREABLE 0x00000040
/* The buffer is currently cached */
#define DRM_BO_FLAG_CACHED 0x00000080
/* Make sure that every time this buffer is validated, it ends up at the same
* location. The buffer will also not be evicted when claiming space for
* other buffers. Basically a pinned buffer, but it may be thrown out as
* part of buffer manager shutdown or swapout. Not supported yet. */
#define DRM_BO_FLAG_NO_MOVE 0x00000100
/* Make sure the buffer is in cached memory when mapped for reading */
#define DRM_BO_FLAG_READ_CACHED 0x00080000
/* When there is a choice between VRAM and TT, prefer VRAM.
The default behaviour is to prefer TT. */
#define DRM_BO_FLAG_PREFER_VRAM 0x00040000
/* Bind this buffer cached if the hardware supports it. */
#define DRM_BO_FLAG_BIND_CACHED 0x0002000
/* System Memory */
#define DRM_BO_FLAG_MEM_LOCAL 0x01000000
/* Translation table memory */
#define DRM_BO_FLAG_MEM_TT 0x02000000
/* Vram memory */
#define DRM_BO_FLAG_MEM_VRAM 0x04000000
/* Unmappable Vram memory */
#define DRM_BO_FLAG_MEM_VRAM_NM 0x08000000
/* Memory flag mask */
#define DRM_BO_MASK_MEM 0xFF000000
/* When creating a buffer, avoid system storage even if allowed */
#define DRM_BO_HINT_AVOID_LOCAL 0x00000001
/* Don't block on validate and map */
#define DRM_BO_HINT_DONT_BLOCK 0x00000002
/* Don't place this buffer on the unfenced list.*/
#define DRM_BO_HINT_DONT_FENCE 0x00000004
#define DRM_BO_HINT_WAIT_LAZY 0x00000008
#define DRM_BO_HINT_ALLOW_UNFENCED_MAP 0x00000010
/* Driver specific flags. Could be for example rendering engine */
#define DRM_BO_MASK_DRIVER 0x00F00000
typedef enum {
drm_bo_type_dc,
drm_bo_type_user,
drm_bo_type_fake
}drm_bo_type_t;
typedef struct drm_bo_arg_request {
unsigned handle; /* User space handle */
unsigned mask;
unsigned hint;
drm_u64_t size;
drm_bo_type_t type;
unsigned arg_handle;
drm_u64_t buffer_start;
unsigned page_alignment;
unsigned expand_pad[4]; /*Future expansion */
enum {
drm_bo_create,
drm_bo_validate,
drm_bo_map,
drm_bo_unmap,
drm_bo_fence,
drm_bo_destroy,
drm_bo_reference,
drm_bo_unreference,
drm_bo_info,
drm_bo_wait_idle,
drm_bo_ref_fence
} op;
} drm_bo_arg_request_t;
/*
* Reply flags
*/
#define DRM_BO_REP_BUSY 0x00000001
typedef struct drm_bo_arg_reply {
int ret;
unsigned handle;
unsigned flags;
drm_u64_t size;
drm_u64_t offset;
drm_u64_t arg_handle;
unsigned mask;
drm_u64_t buffer_start;
unsigned fence_flags;
unsigned rep_flags;
unsigned page_alignment;
unsigned expand_pad[4]; /*Future expansion */
}drm_bo_arg_reply_t;
typedef struct drm_bo_arg{
int handled;
drm_u64_t next;
union {
drm_bo_arg_request_t req;
drm_bo_arg_reply_t rep;
} d;
} drm_bo_arg_t;
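To make the request/reply convention concrete, here is a hedged user-space sketch, not the libdrm implementation, that creates a single buffer object through DRM_IOCTL_BUFOBJ (defined further down). The function name and the particular flag combination are illustrative assumptions only.

#include <string.h>
#include <errno.h>
#include <sys/ioctl.h>
#include "drm.h"

static int bo_create_sketch(int fd, drm_u64_t size, unsigned *handle_out)
{
	drm_bo_arg_t arg;
	drm_bo_arg_request_t *req = &arg.d.req;

	memset(&arg, 0, sizeof(arg));		/* also clears 'next': single request */
	req->op = drm_bo_create;
	req->type = drm_bo_type_dc;
	req->size = size;
	req->mask = DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_MEM_TT;
	req->hint = 0;

	if (ioctl(fd, DRM_IOCTL_BUFOBJ, &arg))
		return -errno;
	if (arg.d.rep.ret)
		return arg.d.rep.ret;

	*handle_out = arg.d.rep.handle;		/* kernel-assigned buffer handle */
	return 0;
}

The same drm_bo_arg_t carries the reply back in d.rep, which is why request and reply share a union.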
#define DRM_BO_MEM_LOCAL 0
#define DRM_BO_MEM_TT 1
#define DRM_BO_MEM_VRAM 2
#define DRM_BO_MEM_VRAM_NM 3
#define DRM_BO_MEM_TYPES 2 /* For now. */
typedef union drm_mm_init_arg{
struct {
enum {
mm_init,
mm_takedown,
mm_query,
mm_lock,
mm_unlock
} op;
drm_u64_t p_offset;
drm_u64_t p_size;
unsigned mem_type;
unsigned expand_pad[8]; /*Future expansion */
} req;
struct {
drm_handle_t mm_sarea;
unsigned expand_pad[8]; /*Future expansion */
} rep;
} drm_mm_init_arg_t;
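Likewise, a hedged sketch of bringing up one memory type of the buffer manager via DRM_IOCTL_MM_INIT; whether p_offset and p_size are expressed in pages is an assumption made here for illustration, as is the function name.

#include <string.h>
#include <errno.h>
#include <sys/ioctl.h>
#include "drm.h"

static int mm_init_sketch(int fd, drm_u64_t first_page, drm_u64_t num_pages)
{
	drm_mm_init_arg_t arg;

	memset(&arg, 0, sizeof(arg));
	arg.req.op = mm_init;
	arg.req.mem_type = DRM_BO_MEM_TT;	/* translation-table memory */
	arg.req.p_offset = first_page;
	arg.req.p_size = num_pages;

	if (ioctl(fd, DRM_IOCTL_MM_INIT, &arg))
		return -errno;
	/* On success, the reply union (arg.rep.mm_sarea) is where a handle
	 * would come back to the caller. */
	return 0;
}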
/**
* \name Ioctls Definitions
*/
@ -721,17 +909,23 @@ typedef struct drm_set_version {
#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, drm_wait_vblank_t)
#define DRM_IOCTL_FENCE DRM_IOWR(0x3b, drm_fence_arg_t)
#define DRM_IOCTL_BUFOBJ DRM_IOWR(0x3d, drm_bo_arg_t)
#define DRM_IOCTL_MM_INIT DRM_IOWR(0x3e, drm_mm_init_arg_t)
#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, drm_update_draw_t)
/*@}*/
/**
* Device specific ioctls should only be in their respective headers
* The device specific ioctl range is from 0x40 to 0x79.
* The device specific ioctl range is from 0x40 to 0x99.
* Generic ioctls restart at 0xA0.
*
* \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
* drmCommandReadWrite().
*/
#define DRM_COMMAND_BASE 0x40
#define DRM_COMMAND_END 0xA0
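As an illustration of the numbering scheme, a driver header places its private ioctls relative to DRM_COMMAND_BASE; the FOO names and payload type below are purely hypothetical.

#define DRM_FOO_FROB		0x02	/* driver-local index, hypothetical */
#define DRM_IOCTL_FOO_FROB	DRM_IOWR(DRM_COMMAND_BASE + DRM_FOO_FROB, drm_foo_frob_t)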
#endif

View File

@ -196,9 +196,10 @@ static int i915_initialize(drm_device_t * dev,
I915_WRITE(0x02080, dev_priv->dma_status_page);
DRM_DEBUG("Enabled hardware status page\n");
dev->dev_private = (void *)dev_priv;
#ifdef I915_HAVE_BUFFER
drm_bo_driver_init(dev);
#endif
return 0;
}
@ -435,17 +436,39 @@ static void i915_emit_breadcrumb(drm_device_t *dev)
dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;
if (dev_priv->counter > 0x7FFFFFFFUL)
dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;
BEGIN_LP_RING(4);
OUT_RING(CMD_STORE_DWORD_IDX);
OUT_RING(20);
OUT_RING(dev_priv->counter);
OUT_RING(0);
ADVANCE_LP_RING();
#ifdef I915_HAVE_FENCE
drm_fence_flush_old(dev, dev_priv->counter);
#endif
}
int i915_emit_mi_flush(drm_device_t *dev, uint32_t flush)
{
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t flush_cmd = CMD_MI_FLUSH;
RING_LOCALS;
flush_cmd |= flush;
i915_kernel_lost_context(dev);
BEGIN_LP_RING(4);
OUT_RING(flush_cmd);
OUT_RING(0);
OUT_RING(0);
OUT_RING(0);
ADVANCE_LP_RING();
return 0;
}
static int i915_dispatch_cmdbuffer(drm_device_t * dev,
drm_i915_cmdbuffer_t * cmd)
{
@ -566,7 +589,9 @@ static int i915_dispatch_flip(drm_device_t * dev)
OUT_RING(dev_priv->counter);
OUT_RING(0);
ADVANCE_LP_RING();
#ifdef I915_HAVE_FENCE
drm_fence_flush_old(dev, dev_priv->counter);
#endif
dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
return 0;
}
@ -680,6 +705,7 @@ static int i915_flip_bufs(DRM_IOCTL_ARGS)
return i915_dispatch_flip(dev);
}
static int i915_getparam(DRM_IOCTL_ARGS)
{
DRM_DEVICE;

View File

@ -115,6 +115,16 @@ typedef struct _drm_i915_sarea {
int pipeB_h;
} drm_i915_sarea_t;
/* Driver specific fence types and classes.
*/
/* The only fence class we support */
#define DRM_I915_FENCE_CLASS_ACCEL 0
/* Fence type that guarantees read-write flush */
#define DRM_I915_FENCE_TYPE_RW 2
/* MI_FLUSH programmed just before the fence */
#define DRM_I915_FENCE_FLAG_FLUSHED 0x01000000
/* Flags for perf_boxes
*/
#define I915_BOX_RING_EMPTY 0x1

View File

@ -35,9 +35,9 @@
#define DRIVER_AUTHOR "Tungsten Graphics, Inc."
#define DRIVER_NAME "i915"
#define DRIVER_NAME "i915-mm"
#define DRIVER_DESC "Intel Graphics"
#define DRIVER_DATE "20060119"
#define DRIVER_DATE "20060929"
/* Interface history:
*
@ -50,9 +50,14 @@
* - Support vertical blank on secondary display pipe
*/
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 6
#define DRIVER_MINOR 7
#define DRIVER_PATCHLEVEL 0
#if defined(__linux__)
#define I915_HAVE_FENCE
#define I915_HAVE_BUFFER
#endif
typedef struct _drm_i915_ring_buffer {
int tail_mask;
unsigned long Start;
@ -90,7 +95,7 @@ typedef struct drm_i915_private {
drm_dma_handle_t *status_page_dmah;
void *hw_status_page;
dma_addr_t dma_status_page;
unsigned long counter;
uint32_t counter;
unsigned int cpp;
int back_offset;
@ -108,6 +113,18 @@ typedef struct drm_i915_private {
struct mem_block *agp_heap;
unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
int vblank_pipe;
spinlock_t user_irq_lock;
int user_irq_refcount;
int fence_irq_on;
uint32_t irq_enable_reg;
int irq_enabled;
#ifdef I915_HAVE_FENCE
uint32_t flush_sequence;
uint32_t flush_flags;
uint32_t flush_pending;
uint32_t saved_flush_status;
#endif
spinlock_t swaps_lock;
drm_i915_vbl_swap_t vbl_swaps;
@ -125,6 +142,8 @@ extern void i915_driver_preclose(drm_device_t * dev, DRMFILE filp);
extern int i915_driver_device_is_agp(drm_device_t * dev);
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
extern int i915_emit_mi_flush(drm_device_t *dev, uint32_t flush);
/* i915_irq.c */
extern int i915_irq_emit(DRM_IOCTL_ARGS);
@ -138,6 +157,9 @@ extern void i915_driver_irq_postinstall(drm_device_t * dev);
extern void i915_driver_irq_uninstall(drm_device_t * dev);
extern int i915_vblank_pipe_set(DRM_IOCTL_ARGS);
extern int i915_vblank_pipe_get(DRM_IOCTL_ARGS);
extern int i915_emit_irq(drm_device_t * dev);
extern void i915_user_irq_on(drm_i915_private_t *dev_priv);
extern void i915_user_irq_off(drm_i915_private_t *dev_priv);
extern int i915_vblank_swap(DRM_IOCTL_ARGS);
/* i915_mem.c */
@ -148,6 +170,23 @@ extern int i915_mem_destroy_heap(DRM_IOCTL_ARGS);
extern void i915_mem_takedown(struct mem_block **heap);
extern void i915_mem_release(drm_device_t * dev,
DRMFILE filp, struct mem_block *heap);
#ifdef I915_HAVE_FENCE
/* i915_fence.c */
extern void i915_fence_handler(drm_device_t *dev);
extern int i915_fence_emit_sequence(drm_device_t *dev, uint32_t flags,
uint32_t *sequence,
uint32_t *native_type);
extern void i915_poke_flush(drm_device_t *dev);
#endif
#ifdef I915_HAVE_BUFFER
/* i915_buffer.c */
extern drm_ttm_backend_t *i915_create_ttm_backend_entry(drm_device_t *dev);
extern int i915_fence_types(uint32_t buffer_flags, uint32_t *class, uint32_t *type);
extern int i915_invalidate_caches(drm_device_t *dev, uint32_t buffer_flags);
#endif
#define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, (reg))
#define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
@ -198,6 +237,11 @@ extern int i915_wait_ring(drm_device_t * dev, int n, const char *caller);
#define INST_OP_FLUSH 0x02000000
#define INST_FLUSH_MAP_CACHE 0x00000001
#define CMD_MI_FLUSH (0x04 << 23)
#define MI_NO_WRITE_FLUSH (1 << 2)
#define MI_READ_FLUSH (1 << 0)
#define MI_EXE_FLUSH (1 << 1)
#define BB1_START_ADDR_MASK (~0x7)
#define BB1_PROTECTED (1<<0)
#define BB1_UNPROTECTED (0<<0)
@ -207,6 +251,7 @@ extern int i915_wait_ring(drm_device_t * dev, int n, const char *caller);
#define I915REG_INT_IDENTITY_R 0x020a4
#define I915REG_INT_MASK_R 0x020a8
#define I915REG_INT_ENABLE_R 0x020a0
#define I915REG_INSTPM 0x020c0
#define SRX_INDEX 0x3c4
#define SRX_DATA 0x3c5
@ -292,6 +337,6 @@ extern int i915_wait_ring(drm_device_t * dev, int n, const char *caller);
#define CMD_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
#define READ_BREADCRUMB(dev_priv) (((u32*)(dev_priv->hw_status_page))[5])
#define READ_BREADCRUMB(dev_priv) (((volatile u32*)(dev_priv->hw_status_page))[5])
#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg])
#endif

View File

@ -138,10 +138,11 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
temp = I915_READ16(I915REG_INT_IDENTITY_R);
temp &= (USER_INT_FLAG | VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG);
temp &= (dev_priv->irq_enable_reg | USER_INT_FLAG);
#if 0
DRM_DEBUG("%s flag=%08x\n", __FUNCTION__, temp);
#endif
if (temp == 0)
return IRQ_NONE;
@ -149,8 +150,12 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
if (temp & USER_INT_FLAG)
if (temp & USER_INT_FLAG) {
DRM_WAKEUP(&dev_priv->irq_queue);
#ifdef I915_HAVE_FENCE
i915_fence_handler(dev);
#endif
}
if (temp & (VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG)) {
int vblank_pipe = dev_priv->vblank_pipe;
@ -178,7 +183,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
return IRQ_HANDLED;
}
static int i915_emit_irq(drm_device_t * dev)
int i915_emit_irq(drm_device_t * dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@ -208,6 +213,28 @@ static int i915_emit_irq(drm_device_t * dev)
}
void i915_user_irq_on(drm_i915_private_t *dev_priv)
{
spin_lock(&dev_priv->user_irq_lock);
if (dev_priv->irq_enabled && (++dev_priv->user_irq_refcount == 1)){
dev_priv->irq_enable_reg |= USER_INT_FLAG;
I915_WRITE16(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg);
}
spin_unlock(&dev_priv->user_irq_lock);
}
void i915_user_irq_off(drm_i915_private_t *dev_priv)
{
spin_lock(&dev_priv->user_irq_lock);
if (dev_priv->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
// dev_priv->irq_enable_reg &= ~USER_INT_FLAG;
// I915_WRITE16(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg);
}
spin_unlock(&dev_priv->user_irq_lock);
}
static int i915_wait_irq(drm_device_t * dev, int irq_nr)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@ -221,8 +248,10 @@ static int i915_wait_irq(drm_device_t * dev, int irq_nr)
dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
i915_user_irq_on(dev_priv);
DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
READ_BREADCRUMB(dev_priv) >= irq_nr);
i915_user_irq_off(dev_priv);
if (ret == DRM_ERR(EBUSY)) {
DRM_ERROR("%s: EBUSY -- rec: %d emitted: %d\n",
@ -316,15 +345,15 @@ int i915_irq_wait(DRM_IOCTL_ARGS)
static void i915_enable_interrupt (drm_device_t *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u16 flag;
flag = 0;
dev_priv->irq_enable_reg = USER_INT_FLAG;
if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A)
flag |= VSYNC_PIPEA_FLAG;
dev_priv->irq_enable_reg |= VSYNC_PIPEA_FLAG;
if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)
flag |= VSYNC_PIPEB_FLAG;
dev_priv->irq_enable_reg |= VSYNC_PIPEB_FLAG;
I915_WRITE16(I915REG_INT_ENABLE_R, USER_INT_FLAG | flag);
I915_WRITE16(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg);
dev_priv->irq_enabled = 1;
}
/* Set the vblank monitor pipe
@ -497,7 +526,7 @@ void i915_driver_irq_preinstall(drm_device_t * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
I915_WRITE16(I915REG_HWSTAM, 0xfffe);
I915_WRITE16(I915REG_HWSTAM, 0xeffe);
I915_WRITE16(I915REG_INT_MASK_R, 0x0);
I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
}
@ -510,10 +539,26 @@ void i915_driver_irq_postinstall(drm_device_t * dev)
INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
dev_priv->swaps_pending = 0;
if (!dev_priv->vblank_pipe)
dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;
dev_priv->swaps_lock = SPIN_LOCK_UNLOCKED;
INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
dev_priv->swaps_pending = 0;
dev_priv->user_irq_lock = SPIN_LOCK_UNLOCKED;
dev_priv->user_irq_refcount = 0;
if (!dev_priv->vblank_pipe)
dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;
i915_enable_interrupt(dev);
DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
/*
* Initialize the hardware status page IRQ location.
*/
I915_WRITE(I915REG_INSTPM, ( 1 << 5) | ( 1 << 21));
}
void i915_driver_irq_uninstall(drm_device_t * dev)
@ -523,6 +568,7 @@ void i915_driver_irq_uninstall(drm_device_t * dev)
if (!dev_priv)
return;
dev_priv->irq_enabled = 0;
I915_WRITE16(I915REG_HWSTAM, 0xffff);
I915_WRITE16(I915REG_INT_MASK_R, 0xffff);
I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);

View File

@ -725,6 +725,7 @@ static int savage_do_init_bci(drm_device_t *dev, drm_savage_init_t *init)
dev_priv->status = NULL;
}
if (dev_priv->dma_type == SAVAGE_DMA_AGP && init->buffers_offset) {
dev->agp_buffer_token = init->buffers_offset;
dev->agp_buffer_map = drm_core_findmap(dev,
init->buffers_offset);
if (!dev->agp_buffer_map) {

View File

@ -22,7 +22,6 @@
* DEALINGS IN THE SOFTWARE.
*/
#include <linux/config.h>
#include "drmP.h"
#include "via_drm.h"
#include "via_drv.h"