commit 14e3f2711e

@@ -170,7 +170,7 @@ MALLOC_DECLARE(M_DRM);
 #define wait_queue_head_t atomic_t
 #define DRM_WAKEUP(w) wakeup((void *)w)
 #define DRM_WAKEUP_INT(w) wakeup(w)
-#define DRM_INIT_WAITQUEUE(queue) do {} while (0)
+#define DRM_INIT_WAITQUEUE(queue) do {(void)(queue);} while (0)
 
 #if defined(__FreeBSD__) && __FreeBSD_version < 502109
 #define bus_alloc_resource_any(dev, type, rid, flags) \
@@ -270,6 +270,7 @@ extern struct cfdriver drm_cd;
 #endif
 
 typedef unsigned long dma_addr_t;
+typedef u_int64_t u64;
 typedef u_int32_t u32;
 typedef u_int16_t u16;
 typedef u_int8_t u8;
@@ -713,6 +714,9 @@ struct drm_device {
     struct drm_driver_info driver;
     drm_pci_id_list_t *id_entry;   /* PCI ID, name, and chipset private */
 
+    u_int16_t pci_device;          /* PCI device id */
+    u_int16_t pci_vendor;          /* PCI vendor id */
+
     char *unique;                  /* Unique identifier: e.g., busid */
     int unique_len;                /* Length of unique field */
 #ifdef __FreeBSD__
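
The DRM_INIT_WAITQUEUE change in the first hunk above is a warning fix: the FreeBSD macro expands to nothing, so a wait-queue variable that is only ever passed to it looks unused to the compiler. Evaluating the argument with (void)(queue) keeps the macro a no-op while still "using" the variable. A minimal stand-alone sketch of the difference (dummy_queue is a hypothetical stand-in for the real atomic_t field):

    #include <stdio.h>

    /* Old form: the argument vanishes, so a variable passed only here
     * can trigger unused-variable warnings. */
    #define DRM_INIT_WAITQUEUE_OLD(queue) do {} while (0)

    /* New form: evaluate and discard the argument; still generates no code. */
    #define DRM_INIT_WAITQUEUE_NEW(queue) do { (void)(queue); } while (0)

    int main(void)
    {
        int dummy_queue = 0;                 /* stand-in for the atomic_t */
        DRM_INIT_WAITQUEUE_NEW(dummy_queue); /* no-op, but the variable is used */
        printf("queue: %d\n", dummy_queue);
        return 0;
    }
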
@@ -1,51 +0,0 @@
-/* drm_drawable.h -- IOCTLs for drawables -*- linux-c -*-
- * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
- */
-/*-
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- *    Rickard E. (Rik) Faith <faith@valinux.com>
- *    Gareth Hughes <gareth@valinux.com>
- *
- */
-
-#include "drmP.h"
-
-int drm_adddraw(DRM_IOCTL_ARGS)
-{
-    drm_draw_t draw;
-
-    draw.handle = 0;    /* NOOP */
-    DRM_DEBUG("%d\n", draw.handle);
-
-    DRM_COPY_TO_USER_IOCTL( (drm_draw_t *)data, draw, sizeof(draw) );
-
-    return 0;
-}
-
-int drm_rmdraw(DRM_IOCTL_ARGS)
-{
-    return 0;    /* NOOP */
-}

@@ -0,0 +1 @@
+../shared-core/drm_drawable.c
@@ -516,6 +516,9 @@ static int drm_load(drm_device_t *dev)
     dev->pci_slot = pci_get_slot(dev->device);
     dev->pci_func = pci_get_function(dev->device);
 
+    dev->pci_vendor = pci_get_vendor(dev->device);
+    dev->pci_device = pci_get_device(dev->device);
+
     TAILQ_INIT(&dev->maplist);
 
     drm_mem_init();
@@ -1,4 +1,4 @@
-/* i915_drv.c -- ATI Radeon driver -*- linux-c -*-
+/* i915_drv.c -- Intel i915 driver -*- linux-c -*-
  * Created: Wed Feb 14 17:10:04 2001 by gareth@valinux.com
  */
 /*-
@@ -19,7 +19,7 @@
 # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 
 AC_PREREQ(2.57)
-AC_INIT([libdrm], 2.0.2, [dri-devel@lists.sourceforge.net], libdrm)
+AC_INIT([libdrm], 2.3.0, [dri-devel@lists.sourceforge.net], libdrm)
 AC_CONFIG_SRCDIR([Makefile.am])
 AM_INIT_AUTOMAKE([dist-bzip2])
 
@@ -30,6 +30,7 @@ AC_PROG_LIBTOOL
 AC_PROG_CC
 
 AC_HEADER_STDC
+AC_SYS_LARGEFILE
 
 pkgconfigdir=${libdir}/pkgconfig
 AC_SUBST(pkgconfigdir)
@@ -20,12 +20,12 @@
 
 libdrm_la_LTLIBRARIES = libdrm.la
 libdrm_ladir = $(libdir)
-libdrm_la_LDFLAGS = -version-number 2:0:0 -no-undefined
+libdrm_la_LDFLAGS = -version-number 2:3:0 -no-undefined
 
 AM_CFLAGS = -I$(top_srcdir)/shared-core
 libdrm_la_SOURCES = xf86drm.c xf86drmHash.c xf86drmRandom.c xf86drmSL.c
 
 libdrmincludedir = ${includedir}
-libdrminclude_HEADERS = xf86drm.h
+libdrminclude_HEADERS = xf86drm.h xf86mm.h
 
 EXTRA_DIST = ChangeLog TODO
libdrm/xf86drm.c: diff suppressed because it is too large (1196 changed lines)
@@ -36,6 +36,8 @@
 #ifndef _XF86DRM_H_
 #define _XF86DRM_H_
 
+#include <stdarg.h>
+#include <sys/types.h>
 #include <drm.h>
 
 /* Defaults, if nothing set in xf86config */
@@ -61,6 +63,21 @@
 typedef unsigned int drmSize, *drmSizePtr;   /**< For mapped regions */
 typedef void *drmAddress, **drmAddressPtr;   /**< For mapped regions */
 
+typedef struct _drmServerInfo {
+    int (*debug_print)(const char *format, va_list ap);
+    int (*load_module)(const char *name);
+    void (*get_perms)(gid_t *, mode_t *);
+} drmServerInfo, *drmServerInfoPtr;
+
+typedef struct drmHashEntry {
+    int fd;
+    void (*f)(int, void *, void *);
+    void *tagTable;
+} drmHashEntry;
+
+extern void *drmGetHashTable(void);
+extern drmHashEntry *drmGetEntry(int fd);
+
 /**
  * Driver version information.
  *
@@ -149,7 +166,8 @@ typedef enum {
     DRM_PAGE_ALIGN = 0x01,
     DRM_AGP_BUFFER = 0x02,
     DRM_SG_BUFFER  = 0x04,
-    DRM_FB_BUFFER  = 0x08
+    DRM_FB_BUFFER  = 0x08,
+    DRM_PCI_BUFFER_RO = 0x10
 } drmBufDescFlags;
 
 typedef enum {
@@ -252,6 +270,8 @@ typedef struct _drmTextureRegion {
 typedef enum {
     DRM_VBLANK_ABSOLUTE = 0x0,          /**< Wait for specific vblank sequence number */
     DRM_VBLANK_RELATIVE = 0x1,          /**< Wait for given number of vblanks */
+    DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */
+    DRM_VBLANK_SECONDARY = 0x20000000,  /**< Secondary display controller */
     DRM_VBLANK_SIGNAL   = 0x40000000    /* Send signal instead of blocking */
 } drmVBlankSeqType;
 
@@ -280,7 +300,6 @@ typedef struct _drmSetVersion {
     int drm_dd_minor;
 } drmSetVersion, *drmSetVersionPtr;
 
-
 #define __drm_dummy_lock(lock) (*(__volatile__ unsigned int *)lock)
 
 #define DRM_LOCK_HELD 0x80000000U   /**< Hardware lock is held */
@@ -484,6 +503,8 @@ do { register unsigned int __old __asm("o0"); \
     }                                       \
 } while(0)
 
+
+
 /* General user-level programmer's API: unprivileged */
 extern int drmAvailable(void);
 extern int drmOpen(const char *name, const char *busid);
@@ -544,6 +565,9 @@ extern int drmSwitchToContext(int fd, drm_context_t context);
 extern int drmDestroyContext(int fd, drm_context_t handle);
 extern int drmCreateDrawable(int fd, drm_drawable_t * handle);
 extern int drmDestroyDrawable(int fd, drm_drawable_t handle);
+extern int drmUpdateDrawableInfo(int fd, drm_drawable_t handle,
+                                 drm_drawable_info_type_t type,
+                                 unsigned int num, void *data);
 extern int drmCtlInstHandler(int fd, int irq);
 extern int drmCtlUninstHandler(int fd);
 
@@ -597,6 +621,7 @@ extern int drmScatterGatherFree(int fd, drm_handle_t handle);
 extern int drmWaitVBlank(int fd, drmVBlankPtr vbl);
 
 /* Support routines */
+extern void drmSetServerInfo(drmServerInfoPtr info);
 extern int drmError(int err, const char *label);
 extern void *drmMalloc(int size);
 extern void drmFree(void *pt);
@@ -630,4 +655,9 @@ extern int drmSLLookupNeighbors(void *l, unsigned long key,
                                 unsigned long *prev_key, void **prev_value,
                                 unsigned long *next_key, void **next_value);
 
+extern int drmOpenOnce(void *unused, const char *BusID, int *newlyopened);
+extern void drmCloseOnce(int fd);
+
+#include "xf86mm.h"
+
 #endif
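
The drmServerInfo addition above gives a privileged caller (typically the X server) a way to hand libdrm callbacks for debug output, module loading, and device-node permissions via the new drmSetServerInfo(). A sketch of what implementations of the three hooks could look like; the callback bodies and values are illustrative assumptions, only the struct layout and drmSetServerInfo() come from the diff:

    #include <stdarg.h>
    #include <stdio.h>
    #include <sys/types.h>
    #include "xf86drm.h"

    static int my_debug_print(const char *format, va_list ap)
    {
        return vfprintf(stderr, format, ap);  /* route DRM messages to stderr */
    }

    static int my_load_module(const char *name)
    {
        fprintf(stderr, "drm asked to load module %s\n", name);
        return 0;                             /* pretend the load succeeded */
    }

    static void my_get_perms(gid_t *group, mode_t *mode)
    {
        *group = 0;                           /* illustrative: root group ... */
        *mode = 0660;                         /* ... rw for owner and group */
    }

    static drmServerInfo my_info = {
        my_debug_print,
        my_load_module,
        my_get_perms,
    };

    /* Called once at server startup: drmSetServerInfo(&my_info); */
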
@@ -0,0 +1,209 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+
+#ifndef _XF86MM_H_
+#define _XF86MM_H_
+#include <stddef.h>
+#include "drm.h"
+
+/*
+ * Note on multithreaded applications using this interface.
+ * Libdrm is not threadsafe, so common buffer, TTM, and fence objects need to
+ * be protected using an external mutex.
+ *
+ * Note: Don't protect the following functions, as it may lead to deadlocks:
+ * drmBOUnmap(), drmFenceBuffers().
+ * The kernel is synchronizing and refcounting buffer maps.
+ * User space only needs to refcount object usage within the same application.
+ */
+
+
+/*
+ * List macros heavily inspired by the Linux kernel
+ * list handling. No list looping yet.
+ */
+
+typedef struct _drmMMListHead
+{
+    struct _drmMMListHead *prev;
+    struct _drmMMListHead *next;
+} drmMMListHead;
+
+#define DRMINITLISTHEAD(__item)             \
+  do{                                       \
+    (__item)->prev = (__item);              \
+    (__item)->next = (__item);              \
+  } while (0)
+
+#define DRMLISTADD(__item, __list)          \
+  do {                                      \
+    (__item)->prev = (__list);              \
+    (__item)->next = (__list)->next;        \
+    (__list)->next->prev = (__item);        \
+    (__list)->next = (__item);              \
+  } while (0)
+
+#define DRMLISTADDTAIL(__item, __list)      \
+  do {                                      \
+    (__item)->next = (__list);              \
+    (__item)->prev = (__list)->prev;        \
+    (__list)->prev->next = (__item);        \
+    (__list)->prev = (__item);              \
+  } while(0)
+
+#define DRMLISTDEL(__item)                  \
+  do {                                      \
+    (__item)->prev->next = (__item)->next;  \
+    (__item)->next->prev = (__item)->prev;  \
+  } while(0)
+
+#define DRMLISTDELINIT(__item)              \
+  do {                                      \
+    (__item)->prev->next = (__item)->next;  \
+    (__item)->next->prev = (__item)->prev;  \
+    (__item)->next = (__item);              \
+    (__item)->prev = (__item);              \
+  } while(0)
+
+#define DRMLISTENTRY(__type, __item, __field)   \
+    ((__type *)(((char *) (__item)) - offsetof(__type, __field)))
+
+typedef struct _drmFence{
+    unsigned handle;
+    int class;
+    unsigned type;
+    unsigned flags;
+    unsigned signaled;
+    unsigned pad[4];   /* for future expansion */
+} drmFence;
+
+typedef struct _drmBO{
+    drm_bo_type_t type;
+    unsigned handle;
+    drm_u64_t mapHandle;
+    unsigned flags;
+    unsigned mask;
+    unsigned mapFlags;
+    unsigned long size;
+    unsigned long offset;
+    unsigned long start;
+    unsigned replyFlags;
+    unsigned fenceFlags;
+    unsigned pageAlignment;
+    void *virtual;
+    void *mapVirtual;
+    int mapCount;
+    unsigned pad[8];   /* for future expansion */
+} drmBO;
+
+
+typedef struct _drmBONode {
+    drmMMListHead head;
+    drmBO *buf;
+    drm_bo_arg_t bo_arg;
+    unsigned long arg0;
+    unsigned long arg1;
+} drmBONode;
+
+typedef struct _drmBOList {
+    unsigned numTarget;
+    unsigned numCurrent;
+    unsigned numOnList;
+    drmMMListHead list;
+    drmMMListHead free;
+} drmBOList;
+
+/* Fencing */
+
+extern int drmFenceCreate(int fd, unsigned flags, int class,
+                          unsigned type,
+                          drmFence *fence);
+extern int drmFenceDestroy(int fd, const drmFence *fence);
+extern int drmFenceReference(int fd, unsigned handle, drmFence *fence);
+extern int drmFenceUnreference(int fd, const drmFence *fence);
+extern int drmFenceFlush(int fd, drmFence *fence, unsigned flush_type);
+extern int drmFenceSignaled(int fd, drmFence *fence,
+                            unsigned fenceType, int *signaled);
+extern int drmFenceWait(int fd, unsigned flags, drmFence *fence,
+                        unsigned flush_type);
+extern int drmFenceEmit(int fd, unsigned flags, drmFence *fence,
+                        unsigned emit_type);
+extern int drmFenceBuffers(int fd, unsigned flags, drmFence *fence);
+
+
+/*
+ * Buffer object list functions.
+ */
+
+extern void drmBOFreeList(drmBOList *list);
+extern int drmBOResetList(drmBOList *list);
+extern void *drmBOListIterator(drmBOList *list);
+extern void *drmBOListNext(drmBOList *list, void *iterator);
+extern drmBO *drmBOListBuf(void *iterator);
+extern int drmBOCreateList(int numTarget, drmBOList *list);
+
+/*
+ * Buffer object functions.
+ */
+
+extern int drmBOCreate(int fd, unsigned long start, unsigned long size,
+                       unsigned pageAlignment, void *user_buffer,
+                       drm_bo_type_t type, unsigned mask,
+                       unsigned hint, drmBO *buf);
+extern int drmBODestroy(int fd, drmBO *buf);
+extern int drmBOReference(int fd, unsigned handle, drmBO *buf);
+extern int drmBOUnReference(int fd, drmBO *buf);
+extern int drmBOMap(int fd, drmBO *buf, unsigned mapFlags, unsigned mapHint,
+                    void **address);
+extern int drmBOUnmap(int fd, drmBO *buf);
+extern int drmBOValidate(int fd, drmBO *buf, unsigned flags, unsigned mask,
+                         unsigned hint);
+extern int drmBOFence(int fd, drmBO *buf, unsigned flags, unsigned fenceHandle);
+extern int drmBOInfo(int fd, drmBO *buf);
+extern int drmBOBusy(int fd, drmBO *buf, int *busy);
+
+
+extern int drmAddValidateItem(drmBOList *list, drmBO *buf, unsigned flags,
+                              unsigned mask,
+                              int *newItem);
+extern int drmBOValidateList(int fd, drmBOList *list);
+extern int drmBOFenceList(int fd, drmBOList *list, unsigned fenceHandle);
+extern int drmBOWaitIdle(int fd, drmBO *buf, unsigned hint);
+
+/*
+ * Initialization functions.
+ */
+
+extern int drmMMInit(int fd, unsigned long pOffset, unsigned long pSize,
+                     unsigned memType);
+extern int drmMMTakedown(int fd, unsigned memType);
+extern int drmMMLock(int fd, unsigned memType);
+extern int drmMMUnlock(int fd, unsigned memType);
+
+
+#endif
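
The new xf86mm.h API is easiest to read end to end. The sketch below creates a kernel-allocated buffer object, maps it, fills it, and unmaps it using only functions declared in the file above. The drm_bo_type_dc value and the zero flag/mask/hint arguments are assumptions (the real DRM_BO_FLAG_* values live in drm.h), so treat this as the shape of the call sequence rather than working driver code:

    #include <string.h>
    #include "xf86mm.h"

    int upload_data(int fd, const void *src, unsigned long size)
    {
        drmBO buf;
        void *virtual;
        int ret;

        /* Kernel-allocated buffer; flags/mask/hint of 0 are placeholders. */
        ret = drmBOCreate(fd, 0, size, 0, NULL, drm_bo_type_dc, 0, 0, &buf);
        if (ret)
            return ret;

        ret = drmBOMap(fd, &buf, 0, 0, &virtual);  /* mapFlags/mapHint elided */
        if (ret) {
            drmBODestroy(fd, &buf);
            return ret;
        }

        memcpy(virtual, src, size);
        drmBOUnmap(fd, &buf);   /* kernel refcounts the map; see the note above */
        return drmBODestroy(fd, &buf);
    }
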
@@ -75,8 +75,8 @@ DRM_MODULES ?= $(MODULE_LIST)
 
 # These definitions are for handling dependencies in the out of kernel build.
 
-DRMSHARED = drm.h drm_sarea.h
-DRMHEADERS = drmP.h drm_compat.h drm_os_linux.h $(DRMSHARED)
+DRMSHARED = drm.h drm_sarea.h drm_drawable.c
+DRMHEADERS = drmP.h drm_compat.h drm_os_linux.h drm.h drm_sarea.h
 COREHEADERS = drm_core.h drm_sman.h drm_hashtab.h
 
 TDFXHEADERS = tdfx_drv.h $(DRMHEADERS)
@@ -12,13 +12,15 @@ drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
     drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
     drm_sysfs.o drm_pci.o drm_agpsupport.o drm_scatter.o \
     drm_memory_debug.o ati_pcigart.o drm_sman.o \
-    drm_hashtab.o drm_mm.o
+    drm_hashtab.o drm_mm.o drm_object.o drm_compat.o \
+    drm_fence.o drm_ttm.o drm_bo.o
 tdfx-objs   := tdfx_drv.o
 r128-objs   := r128_drv.o r128_cce.o r128_state.o r128_irq.o
 mga-objs    := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
 i810-objs   := i810_drv.o i810_dma.o
 i830-objs   := i830_drv.o i830_dma.o i830_irq.o
-i915-objs   := i915_drv.o i915_dma.o i915_irq.o i915_mem.o
+i915-objs   := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \
+    i915_buffer.o
 radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o
 sis-objs    := sis_drv.o sis_mm.o
 ffb-objs    := ffb_drv.o ffb_context.o
@@ -41,7 +41,6 @@
  * can build the DRM (part of PI DRI). 4/21/2000 S + B */
 #include <asm/current.h>
 #endif /* __alpha__ */
-#include <linux/config.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/miscdevice.h>
@@ -84,6 +83,7 @@
 #include <linux/poll.h>
 #include <asm/pgalloc.h>
 #include "drm.h"
+#include <linux/slab.h>
 
 #define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
 #define __OS_HAS_MTRR (defined(CONFIG_MTRR))
@@ -110,6 +110,7 @@
 #define DRIVER_IRQ_VBL    0x100
 #define DRIVER_DMA_QUEUE  0x200
 #define DRIVER_FB_DMA     0x400
+#define DRIVER_IRQ_VBL2   0x800
 
 
 /*@}*/
@@ -154,9 +155,18 @@
 #define DRM_MEM_CTXLIST 21
 #define DRM_MEM_MM      22
 #define DRM_MEM_HASHTAB 23
+#define DRM_MEM_OBJECTS 24
+#define DRM_MEM_FENCE   25
+#define DRM_MEM_TTM     26
+#define DRM_MEM_BUFOBJ  27
 
 #define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
 #define DRM_MAP_HASH_OFFSET 0x10000000
 #define DRM_MAP_HASH_ORDER 12
+#define DRM_OBJECT_HASH_ORDER 12
+#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
+#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
+#define DRM_MM_INIT_MAX_PAGES 256
 
 /*@}*/
 
@@ -298,8 +308,8 @@ typedef struct drm_devstate {
 } drm_devstate_t;
 
 typedef struct drm_magic_entry {
-    drm_hash_item_t hash_item;
-    struct list_head head;
+    drm_hash_item_t hash_item;
+    struct list_head head;
     struct drm_file *priv;
 } drm_magic_entry_t;
 
@@ -387,6 +397,19 @@ typedef struct drm_buf_entry {
     drm_freelist_t freelist;
 } drm_buf_entry_t;
 
+/*
+ * This should be small enough to allow the use of kmalloc for hash tables
+ * instead of vmalloc.
+ */
+
+#define DRM_FILE_HASH_ORDER 8
+typedef enum{
+    _DRM_REF_USE=0,
+    _DRM_REF_TYPE1,
+    _DRM_NO_REF_TYPES
+} drm_ref_t;
+
+
 /** File private data */
 typedef struct drm_file {
     int authenticated;
@@ -401,6 +424,18 @@ typedef struct drm_file {
     struct drm_head *head;
     int remove_auth_on_close;
     unsigned long lock_count;
+
+    /*
+     * The user object hash table is global and resides in the
+     * drm_device structure. We protect the lists and hash tables with the
+     * device struct_mutex. A bit coarse-grained but probably the best
+     * option.
+     */
+
+    struct list_head refd_objects;
+    struct list_head user_objects;
+
+    drm_open_hash_t refd_object_hash[_DRM_NO_REF_TYPES];
     void *driver_priv;
 } drm_file_t;
 
@@ -448,7 +483,8 @@ typedef struct drm_device_dma {
     enum {
         _DRM_DMA_USE_AGP = 0x01,
        _DRM_DMA_USE_SG = 0x02,
-       _DRM_DMA_USE_FB = 0x04
+       _DRM_DMA_USE_FB = 0x04,
+       _DRM_DMA_USE_PCI_RO = 0x08
     } flags;
 
 } drm_device_dma_t;
 
@@ -501,14 +537,35 @@ typedef struct drm_sigdata {
     drm_hw_lock_t *lock;
 } drm_sigdata_t;
 
+
+/*
+ * Generic memory manager structs
+ */
+
+typedef struct drm_mm_node {
+    struct list_head fl_entry;
+    struct list_head ml_entry;
+    int free;
+    unsigned long start;
+    unsigned long size;
+    struct drm_mm *mm;
+    void *private;
+} drm_mm_node_t;
+
+typedef struct drm_mm {
+    drm_mm_node_t root_node;
+} drm_mm_t;
+
+
 /**
  * Mappings list
  */
 typedef struct drm_map_list {
     struct list_head head;      /**< list head */
-    drm_hash_item_t hash;
+    drm_hash_item_t hash;
     drm_map_t *map;             /**< mapping */
-    unsigned int user_token;
+    drm_u64_t user_token;
+    drm_mm_node_t *file_offset_node;
 } drm_map_list_t;
 
 typedef drm_map_t drm_local_map_t;
 
@@ -541,22 +598,77 @@ typedef struct ati_pcigart_info {
     drm_local_map_t mapping;
 } drm_ati_pcigart_info;
 
-/*
- * Generic memory manager structs
+/*
+ * User space objects and their references.
  */
 
-typedef struct drm_mm_node {
-    struct list_head fl_entry;
-    struct list_head ml_entry;
-    int free;
-    unsigned long start;
-    unsigned long size;
-    void *private;
-} drm_mm_node_t;
+#define drm_user_object_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
 
-typedef struct drm_mm {
-    drm_mm_node_t root_node;
-} drm_mm_t;
+typedef enum {
+    drm_fence_type,
+    drm_buffer_type,
+    drm_ttm_type
+
+    /*
+     * Add other user space object types here.
+     */
+
+} drm_object_type_t;
+
+
+/*
+ * A user object is a structure that helps the drm give out user handles
+ * to kernel internal objects and to keep track of these objects so that
+ * they can be destroyed, for example when the user space process exits.
+ * Designed to be accessible using a user space 32-bit handle.
+ */
+
+typedef struct drm_user_object{
+    drm_hash_item_t hash;
+    struct list_head list;
+    drm_object_type_t type;
+    atomic_t refcount;
+    int shareable;
+    drm_file_t *owner;
+    void (*ref_struct_locked) (drm_file_t *priv, struct drm_user_object *obj,
+                               drm_ref_t ref_action);
+    void (*unref)(drm_file_t *priv, struct drm_user_object *obj,
+                  drm_ref_t unref_action);
+    void (*remove)(drm_file_t *priv, struct drm_user_object *obj);
+} drm_user_object_t;
+
+/*
+ * A ref object is a structure which is used to
+ * keep track of references to user objects and to keep track of these
+ * references so that they can be destroyed for example when the user space
+ * process exits. Designed to be accessible using a pointer to the _user_ object.
+ */
+
+typedef struct drm_ref_object {
+    drm_hash_item_t hash;
+    struct list_head list;
+    atomic_t refcount;
+    drm_ref_t unref_action;
+} drm_ref_object_t;
+
+#include "drm_ttm.h"
+
+/*
+ * buffer object driver
+ */
+
+typedef struct drm_bo_driver{
+    int cached[DRM_BO_MEM_TYPES];
+    drm_local_map_t *iomap[DRM_BO_MEM_TYPES];
+    drm_ttm_backend_t *(*create_ttm_backend_entry)
+        (struct drm_device *dev);
+    int (*fence_type)(uint32_t flags, uint32_t *class, uint32_t *type);
+    int (*invalidate_caches)(struct drm_device *dev, uint32_t flags);
+} drm_bo_driver_t;
+
 
 /**
@@ -564,6 +676,7 @@ typedef struct drm_mm {
  * a family of cards. There will one drm_device for each card present
  * in this family
  */
 
+struct drm_device;
 struct drm_driver {
     int (*load) (struct drm_device *, unsigned long flags);
@@ -582,6 +695,7 @@ struct drm_driver {
                 int new);
     void (*kernel_context_switch_unlock) (struct drm_device * dev);
     int (*vblank_wait) (struct drm_device * dev, unsigned int *sequence);
+    int (*vblank_wait2) (struct drm_device * dev, unsigned int *sequence);
     int (*dri_library_name) (struct drm_device * dev, char * buf);
 
     /**
@@ -609,6 +723,9 @@ struct drm_driver {
     unsigned long (*get_reg_ofs) (struct drm_device * dev);
     void (*set_version) (struct drm_device * dev, drm_set_version_t * sv);
 
+    struct drm_fence_driver *fence_driver;
+    struct drm_bo_driver *bo_driver;
+
     int major;
     int minor;
     int patchlevel;
@@ -638,6 +755,71 @@ typedef struct drm_head {
     struct class_device *dev_class;
 } drm_head_t;
 
+typedef struct drm_cache {
+
+    /*
+     * Memory caches
+     */
+
+    kmem_cache_t *mm;
+    kmem_cache_t *fence_object;
+} drm_cache_t;
+
+
+
+typedef struct drm_fence_driver{
+    int no_types;
+    uint32_t wrap_diff;
+    uint32_t flush_diff;
+    uint32_t sequence_mask;
+    int lazy_capable;
+    int (*emit) (struct drm_device *dev, uint32_t flags,
+                 uint32_t *breadcrumb,
+                 uint32_t *native_type);
+    void (*poke_flush) (struct drm_device *dev);
+} drm_fence_driver_t;
+
+#define _DRM_FENCE_TYPE_EXE 0x00
+
+typedef struct drm_fence_manager{
+    int initialized;
+    rwlock_t lock;
+
+    /*
+     * The list below should be maintained in sequence order and
+     * access is protected by the above spinlock.
+     */
+
+    struct list_head ring;
+    struct list_head *fence_types[32];
+    volatile uint32_t pending_flush;
+    wait_queue_head_t fence_queue;
+    int pending_exe_flush;
+    uint32_t last_exe_flush;
+    uint32_t exe_flush_sequence;
+    atomic_t count;
+} drm_fence_manager_t;
+
+typedef struct drm_buffer_manager{
+    struct mutex init_mutex;
+    int nice_mode;
+    int initialized;
+    drm_file_t *last_to_validate;
+    int has_type[DRM_BO_MEM_TYPES];
+    int use_type[DRM_BO_MEM_TYPES];
+    drm_mm_t manager[DRM_BO_MEM_TYPES];
+    struct list_head lru[DRM_BO_MEM_TYPES];
+    struct list_head pinned[DRM_BO_MEM_TYPES];
+    struct list_head unfenced;
+    struct list_head ddestroy;
+    struct work_struct wq;
+    uint32_t fence_type;
+    unsigned long cur_pages;
+    atomic_t count;
+} drm_buffer_manager_t;
+
+
+
 /**
  * DRM device structure. This structure represent a complete card that
  * may contain multiple heads.
@@ -676,8 +858,8 @@ typedef struct drm_device {
     /*@{ */
     drm_file_t *file_first;     /**< file list head */
     drm_file_t *file_last;      /**< file list tail */
-    drm_open_hash_t magiclist;
-    struct list_head magicfree;
+    drm_open_hash_t magiclist;
+    struct list_head magicfree;
     /*@} */
 
     /** \name Memory management */
@@ -685,6 +867,10 @@ typedef struct drm_device {
     drm_map_list_t *maplist;    /**< Linked list of regions */
     int map_count;              /**< Number of mappable regions */
     drm_open_hash_t map_hash;   /**< User token hash table for maps */
+    drm_mm_t offset_manager;    /**< User token manager */
+    drm_open_hash_t object_hash;        /**< User token hash table for objects */
+    struct address_space *dev_mapping;  /**< For unmap_mapping_range() */
+    struct page *ttm_dummy_page;
 
     /** \name Context handle management */
     /*@{ */
@@ -732,9 +918,13 @@ typedef struct drm_device {
 
     wait_queue_head_t vbl_queue;        /**< VBLANK wait queue */
     atomic_t vbl_received;
+    atomic_t vbl_received2;     /**< number of secondary VBLANK interrupts */
     spinlock_t vbl_lock;
     drm_vbl_sig_t vbl_sigs;     /**< signal list to send on VBLANK */
+    drm_vbl_sig_t vbl_sigs2;    /**< signals to send on secondary VBLANK */
     unsigned int vbl_pending;
+    spinlock_t tasklet_lock;    /**< For drm_locked_tasklet */
+    void (*locked_tasklet_func)(struct drm_device *dev);
 
     /*@} */
     cycles_t ctx_start;
@@ -747,10 +937,8 @@ typedef struct drm_device {
     drm_agp_head_t *agp;        /**< AGP data */
 
     struct pci_dev *pdev;       /**< PCI device structure */
     int pci_domain;             /**< PCI bus domain number */
     int pci_bus;                /**< PCI bus number */
     int pci_slot;               /**< PCI slot number */
     int pci_func;               /**< PCI function number */
-    int pci_vendor;             /**< PCI vendor id */
-    int pci_device;             /**< PCI device id */
 #ifdef __alpha__
 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3)
     struct pci_controler *hose;
@@ -768,14 +956,101 @@ typedef struct drm_device {
     drm_local_map_t *agp_buffer_map;
     unsigned int agp_buffer_token;
     drm_head_t primary;         /**< primary screen head */
 
+    drm_fence_manager_t fm;
+    drm_buffer_manager_t bm;
+
+    /** \name Drawable information */
+    /*@{ */
+    spinlock_t drw_lock;
+    unsigned int drw_bitfield_length;
+    u32 *drw_bitfield;
+    unsigned int drw_info_length;
+    drm_drawable_info_t **drw_info;
+    /*@} */
 } drm_device_t;
 
+#if __OS_HAS_AGP
+typedef struct drm_agp_ttm_priv {
+    DRM_AGP_MEM *mem;
+    struct agp_bridge_data *bridge;
+    unsigned alloc_type;
+    unsigned cached_type;
+    unsigned uncached_type;
+    int populated;
+} drm_agp_ttm_priv;
+#endif
+
+typedef struct drm_fence_object{
+    drm_user_object_t base;
+    atomic_t usage;
+
+    /*
+     * The below three fields are protected by the fence manager spinlock.
+     */
+
+    struct list_head ring;
+    int class;
+    uint32_t native_type;
+    uint32_t type;
+    uint32_t signaled;
+    uint32_t sequence;
+    uint32_t flush_mask;
+    uint32_t submitted_flush;
+} drm_fence_object_t;
+
+
+typedef struct drm_buffer_object{
+    drm_device_t *dev;
+    drm_user_object_t base;
+
+    /*
+     * If there is a possibility that the usage variable is zero,
+     * then dev->struct_mutext should be locked before incrementing it.
+     */
+
+    atomic_t usage;
+    drm_ttm_object_t *ttm_object;
+    drm_ttm_t *ttm;
+    unsigned long num_pages;
+    unsigned long buffer_start;
+    drm_bo_type_t type;
+    unsigned long offset;
+    uint32_t page_alignment;
+    atomic_t mapped;
+    uint32_t flags;
+    uint32_t mask;
+
+    drm_mm_node_t *node_ttm;    /* MM node for on-card RAM */
+    drm_mm_node_t *node_card;   /* MM node for ttm */
+    struct list_head lru_ttm;   /* LRU for the ttm pages */
+    struct list_head lru_card;  /* For memory types with on-card RAM */
+    struct list_head ddestroy;
+
+    uint32_t fence_type;
+    uint32_t fence_class;
+    drm_fence_object_t *fence;
+    uint32_t priv_flags;
+    wait_queue_head_t event_queue;
+    struct mutex mutex;
+} drm_buffer_object_t;
+
+#define _DRM_BO_FLAG_UNFENCED 0x00000001
+#define _DRM_BO_FLAG_EVICTED  0x00000002
+
+
 static __inline__ int drm_core_check_feature(struct drm_device *dev,
                                              int feature)
 {
     return ((dev->driver->driver_features & feature) ? 1 : 0);
 }
 
 #ifdef __alpha__
 #define drm_get_pci_domain(dev) dev->hose->bus->number
 #else
 #define drm_get_pci_domain(dev) 0
 #endif
 
 #if __OS_HAS_AGP
 static inline int drm_core_has_AGP(struct drm_device *dev)
 {
@@ -806,9 +1081,22 @@ static inline int drm_mtrr_del(int handle, unsigned long offset,
 }
 
 #else
+static inline int drm_mtrr_add(unsigned long offset, unsigned long size,
+                               unsigned int flags)
+{
+    return -ENODEV;
+}
+
+static inline int drm_mtrr_del(int handle, unsigned long offset,
+                               unsigned long size, unsigned int flags)
+{
+    return -ENODEV;
+}
+
 #define drm_core_has_MTRR(dev) (0)
 #endif
 
 
 /******************************************************************/
 /** \name Internal function definitions */
 /*@{*/
@@ -837,6 +1125,7 @@ unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
 extern int drm_mmap(struct file *filp, struct vm_area_struct *vma);
 extern unsigned long drm_core_get_map_ofs(drm_map_t * map);
 extern unsigned long drm_core_get_reg_ofs(struct drm_device *dev);
+extern pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma);
 
 /* Memory management support (drm_memory.h) */
 #include "drm_memory.h"
@@ -852,6 +1141,14 @@ extern int drm_free_agp(DRM_AGP_MEM * handle, int pages);
 extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
 extern int drm_unbind_agp(DRM_AGP_MEM * handle);
 
+extern void drm_free_memctl(size_t size);
+extern int drm_alloc_memctl(size_t size);
+extern void drm_query_memctl(drm_u64_t *cur_used,
+                             drm_u64_t *low_threshold,
+                             drm_u64_t *high_threshold);
+extern void drm_init_memctl(size_t low_threshold,
+                            size_t high_threshold);
+
 /* Misc. IOCTL support (drm_ioctl.h) */
 extern int drm_irq_by_busid(struct inode *inode, struct file *filp,
                             unsigned int cmd, unsigned long arg);
@@ -900,6 +1197,10 @@ extern int drm_adddraw(struct inode *inode, struct file *filp,
                        unsigned int cmd, unsigned long arg);
 extern int drm_rmdraw(struct inode *inode, struct file *filp,
                       unsigned int cmd, unsigned long arg);
+extern int drm_update_drawable_info(struct inode *inode, struct file *filp,
+                                    unsigned int cmd, unsigned long arg);
+extern drm_drawable_info_t *drm_get_drawable_info(drm_device_t *dev,
+                                                  drm_drawable_t id);
 
 /* Authentication IOCTL support (drm_auth.h) */
 extern int drm_getmagic(struct inode *inode, struct file *filp,
@@ -915,6 +1216,13 @@ extern int drm_unlock(struct inode *inode, struct file *filp,
 extern int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context);
 extern int drm_lock_free(drm_device_t * dev,
                          __volatile__ unsigned int *lock, unsigned int context);
+/*
+ * These are exported to drivers so that they can implement fencing using
+ * DMA quiescent + idle. DMA quiescent usually requires the hardware lock.
+ */
+
+extern int drm_i_have_hw_lock(struct file *filp);
+extern int drm_kernel_take_hw_lock(struct file *filp);
 
 /* Buffer management support (drm_bufs.h) */
 extern int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request);
@@ -964,6 +1272,7 @@ extern int drm_wait_vblank(struct inode *inode, struct file *filp,
                            unsigned int cmd, unsigned long arg);
 extern int drm_vblank_wait(drm_device_t * dev, unsigned int *vbl_seq);
 extern void drm_vbl_send_signals(drm_device_t * dev);
+extern void drm_locked_tasklet(drm_device_t *dev, void(*func)(drm_device_t*));
 
 /* AGP/GART support (drm_agpsupport.h) */
 extern drm_agp_head_t *drm_agp_init(drm_device_t *dev);
@@ -999,7 +1308,8 @@ extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, size
 extern int drm_agp_free_memory(DRM_AGP_MEM * handle);
 extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start);
 extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle);
-
+extern drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev,
+                                           drm_ttm_backend_t *backend);
 /* Stub support (drm_stub.h) */
 extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
                        struct drm_driver *driver);
@@ -1008,6 +1318,7 @@ extern int drm_put_head(drm_head_t * head);
 extern unsigned int drm_debug;  /* 1 to enable debug output */
 extern unsigned int drm_cards_limit;
 extern drm_head_t **drm_heads;
+extern drm_cache_t drm_cache;
 extern struct drm_sysfs_class *drm_class;
 extern struct proc_dir_entry *drm_proc_root;
 
@@ -1051,11 +1362,121 @@ extern void drm_sysfs_device_remove(struct class_device *class_dev);
 
 extern drm_mm_node_t * drm_mm_get_block(drm_mm_node_t * parent, unsigned long size,
                                         unsigned alignment);
-extern void drm_mm_put_block(drm_mm_t *mm, drm_mm_node_t *cur);
+extern void drm_mm_put_block(drm_mm_node_t *cur);
 extern drm_mm_node_t *drm_mm_search_free(const drm_mm_t *mm, unsigned long size,
                                          unsigned alignment, int best_match);
 extern int drm_mm_init(drm_mm_t *mm, unsigned long start, unsigned long size);
 extern void drm_mm_takedown(drm_mm_t *mm);
+extern int drm_mm_clean(drm_mm_t *mm);
+extern unsigned long drm_mm_tail_space(drm_mm_t *mm);
+extern int drm_mm_remove_space_from_tail(drm_mm_t *mm, unsigned long size);
+extern int drm_mm_add_space_to_tail(drm_mm_t *mm, unsigned long size);
+
+static inline drm_mm_t *drm_get_mm(drm_mm_node_t *block)
+{
+    return block->mm;
+}
+
+
+/*
+ * User space object bookkeeping (drm_object.c)
+ */
+
+/*
+ * Must be called with the struct_mutex held.
+ */
+
+extern int drm_add_user_object(drm_file_t *priv, drm_user_object_t *item,
+                               int shareable);
+
+/*
+ * Must be called with the struct_mutex held.
+ */
+
+extern drm_user_object_t *drm_lookup_user_object(drm_file_t *priv, uint32_t key);
+
+/*
+ * Must be called with the struct_mutex held.
+ * If "item" has been obtained by a call to drm_lookup_user_object, you may not
+ * release the struct_mutex before calling drm_remove_ref_object.
+ * This function may temporarily release the struct_mutex.
+ */
+
+extern int drm_remove_user_object(drm_file_t *priv, drm_user_object_t *item);
+
+/*
+ * Must be called with the struct_mutex held. May temporarily release it.
+ */
+
+extern int drm_add_ref_object(drm_file_t *priv, drm_user_object_t *referenced_object,
+                              drm_ref_t ref_action);
+
+/*
+ * Must be called with the struct_mutex held.
+ */
+
+drm_ref_object_t *drm_lookup_ref_object(drm_file_t *priv,
+                                        drm_user_object_t *referenced_object,
+                                        drm_ref_t ref_action);
+/*
+ * Must be called with the struct_mutex held.
+ * If "item" has been obtained by a call to drm_lookup_ref_object, you may not
+ * release the struct_mutex before calling drm_remove_ref_object.
+ * This function may temporarily release the struct_mutex.
+ */
+
+extern void drm_remove_ref_object(drm_file_t *priv, drm_ref_object_t *item);
+extern int drm_user_object_ref(drm_file_t *priv, uint32_t user_token, drm_object_type_t type,
+                               drm_user_object_t **object);
+extern int drm_user_object_unref(drm_file_t *priv, uint32_t user_token, drm_object_type_t type);
+
+
+
+/*
+ * fence objects (drm_fence.c)
+ */
+
+extern void drm_fence_handler(drm_device_t *dev, uint32_t breadcrumb, uint32_t type);
+extern void drm_fence_manager_init(drm_device_t *dev);
+extern void drm_fence_manager_takedown(drm_device_t *dev);
+extern void drm_fence_flush_old(drm_device_t *dev, uint32_t sequence);
+extern int drm_fence_object_flush(drm_device_t * dev,
+                                  volatile drm_fence_object_t * fence,
+                                  uint32_t type);
+extern int drm_fence_object_signaled(volatile drm_fence_object_t * fence,
+                                     uint32_t type);
+extern void drm_fence_usage_deref_locked(drm_device_t * dev,
+                                         drm_fence_object_t * fence);
+extern void drm_fence_usage_deref_unlocked(drm_device_t * dev,
                                           drm_fence_object_t * fence);
+extern int drm_fence_object_wait(drm_device_t * dev,
+                                 volatile drm_fence_object_t * fence,
+                                 int lazy, int ignore_signals, uint32_t mask);
+extern int drm_fence_object_create(drm_device_t *dev, uint32_t type,
+                                   uint32_t fence_flags,
+                                   drm_fence_object_t **c_fence);
+extern int drm_fence_add_user_object(drm_file_t *priv,
+                                     drm_fence_object_t *fence,
+                                     int shareable);
+
+
+
+
+
+extern int drm_fence_ioctl(DRM_IOCTL_ARGS);
+
+/*
+ * buffer objects (drm_bo.c)
+ */
+
+extern int drm_bo_ioctl(DRM_IOCTL_ARGS);
+extern int drm_mm_init_ioctl(DRM_IOCTL_ARGS);
+extern int drm_bo_driver_finish(drm_device_t *dev);
+extern int drm_bo_driver_init(drm_device_t *dev);
+extern int drm_fence_buffer_objects(drm_file_t * priv,
+                                    struct list_head *list,
+                                    uint32_t fence_flags,
+                                    drm_fence_object_t *fence,
+                                    drm_fence_object_t **used_fence);
+
+
 /* Inline replacements for DRM_IOREMAP macros */
@@ -1127,6 +1548,58 @@ extern void *drm_alloc(size_t size, int area);
 extern void drm_free(void *pt, size_t size, int area);
 #endif
 
+/*
+ * Accounting variants of standard calls.
+ */
+
+static inline void *drm_ctl_alloc(size_t size, int area)
+{
+    void *ret;
+    if (drm_alloc_memctl(size))
+        return NULL;
+    ret = drm_alloc(size, area);
+    if (!ret)
+        drm_free_memctl(size);
+    return ret;
+}
+
+static inline void *drm_ctl_calloc(size_t nmemb, size_t size, int area)
+{
+    void *ret;
+
+    if (drm_alloc_memctl(nmemb*size))
+        return NULL;
+    ret = drm_calloc(nmemb, size, area);
+    if (!ret)
+        drm_free_memctl(nmemb*size);
+    return ret;
+}
+
+static inline void drm_ctl_free(void *pt, size_t size, int area)
+{
+    drm_free(pt, size, area);
+    drm_free_memctl(size);
+}
+
+static inline void *drm_ctl_cache_alloc(kmem_cache_t *cache, size_t size,
+                                        int flags)
+{
+    void *ret;
+    if (drm_alloc_memctl(size))
+        return NULL;
+    ret = kmem_cache_alloc(cache, flags);
+    if (!ret)
+        drm_free_memctl(size);
+    return ret;
+}
+
+static inline void drm_ctl_cache_free(kmem_cache_t *cache, size_t size,
+                                      void *obj)
+{
+    kmem_cache_free(cache, obj);
+    drm_free_memctl(size);
+}
+
 /*@}*/
 
 #endif /* __KERNEL__ */
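
The drm_ctl_* inlines added at the end of the header diff above wrap each allocation in an accounting debit (drm_alloc_memctl) and each free in a credit (drm_free_memctl), refusing the allocation up front when the memory-manager budget is exhausted. A user-space re-implementation of the same pattern, as a sketch (the threshold value and names are assumptions; only the debit/undo/credit ordering mirrors the kernel code):

    #include <stdlib.h>

    static size_t used_bytes, budget_bytes = 16 << 20;  /* assumed 16 MiB cap */

    static int account_alloc(size_t size)
    {
        if (used_bytes + size > budget_bytes)
            return -1;              /* over budget: refuse before allocating */
        used_bytes += size;
        return 0;
    }

    static void account_free(size_t size)
    {
        used_bytes -= size;
    }

    /* Mirrors drm_ctl_alloc(): debit first, undo the debit if the real
     * allocation then fails. */
    void *ctl_alloc(size_t size)
    {
        void *ret;
        if (account_alloc(size))
            return NULL;
        ret = malloc(size);
        if (!ret)
            account_free(size);
        return ret;
    }

    void ctl_free(void *pt, size_t size)
    {
        free(pt);
        account_free(size);
    }
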
@@ -552,4 +552,162 @@ int drm_agp_unbind_memory(DRM_AGP_MEM * handle)
     return agp_unbind_memory(handle);
 }
 
+
+
+/*
+ * AGP ttm backend interface.
+ */
+
+#ifndef AGP_USER_TYPES
+#define AGP_USER_TYPES (1 << 16)
+#define AGP_USER_MEMORY (AGP_USER_TYPES)
+#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)
+#endif
+
+static int drm_agp_needs_unbind_cache_adjust(drm_ttm_backend_t *backend) {
+    return ((backend->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1);
+}
+
+
+static int drm_agp_populate(drm_ttm_backend_t *backend, unsigned long num_pages,
+                            struct page **pages) {
+
+    drm_agp_ttm_priv *agp_priv = (drm_agp_ttm_priv *) backend->private;
+    struct page **cur_page, **last_page = pages + num_pages;
+    DRM_AGP_MEM *mem;
+
+    if (drm_alloc_memctl(num_pages * sizeof(void *)))
+        return -1;
+
+    DRM_DEBUG("drm_agp_populate_ttm\n");
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
+    mem = drm_agp_allocate_memory(num_pages, agp_priv->alloc_type);
+#else
+    mem = drm_agp_allocate_memory(agp_priv->bridge, num_pages, agp_priv->alloc_type);
+#endif
+    if (!mem) {
+        drm_free_memctl(num_pages * sizeof(void *));
+        return -1;
+    }
+
+    DRM_DEBUG("Current page count is %ld\n", (long) mem->page_count);
+    mem->page_count = 0;
+    for (cur_page = pages; cur_page < last_page; ++cur_page) {
+        mem->memory[mem->page_count++] = phys_to_gart(page_to_phys(*cur_page));
+    }
+    agp_priv->mem = mem;
+    return 0;
+}
+
+static int drm_agp_bind_ttm(drm_ttm_backend_t *backend,
+                            unsigned long offset,
+                            int cached)
+{
+    drm_agp_ttm_priv *agp_priv = (drm_agp_ttm_priv *) backend->private;
+    DRM_AGP_MEM *mem = agp_priv->mem;
+    int ret;
+
+    DRM_DEBUG("drm_agp_bind_ttm\n");
+    DRM_MASK_VAL(backend->flags, DRM_BE_FLAG_BOUND_CACHED,
+                 (cached) ? DRM_BE_FLAG_BOUND_CACHED : 0);
+    mem->is_flushed = TRUE;
+    mem->type = (cached) ? agp_priv->cached_type : agp_priv->uncached_type;
+    ret = drm_agp_bind_memory(mem, offset);
+    if (ret) {
+        DRM_ERROR("AGP Bind memory failed\n");
+    }
+    return ret;
+}
+
+static int drm_agp_unbind_ttm(drm_ttm_backend_t *backend) {
+
+    drm_agp_ttm_priv *agp_priv = (drm_agp_ttm_priv *) backend->private;
+
+    DRM_DEBUG("drm_agp_unbind_ttm\n");
+    if (agp_priv->mem->is_bound)
+        return drm_agp_unbind_memory(agp_priv->mem);
+    else
+        return 0;
+}
+
+static void drm_agp_clear_ttm(drm_ttm_backend_t *backend) {
+
+    drm_agp_ttm_priv *agp_priv = (drm_agp_ttm_priv *) backend->private;
+    DRM_AGP_MEM *mem = agp_priv->mem;
+
+    DRM_DEBUG("drm_agp_clear_ttm\n");
+    if (mem) {
+        unsigned long num_pages = mem->page_count;
+        backend->unbind(backend);
+        agp_free_memory(mem);
+        drm_free_memctl(num_pages * sizeof(void *));
+    }
+
+    agp_priv->mem = NULL;
+}
+
+static void drm_agp_destroy_ttm(drm_ttm_backend_t *backend) {
+
+    drm_agp_ttm_priv *agp_priv;
+
+    if (backend) {
+        DRM_DEBUG("drm_agp_destroy_ttm\n");
+        agp_priv = (drm_agp_ttm_priv *) backend->private;
+        if (agp_priv) {
+            if (agp_priv->mem) {
+                backend->clear(backend);
+            }
+            drm_ctl_free(agp_priv, sizeof(*agp_priv), DRM_MEM_MAPPINGS);
+            backend->private = NULL;
+        }
+        if (backend->flags & DRM_BE_FLAG_NEEDS_FREE) {
+            drm_ctl_free(backend, sizeof(*backend), DRM_MEM_MAPPINGS);
+        }
+    }
+}
+
+
+drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev,
+                                    drm_ttm_backend_t *backend)
+{
+
+    drm_ttm_backend_t *agp_be;
+    drm_agp_ttm_priv *agp_priv;
+
+    agp_be = (backend != NULL) ? backend :
+        drm_ctl_calloc(1, sizeof(*agp_be), DRM_MEM_MAPPINGS);
+
+    if (!agp_be)
+        return NULL;
+
+    agp_priv = drm_ctl_calloc(1, sizeof(*agp_priv), DRM_MEM_MAPPINGS);
+
+    if (!agp_priv) {
+        drm_ctl_free(agp_be, sizeof(*agp_be), DRM_MEM_MAPPINGS);
+        return NULL;
+    }
+
+    agp_priv->mem = NULL;
+    agp_priv->alloc_type = AGP_USER_MEMORY;
+    agp_priv->cached_type = AGP_USER_CACHED_MEMORY;
+    agp_priv->uncached_type = AGP_USER_MEMORY;
+    agp_priv->bridge = dev->agp->bridge;
+    agp_priv->populated = FALSE;
+    agp_be->aperture_base = dev->agp->agp_info.aper_base;
+    agp_be->private = (void *) agp_priv;
+    agp_be->needs_ub_cache_adjust = drm_agp_needs_unbind_cache_adjust;
+    agp_be->populate = drm_agp_populate;
+    agp_be->clear = drm_agp_clear_ttm;
+    agp_be->bind = drm_agp_bind_ttm;
+    agp_be->unbind = drm_agp_unbind_ttm;
+    agp_be->destroy = drm_agp_destroy_ttm;
+    DRM_MASK_VAL(agp_be->flags, DRM_BE_FLAG_NEEDS_FREE,
+                 (backend == NULL) ? DRM_BE_FLAG_NEEDS_FREE : 0);
+    DRM_MASK_VAL(agp_be->flags, DRM_BE_FLAG_CBA,
+                 (dev->agp->cant_use_aperture) ? DRM_BE_FLAG_CBA : 0);
+    agp_be->drm_map_type = _DRM_AGP;
+    return agp_be;
+}
+EXPORT_SYMBOL(drm_agp_init_ttm);
 
 #endif /* __OS_HAS_AGP */
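
drm_agp_init_ttm() above is a vtable constructor: it fills a drm_ttm_backend_t with populate/bind/unbind/clear/destroy callbacks so the generic TTM code can drive AGP memory without AGP-specific knowledge. A sketch of how a generic caller consumes such a backend; the function and field names follow the vtable assignments above, but the helper itself is hypothetical:

    /* Hypothetical helper: bind a page array into the aperture via any
     * drm_ttm_backend_t, AGP or otherwise. */
    static int bind_pages_through_backend(drm_ttm_backend_t *be,
                                          unsigned long num_pages,
                                          struct page **pages,
                                          unsigned long aper_offset)
    {
        int ret;

        ret = be->populate(be, num_pages, pages); /* e.g. drm_agp_populate() */
        if (ret)
            return ret;

        ret = be->bind(be, aper_offset, 0 /* uncached */);
        if (ret) {
            be->clear(be);  /* unbind and release the backend's memory */
            return ret;
        }
        return 0;
    }
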
(another file diff suppressed because it is too large)
@@ -65,8 +65,8 @@ static drm_map_list_t *drm_find_matching_map(drm_device_t *dev,
     return NULL;
 }
 
-int drm_map_handle(drm_device_t *dev, drm_hash_item_t *hash,
-                   unsigned long user_token, int hashed_handle)
+static int drm_map_handle(drm_device_t *dev, drm_hash_item_t *hash,
+                          unsigned long user_token, int hashed_handle)
 {
     int use_hashed_handle;
 
@@ -78,14 +78,16 @@ int drm_map_handle(drm_device_t *dev, drm_hash_item_t *hash,
 #error Unsupported long size. Neither 64 nor 32 bits.
 #endif
 
-    if (use_hashed_handle) {
-        return drm_ht_just_insert_please(&dev->map_hash, hash,
-                                         user_token, 32 - PAGE_SHIFT - 3,
-                                         PAGE_SHIFT, DRM_MAP_HASH_OFFSET);
-    } else {
-        hash->key = user_token;
-        return drm_ht_insert_item(&dev->map_hash, hash);
+    if (!use_hashed_handle) {
+        int ret;
+        hash->key = user_token >> PAGE_SHIFT;
+        ret = drm_ht_insert_item(&dev->map_hash, hash);
+        if (ret != -EINVAL)
+            return ret;
     }
+    return drm_ht_just_insert_please(&dev->map_hash, hash,
+                                     user_token, 32 - PAGE_SHIFT - 3,
+                                     0, DRM_MAP_HASH_OFFSET >> PAGE_SHIFT);
 }
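
The drm_map_handle() rewrite above exploits the fact that map handles are page aligned: the low PAGE_SHIFT bits of a user token carry no information, so the hash table now stores user_token >> PAGE_SHIFT, and drm_addmap_core() (next hunk) shifts the key back with list->user_token = list->hash.key << PAGE_SHIFT. A worked round trip under the common x86 assumption PAGE_SHIFT = 12:

    #include <assert.h>

    int main(void)
    {
        const unsigned page_shift = 12;          /* 4 KiB pages, x86 assumption */
        unsigned long user_token = 0x10000000UL; /* DRM_MAP_HASH_OFFSET, page aligned */

        unsigned long key = user_token >> page_shift; /* stored key: 0x10000 */
        unsigned long out = key << page_shift;        /* recovered user token */

        assert(out == user_token); /* lossless: the low 12 bits were zero */
        return 0;
    }
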
/**
|
||||
|
@ -290,16 +292,16 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
|
|||
|
||||
user_token = (map->type == _DRM_SHM) ? (unsigned long) map->handle :
|
||||
map->offset;
|
||||
ret = drm_map_handle(dev, &list->hash, user_token, 0);
|
||||
ret = drm_map_handle(dev, &list->hash, user_token, 0);
|
||||
|
||||
if (ret) {
|
||||
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
|
||||
drm_free(list, sizeof(*list), DRM_MEM_MAPS);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
return ret;
|
||||
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
|
||||
drm_free(list, sizeof(*list), DRM_MEM_MAPS);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
return ret;
|
||||
}
|
||||
|
||||
list->user_token = list->hash.key;
|
||||
list->user_token = list->hash.key << PAGE_SHIFT;
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
*maplist = list;
|
||||
|
@@ -384,7 +386,8 @@ int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
 
         if (r_list->map == map) {
             list_del(list);
-            drm_ht_remove_key(&dev->map_hash, r_list->user_token);
+            drm_ht_remove_key(&dev->map_hash,
+                              r_list->user_token >> PAGE_SHIFT);
             drm_free(list, sizeof(*list), DRM_MEM_MAPS);
             break;
         }
@@ -420,6 +423,8 @@ int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
         dmah.size = map->size;
         __drm_pci_free(dev, &dmah);
         break;
+    case _DRM_TTM:
+        BUG_ON(1);
     }
     drm_free(map, sizeof(*map), DRM_MEM_MAPS);
 
@@ -940,6 +945,9 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
     request->count = entry->buf_count;
     request->size = size;
 
+    if (request->flags & _DRM_PCI_BUFFER_RO)
+        dma->flags = _DRM_DMA_USE_PCI_RO;
+
     atomic_dec(&dev->buf_alloc);
     return 0;
 
@@ -1526,9 +1534,10 @@ int drm_freebufs(struct inode *inode, struct file *filp,
  * \param arg pointer to a drm_buf_map structure.
  * \return zero on success or a negative number on failure.
  *
- * Maps the AGP or SG buffer region with do_mmap(), and copies information
- * about each buffer into user space. The PCI buffers are already mapped on the
- * addbufs_pci() call.
+ * Maps the AGP, SG or PCI buffer region with do_mmap(), and copies information
+ * about each buffer into user space. For PCI buffers, it calls do_mmap() with
+ * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
+ * drm_mmap_dma().
 */
 int drm_mapbufs(struct inode *inode, struct file *filp,
                 unsigned int cmd, unsigned long arg)
@@ -0,0 +1,434 @@
+/**************************************************************************
+ *
+ * This kernel module is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ **************************************************************************/
+/*
+ * This code provides access to unexported mm kernel features. It is necessary
+ * to use the new DRM memory manager code with kernels that don't support it
+ * directly.
+ *
+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ *          Linux kernel mm subsystem authors.
+ *          (Most code taken from there).
+ */
+
+#include "drmP.h"
+
+#if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
+
+/*
+ * These have bad performance in the AGP module for the indicated kernel versions.
+ */
+
+int drm_map_page_into_agp(struct page *page)
+{
+    int i;
+    i = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
+    /* Caller's responsibility to call global_flush_tlb() for
+     * performance reasons */
+    return i;
+}
+
+int drm_unmap_page_from_agp(struct page *page)
+{
+    int i;
+    i = change_page_attr(page, 1, PAGE_KERNEL);
+    /* Caller's responsibility to call global_flush_tlb() for
+     * performance reasons */
+    return i;
+}
+#endif
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
+
+/*
+ * The protection map was exported in 2.6.19
+ */
+
+pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+#ifdef MODULE
+    static pgprot_t drm_protection_map[16] = {
+        __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
+        __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
+    };
+
+    return drm_protection_map[vm_flags & 0x0F];
+#else
+    extern pgprot_t protection_map[];
+    return protection_map[vm_flags & 0x0F];
+#endif
+};
+#endif
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
+
+/*
+ * vm code for kernels below 2.6.15, in which a major vm rewrite
+ * occurred. This implements a simple straightforward
+ * version similar to what's going to be
+ * in kernel 2.6.20+?
+ */
+
+static int drm_pte_is_clear(struct vm_area_struct *vma,
+                            unsigned long addr)
+{
+    struct mm_struct *mm = vma->vm_mm;
+    int ret = 1;
+    pte_t *pte;
+    pmd_t *pmd;
+    pud_t *pud;
+    pgd_t *pgd;
+
+
+    spin_lock(&mm->page_table_lock);
+    pgd = pgd_offset(mm, addr);
+    if (pgd_none(*pgd))
+        goto unlock;
+    pud = pud_offset(pgd, addr);
+    if (pud_none(*pud))
+        goto unlock;
+    pmd = pmd_offset(pud, addr);
+    if (pmd_none(*pmd))
+        goto unlock;
+    pte = pte_offset_map(pmd, addr);
+    if (!pte)
+        goto unlock;
+    ret = pte_none(*pte);
+    pte_unmap(pte);
+unlock:
+    spin_unlock(&mm->page_table_lock);
+    return ret;
+}
+
+int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
+                  unsigned long pfn, pgprot_t pgprot)
+{
+    int ret;
+    if (!drm_pte_is_clear(vma, addr))
+        return -EBUSY;
+
+    ret = io_remap_pfn_range(vma, addr, pfn, PAGE_SIZE, pgprot);
+    return ret;
+}
+
+static struct {
+    spinlock_t lock;
+    struct page *dummy_page;
+    atomic_t present;
+} drm_np_retry =
+{SPIN_LOCK_UNLOCKED, NOPAGE_OOM, ATOMIC_INIT(0)};
+
+struct page * get_nopage_retry(void)
+{
+    if (atomic_read(&drm_np_retry.present) == 0) {
+        struct page *page = alloc_page(GFP_KERNEL);
+        if (!page)
+            return NOPAGE_OOM;
+        spin_lock(&drm_np_retry.lock);
+        drm_np_retry.dummy_page = page;
+        atomic_set(&drm_np_retry.present, 1);
+        spin_unlock(&drm_np_retry.lock);
+    }
+    get_page(drm_np_retry.dummy_page);
+    return drm_np_retry.dummy_page;
+}
+
+void free_nopage_retry(void)
+{
+    if (atomic_read(&drm_np_retry.present) == 1) {
+        spin_lock(&drm_np_retry.lock);
+        __free_page(drm_np_retry.dummy_page);
+        drm_np_retry.dummy_page = NULL;
+        atomic_set(&drm_np_retry.present, 0);
+        spin_unlock(&drm_np_retry.lock);
+    }
+}
+
+struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
+                               unsigned long address,
+                               int *type)
+{
+    struct fault_data data;
+
+    if (type)
+        *type = VM_FAULT_MINOR;
+
+    data.address = address;
+    data.vma = vma;
+    drm_vm_ttm_fault(vma, &data);
+    switch (data.type) {
+    case VM_FAULT_OOM:
+        return NOPAGE_OOM;
+    case VM_FAULT_SIGBUS:
+        return NOPAGE_SIGBUS;
+    default:
+        break;
+    }
+
+    return NOPAGE_REFAULT;
+}
+
+#endif
+
+#ifdef DRM_ODD_MM_COMPAT
+
+/*
+ * VM compatibility code for 2.6.15-2.6.19(?). This code implements a complicated
+ * workaround for a single BUG statement in do_no_page in these versions. The
+ * tricky thing is that we need to take the mmap_sem in exclusive mode for _all_
+ * vmas mapping the ttm, before dev->struct_mutex is taken. The way we do this is
+ * to first take the dev->struct_mutex, and then trylock all mmap_sems. If this
+ * fails for a single mmap_sem, we have to release all sems and the dev->struct_mutex,
+ * release the cpu and retry. We also need to keep track of all vmas mapping the ttm.
+ * phew.
+ */
+
+typedef struct p_mm_entry {
+    struct list_head head;
+    struct mm_struct *mm;
+    atomic_t refcount;
+    int locked;
+} p_mm_entry_t;
+
+typedef struct vma_entry {
+    struct list_head head;
+    struct vm_area_struct *vma;
+} vma_entry_t;
+
+
+struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
+                               unsigned long address,
+                               int *type)
+{
+    drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data;
+    unsigned long page_offset;
+    struct page *page;
+    drm_ttm_t *ttm;
+    drm_buffer_manager_t *bm;
+    drm_device_t *dev;
+
+    /*
+     * FIXME: Check can't map aperture flag.
+     */
+
+    if (type)
+        *type = VM_FAULT_MINOR;
+
+    if (!map)
+        return NOPAGE_OOM;
+
+    if (address > vma->vm_end)
+        return NOPAGE_SIGBUS;
||||
ttm = (drm_ttm_t *) map->offset;
|
||||
dev = ttm->dev;
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
drm_fixup_ttm_caching(ttm);
|
||||
BUG_ON(ttm->page_flags & DRM_TTM_PAGE_UNCACHED);
|
||||
|
||||
bm = &dev->bm;
|
||||
page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
|
||||
page = ttm->pages[page_offset];
|
||||
|
||||
if (!page) {
|
||||
if (drm_alloc_memctl(PAGE_SIZE)) {
|
||||
page = NOPAGE_OOM;
|
||||
goto out;
|
||||
}
|
||||
page = ttm->pages[page_offset] = drm_alloc_gatt_pages(0);
|
||||
if (!page) {
|
||||
drm_free_memctl(PAGE_SIZE);
|
||||
page = NOPAGE_OOM;
|
||||
goto out;
|
||||
}
|
||||
++bm->cur_pages;
|
||||
SetPageLocked(page);
|
||||
}
|
||||
|
||||
get_page(page);
|
||||
out:
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
return page;
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
int drm_ttm_map_bound(struct vm_area_struct *vma)
|
||||
{
|
||||
drm_local_map_t *map = (drm_local_map_t *)vma->vm_private_data;
|
||||
drm_ttm_t *ttm = (drm_ttm_t *) map->offset;
|
||||
int ret = 0;
|
||||
|
||||
if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED) {
|
||||
unsigned long pfn = ttm->aper_offset +
|
||||
(ttm->be->aperture_base >> PAGE_SHIFT);
|
||||
pgprot_t pgprot = drm_io_prot(ttm->be->drm_map_type, vma);
|
||||
|
||||
ret = io_remap_pfn_range(vma, vma->vm_start, pfn,
|
||||
vma->vm_end - vma->vm_start,
|
||||
pgprot);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
int drm_ttm_add_vma(drm_ttm_t * ttm, struct vm_area_struct *vma)
|
||||
{
|
||||
p_mm_entry_t *entry, *n_entry;
|
||||
vma_entry_t *v_entry;
|
||||
drm_local_map_t *map = (drm_local_map_t *)
|
||||
vma->vm_private_data;
|
||||
struct mm_struct *mm = vma->vm_mm;
|
||||
|
||||
v_entry = drm_ctl_alloc(sizeof(*v_entry), DRM_MEM_TTM);
|
||||
if (!v_entry) {
|
||||
DRM_ERROR("Allocation of vma pointer entry failed\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
v_entry->vma = vma;
|
||||
map->handle = (void *) v_entry;
|
||||
list_add_tail(&v_entry->head, &ttm->vma_list);
|
||||
|
||||
list_for_each_entry(entry, &ttm->p_mm_list, head) {
|
||||
if (mm == entry->mm) {
|
||||
atomic_inc(&entry->refcount);
|
||||
return 0;
|
||||
} else if ((unsigned long)mm < (unsigned long)entry->mm) ;
|
||||
}
|
||||
|
||||
n_entry = drm_ctl_alloc(sizeof(*n_entry), DRM_MEM_TTM);
|
||||
if (!n_entry) {
|
||||
DRM_ERROR("Allocation of process mm pointer entry failed\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
INIT_LIST_HEAD(&n_entry->head);
|
||||
n_entry->mm = mm;
|
||||
n_entry->locked = 0;
|
||||
atomic_set(&n_entry->refcount, 0);
|
||||
list_add_tail(&n_entry->head, &entry->head);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void drm_ttm_delete_vma(drm_ttm_t * ttm, struct vm_area_struct *vma)
|
||||
{
|
||||
p_mm_entry_t *entry, *n;
|
||||
vma_entry_t *v_entry, *v_n;
|
||||
int found = 0;
|
||||
struct mm_struct *mm = vma->vm_mm;
|
||||
|
||||
list_for_each_entry_safe(v_entry, v_n, &ttm->vma_list, head) {
|
||||
if (v_entry->vma == vma) {
|
||||
found = 1;
|
||||
list_del(&v_entry->head);
|
||||
drm_ctl_free(v_entry, sizeof(*v_entry), DRM_MEM_TTM);
|
||||
break;
|
||||
}
|
||||
}
|
||||
BUG_ON(!found);
|
||||
|
||||
list_for_each_entry_safe(entry, n, &ttm->p_mm_list, head) {
|
||||
if (mm == entry->mm) {
|
||||
if (atomic_add_negative(-1, &entry->refcount)) {
|
||||
list_del(&entry->head);
|
||||
BUG_ON(entry->locked);
|
||||
drm_ctl_free(entry, sizeof(*entry), DRM_MEM_TTM);
|
||||
}
|
||||
return;
|
||||
}
|
||||
}
|
||||
BUG_ON(1);
|
||||
}
|
||||
|
||||
|
||||
|
||||
int drm_ttm_lock_mm(drm_ttm_t * ttm)
|
||||
{
|
||||
p_mm_entry_t *entry;
|
||||
int lock_ok = 1;
|
||||
|
||||
list_for_each_entry(entry, &ttm->p_mm_list, head) {
|
||||
BUG_ON(entry->locked);
|
||||
if (!down_write_trylock(&entry->mm->mmap_sem)) {
|
||||
lock_ok = 0;
|
||||
break;
|
||||
}
|
||||
entry->locked = 1;
|
||||
}
|
||||
|
||||
if (lock_ok)
|
||||
return 0;
|
||||
|
||||
list_for_each_entry(entry, &ttm->p_mm_list, head) {
|
||||
if (!entry->locked)
|
||||
break;
|
||||
up_write(&entry->mm->mmap_sem);
|
||||
entry->locked = 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Possible deadlock. Try again. Our callers should handle this
|
||||
* and restart.
|
||||
*/
|
||||
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
void drm_ttm_unlock_mm(drm_ttm_t * ttm)
|
||||
{
|
||||
p_mm_entry_t *entry;
|
||||
|
||||
list_for_each_entry(entry, &ttm->p_mm_list, head) {
|
||||
BUG_ON(!entry->locked);
|
||||
up_write(&entry->mm->mmap_sem);
|
||||
entry->locked = 0;
|
||||
}
|
||||
}
|
||||
|
||||
int drm_ttm_remap_bound(drm_ttm_t *ttm)
|
||||
{
|
||||
vma_entry_t *v_entry;
|
||||
int ret = 0;
|
||||
|
||||
list_for_each_entry(v_entry, &ttm->vma_list, head) {
|
||||
ret = drm_ttm_map_bound(v_entry->vma);
|
||||
if (ret)
|
||||
break;
|
||||
}
|
||||
|
||||
drm_ttm_unlock_mm(ttm);
|
||||
return ret;
|
||||
}
|
||||
|
||||
void drm_ttm_finish_unmap(drm_ttm_t *ttm)
|
||||
{
|
||||
vma_entry_t *v_entry;
|
||||
|
||||
if (!(ttm->page_flags & DRM_TTM_PAGE_UNCACHED))
|
||||
return;
|
||||
|
||||
list_for_each_entry(v_entry, &ttm->vma_list, head) {
|
||||
v_entry->vma->vm_flags &= ~VM_PFNMAP;
|
||||
}
|
||||
drm_ttm_unlock_mm(ttm);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
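The -EAGAIN contract of drm_ttm_lock_mm() above implies a back-off loop in its
callers. A minimal sketch of such a caller (hypothetical, not part of this
commit; it assumes dev->struct_mutex is the mutex the comments refer to):

	static int example_lock_all_mms(drm_device_t *dev, drm_ttm_t *ttm)
	{
		int ret;

		do {
			mutex_lock(&dev->struct_mutex);
			ret = drm_ttm_lock_mm(ttm); /* trylocks every mmap_sem */
			if (ret == -EAGAIN) {
				/* A contender holds one of the mmap_sems and
				 * may be waiting on struct_mutex: drop
				 * everything, yield the CPU, then retry. */
				mutex_unlock(&dev->struct_mutex);
				schedule();
			}
		} while (ret == -EAGAIN);
		return ret; /* 0: struct_mutex and all mmap_sems now held */
	}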
@ -31,6 +31,7 @@
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <asm/agp.h>
#ifndef _DRM_COMPAT_H_
#define _DRM_COMPAT_H_

@ -227,4 +228,152 @@ static inline int remap_pfn_range(struct vm_area_struct *vma, unsigned long from
}
#endif

#include <linux/mm.h>
#include <asm/page.h>

#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)) && \
     (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)))
#define DRM_ODD_MM_COMPAT
#endif

/*
 * Flush relevant caches and clear a VMA structure so that page references
 * will cause a page fault. Don't flush tlbs.
 */

extern void drm_clear_vma(struct vm_area_struct *vma,
			  unsigned long addr, unsigned long end);

/*
 * Return the PTE protection map entries for the VMA flags given by
 * flags. This is a functional interface to the kernel's protection map.
 */

extern pgprot_t vm_get_page_prot(unsigned long vm_flags);

/*
 * These are similar to the current kernel gatt pages allocator, only that we
 * want a struct page pointer instead of a virtual address. This allows for
 * pages that are not in the kernel linear map.
 */

#define drm_alloc_gatt_pages(order) ({				\
	void *_virt = alloc_gatt_pages(order);			\
	((_virt) ? virt_to_page(_virt) : NULL); })
#define drm_free_gatt_pages(pages, order) free_gatt_pages(page_address(pages), order)

#if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))

/*
 * These are too slow in earlier kernels.
 */

extern int drm_unmap_page_from_agp(struct page *page);
extern int drm_map_page_into_agp(struct page *page);

#define map_page_into_agp drm_map_page_into_agp
#define unmap_page_from_agp drm_unmap_page_from_agp
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
extern struct page *get_nopage_retry(void);
extern void free_nopage_retry(void);
struct fault_data;
extern struct page *drm_vm_ttm_fault(struct vm_area_struct *vma,
				     struct fault_data *data);

#define NOPAGE_REFAULT get_nopage_retry()
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))

/*
 * Hopefully, real NOPAGE_RETRY functionality will be in 2.6.19.
 * For now, just return a dummy page that we've allocated out of
 * static space. The page will be put by do_nopage() since we've already
 * filled out the pte.
 */

struct fault_data {
	struct vm_area_struct *vma;
	unsigned long address;
	pgoff_t pgoff;
	unsigned int flags;

	int type;
};

extern int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
			 unsigned long pfn, pgprot_t pgprot);

extern struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
				      unsigned long address,
				      int *type);

#endif

#ifdef DRM_ODD_MM_COMPAT

struct drm_ttm;

/*
 * Add a vma to the ttm vma list, and the
 * process mm pointer to the ttm mm list. Needs the ttm mutex.
 */

extern int drm_ttm_add_vma(struct drm_ttm * ttm,
			   struct vm_area_struct *vma);

/*
 * Delete a vma and the corresponding mm pointer from the
 * ttm lists. Needs the ttm mutex.
 */

extern void drm_ttm_delete_vma(struct drm_ttm * ttm,
			       struct vm_area_struct *vma);

/*
 * Attempts to lock all relevant mmap_sems for a ttm, while
 * not releasing the ttm mutex. May return -EAGAIN to avoid
 * deadlocks. In that case the caller shall release the ttm mutex,
 * schedule() and try again.
 */

extern int drm_ttm_lock_mm(struct drm_ttm * ttm);

/*
 * Unlock all relevant mmap_sems for a ttm.
 */

extern void drm_ttm_unlock_mm(struct drm_ttm * ttm);

/*
 * If the ttm was bound to the aperture, this function shall be called
 * with all relevant mmap sems held. It deletes the flag VM_PFNMAP from all
 * vmas mapping this ttm. This is needed just after unmapping the ptes of
 * the vma, otherwise the do_nopage() function will bug :(. The function
 * releases the mmap_sems for this ttm.
 */

extern void drm_ttm_finish_unmap(struct drm_ttm *ttm);

/*
 * Remap all vmas of this ttm using io_remap_pfn_range. We cannot
 * fault these pfns in, because the first one will set the vma VM_PFNMAP
 * flag, which will make the next fault bug in do_nopage(). The function
 * releases the mmap_sems for this ttm.
 */

extern int drm_ttm_remap_bound(struct drm_ttm *ttm);

/*
 * Remap a vma for a bound ttm. Call with the ttm mutex held and
 * the relevant mmap_sem locked.
 */

extern int drm_ttm_map_bound(struct vm_area_struct *vma);

#endif
#endif
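The drm_alloc_gatt_pages()/drm_free_gatt_pages() wrappers above trade the
kernel allocator's virtual address for a struct page pointer. A minimal usage
sketch (hypothetical helper, assuming alloc_gatt_pages() may return NULL):

	static struct page *example_get_gatt_page(void)
	{
		/* order 0: a single page, returned as a struct page pointer */
		struct page *page = drm_alloc_gatt_pages(0);

		if (!page)
			return NULL;	/* allocation failed */
		return page;		/* later: drm_free_gatt_pages(page, 0) */
	}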
@ -53,7 +53,7 @@
 * \param ctx_handle context handle.
 *
 * Clears the bit specified by \p ctx_handle in drm_device::ctx_bitmap and the entry
 * in drm_device::context_sareas, while holding the drm_device::struct_sem
 * in drm_device::context_sareas, while holding the drm_device::struct_mutex
 * lock.
 */
void drm_ctxbitmap_free(drm_device_t * dev, int ctx_handle)

@ -83,7 +83,7 @@ void drm_ctxbitmap_free(drm_device_t * dev, int ctx_handle)
 *
 * Find the first zero bit in drm_device::ctx_bitmap and (re)allocates
 * drm_device::context_sareas to accommodate the new entry while holding the
 * drm_device::struct_sem lock.
 * drm_device::struct_mutex lock.
 */
static int drm_ctxbitmap_next(drm_device_t * dev)
{

@ -145,7 +145,7 @@ static int drm_ctxbitmap_next(drm_device_t * dev)
 * \param dev DRM device.
 *
 * Allocates and initialize drm_device::ctx_bitmap and drm_device::context_sareas, while holding
 * the drm_device::struct_sem lock.
 * the drm_device::struct_mutex lock.
 */
int drm_ctxbitmap_init(drm_device_t * dev)
{

@ -178,7 +178,7 @@ int drm_ctxbitmap_init(drm_device_t * dev)
 * \param dev DRM device.
 *
 * Frees drm_device::ctx_bitmap and drm_device::context_sareas, while holding
 * the drm_device::struct_sem lock.
 * the drm_device::struct_mutex lock.
 */
void drm_ctxbitmap_cleanup(drm_device_t * dev)
{
@ -25,11 +25,11 @@

#define CORE_NAME	"drm"
#define CORE_DESC	"DRM shared core routines"
#define CORE_DATE	"20051102"
#define CORE_DATE	"20060810"

#define DRM_IF_MAJOR	1
#define DRM_IF_MINOR	2
#define DRM_IF_MINOR	3

#define CORE_MAJOR	1
#define CORE_MINOR	0
#define CORE_PATCHLEVEL	1
#define CORE_MINOR	1
#define CORE_PATCHLEVEL	0
@ -1,56 +0,0 @@
/**
 * \file drm_drawable.c
 * IOCTLs for drawables
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"

/** No-op. */
int drm_adddraw(struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	drm_draw_t draw;

	draw.handle = 0;	/* NOOP */
	DRM_DEBUG("%d\n", draw.handle);
	if (copy_to_user((drm_draw_t __user *) arg, &draw, sizeof(draw)))
		return -EFAULT;
	return 0;
}

/** No-op. */
int drm_rmdraw(struct inode *inode, struct file *filp,
	       unsigned int cmd, unsigned long arg)
{
	return 0;	/* NOOP */
}
@ -50,7 +50,7 @@
#include "drmP.h"
#include "drm_core.h"

static void __exit drm_cleanup(drm_device_t * dev);
static void drm_cleanup(drm_device_t * dev);
int drm_fb_loaded = 0;

static int drm_version(struct inode *inode, struct file *filp,

@ -119,9 +119,16 @@ static drm_ioctl_desc_t drm_ioctls[] = {
	[DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = {drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},

	[DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = {drm_wait_vblank, 0},
	[DRM_IOCTL_NR(DRM_IOCTL_FENCE)] = {drm_fence_ioctl, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_IOCTL_BUFOBJ)] = {drm_bo_ioctl, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_IOCTL_MM_INIT)] = {drm_mm_init_ioctl,
					     DRM_AUTH },

	[DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW)] = {drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
};

#define DRIVER_IOCTL_COUNT	DRM_ARRAY_SIZE( drm_ioctls )
#define DRIVER_IOCTL_COUNT	ARRAY_SIZE( drm_ioctls )

/**
 * Take down the DRM device.

@ -141,6 +148,11 @@ int drm_lastclose(drm_device_t * dev)

	DRM_DEBUG("\n");

	if (drm_bo_driver_finish(dev)) {
		DRM_ERROR("DRM memory manager still busy. "
			  "System is unstable. Please reboot.\n");
	}

	if (dev->driver->lastclose)
		dev->driver->lastclose(dev);
	DRM_DEBUG("driver lastclose completed\n");

@ -154,6 +166,18 @@ int drm_lastclose(drm_device_t * dev)
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	/* Free drawable information memory */
	for (i = 0; i < dev->drw_bitfield_length / sizeof(*dev->drw_bitfield);
	     i++) {
		drm_drawable_info_t *info = drm_get_drawable_info(dev, i);

		if (info) {
			drm_free(info->rects, info->num_rects *
				 sizeof(drm_clip_rect_t), DRM_MEM_BUFS);
			drm_free(info, sizeof(*info), DRM_MEM_BUFS);
		}
	}

	mutex_lock(&dev->struct_mutex);
	del_timer(&dev->timer);

@ -204,7 +228,7 @@ int drm_lastclose(drm_device_t * dev)
	if (dev->vmalist) {
		for (vma = dev->vmalist; vma; vma = vma_next) {
			vma_next = vma->next;
			drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
			drm_ctl_free(vma, sizeof(*vma), DRM_MEM_VMAS);
		}
		dev->vmalist = NULL;
	}

@ -242,6 +266,7 @@ int drm_lastclose(drm_device_t * dev)
		dev->lock.filp = NULL;
		wake_up_interruptible(&dev->lock.lock_queue);
	}
	dev->dev_mapping = NULL;
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG("lastclose completed\n");

@ -336,7 +361,7 @@ EXPORT_SYMBOL(drm_init);
 *
 * \sa drm_init
 */
static void __exit drm_cleanup(drm_device_t * dev)
static void drm_cleanup(drm_device_t * dev)
{

	DRM_DEBUG("\n");

@ -346,11 +371,14 @@ static void __exit drm_cleanup(drm_device_t * dev)
	}

	drm_lastclose(dev);
	drm_fence_manager_takedown(dev);

	if (dev->maplist) {
		drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
		dev->maplist = NULL;
		drm_ht_remove(&dev->map_hash);
		drm_mm_takedown(&dev->offset_manager);
		drm_ht_remove(&dev->object_hash);
	}

	if (!drm_fb_loaded)

@ -379,7 +407,7 @@ static void __exit drm_cleanup(drm_device_t * dev)
		DRM_ERROR("Cannot unload module\n");
}

void __exit drm_exit(struct drm_driver *driver)
void drm_exit(struct drm_driver *driver)
{
	int i;
	drm_device_t *dev = NULL;

@ -405,6 +433,9 @@ void __exit drm_exit(struct drm_driver *driver)
		}
	} else
		pci_unregister_driver(&driver->pci_driver);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
	free_nopage_retry();
#endif
	DRM_INFO("Module unloaded\n");
}
EXPORT_SYMBOL(drm_exit);

@ -415,10 +446,64 @@ static struct file_operations drm_stub_fops = {
	.open = drm_stub_open
};

static int drm_create_memory_caches(void)
{
	drm_cache.mm = kmem_cache_create("drm_mm_node_t",
					 sizeof(drm_mm_node_t),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL, NULL);
	if (!drm_cache.mm)
		return -ENOMEM;

	drm_cache.fence_object = kmem_cache_create("drm_fence_object_t",
						   sizeof(drm_fence_object_t),
						   0,
						   SLAB_HWCACHE_ALIGN,
						   NULL, NULL);
	if (!drm_cache.fence_object)
		return -ENOMEM;

	return 0;
}

static void drm_free_mem_cache(kmem_cache_t *cache,
			       const char *name)
{
	if (!cache)
		return;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
	if (kmem_cache_destroy(cache)) {
		DRM_ERROR("Warning! DRM is leaking %s memory.\n",
			  name);
	}
#else
	kmem_cache_destroy(cache);
#endif
}

static void drm_free_memory_caches(void)
{
	drm_free_mem_cache(drm_cache.fence_object, "fence object");
	drm_cache.fence_object = NULL;
	drm_free_mem_cache(drm_cache.mm, "memory manager block");
	drm_cache.mm = NULL;
}


static int __init drm_core_init(void)
{
	int ret = -ENOMEM;
	int ret;
	struct sysinfo si;

	si_meminfo(&si);
	drm_init_memctl(si.totalram/2, si.totalram*3/4);
	ret = drm_create_memory_caches();
	if (ret)
		goto err_p1;

	ret = -ENOMEM;
	drm_cards_limit =
	    (drm_cards_limit < DRM_MAX_MINOR + 1 ? drm_cards_limit : DRM_MAX_MINOR + 1);
	drm_heads = drm_calloc(drm_cards_limit, sizeof(*drm_heads), DRM_MEM_STUB);

@ -454,11 +539,13 @@ err_p2:
	unregister_chrdev(DRM_MAJOR, "drm");
	drm_free(drm_heads, sizeof(*drm_heads) * drm_cards_limit, DRM_MEM_STUB);
err_p1:
	drm_free_memory_caches();
	return ret;
}

static void __exit drm_core_exit(void)
{
	drm_free_memory_caches();
	remove_proc_entry("dri", NULL);
	drm_sysfs_destroy(drm_class);

@ -535,13 +622,18 @@ int drm_ioctl(struct inode *inode, struct file *filp,
	    current->pid, cmd, nr, (long)old_encode_dev(priv->head->device),
	    priv->authenticated);

	if (nr < DRIVER_IOCTL_COUNT)
		ioctl = &drm_ioctls[nr];
	else if ((nr >= DRM_COMMAND_BASE)
		 && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls))
		ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
	else
	if (nr >= DRIVER_IOCTL_COUNT &&
	    (nr < DRM_COMMAND_BASE || nr >= DRM_COMMAND_END))
		goto err_i1;
	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls))
		ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
	else if (nr >= DRM_COMMAND_END || nr < DRM_COMMAND_BASE)
		ioctl = &drm_ioctls[nr];
	else
		goto err_i1;

	func = ioctl->func;
	if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl)	/* Local override? */
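The reworked range checks in drm_ioctl() above are easy to mis-read. This
standalone sketch (hypothetical names, plain userspace C, illustrative ranges
only) reproduces the same routing decision so it can be tested in isolation:

	#include <stdio.h>

	enum ioctl_table { TBL_INVALID, TBL_CORE, TBL_DRIVER };

	/* Mirrors the new logic: core ioctls live in [0, core_count),
	 * driver ioctls in [cmd_base, cmd_base + num_driver), with
	 * [cmd_base, cmd_end) reserved for drivers. */
	static enum ioctl_table route(unsigned nr, unsigned core_count,
				      unsigned cmd_base, unsigned cmd_end,
				      unsigned num_driver)
	{
		if (nr >= core_count && (nr < cmd_base || nr >= cmd_end))
			return TBL_INVALID;
		if (nr >= cmd_base && nr < cmd_end &&
		    nr < cmd_base + num_driver)
			return TBL_DRIVER;
		if (nr >= cmd_end || nr < cmd_base)
			return TBL_CORE;
		return TBL_INVALID; /* reserved range, past driver's table */
	}

	int main(void)
	{
		/* assume 64 core ioctls, driver range [0x40, 0xA0), 4 driver ioctls */
		printf("%d\n", route(0x10, 64, 0x40, 0xA0, 4)); /* TBL_CORE */
		printf("%d\n", route(0x41, 64, 0x40, 0xA0, 4)); /* TBL_DRIVER */
		printf("%d\n", route(0x50, 64, 0x40, 0xA0, 4)); /* TBL_INVALID */
		return 0;
	}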
@ -0,0 +1,619 @@
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"

/*
 * Typically called by the IRQ handler.
 */

void drm_fence_handler(drm_device_t * dev, uint32_t sequence, uint32_t type)
{
	int wake = 0;
	uint32_t diff;
	uint32_t relevant;
	drm_fence_manager_t *fm = &dev->fm;
	drm_fence_driver_t *driver = dev->driver->fence_driver;
	struct list_head *list, *prev;
	drm_fence_object_t *fence;
	int found = 0;

	if (list_empty(&fm->ring))
		return;

	list_for_each_entry(fence, &fm->ring, ring) {
		diff = (sequence - fence->sequence) & driver->sequence_mask;
		if (diff > driver->wrap_diff) {
			found = 1;
			break;
		}
	}

	list = (found) ? fence->ring.prev : fm->ring.prev;
	prev = list->prev;

	for (; list != &fm->ring; list = prev, prev = list->prev) {
		fence = list_entry(list, drm_fence_object_t, ring);

		type |= fence->native_type;
		relevant = type & fence->type;

		if ((fence->signaled | relevant) != fence->signaled) {
			fence->signaled |= relevant;
			DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n",
				  fence->base.hash.key, fence->signaled);
			fence->submitted_flush |= relevant;
			wake = 1;
		}

		relevant = fence->flush_mask &
			~(fence->signaled | fence->submitted_flush);

		if (relevant) {
			fm->pending_flush |= relevant;
			fence->submitted_flush = fence->flush_mask;
		}

		if (!(fence->type & ~fence->signaled)) {
			DRM_DEBUG("Fence completely signaled 0x%08lx\n",
				  fence->base.hash.key);
			list_del_init(&fence->ring);
		}

	}

	if (wake) {
		DRM_WAKEUP(&fm->fence_queue);
	}
}

EXPORT_SYMBOL(drm_fence_handler);

static void drm_fence_unring(drm_device_t * dev, struct list_head *ring)
{
	drm_fence_manager_t *fm = &dev->fm;
	unsigned long flags;

	write_lock_irqsave(&fm->lock, flags);
	list_del_init(ring);
	write_unlock_irqrestore(&fm->lock, flags);
}

void drm_fence_usage_deref_locked(drm_device_t * dev,
				  drm_fence_object_t * fence)
{
	drm_fence_manager_t *fm = &dev->fm;

	if (atomic_dec_and_test(&fence->usage)) {
		drm_fence_unring(dev, &fence->ring);
		DRM_DEBUG("Destroyed a fence object 0x%08lx\n",
			  fence->base.hash.key);
		atomic_dec(&fm->count);
		drm_ctl_cache_free(drm_cache.fence_object, sizeof(*fence),
				   fence);
	}
}

void drm_fence_usage_deref_unlocked(drm_device_t * dev,
				    drm_fence_object_t * fence)
{
	drm_fence_manager_t *fm = &dev->fm;

	if (atomic_dec_and_test(&fence->usage)) {
		mutex_lock(&dev->struct_mutex);
		if (atomic_read(&fence->usage) == 0) {
			drm_fence_unring(dev, &fence->ring);
			atomic_dec(&fm->count);
			drm_ctl_cache_free(drm_cache.fence_object,
					   sizeof(*fence), fence);
		}
		mutex_unlock(&dev->struct_mutex);
	}
}

static void drm_fence_object_destroy(drm_file_t * priv,
				     drm_user_object_t * base)
{
	drm_device_t *dev = priv->head->dev;
	drm_fence_object_t *fence =
	    drm_user_object_entry(base, drm_fence_object_t, base);

	drm_fence_usage_deref_locked(dev, fence);
}

static int fence_signaled(drm_device_t * dev, volatile
			  drm_fence_object_t * fence,
			  uint32_t mask, int poke_flush)
{
	unsigned long flags;
	int signaled;
	drm_fence_manager_t *fm = &dev->fm;
	drm_fence_driver_t *driver = dev->driver->fence_driver;

	if (poke_flush)
		driver->poke_flush(dev);
	read_lock_irqsave(&fm->lock, flags);
	signaled =
	    (fence->type & mask & fence->signaled) == (fence->type & mask);
	read_unlock_irqrestore(&fm->lock, flags);

	return signaled;
}

static void drm_fence_flush_exe(drm_fence_manager_t * fm,
				drm_fence_driver_t * driver, uint32_t sequence)
{
	uint32_t diff;

	if (!fm->pending_exe_flush) {
		volatile struct list_head *list;

		/*
		 * Last_exe_flush is invalid. Find oldest sequence.
		 */

		/* list = fm->fence_types[_DRM_FENCE_TYPE_EXE]; */
		list = &fm->ring;
		if (list->next == &fm->ring) {
			return;
		} else {
			drm_fence_object_t *fence =
			    list_entry(list->next, drm_fence_object_t, ring);
			fm->last_exe_flush = (fence->sequence - 1) &
			    driver->sequence_mask;
		}
		diff = (sequence - fm->last_exe_flush) & driver->sequence_mask;
		if (diff >= driver->wrap_diff)
			return;
		fm->exe_flush_sequence = sequence;
		fm->pending_exe_flush = 1;
	} else {
		diff =
		    (sequence - fm->exe_flush_sequence) & driver->sequence_mask;
		if (diff < driver->wrap_diff) {
			fm->exe_flush_sequence = sequence;
		}
	}
}

int drm_fence_object_signaled(volatile drm_fence_object_t * fence,
			      uint32_t type)
{
	return ((fence->signaled & type) == type);
}

int drm_fence_object_flush(drm_device_t * dev,
			   volatile drm_fence_object_t * fence, uint32_t type)
{
	drm_fence_manager_t *fm = &dev->fm;
	drm_fence_driver_t *driver = dev->driver->fence_driver;
	unsigned long flags;

	if (type & ~fence->type) {
		DRM_ERROR("Flush trying to extend fence type, "
			  "0x%x, 0x%x\n", type, fence->type);
		return -EINVAL;
	}

	write_lock_irqsave(&fm->lock, flags);
	fence->flush_mask |= type;
	if (fence->submitted_flush == fence->signaled) {
		if ((fence->type & DRM_FENCE_TYPE_EXE) &&
		    !(fence->submitted_flush & DRM_FENCE_TYPE_EXE)) {
			drm_fence_flush_exe(fm, driver, fence->sequence);
			fence->submitted_flush |= DRM_FENCE_TYPE_EXE;
		} else {
			fm->pending_flush |= (fence->flush_mask &
					      ~fence->submitted_flush);
			fence->submitted_flush = fence->flush_mask;
		}
	}
	write_unlock_irqrestore(&fm->lock, flags);
	driver->poke_flush(dev);
	return 0;
}

/*
 * Make sure old fence objects are signaled before their fence sequences are
 * wrapped around and reused.
 */

void drm_fence_flush_old(drm_device_t * dev, uint32_t sequence)
{
	drm_fence_manager_t *fm = &dev->fm;
	drm_fence_driver_t *driver = dev->driver->fence_driver;
	uint32_t old_sequence;
	unsigned long flags;
	drm_fence_object_t *fence;
	uint32_t diff;

	mutex_lock(&dev->struct_mutex);
	read_lock_irqsave(&fm->lock, flags);
	if (fm->ring.next == &fm->ring) {
		read_unlock_irqrestore(&fm->lock, flags);
		mutex_unlock(&dev->struct_mutex);
		return;
	}
	old_sequence = (sequence - driver->flush_diff) & driver->sequence_mask;
	fence = list_entry(fm->ring.next, drm_fence_object_t, ring);
	atomic_inc(&fence->usage);
	mutex_unlock(&dev->struct_mutex);
	diff = (old_sequence - fence->sequence) & driver->sequence_mask;
	read_unlock_irqrestore(&fm->lock, flags);
	if (diff < driver->wrap_diff) {
		drm_fence_object_flush(dev, fence, fence->type);
	}
	drm_fence_usage_deref_unlocked(dev, fence);
}

EXPORT_SYMBOL(drm_fence_flush_old);

int drm_fence_object_wait(drm_device_t * dev,
			  volatile drm_fence_object_t * fence,
			  int lazy, int ignore_signals, uint32_t mask)
{
	drm_fence_manager_t *fm = &dev->fm;
	drm_fence_driver_t *driver = dev->driver->fence_driver;
	int ret = 0;
	unsigned long _end;
	int signaled;

	if (mask & ~fence->type) {
		DRM_ERROR("Wait trying to extend fence type"
			  " 0x%08x 0x%08x\n", mask, fence->type);
		return -EINVAL;
	}

	if (fence_signaled(dev, fence, mask, 0))
		return 0;

	_end = jiffies + 3 * DRM_HZ;

	drm_fence_object_flush(dev, fence, mask);

	if (lazy && driver->lazy_capable) {

		do {
			DRM_WAIT_ON(ret, fm->fence_queue, 3 * DRM_HZ,
				    fence_signaled(dev, fence, mask, 1));
			if (time_after_eq(jiffies, _end))
				break;
		} while (ret == -EINTR && ignore_signals);
		if (time_after_eq(jiffies, _end) && (ret != 0))
			ret = -EBUSY;
		if (ret) {
			if (ret == -EBUSY) {
				DRM_ERROR("Fence timeout. "
					  "GPU lockup or fence driver was "
					  "taken down.\n");
			}
			return ((ret == -EINTR) ? -EAGAIN : ret);
		}
	} else if ((fence->class == 0) && (mask & DRM_FENCE_TYPE_EXE) &&
		   driver->lazy_capable) {

		/*
		 * We use IRQ wait for EXE fence if available to gain
		 * CPU in some cases.
		 */

		do {
			DRM_WAIT_ON(ret, fm->fence_queue, 3 * DRM_HZ,
				    fence_signaled(dev, fence,
						   DRM_FENCE_TYPE_EXE, 1));
			if (time_after_eq(jiffies, _end))
				break;
		} while (ret == -EINTR && ignore_signals);
		if (time_after_eq(jiffies, _end) && (ret != 0))
			ret = -EBUSY;
		if (ret)
			return ((ret == -EINTR) ? -EAGAIN : ret);
	}

	if (fence_signaled(dev, fence, mask, 0))
		return 0;

	/*
	 * Avoid kernel-space busy-waits.
	 */
#if 1
	if (!ignore_signals)
		return -EAGAIN;
#endif
	do {
		schedule();
		signaled = fence_signaled(dev, fence, mask, 1);
	} while (!signaled && !time_after_eq(jiffies, _end));

	if (!signaled)
		return -EBUSY;

	return 0;
}

int drm_fence_object_emit(drm_device_t * dev, drm_fence_object_t * fence,
			  uint32_t fence_flags, uint32_t type)
{
	drm_fence_manager_t *fm = &dev->fm;
	drm_fence_driver_t *driver = dev->driver->fence_driver;
	unsigned long flags;
	uint32_t sequence;
	uint32_t native_type;
	int ret;

	drm_fence_unring(dev, &fence->ring);
	ret = driver->emit(dev, fence_flags, &sequence, &native_type);
	if (ret)
		return ret;

	write_lock_irqsave(&fm->lock, flags);
	fence->type = type;
	fence->flush_mask = 0x00;
	fence->submitted_flush = 0x00;
	fence->signaled = 0x00;
	fence->sequence = sequence;
	fence->native_type = native_type;
	list_add_tail(&fence->ring, &fm->ring);
	write_unlock_irqrestore(&fm->lock, flags);
	return 0;
}

static int drm_fence_object_init(drm_device_t * dev, uint32_t type,
				 uint32_t fence_flags,
				 drm_fence_object_t * fence)
{
	int ret = 0;
	unsigned long flags;
	drm_fence_manager_t *fm = &dev->fm;

	mutex_lock(&dev->struct_mutex);
	atomic_set(&fence->usage, 1);
	mutex_unlock(&dev->struct_mutex);

	write_lock_irqsave(&fm->lock, flags);
	INIT_LIST_HEAD(&fence->ring);
	fence->class = 0;
	fence->type = type;
	fence->flush_mask = 0;
	fence->submitted_flush = 0;
	fence->signaled = 0;
	fence->sequence = 0;
	write_unlock_irqrestore(&fm->lock, flags);
	if (fence_flags & DRM_FENCE_FLAG_EMIT) {
		ret = drm_fence_object_emit(dev, fence, fence_flags, type);
	}
	return ret;
}

int drm_fence_add_user_object(drm_file_t * priv, drm_fence_object_t * fence,
			      int shareable)
{
	drm_device_t *dev = priv->head->dev;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_add_user_object(priv, &fence->base, shareable);
	mutex_unlock(&dev->struct_mutex);
	if (ret)
		return ret;
	fence->base.type = drm_fence_type;
	fence->base.remove = &drm_fence_object_destroy;
	DRM_DEBUG("Fence 0x%08lx created\n", fence->base.hash.key);
	return 0;
}

EXPORT_SYMBOL(drm_fence_add_user_object);

int drm_fence_object_create(drm_device_t * dev, uint32_t type,
			    unsigned flags, drm_fence_object_t ** c_fence)
{
	drm_fence_object_t *fence;
	int ret;
	drm_fence_manager_t *fm = &dev->fm;

	fence = drm_ctl_cache_alloc(drm_cache.fence_object,
				    sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return -ENOMEM;
	ret = drm_fence_object_init(dev, type, flags, fence);
	if (ret) {
		drm_fence_usage_deref_unlocked(dev, fence);
		return ret;
	}
	*c_fence = fence;
	atomic_inc(&fm->count);

	return 0;
}

EXPORT_SYMBOL(drm_fence_object_create);

void drm_fence_manager_init(drm_device_t * dev)
{
	drm_fence_manager_t *fm = &dev->fm;
	drm_fence_driver_t *fed = dev->driver->fence_driver;
	int i;

	fm->lock = RW_LOCK_UNLOCKED;
	write_lock(&fm->lock);
	INIT_LIST_HEAD(&fm->ring);
	fm->pending_flush = 0;
	DRM_INIT_WAITQUEUE(&fm->fence_queue);
	fm->initialized = 0;
	if (fed) {
		fm->initialized = 1;
		atomic_set(&fm->count, 0);
		for (i = 0; i < fed->no_types; ++i) {
			fm->fence_types[i] = &fm->ring;
		}
	}
	write_unlock(&fm->lock);
}

void drm_fence_manager_takedown(drm_device_t * dev)
{
}

drm_fence_object_t *drm_lookup_fence_object(drm_file_t * priv, uint32_t handle)
{
	drm_device_t *dev = priv->head->dev;
	drm_user_object_t *uo;
	drm_fence_object_t *fence;

	mutex_lock(&dev->struct_mutex);
	uo = drm_lookup_user_object(priv, handle);
	if (!uo || (uo->type != drm_fence_type)) {
		mutex_unlock(&dev->struct_mutex);
		return NULL;
	}
	fence = drm_user_object_entry(uo, drm_fence_object_t, base);
	atomic_inc(&fence->usage);
	mutex_unlock(&dev->struct_mutex);
	return fence;
}

int drm_fence_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	int ret;
	drm_fence_manager_t *fm = &dev->fm;
	drm_fence_arg_t arg;
	drm_fence_object_t *fence;
	drm_user_object_t *uo;
	unsigned long flags;
	ret = 0;

	if (!fm->initialized) {
		DRM_ERROR("The DRM driver does not support fencing.\n");
		return -EINVAL;
	}

	DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
	switch (arg.op) {
	case drm_fence_create:
		if (arg.flags & DRM_FENCE_FLAG_EMIT)
			LOCK_TEST_WITH_RETURN(dev, filp);
		ret = drm_fence_object_create(dev, arg.type, arg.flags, &fence);
		if (ret)
			return ret;
		ret = drm_fence_add_user_object(priv, fence,
						arg.flags &
						DRM_FENCE_FLAG_SHAREABLE);
		if (ret) {
			drm_fence_usage_deref_unlocked(dev, fence);
			return ret;
		}

		/*
		 * usage > 0. No need to lock dev->struct_mutex;
		 */

		atomic_inc(&fence->usage);
		arg.handle = fence->base.hash.key;
		break;
	case drm_fence_destroy:
		mutex_lock(&dev->struct_mutex);
		uo = drm_lookup_user_object(priv, arg.handle);
		if (!uo || (uo->type != drm_fence_type) || uo->owner != priv) {
			mutex_unlock(&dev->struct_mutex);
			return -EINVAL;
		}
		ret = drm_remove_user_object(priv, uo);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	case drm_fence_reference:
		ret =
		    drm_user_object_ref(priv, arg.handle, drm_fence_type, &uo);
		if (ret)
			return ret;
		fence = drm_lookup_fence_object(priv, arg.handle);
		break;
	case drm_fence_unreference:
		ret = drm_user_object_unref(priv, arg.handle, drm_fence_type);
		return ret;
	case drm_fence_signaled:
		fence = drm_lookup_fence_object(priv, arg.handle);
		if (!fence)
			return -EINVAL;
		break;
	case drm_fence_flush:
		fence = drm_lookup_fence_object(priv, arg.handle);
		if (!fence)
			return -EINVAL;
		ret = drm_fence_object_flush(dev, fence, arg.type);
		break;
	case drm_fence_wait:
		fence = drm_lookup_fence_object(priv, arg.handle);
		if (!fence)
			return -EINVAL;
		ret =
		    drm_fence_object_wait(dev, fence,
					  arg.flags & DRM_FENCE_FLAG_WAIT_LAZY,
					  0, arg.type);
		break;
	case drm_fence_emit:
		LOCK_TEST_WITH_RETURN(dev, filp);
		fence = drm_lookup_fence_object(priv, arg.handle);
		if (!fence)
			return -EINVAL;
		ret = drm_fence_object_emit(dev, fence, arg.flags, arg.type);
		break;
	case drm_fence_buffers:
		if (!dev->bm.initialized) {
			DRM_ERROR("Buffer object manager is not initialized\n");
			return -EINVAL;
		}
		LOCK_TEST_WITH_RETURN(dev, filp);
		ret = drm_fence_buffer_objects(priv, NULL, arg.flags,
					       NULL, &fence);
		if (ret)
			return ret;
		ret = drm_fence_add_user_object(priv, fence,
						arg.flags &
						DRM_FENCE_FLAG_SHAREABLE);
		if (ret)
			return ret;
		atomic_inc(&fence->usage);
		arg.handle = fence->base.hash.key;
		break;
	default:
		return -EINVAL;
	}
	read_lock_irqsave(&fm->lock, flags);
	arg.class = fence->class;
	arg.type = fence->type;
	arg.signaled = fence->signaled;
	read_unlock_irqrestore(&fm->lock, flags);
	drm_fence_usage_deref_unlocked(dev, fence);

	DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
	return ret;
}
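drm_fence_handler(), drm_fence_flush_exe() and drm_fence_flush_old() above all
rely on modular sequence arithmetic to stay correct across 32-bit counter
wrap-around. A standalone illustration of that test (plain userspace C,
illustrative values; wrap_diff is typically about half the sequence range):

	#include <stdint.h>
	#include <stdio.h>

	/* "a is at or after b" iff the masked difference is below wrap_diff;
	 * unsigned subtraction makes this hold across wrap-around. */
	static int seq_passed(uint32_t a, uint32_t b,
			      uint32_t sequence_mask, uint32_t wrap_diff)
	{
		return ((a - b) & sequence_mask) < wrap_diff;
	}

	int main(void)
	{
		uint32_t mask = 0xffffffffu, wrap = 0x80000000u;

		printf("%d\n", seq_passed(5, 3, mask, wrap));           /* 1 */
		printf("%d\n", seq_passed(2, 0xfffffffeu, mask, wrap)); /* 1: wrapped */
		printf("%d\n", seq_passed(3, 5, mask, wrap));           /* 0 */
		return 0;
	}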
@ -47,6 +47,7 @@ static int drm_setup(drm_device_t * dev)
	int i;
	int ret;

	if (dev->driver->firstopen) {
		ret = dev->driver->firstopen(dev);
		if (ret != 0)

@ -56,6 +57,7 @@ static int drm_setup(drm_device_t * dev)
	dev->magicfree.next = NULL;

	/* prebuild the SAREA */

	i = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM, _DRM_CONTAINS_LOCK, &map);
	if (i != 0)
		return i;

@ -71,11 +73,11 @@ static int drm_setup(drm_device_t * dev)
		return i;
	}

	for (i = 0; i < DRM_ARRAY_SIZE(dev->counts); i++)
	for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
		atomic_set(&dev->counts[i], 0);

	drm_ht_create(&dev->magiclist, DRM_MAGIC_HASH_ORDER);
	INIT_LIST_HEAD(&dev->magicfree);
	drm_ht_create(&dev->magiclist, DRM_MAGIC_HASH_ORDER);
	INIT_LIST_HEAD(&dev->magicfree);

	dev->ctxlist = drm_alloc(sizeof(*dev->ctxlist), DRM_MEM_CTXLIST);
	if (dev->ctxlist == NULL)

@ -156,6 +158,12 @@ int drm_open(struct inode *inode, struct file *filp)
		}
		spin_unlock(&dev->count_lock);
	}
	mutex_lock(&dev->struct_mutex);
	BUG_ON((dev->dev_mapping != NULL) &&
	       (dev->dev_mapping != inode->i_mapping));
	if (dev->dev_mapping == NULL)
		dev->dev_mapping = inode->i_mapping;
	mutex_unlock(&dev->struct_mutex);

	return retcode;
}

@ -175,7 +183,7 @@ int drm_stub_open(struct inode *inode, struct file *filp)
	drm_device_t *dev = NULL;
	int minor = iminor(inode);
	int err = -ENODEV;
	struct file_operations *old_fops;
	const struct file_operations *old_fops;

	DRM_DEBUG("\n");

@ -233,6 +241,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
	int minor = iminor(inode);
	drm_file_t *priv;
	int ret;
	int i, j;

	if (filp->f_flags & O_EXCL)
		return -EBUSY;	/* No exclusive opens */

@ -256,6 +265,22 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
	priv->authenticated = capable(CAP_SYS_ADMIN);
	priv->lock_count = 0;

	INIT_LIST_HEAD(&priv->user_objects);
	INIT_LIST_HEAD(&priv->refd_objects);

	for (i = 0; i < _DRM_NO_REF_TYPES; ++i) {
		ret = drm_ht_create(&priv->refd_object_hash[i], DRM_FILE_HASH_ORDER);
		if (ret)
			break;
	}

	if (ret) {
		for (j = 0; j < i; ++j) {
			drm_ht_remove(&priv->refd_object_hash[j]);
		}
		goto out_free;
	}

	if (dev->driver->open) {
		ret = dev->driver->open(dev, priv);
		if (ret < 0)

@ -320,6 +345,53 @@ int drm_fasync(int fd, struct file *filp, int on)
}
EXPORT_SYMBOL(drm_fasync);

static void drm_object_release(struct file *filp)
{
	drm_file_t *priv = filp->private_data;
	struct list_head *head;
	drm_user_object_t *user_object;
	drm_ref_object_t *ref_object;
	int i;

	/*
	 * Free leftover ref objects created by me. Note that we cannot use
	 * list_for_each() here, as the struct_mutex may be temporarily
	 * released by the remove_() functions, and thus the lists may be
	 * altered. Also, a drm_remove_ref_object() will not remove it
	 * from the list unless its refcount is 1.
	 */

	head = &priv->refd_objects;
	while (head->next != head) {
		ref_object = list_entry(head->next, drm_ref_object_t, list);
		drm_remove_ref_object(priv, ref_object);
		head = &priv->refd_objects;
	}

	/*
	 * Free leftover user objects created by me.
	 */

	head = &priv->user_objects;
	while (head->next != head) {
		user_object = list_entry(head->next, drm_user_object_t, list);
		drm_remove_user_object(priv, user_object);
		head = &priv->user_objects;
	}

	for (i = 0; i < _DRM_NO_REF_TYPES; ++i) {
		drm_ht_remove(&priv->refd_object_hash[i]);
	}
}

/**
 * Release file.
 *

@ -354,58 +426,43 @@ int drm_release(struct inode *inode, struct file *filp)
	    current->pid, (long)old_encode_dev(priv->head->device),
	    dev->open_count);

	if (priv->lock_count && dev->lock.hw_lock &&
	    _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
	    dev->lock.filp == filp) {
	if (dev->driver->reclaim_buffers_locked) {
		unsigned long _end = jiffies + DRM_HZ*3;

		do {
			retcode = drm_kernel_take_hw_lock(filp);
		} while (retcode && !time_after_eq(jiffies, _end));

		if (!retcode) {
			dev->driver->reclaim_buffers_locked(dev, filp);

			drm_lock_free(dev, &dev->lock.hw_lock->lock,
				      _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
		} else {

			/*
			 * FIXME: This is not a good solution. We should perhaps associate the
			 * DRM lock with a process context, and check whether the current process
			 * holds the lock. Then we can run reclaim buffers locked anyway.
			 */

			DRM_ERROR("Reclaim buffers locked deadlock.\n");
			DRM_ERROR("This is probably a single thread having multiple\n");
			DRM_ERROR("DRM file descriptors open either dying or "
				  "closing file descriptors\n");
			DRM_ERROR("while having the lock. I will not reclaim buffers.\n");
			DRM_ERROR("Locking context is 0x%08x\n",
				  _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
		}
	} else if (drm_i_have_hw_lock(filp)) {
		DRM_DEBUG("File %p released, freeing lock for context %d\n",
			  filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));

		if (dev->driver->reclaim_buffers_locked)
			dev->driver->reclaim_buffers_locked(dev, filp);

		drm_lock_free(dev, &dev->lock.hw_lock->lock,
			      _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));

		/* FIXME: may require heavy-handed reset of
		   hardware at this point, possibly
		   processed via a callback to the X
		   server. */
	} else if (dev->driver->reclaim_buffers_locked && priv->lock_count
		   && dev->lock.hw_lock) {
		/* The lock is required to reclaim buffers */
		DECLARE_WAITQUEUE(entry, current);

		add_wait_queue(&dev->lock.lock_queue, &entry);
		for (;;) {
			__set_current_state(TASK_INTERRUPTIBLE);
			if (!dev->lock.hw_lock) {
				/* Device has been unregistered */
				retcode = -EINTR;
				break;
			}
			if (drm_lock_take(&dev->lock.hw_lock->lock,
					  DRM_KERNEL_CONTEXT)) {
				dev->lock.filp = filp;
				dev->lock.lock_time = jiffies;
				atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
				break;	/* Got lock */
			}
			/* Contention */
			schedule();
			if (signal_pending(current)) {
				retcode = -ERESTARTSYS;
				break;
			}
		}
		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&dev->lock.lock_queue, &entry);
		if (!retcode) {
			dev->driver->reclaim_buffers_locked(dev, filp);
			drm_lock_free(dev, &dev->lock.hw_lock->lock,
				      DRM_KERNEL_CONTEXT);
		}
	}

	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
	    !dev->driver->reclaim_buffers_locked) {
		dev->driver->reclaim_buffers(dev, filp);

@ -414,6 +471,7 @@ int drm_release(struct inode *inode, struct file *filp)
	drm_fasync(-1, filp, 0);

	mutex_lock(&dev->ctxlist_mutex);

	if (dev->ctxlist && (!list_empty(&dev->ctxlist->head))) {
		drm_ctx_list_t *pos, *n;

@ -435,6 +493,7 @@ int drm_release(struct inode *inode, struct file *filp)
	mutex_unlock(&dev->ctxlist_mutex);

	mutex_lock(&dev->struct_mutex);
	drm_object_release(filp);
	if (priv->remove_auth_on_close == 1) {
		drm_file_t *temp = dev->file_first;
		while (temp) {
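drm_object_release() above re-reads the list head after every removal because
the remove_() helpers may drop struct_mutex and invalidate any saved iterator.
The same pattern in a generic, self-contained form (hypothetical types,
era-appropriate list API):

	#include <linux/list.h>

	struct example_item {
		struct list_head list;
	};

	/* Drain a list whose per-item destructor may drop and re-take the
	 * lock guarding it: never carry an iterator across a removal. */
	static void example_drain(struct list_head *head,
				  void (*destroy)(struct example_item *))
	{
		while (!list_empty(head)) {
			struct example_item *it =
			    list_entry(head->next, struct example_item, list);

			destroy(it);	/* must unlink 'it' from the list */
		}
	}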
@ -36,25 +36,34 @@
#include "drm_hashtab.h"
#include <linux/hash.h>

int drm_ht_create(drm_open_hash_t *ht, unsigned int order)
int drm_ht_create(drm_open_hash_t * ht, unsigned int order)
{
	unsigned int i;

	ht->size = 1 << order;
	ht->order = order;
	ht->fill = 0;
	ht->table = vmalloc(ht->size*sizeof(*ht->table));
	ht->table = NULL;
	ht->use_vmalloc = ((ht->size * sizeof(*ht->table)) > PAGE_SIZE);
	if (!ht->use_vmalloc) {
		ht->table = drm_calloc(ht->size, sizeof(*ht->table),
				       DRM_MEM_HASHTAB);
	}
	if (!ht->table) {
		ht->use_vmalloc = 1;
		ht->table = vmalloc(ht->size * sizeof(*ht->table));
	}
	if (!ht->table) {
		DRM_ERROR("Out of memory for hash table\n");
		return -ENOMEM;
	}
	for (i=0; i< ht->size; ++i) {
	for (i = 0; i < ht->size; ++i) {
		INIT_HLIST_HEAD(&ht->table[i]);
	}
	return 0;
}

void drm_ht_verbose_list(drm_open_hash_t *ht, unsigned long key)
void drm_ht_verbose_list(drm_open_hash_t * ht, unsigned long key)
{
	drm_hash_item_t *entry;
	struct hlist_head *h_list;

@ -71,7 +80,7 @@ void drm_ht_verbose_list(drm_open_hash_t *ht, unsigned long key)
	}
}

static struct hlist_node *drm_ht_find_key(drm_open_hash_t *ht,
static struct hlist_node *drm_ht_find_key(drm_open_hash_t * ht,
					  unsigned long key)
{
	drm_hash_item_t *entry;

@ -91,8 +100,7 @@ static struct hlist_node *drm_ht_find_key(drm_open_hash_t *ht,
	return NULL;
}

int drm_ht_insert_item(drm_open_hash_t *ht, drm_hash_item_t *item)
int drm_ht_insert_item(drm_open_hash_t * ht, drm_hash_item_t * item)
{
	drm_hash_item_t *entry;
	struct hlist_head *h_list;

@ -106,7 +114,7 @@ int drm_ht_insert_item(drm_open_hash_t *ht, drm_hash_item_t *item)
	hlist_for_each(list, h_list) {
		entry = hlist_entry(list, drm_hash_item_t, head);
		if (entry->key == key)
			return -1;
			return -EINVAL;
		if (entry->key > key)
			break;
		parent = list;

@ -123,7 +131,7 @@ int drm_ht_insert_item(drm_open_hash_t *ht, drm_hash_item_t *item)
 * Just insert an item and return any "bits" bit key that hasn't been
 * used before.
 */
int drm_ht_just_insert_please(drm_open_hash_t *ht, drm_hash_item_t *item,
int drm_ht_just_insert_please(drm_open_hash_t * ht, drm_hash_item_t * item,
			      unsigned long seed, int bits, int shift,
			      unsigned long add)
{

@ -138,7 +146,7 @@ int drm_ht_just_insert_please(drm_open_hash_t *ht, drm_hash_item_t *item,
		ret = drm_ht_insert_item(ht, item);
		if (ret)
			unshifted_key = (unshifted_key + 1) & mask;
	} while(ret && (unshifted_key != first));
	} while (ret && (unshifted_key != first));

	if (ret) {
		DRM_ERROR("Available key bit space exhausted\n");

@ -147,20 +155,20 @@ int drm_ht_just_insert_please(drm_open_hash_t *ht, drm_hash_item_t *item,
	return 0;
}

int drm_ht_find_item(drm_open_hash_t *ht, unsigned long key,
		     drm_hash_item_t **item)
int drm_ht_find_item(drm_open_hash_t * ht, unsigned long key,
		     drm_hash_item_t ** item)
{
	struct hlist_node *list;

	list = drm_ht_find_key(ht, key);
	if (!list)
		return -1;
		return -EINVAL;

	*item = hlist_entry(list, drm_hash_item_t, head);
	return 0;
}

int drm_ht_remove_key(drm_open_hash_t *ht, unsigned long key)
int drm_ht_remove_key(drm_open_hash_t * ht, unsigned long key)
{
	struct hlist_node *list;

@ -170,21 +178,24 @@ int drm_ht_remove_key(drm_open_hash_t *ht, unsigned long key)
		ht->fill--;
		return 0;
	}
	return -1;
	return -EINVAL;
}

int drm_ht_remove_item(drm_open_hash_t *ht, drm_hash_item_t *item)
int drm_ht_remove_item(drm_open_hash_t * ht, drm_hash_item_t * item)
{
	hlist_del_init(&item->head);
	ht->fill--;
	return 0;
}

void drm_ht_remove(drm_open_hash_t *ht)
void drm_ht_remove(drm_open_hash_t * ht)
{
	if (ht->table) {
		vfree(ht->table);
		if (ht->use_vmalloc)
			vfree(ht->table);
		else
			drm_free(ht->table, ht->size * sizeof(*ht->table),
				 DRM_MEM_HASHTAB);
		ht->table = NULL;
	}
}
@@ -47,6 +47,7 @@ typedef struct drm_open_hash{
	unsigned int order;
	unsigned int fill;
	struct hlist_head *table;
+	int use_vmalloc;
} drm_open_hash_t;
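The allocation strategy above is worth calling out: bucket arrays of a page or less come from drm_calloc(), with vmalloc() only as a fallback, and use_vmalloc records which free path drm_ht_remove() must take. A minimal, hypothetical usage sketch (the wrapper name and order value are illustrative, not from this commit):

#include "drmP.h"
#include "drm_hashtab.h"

/* Order 4 gives 16 buckets; the array is well under PAGE_SIZE, so
 * drm_ht_create() takes the drm_calloc() path and leaves
 * ht->use_vmalloc == 0. */
static int example_handle_table(drm_open_hash_t *ht, drm_hash_item_t *item,
				unsigned long key)
{
	int ret;

	ret = drm_ht_create(ht, 4);
	if (ret)
		return ret;		/* -ENOMEM if both allocators fail */

	item->key = key;
	ret = drm_ht_insert_item(ht, item);	/* -EINVAL on a duplicate key */
	if (ret)
		drm_ht_remove(ht);
	return ret;
}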
@@ -125,9 +125,10 @@ int drm_setunique(struct inode *inode, struct file *filp,
	domain = bus >> 8;
	bus &= 0xff;

-	if ((domain != dev->pci_domain) ||
-	    (bus != dev->pci_bus) ||
-	    (slot != dev->pci_slot) || (func != dev->pci_func))
+	if ((domain != drm_get_pci_domain(dev)) ||
+	    (bus != dev->pdev->bus->number) ||
+	    (slot != PCI_SLOT(dev->pdev->devfn)) ||
+	    (func != PCI_FUNC(dev->pdev->devfn)))
		return -EINVAL;

	return 0;

@@ -145,7 +146,10 @@ static int drm_set_busid(drm_device_t * dev)
		return ENOMEM;

	len = snprintf(dev->unique, dev->unique_len, "pci:%04x:%02x:%02x.%d",
-		       dev->pci_domain, dev->pci_bus, dev->pci_slot, dev->pci_func);
+		       drm_get_pci_domain(dev),
+		       dev->pdev->bus->number,
+		       PCI_SLOT(dev->pdev->devfn),
+		       PCI_FUNC(dev->pdev->devfn));
	if (len > dev->unique_len)
		DRM_ERROR("buffer overflow");

@@ -238,7 +242,7 @@ int drm_getclient(struct inode *inode, struct file *filp,
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
-	drm_client_t __user *argp = (void __user *)arg;
+	drm_client_t __user *argp = (drm_client_t __user *)arg;
	drm_client_t client;
	drm_file_t *pt;
	int idx;

@@ -325,21 +329,23 @@ int drm_setversion(DRM_IOCTL_ARGS)
	int if_version;
	drm_set_version_t __user *argp = (void __user *)data;

-	DRM_COPY_FROM_USER_IOCTL(sv, argp, sizeof(sv));
+	if (copy_from_user(&sv, argp, sizeof(sv)))
+		return -EFAULT;

	retv.drm_di_major = DRM_IF_MAJOR;
	retv.drm_di_minor = DRM_IF_MINOR;
	retv.drm_dd_major = dev->driver->major;
	retv.drm_dd_minor = dev->driver->minor;

-	DRM_COPY_TO_USER_IOCTL(argp, retv, sizeof(sv));
+	if (copy_to_user(argp, &retv, sizeof(sv)))
+		return -EFAULT;

	if (sv.drm_di_major != -1) {
		if (sv.drm_di_major != DRM_IF_MAJOR ||
		    sv.drm_di_minor < 0 || sv.drm_di_minor > DRM_IF_MINOR)
			return EINVAL;
		if_version = DRM_IF_VERSION(sv.drm_di_major, sv.drm_di_minor);
-		dev->if_version = DRM_MAX(if_version, dev->if_version);
+		dev->if_version = max(if_version, dev->if_version);
		if (sv.drm_di_minor >= 1) {
			/*
			 * Version 1.1 includes tying of DRM to specific device
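For reference, the format string above produces the canonical PCI busid text. A standalone userspace illustration of the same formatting (not kernel code; the values are arbitrary):

#include <stdio.h>

/* Format a DRM busid the way drm_set_busid() above does. Domain 0,
 * bus 1, slot 0, function 0 yields "pci:0000:01:00.0". */
int main(void)
{
	char unique[32];

	snprintf(unique, sizeof(unique), "pci:%04x:%02x:%02x.%d",
		 0 /* domain */, 1 /* bus */, 0 /* slot */, 0 /* func */);
	printf("%s\n", unique);	/* pci:0000:01:00.0 */
	return 0;
}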
@@ -64,9 +64,9 @@ int drm_irq_by_busid(struct inode *inode, struct file *filp,
	if (copy_from_user(&p, argp, sizeof(p)))
		return -EFAULT;

-	if ((p.busnum >> 8) != dev->pci_domain ||
-	    (p.busnum & 0xff) != dev->pci_bus ||
-	    p.devnum != dev->pci_slot || p.funcnum != dev->pci_func)
+	if ((p.busnum >> 8) != drm_get_pci_domain(dev) ||
+	    (p.busnum & 0xff) != dev->pdev->bus->number ||
+	    p.devnum != PCI_SLOT(dev->pdev->devfn) || p.funcnum != PCI_FUNC(dev->pdev->devfn))
		return -EINVAL;

	p.irq = dev->irq;

@@ -118,8 +118,10 @@ static int drm_irq_install(drm_device_t * dev)
		init_waitqueue_head(&dev->vbl_queue);

		spin_lock_init(&dev->vbl_lock);
+		spin_lock_init(&dev->tasklet_lock);

		INIT_LIST_HEAD(&dev->vbl_sigs.head);
+		INIT_LIST_HEAD(&dev->vbl_sigs2.head);

		dev->vbl_pending = 0;
	}

@@ -174,6 +176,8 @@ int drm_irq_uninstall(drm_device_t * dev)

	free_irq(dev->irq, dev);

+	dev->locked_tasklet_func = NULL;
+
	return 0;
}
EXPORT_SYMBOL(drm_irq_uninstall);

@@ -222,12 +226,12 @@ int drm_control(struct inode *inode, struct file *filp,
 * Wait for VBLANK.
 *
 * \param inode device inode.
- * \param filp file pointer.rm.
+ * \param filp file pointer.
 * \param cmd command.
 * \param data user argument, pointing to a drm_wait_vblank structure.
 * \return zero on success or a negative number on failure.
 *
- * Verifies the IRQ is installed
+ * Verifies the IRQ is installed.
 *
 * If a signal is requested checks if this task has already scheduled the same signal
 * for the same vblank sequence number - nothing to be done in

@@ -245,19 +249,34 @@ int drm_wait_vblank(DRM_IOCTL_ARGS)
	drm_wait_vblank_t vblwait;
	struct timeval now;
	int ret = 0;
-	unsigned int flags;
-
-	if (!drm_core_check_feature(dev, DRIVER_IRQ_VBL))
-		return -EINVAL;
+	unsigned int flags, seq;

	if ((!dev->irq) || (!dev->irq_enabled))
		return -EINVAL;

-	DRM_COPY_FROM_USER_IOCTL(vblwait, argp, sizeof(vblwait));
+	if (copy_from_user(&vblwait, argp, sizeof(vblwait)))
+		return -EFAULT;

-	switch (vblwait.request.type & ~_DRM_VBLANK_FLAGS_MASK) {
+	if (vblwait.request.type &
+	    ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) {
+		DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n",
+			  vblwait.request.type,
+			  (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK));
+		return -EINVAL;
+	}
+
+	flags = vblwait.request.type & _DRM_VBLANK_FLAGS_MASK;
+
+	if (!drm_core_check_feature(dev, (flags & _DRM_VBLANK_SECONDARY) ?
+				    DRIVER_IRQ_VBL2 : DRIVER_IRQ_VBL))
+		return -EINVAL;
+
+	seq = atomic_read((flags & _DRM_VBLANK_SECONDARY) ? &dev->vbl_received2
+			  : &dev->vbl_received);
+
+	switch (vblwait.request.type & _DRM_VBLANK_TYPES_MASK) {
	case _DRM_VBLANK_RELATIVE:
-		vblwait.request.sequence += atomic_read(&dev->vbl_received);
+		vblwait.request.sequence += seq;
		vblwait.request.type &= ~_DRM_VBLANK_RELATIVE;
	case _DRM_VBLANK_ABSOLUTE:
		break;

@@ -265,26 +284,30 @@ int drm_wait_vblank(DRM_IOCTL_ARGS)
		return -EINVAL;
	}

-	flags = vblwait.request.type & _DRM_VBLANK_FLAGS_MASK;
+	if ((flags & _DRM_VBLANK_NEXTONMISS) &&
+	    (seq - vblwait.request.sequence) <= (1<<23)) {
+		vblwait.request.sequence = seq + 1;
+	}

	if (flags & _DRM_VBLANK_SIGNAL) {
		unsigned long irqflags;
+		drm_vbl_sig_t *vbl_sigs = (flags & _DRM_VBLANK_SECONDARY)
+				      ? &dev->vbl_sigs2 : &dev->vbl_sigs;
		drm_vbl_sig_t *vbl_sig;

-		vblwait.reply.sequence = atomic_read(&dev->vbl_received);
-
		spin_lock_irqsave(&dev->vbl_lock, irqflags);

		/* Check if this task has already scheduled the same signal
		 * for the same vblank sequence number; nothing to be done in
		 * that case
		 */
-		list_for_each_entry(vbl_sig, &dev->vbl_sigs.head, head) {
+		list_for_each_entry(vbl_sig, &vbl_sigs->head, head) {
			if (vbl_sig->sequence == vblwait.request.sequence
			    && vbl_sig->info.si_signo == vblwait.request.signal
			    && vbl_sig->task == current) {
				spin_unlock_irqrestore(&dev->vbl_lock,
						       irqflags);
+				vblwait.reply.sequence = seq;
				goto done;
			}
		}

@@ -312,11 +335,16 @@ int drm_wait_vblank(DRM_IOCTL_ARGS)

		spin_lock_irqsave(&dev->vbl_lock, irqflags);

-		list_add_tail((struct list_head *)vbl_sig, &dev->vbl_sigs.head);
+		list_add_tail((struct list_head *)vbl_sig, &vbl_sigs->head);

		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+
+		vblwait.reply.sequence = seq;
	} else {
-		if (dev->driver->vblank_wait)
+		if (flags & _DRM_VBLANK_SECONDARY) {
+			if (dev->driver->vblank_wait2)
+				ret = dev->driver->vblank_wait2(dev, &vblwait.request.sequence);
+		} else if (dev->driver->vblank_wait)
			ret =
			    dev->driver->vblank_wait(dev,
						     &vblwait.request.sequence);

@@ -327,7 +355,8 @@ int drm_wait_vblank(DRM_IOCTL_ARGS)
	}

      done:
-	DRM_COPY_TO_USER_IOCTL(argp, vblwait, sizeof(vblwait));
+	if (copy_to_user(argp, &vblwait, sizeof(vblwait)))
+		return -EFAULT;

	return ret;
}
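The `(seq - vblwait.request.sequence) <= (1<<23)` test above is the usual wrap-safe comparison for a free-running 32-bit counter: the unsigned subtraction is taken modulo 2^32, so a request counts as already passed unless it lies more than 2^23 vblanks in the future. A standalone illustration:

#include <stdio.h>

/* With unsigned 32-bit arithmetic, (a - b) <= (1u << 23) means
 * "b is not more than 2^23 counts ahead of a", even across the
 * 2^32 wrap of the counter. */
int main(void)
{
	unsigned int seq = 10;			/* counter just wrapped */
	unsigned int requested = 0xfffffff0u;	/* from before the wrap */

	/* seq - requested == 26 (mod 2^32): the request is treated as
	 * already passed, and NEXTONMISS would bump it to seq + 1. */
	printf("%s\n", (seq - requested) <= (1u << 23) ? "passed" : "pending");
	return 0;
}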
@@ -343,28 +372,109 @@ int drm_wait_vblank(DRM_IOCTL_ARGS)
 */
void drm_vbl_send_signals(drm_device_t * dev)
{
-	struct list_head *list, *tmp;
-	drm_vbl_sig_t *vbl_sig;
-	unsigned int vbl_seq = atomic_read(&dev->vbl_received);
	unsigned long flags;
+	int i;

	spin_lock_irqsave(&dev->vbl_lock, flags);

-	list_for_each_safe(list, tmp, &dev->vbl_sigs.head) {
-		vbl_sig = list_entry(list, drm_vbl_sig_t, head);
-		if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
-			vbl_sig->info.si_code = vbl_seq;
-			send_sig_info(vbl_sig->info.si_signo, &vbl_sig->info,
-				      vbl_sig->task);
+	for (i = 0; i < 2; i++) {
+		struct list_head *list, *tmp;
+		drm_vbl_sig_t *vbl_sig;
+		drm_vbl_sig_t *vbl_sigs = i ? &dev->vbl_sigs2 : &dev->vbl_sigs;
+		unsigned int vbl_seq = atomic_read(i ? &dev->vbl_received2 :
+						   &dev->vbl_received);

-			list_del(list);
+		list_for_each_safe(list, tmp, &vbl_sigs->head) {
+			vbl_sig = list_entry(list, drm_vbl_sig_t, head);
+			if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
+				vbl_sig->info.si_code = vbl_seq;
+				send_sig_info(vbl_sig->info.si_signo,
+					      &vbl_sig->info, vbl_sig->task);

-			drm_free(vbl_sig, sizeof(*vbl_sig), DRM_MEM_DRIVER);
+				list_del(list);

-			dev->vbl_pending--;
+				drm_free(vbl_sig, sizeof(*vbl_sig),
+					 DRM_MEM_DRIVER);
+
+				dev->vbl_pending--;
+			}
		}
	}

	spin_unlock_irqrestore(&dev->vbl_lock, flags);
}
EXPORT_SYMBOL(drm_vbl_send_signals);

+/**
+ * Tasklet wrapper function.
+ *
+ * \param data DRM device in disguise.
+ *
+ * Attempts to grab the HW lock and calls the driver callback on success. On
+ * failure, leave the lock marked as contended so the callback can be called
+ * from drm_unlock().
+ */
+static void drm_locked_tasklet_func(unsigned long data)
+{
+	drm_device_t *dev = (drm_device_t*)data;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev->tasklet_lock, irqflags);
+
+	if (!dev->locked_tasklet_func ||
+	    !drm_lock_take(&dev->lock.hw_lock->lock,
+			   DRM_KERNEL_CONTEXT)) {
+		spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
+		return;
+	}
+
+	dev->lock.lock_time = jiffies;
+	atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
+
+	dev->locked_tasklet_func(dev);
+
+	drm_lock_free(dev, &dev->lock.hw_lock->lock,
+		      DRM_KERNEL_CONTEXT);
+
+	dev->locked_tasklet_func = NULL;
+
+	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
+}
+
+/**
+ * Schedule a tasklet to call back a driver hook with the HW lock held.
+ *
+ * \param dev DRM device.
+ * \param func Driver callback.
+ *
+ * This is intended for triggering actions that require the HW lock from an
+ * interrupt handler. The lock will be grabbed ASAP after the interrupt handler
+ * completes. Note that the callback may be called from interrupt or process
+ * context, it must not make any assumptions about this. Also, the HW lock will
+ * be held with the kernel context or any client context.
+ */
+void drm_locked_tasklet(drm_device_t *dev, void (*func)(drm_device_t*))
+{
+	unsigned long irqflags;
+	static DECLARE_TASKLET(drm_tasklet, drm_locked_tasklet_func, 0);
+
+	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ) ||
+	    test_bit(TASKLET_STATE_SCHED, &drm_tasklet.state))
+		return;
+
+	spin_lock_irqsave(&dev->tasklet_lock, irqflags);
+
+	if (dev->locked_tasklet_func) {
+		spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
+		return;
+	}
+
+	dev->locked_tasklet_func = func;
+
+	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
+
+	drm_tasklet.data = (unsigned long)dev;
+
+	tasklet_hi_schedule(&drm_tasklet);
+}
+EXPORT_SYMBOL(drm_locked_tasklet);
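A hedged sketch of how a driver might use the new hook; the handler and callback names are hypothetical, and the callback must not sleep since it can run in interrupt context:

#include "drmP.h"

/* Hypothetical driver hook: runs with the HW lock held, in either
 * interrupt or process context. */
static void example_flush_ring(drm_device_t *dev)
{
	/* ... touch hardware that requires the heavyweight lock ... */
}

/* Interrupt handler deferring locked work via the interface above. */
static irqreturn_t example_irq_handler(DRM_IRQ_ARGS)
{
	drm_device_t *dev = (drm_device_t *) arg;

	drm_locked_tasklet(dev, example_flush_ring);
	return IRQ_HANDLED;
}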
@@ -35,9 +35,12 @@

#include "drmP.h"

+#if 0
static int drm_lock_transfer(drm_device_t * dev,
			     __volatile__ unsigned int *lock,
			     unsigned int context);
+#endif
+
static int drm_notifier(void *priv);

/**

@@ -104,7 +107,7 @@ int drm_lock(struct inode *inode, struct file *filp,
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&dev->lock.lock_queue, &entry);

-	DRM_DEBUG( "%d %s\n", lock.context, ret ? "interrupted" : "has lock" );
+	DRM_DEBUG( "%d %s\n", lock.context, ret ? "interrupted" : "has lock" );
	if (ret) return ret;

	sigemptyset(&dev->sigmask);

@@ -152,6 +155,7 @@ int drm_unlock(struct inode *inode, struct file *filp,
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_lock_t lock;
+	unsigned long irqflags;

	if (copy_from_user(&lock, (drm_lock_t __user *) arg, sizeof(lock)))
		return -EFAULT;

@@ -162,6 +166,16 @@ int drm_unlock(struct inode *inode, struct file *filp,
		return -EINVAL;
	}

+	spin_lock_irqsave(&dev->tasklet_lock, irqflags);
+
+	if (dev->locked_tasklet_func) {
+		dev->locked_tasklet_func(dev);
+
+		dev->locked_tasklet_func = NULL;
+	}
+
+	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
+
	atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);

	/* kernel_context_switch isn't used by any of the x86 drm

@@ -170,12 +184,9 @@ int drm_unlock(struct inode *inode, struct file *filp,
	if (dev->driver->kernel_context_switch_unlock)
		dev->driver->kernel_context_switch_unlock(dev);
	else {
-		drm_lock_transfer(dev, &dev->lock.hw_lock->lock,
-				  DRM_KERNEL_CONTEXT);
-
		if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
-				  DRM_KERNEL_CONTEXT)) {
-			DRM_ERROR("\n");
+				  lock.context)) {
+			/* FIXME: Should really bail out here. */
		}
	}

@@ -201,7 +212,7 @@ int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
		if (old & _DRM_LOCK_HELD)
			new = old | _DRM_LOCK_CONT;
		else
-			new = context | _DRM_LOCK_HELD;
+			new = context | _DRM_LOCK_HELD | _DRM_LOCK_CONT;
		prev = cmpxchg(lock, old, new);
	} while (prev != old);
	if (_DRM_LOCKING_CONTEXT(old) == context) {

@@ -213,13 +224,14 @@ int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
			return 0;
		}
	}
-	if (new == (context | _DRM_LOCK_HELD)) {
+	if (new == (context | _DRM_LOCK_HELD | _DRM_LOCK_CONT)) {
		/* Have lock */
		return 1;
	}
	return 0;
}

+#if 0
/**
 * This takes a lock forcibly and hands it to context. Should ONLY be used
 * inside *_unlock to give lock to kernel before calling *_dma_schedule.

@@ -246,6 +258,7 @@ static int drm_lock_transfer(drm_device_t * dev,
	} while (prev != old);
	return 1;
}
+#endif

/**
 * Free lock.

@@ -263,12 +276,12 @@ int drm_lock_free(drm_device_t * dev,
{
	unsigned int old, new, prev;

	dev->lock.filp = NULL;
	do {
		old = *lock;
-		new = 0;
+		new = _DRM_LOCKING_CONTEXT(old);
		prev = cmpxchg(lock, old, new);
	} while (prev != old);

	if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
		DRM_ERROR("%d freed heavyweight lock held by %d\n",
			  context, _DRM_LOCKING_CONTEXT(old));

@@ -308,3 +321,66 @@ static int drm_notifier(void *priv)
	} while (prev != old);
	return 0;
}
+
+/*
+ * Can be used by drivers to take the hardware lock if necessary.
+ * (Waiting for idle before reclaiming buffers etc.)
+ */
+
+int drm_i_have_hw_lock(struct file *filp)
+{
+	DRM_DEVICE;
+
+	return (priv->lock_count && dev->lock.hw_lock &&
+		_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
+		dev->lock.filp == filp);
+}
+
+EXPORT_SYMBOL(drm_i_have_hw_lock);
+
+int drm_kernel_take_hw_lock(struct file *filp)
+{
+	DRM_DEVICE;
+
+	int ret = 0;
+	unsigned long _end = jiffies + 3*DRM_HZ;
+
+	if (!drm_i_have_hw_lock(filp)) {
+
+		DECLARE_WAITQUEUE(entry, current);
+
+		add_wait_queue(&dev->lock.lock_queue, &entry);
+		for (;;) {
+			__set_current_state(TASK_INTERRUPTIBLE);
+			if (!dev->lock.hw_lock) {
+				/* Device has been unregistered */
+				ret = -EINTR;
+				break;
+			}
+			if (drm_lock_take(&dev->lock.hw_lock->lock,
+					  DRM_KERNEL_CONTEXT)) {
+				dev->lock.filp = filp;
+				dev->lock.lock_time = jiffies;
+				atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
+				break;	/* Got lock */
+			}
+			/* Contention */
+			if (time_after_eq(jiffies,_end)) {
+				ret = -EBUSY;
+				break;
+			}
+
+			schedule_timeout(1);
+			if (signal_pending(current)) {
+				ret = -ERESTARTSYS;
+				break;
+			}
+		}
+		__set_current_state(TASK_RUNNING);
+		remove_wait_queue(&dev->lock.lock_queue, &entry);
+	}
+	return ret;
+}
+
+EXPORT_SYMBOL(drm_kernel_take_hw_lock);
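The cmpxchg() loop in drm_lock_take() retries until the compare-and-swap installs the new lock word atomically. A userspace illustration of the same logic, substituting the GCC/Clang __sync builtin for the kernel's cmpxchg() (the mask values are stand-ins for the real _DRM_LOCK_* bits):

#include <stdio.h>

#define LOCK_HELD 0x80000000u
#define LOCK_CONT 0x40000000u

/* Retry until the compare-and-swap succeeds; if the lock is already
 * held, only mark it contended. Returns 1 when we got the lock. */
static int lock_take(volatile unsigned int *lock, unsigned int context)
{
	unsigned int old, new;

	do {
		old = *lock;
		if (old & LOCK_HELD)
			new = old | LOCK_CONT;	/* mark contended */
		else
			new = context | LOCK_HELD | LOCK_CONT;
	} while (__sync_val_compare_and_swap(lock, old, new) != old);

	return new == (context | LOCK_HELD | LOCK_CONT);
}

int main(void)
{
	volatile unsigned int lock = 0;

	printf("took lock: %d\n", lock_take(&lock, 1));	/* took lock: 1 */
	printf("took lock: %d\n", lock_take(&lock, 2));	/* took lock: 0 */
	return 0;
}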
@@ -33,10 +33,78 @@
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/config.h>
#include <linux/highmem.h>
#include "drmP.h"

+static struct {
+	spinlock_t lock;
+	drm_u64_t cur_used;
+	drm_u64_t low_threshold;
+	drm_u64_t high_threshold;
+} drm_memctl = {
+	.lock = SPIN_LOCK_UNLOCKED
+};
+
+static inline size_t drm_size_align(size_t size) {
+
+	register size_t tmpSize = 4;
+	if (size > PAGE_SIZE)
+		return PAGE_ALIGN(size);
+
+	while(tmpSize < size)
+		tmpSize <<= 1;
+
+	return (size_t) tmpSize;
+}
+
+int drm_alloc_memctl(size_t size)
+{
+	int ret;
+	unsigned long a_size = drm_size_align(size);
+
+	spin_lock(&drm_memctl.lock);
+	ret = ((drm_memctl.cur_used + a_size) > drm_memctl.high_threshold) ?
+		-ENOMEM : 0;
+	if (!ret)
+		drm_memctl.cur_used += a_size;
+	spin_unlock(&drm_memctl.lock);
+	return ret;
+}
+EXPORT_SYMBOL(drm_alloc_memctl);
+
+void drm_free_memctl(size_t size)
+{
+	unsigned long a_size = drm_size_align(size);
+
+	spin_lock(&drm_memctl.lock);
+	drm_memctl.cur_used -= a_size;
+	spin_unlock(&drm_memctl.lock);
+}
+EXPORT_SYMBOL(drm_free_memctl);
+
+void drm_query_memctl(drm_u64_t *cur_used,
+		      drm_u64_t *low_threshold,
+		      drm_u64_t *high_threshold)
+{
+	spin_lock(&drm_memctl.lock);
+	*cur_used = drm_memctl.cur_used;
+	*low_threshold = drm_memctl.low_threshold;
+	*high_threshold = drm_memctl.high_threshold;
+	spin_unlock(&drm_memctl.lock);
+}
+EXPORT_SYMBOL(drm_query_memctl);
+
+void drm_init_memctl(size_t p_low_threshold,
+		     size_t p_high_threshold)
+{
+	spin_lock(&drm_memctl.lock);
+	drm_memctl.cur_used = 0;
+	drm_memctl.low_threshold = p_low_threshold << PAGE_SHIFT;
+	drm_memctl.high_threshold = p_high_threshold << PAGE_SHIFT;
+	spin_unlock(&drm_memctl.lock);
+}
+
+
#ifndef DEBUG_MEMORY

/** No-op. */
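A hedged sketch of how the accounting helpers pair up in a caller; every successful drm_alloc_memctl() must be balanced by a drm_free_memctl() of the same size (the wrapper names here are hypothetical):

#include "drmP.h"

/* Reserve against the high threshold before allocating; release the
 * accounting again if the real allocation fails. */
static void *example_accounted_alloc(size_t size)
{
	void *p;

	if (drm_alloc_memctl(size))	/* -ENOMEM: over high_threshold */
		return NULL;

	p = drm_alloc(size, DRM_MEM_DRIVER);
	if (!p)
		drm_free_memctl(size);
	return p;
}

static void example_accounted_free(void *p, size_t size)
{
	drm_free(p, size, DRM_MEM_DRIVER);
	drm_free_memctl(size);
}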
@@ -33,7 +33,6 @@
 * OTHER DEALINGS IN THE SOFTWARE.
 */

-#include <linux/config.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include "drmP.h"

@@ -31,7 +31,6 @@
 * OTHER DEALINGS IN THE SOFTWARE.
 */

-#include <linux/config.h>
#include "drmP.h"

#ifdef DEBUG_MEMORY

@@ -31,7 +31,6 @@
 * OTHER DEALINGS IN THE SOFTWARE.
 */

-#include <linux/config.h>
#include "drmP.h"

typedef struct drm_mem_stats {
@@ -42,36 +42,137 @@
 */

#include "drmP.h"
+#include <linux/slab.h>

+unsigned long drm_mm_tail_space(drm_mm_t *mm)
+{
+	struct list_head *tail_node;
+	drm_mm_node_t *entry;
+
+	tail_node = mm->root_node.ml_entry.prev;
+	entry = list_entry(tail_node, drm_mm_node_t, ml_entry);
+	if (!entry->free)
+		return 0;
+
+	return entry->size;
+}
+
+int drm_mm_remove_space_from_tail(drm_mm_t *mm, unsigned long size)
+{
+	struct list_head *tail_node;
+	drm_mm_node_t *entry;
+
+	tail_node = mm->root_node.ml_entry.prev;
+	entry = list_entry(tail_node, drm_mm_node_t, ml_entry);
+	if (!entry->free)
+		return -ENOMEM;
+
+	if (entry->size <= size)
+		return -ENOMEM;
+
+	entry->size -= size;
+	return 0;
+}
+
+
+static int drm_mm_create_tail_node(drm_mm_t *mm,
+				   unsigned long start,
+				   unsigned long size)
+{
+	drm_mm_node_t *child;
+
+	child = (drm_mm_node_t *)
+		drm_ctl_cache_alloc(drm_cache.mm, sizeof(*child),
+				    GFP_KERNEL);
+	if (!child)
+		return -ENOMEM;
+
+	child->free = 1;
+	child->size = size;
+	child->start = start;
+	child->mm = mm;
+
+	list_add_tail(&child->ml_entry, &mm->root_node.ml_entry);
+	list_add_tail(&child->fl_entry, &mm->root_node.fl_entry);
+
+	return 0;
+}
+
+
+int drm_mm_add_space_to_tail(drm_mm_t *mm, unsigned long size)
+{
+	struct list_head *tail_node;
+	drm_mm_node_t *entry;
+
+	tail_node = mm->root_node.ml_entry.prev;
+	entry = list_entry(tail_node, drm_mm_node_t, ml_entry);
+	if (!entry->free) {
+		return drm_mm_create_tail_node(mm, entry->start + entry->size, size);
+	}
+	entry->size += size;
+	return 0;
+}
+
+static drm_mm_node_t *drm_mm_split_at_start(drm_mm_node_t *parent,
+					    unsigned long size)
+{
+	drm_mm_node_t *child;
+
+	child = (drm_mm_node_t *)
+		drm_ctl_cache_alloc(drm_cache.mm, sizeof(*child),
+				    GFP_KERNEL);
+	if (!child)
+		return NULL;
+
+	INIT_LIST_HEAD(&child->fl_entry);
+
+	child->free = 0;
+	child->size = size;
+	child->start = parent->start;
+	child->mm = parent->mm;
+
+	list_add_tail(&child->ml_entry, &parent->ml_entry);
+	INIT_LIST_HEAD(&child->fl_entry);
+
+	parent->size -= size;
+	parent->start += size;
+	return child;
+}
+
+
+
drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent,
				unsigned long size, unsigned alignment)
{

+	drm_mm_node_t *align_splitoff = NULL;
	drm_mm_node_t *child;
+	unsigned tmp = 0;

	if (alignment)
-		size += alignment - 1;
+		tmp = size % alignment;

+	if (tmp) {
+		align_splitoff = drm_mm_split_at_start(parent, alignment - tmp);
+		if (!align_splitoff)
+			return NULL;
+	}

	if (parent->size == size) {
		list_del_init(&parent->fl_entry);
		parent->free = 0;
		return parent;
	} else {
-		child = (drm_mm_node_t *) drm_alloc(sizeof(*child), DRM_MEM_MM);
-		if (!child)
+		child = drm_mm_split_at_start(parent, size);
+		if (!child) {
+			if (align_splitoff)
+				drm_mm_put_block(align_splitoff);
			return NULL;
+		}
-
-		INIT_LIST_HEAD(&child->ml_entry);
-		INIT_LIST_HEAD(&child->fl_entry);
-
-		child->free = 0;
-		child->size = size;
-		child->start = parent->start;
-
-		list_add_tail(&child->ml_entry, &parent->ml_entry);
-		parent->size -= size;
-		parent->start += size;
	}
+	if (align_splitoff)
+		drm_mm_put_block(align_splitoff);

	return child;
}

@@ -80,9 +181,10 @@ drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent,
 * Otherwise add to the free stack.
 */

-void drm_mm_put_block(drm_mm_t * mm, drm_mm_node_t * cur)
+void drm_mm_put_block(drm_mm_node_t * cur)
{

+	drm_mm_t *mm = cur->mm;
	drm_mm_node_t *list_root = &mm->root_node;
	struct list_head *cur_head = &cur->ml_entry;
	struct list_head *root_head = &list_root->ml_entry;

@@ -105,8 +207,9 @@ void drm_mm_put_block(drm_mm_t * mm, drm_mm_node_t * cur)
			prev_node->size += next_node->size;
			list_del(&next_node->ml_entry);
			list_del(&next_node->fl_entry);
-			drm_free(next_node, sizeof(*next_node),
-				 DRM_MEM_MM);
+			drm_ctl_cache_free(drm_cache.mm,
+					   sizeof(*next_node),
+					   next_node);
		} else {
			next_node->size += cur->size;
			next_node->start = cur->start;

@@ -119,7 +222,7 @@ void drm_mm_put_block(drm_mm_t * mm, drm_mm_node_t * cur)
		list_add(&cur->fl_entry, &list_root->fl_entry);
	} else {
		list_del(&cur->ml_entry);
-		drm_free(cur, sizeof(*cur), DRM_MEM_MM);
+		drm_ctl_cache_free(drm_cache.mm, sizeof(*cur), cur);
	}
}

@@ -132,16 +235,23 @@ drm_mm_node_t *drm_mm_search_free(const drm_mm_t * mm,
	drm_mm_node_t *entry;
	drm_mm_node_t *best;
	unsigned long best_size;
+	unsigned wasted;

	best = NULL;
	best_size = ~0UL;

-	if (alignment)
-		size += alignment - 1;
-
	list_for_each(list, free_stack) {
		entry = list_entry(list, drm_mm_node_t, fl_entry);
-		if (entry->size >= size) {
+		wasted = 0;
+
+		if (alignment) {
+			register unsigned tmp = size % alignment;
+			if (tmp)
+				wasted += alignment - tmp;
+		}
+
+
+		if (entry->size >= size + wasted) {
			if (!best_match)
				return entry;
			if (size < best_size) {

@@ -154,27 +264,19 @@ drm_mm_node_t *drm_mm_search_free(const drm_mm_t * mm,
	return best;
}

+int drm_mm_clean(drm_mm_t * mm)
+{
+	struct list_head *head = &mm->root_node.ml_entry;
+
+	return (head->next->next == head);
+}
+
int drm_mm_init(drm_mm_t * mm, unsigned long start, unsigned long size)
{
-	drm_mm_node_t *child;
-
	INIT_LIST_HEAD(&mm->root_node.ml_entry);
	INIT_LIST_HEAD(&mm->root_node.fl_entry);
-	child = (drm_mm_node_t *) drm_alloc(sizeof(*child), DRM_MEM_MM);
-	if (!child)
-		return -ENOMEM;
-
-	INIT_LIST_HEAD(&child->ml_entry);
-	INIT_LIST_HEAD(&child->fl_entry);
-
-	child->start = start;
-	child->size = size;
-	child->free = 1;
-
-	list_add(&child->fl_entry, &mm->root_node.fl_entry);
-	list_add(&child->ml_entry, &mm->root_node.ml_entry);
-
-	return 0;
+	return drm_mm_create_tail_node(mm, start, size);
}

EXPORT_SYMBOL(drm_mm_init);

@@ -194,8 +296,7 @@ void drm_mm_takedown(drm_mm_t * mm)

	list_del(&entry->fl_entry);
	list_del(&entry->ml_entry);
-
-	drm_free(entry, sizeof(*entry), DRM_MEM_MM);
+	drm_ctl_cache_free(drm_cache.mm, sizeof(*entry), entry);
}

EXPORT_SYMBOL(drm_mm_takedown);
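A hedged sketch of the allocator round-trip under the new API, in which drm_mm_put_block() takes only the node because nodes now carry a back pointer to their manager; the range, size, and alignment values are arbitrary:

#include "drmP.h"

static int example_mm_use(drm_mm_t *mm)
{
	drm_mm_node_t *free_node, *block;

	if (drm_mm_init(mm, 0, 1024))		/* manage range [0, 1024) */
		return -ENOMEM;

	/* Find a free node that can hold 256 units at alignment 64. */
	free_node = drm_mm_search_free(mm, 256, 64, 0);
	if (free_node) {
		block = drm_mm_get_block(free_node, 256, 64);
		if (block)
			drm_mm_put_block(block);  /* merges back into free space */
	}

	drm_mm_takedown(mm);
	return 0;
}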
@@ -0,0 +1,287 @@
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 **************************************************************************/

#include "drmP.h"

int drm_add_user_object(drm_file_t * priv, drm_user_object_t * item,
			int shareable)
{
	drm_device_t *dev = priv->head->dev;
	int ret;

	atomic_set(&item->refcount, 1);
	item->shareable = shareable;
	item->owner = priv;

	ret = drm_ht_just_insert_please(&dev->object_hash, &item->hash,
					(unsigned long)item, 32, 0, 0);
	if (ret)
		return ret;

	list_add_tail(&item->list, &priv->user_objects);
	return 0;
}

drm_user_object_t *drm_lookup_user_object(drm_file_t * priv, uint32_t key)
{
	drm_device_t *dev = priv->head->dev;
	drm_hash_item_t *hash;
	int ret;
	drm_user_object_t *item;

	ret = drm_ht_find_item(&dev->object_hash, key, &hash);
	if (ret) {
		return NULL;
	}
	item = drm_hash_entry(hash, drm_user_object_t, hash);

	if (priv != item->owner) {
		drm_open_hash_t *ht = &priv->refd_object_hash[_DRM_REF_USE];
		ret = drm_ht_find_item(ht, (unsigned long)item, &hash);
		if (ret) {
			DRM_ERROR("Object not registered for usage\n");
			return NULL;
		}
	}
	return item;
}

static void drm_deref_user_object(drm_file_t * priv, drm_user_object_t * item)
{
	drm_device_t *dev = priv->head->dev;
	int ret;

	if (atomic_dec_and_test(&item->refcount)) {
		ret = drm_ht_remove_item(&dev->object_hash, &item->hash);
		BUG_ON(ret);
		list_del_init(&item->list);
		item->remove(priv, item);
	}
}

int drm_remove_user_object(drm_file_t * priv, drm_user_object_t * item)
{
	if (item->owner != priv) {
		DRM_ERROR("Cannot destroy object not owned by you.\n");
		return -EINVAL;
	}
	item->owner = 0;
	item->shareable = 0;
	list_del_init(&item->list);
	drm_deref_user_object(priv, item);
	return 0;
}

static int drm_object_ref_action(drm_file_t * priv, drm_user_object_t * ro,
				 drm_ref_t action)
{
	int ret = 0;

	switch (action) {
	case _DRM_REF_USE:
		atomic_inc(&ro->refcount);
		break;
	default:
		if (!ro->ref_struct_locked) {
			break;
		} else {
			ro->ref_struct_locked(priv, ro, action);
		}
	}
	return ret;
}

int drm_add_ref_object(drm_file_t * priv, drm_user_object_t * referenced_object,
		       drm_ref_t ref_action)
{
	int ret = 0;
	drm_ref_object_t *item;
	drm_open_hash_t *ht = &priv->refd_object_hash[ref_action];

	if (!referenced_object->shareable && priv != referenced_object->owner) {
		DRM_ERROR("Not allowed to reference this object\n");
		return -EINVAL;
	}

	/*
	 * If this is not a usage reference, Check that usage has been registered
	 * first. Otherwise strange things may happen on destruction.
	 */

	if ((ref_action != _DRM_REF_USE) && priv != referenced_object->owner) {
		item =
		    drm_lookup_ref_object(priv, referenced_object,
					  _DRM_REF_USE);
		if (!item) {
			DRM_ERROR
			    ("Object not registered for usage by this client\n");
			return -EINVAL;
		}
	}

	if (NULL !=
	    (item =
	     drm_lookup_ref_object(priv, referenced_object, ref_action))) {
		atomic_inc(&item->refcount);
		return drm_object_ref_action(priv, referenced_object,
					     ref_action);
	}

	item = drm_ctl_calloc(1, sizeof(*item), DRM_MEM_OBJECTS);
	if (item == NULL) {
		DRM_ERROR("Could not allocate reference object\n");
		return -ENOMEM;
	}

	atomic_set(&item->refcount, 1);
	item->hash.key = (unsigned long)referenced_object;
	ret = drm_ht_insert_item(ht, &item->hash);
	item->unref_action = ref_action;

	if (ret)
		goto out;

	list_add(&item->list, &priv->refd_objects);
	ret = drm_object_ref_action(priv, referenced_object, ref_action);
      out:
	return ret;
}

drm_ref_object_t *drm_lookup_ref_object(drm_file_t * priv,
					drm_user_object_t * referenced_object,
					drm_ref_t ref_action)
{
	drm_hash_item_t *hash;
	int ret;

	ret = drm_ht_find_item(&priv->refd_object_hash[ref_action],
			       (unsigned long)referenced_object, &hash);
	if (ret)
		return NULL;

	return drm_hash_entry(hash, drm_ref_object_t, hash);
}

static void drm_remove_other_references(drm_file_t * priv,
					drm_user_object_t * ro)
{
	int i;
	drm_open_hash_t *ht;
	drm_hash_item_t *hash;
	drm_ref_object_t *item;

	for (i = _DRM_REF_USE + 1; i < _DRM_NO_REF_TYPES; ++i) {
		ht = &priv->refd_object_hash[i];
		while (!drm_ht_find_item(ht, (unsigned long)ro, &hash)) {
			item = drm_hash_entry(hash, drm_ref_object_t, hash);
			drm_remove_ref_object(priv, item);
		}
	}
}

void drm_remove_ref_object(drm_file_t * priv, drm_ref_object_t * item)
{
	int ret;
	drm_user_object_t *user_object = (drm_user_object_t *) item->hash.key;
	drm_open_hash_t *ht = &priv->refd_object_hash[item->unref_action];
	drm_ref_t unref_action;

	unref_action = item->unref_action;
	if (atomic_dec_and_test(&item->refcount)) {
		ret = drm_ht_remove_item(ht, &item->hash);
		BUG_ON(ret);
		list_del_init(&item->list);
		if (unref_action == _DRM_REF_USE)
			drm_remove_other_references(priv, user_object);
		drm_ctl_free(item, sizeof(*item), DRM_MEM_OBJECTS);
	}

	switch (unref_action) {
	case _DRM_REF_USE:
		drm_deref_user_object(priv, user_object);
		break;
	default:
		BUG_ON(!user_object->unref);
		user_object->unref(priv, user_object, unref_action);
		break;
	}

}

int drm_user_object_ref(drm_file_t * priv, uint32_t user_token,
			drm_object_type_t type, drm_user_object_t ** object)
{
	drm_device_t *dev = priv->head->dev;
	drm_user_object_t *uo;
	int ret;

	mutex_lock(&dev->struct_mutex);
	uo = drm_lookup_user_object(priv, user_token);
	if (!uo || (uo->type != type)) {
		ret = -EINVAL;
		goto out_err;
	}
	ret = drm_add_ref_object(priv, uo, _DRM_REF_USE);
	if (ret)
		goto out_err;
	mutex_unlock(&dev->struct_mutex);
	*object = uo;
	DRM_ERROR("Referenced an object\n");
	return 0;
      out_err:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int drm_user_object_unref(drm_file_t * priv, uint32_t user_token,
			  drm_object_type_t type)
{
	drm_device_t *dev = priv->head->dev;
	drm_user_object_t *uo;
	drm_ref_object_t *ro;
	int ret;

	mutex_lock(&dev->struct_mutex);
	uo = drm_lookup_user_object(priv, user_token);
	if (!uo || (uo->type != type)) {
		ret = -EINVAL;
		goto out_err;
	}
	ro = drm_lookup_ref_object(priv, uo, _DRM_REF_USE);
	if (!ro) {
		ret = -EINVAL;
		goto out_err;
	}
	drm_remove_ref_object(priv, ro);
	mutex_unlock(&dev->struct_mutex);
	DRM_ERROR("Unreferenced an object\n");
	return 0;
      out_err:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
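A hedged sketch of the intended object lifecycle; it assumes dev->struct_mutex is held around the calls, as drm_user_object_ref()/unref() above do internally, and the function and variable names are illustrative:

#include "drmP.h"

static int example_object_flow(drm_file_t *owner, drm_file_t *other,
			       drm_user_object_t *obj)
{
	drm_user_object_t *found;
	int ret;

	ret = drm_add_user_object(owner, obj, 1);	/* shareable */
	if (ret)
		return ret;	/* key bit space exhausted */

	/* A second client must hold a usage reference before lookups of
	 * someone else's object succeed. */
	ret = drm_add_ref_object(other, obj, _DRM_REF_USE);
	if (!ret) {
		/* obj->hash.key is the user token assigned on insert. */
		found = drm_lookup_user_object(other, obj->hash.key);
		/* found == obj here */

		drm_remove_ref_object(other,
				      drm_lookup_ref_object(other, obj,
							    _DRM_REF_USE));
	}

	drm_remove_user_object(owner, obj);
	return ret;
}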
@@ -37,6 +37,7 @@
 */

#include <linux/pci.h>
+#include <linux/dma-mapping.h>
#include "drmP.h"

/**********************************************************************/

@@ -83,11 +84,7 @@ drm_dma_handle_t *drm_pci_alloc(drm_device_t * dev, size_t size, size_t align,
		return NULL;

	dmah->size = size;
-#if 0
-	dmah->vaddr = pci_alloc_consistent(dev->pdev, size, &dmah->busaddr);
-#else
	dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, GFP_KERNEL | __GFP_COMP);
-#endif

#ifdef DRM_DEBUG_MEMORY
	if (dmah->vaddr == NULL) {

@@ -112,14 +109,12 @@ drm_dma_handle_t *drm_pci_alloc(drm_device_t * dev, size_t size, size_t align,

	memset(dmah->vaddr, 0, size);

-#if 1
	/* XXX - Is virt_to_page() legal for consistent mem? */
	/* Reserve */
	for (addr = (unsigned long)dmah->vaddr, sz = size;
	     sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
		SetPageReserved(virt_to_page(addr));
	}
-#endif

	return dmah;
}

@@ -132,10 +127,8 @@ EXPORT_SYMBOL(drm_pci_alloc);
 */
void __drm_pci_free(drm_device_t * dev, drm_dma_handle_t *dmah)
{
-#if 1
	unsigned long addr;
	size_t sz;
-#endif
#ifdef DRM_DEBUG_MEMORY
	int area = DRM_MEM_DMA;
	int alloc_count;

@@ -147,21 +140,14 @@ void __drm_pci_free(drm_device_t * dev, drm_dma_handle_t *dmah)
		DRM_MEM_ERROR(area, "Attempt to free address 0\n");
#endif
	} else {
-#if 1
		/* XXX - Is virt_to_page() legal for consistent mem? */
		/* Unreserve */
		for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
		     sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
			ClearPageReserved(virt_to_page(addr));
		}
-#endif
-#if 0
-		pci_free_consistent(dev->pdev, dmah->size, dmah->vaddr,
-				    dmah->busaddr);
-#else
		dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
				  dmah->busaddr);
-#endif
	}

#ifdef DRM_DEBUG_MEMORY

@@ -181,7 +167,7 @@ void __drm_pci_free(drm_device_t * dev, drm_dma_handle_t *dmah)
}

/**
- * \brief Free a PCI consistent memory block.
+ * \brief Free a PCI consistent memory block
 */
void drm_pci_free(drm_device_t * dev, drm_dma_handle_t *dmah)
{
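dma_alloc_coherent() on the underlying struct device is the generic replacement for the pci_alloc_consistent() wrapper, and __GFP_COMP keeps the multi-page allocation safe to treat as a compound unit. A minimal sketch of the same call pattern (the helper name and 'pdev' are hypothetical):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Allocate a coherent DMA buffer on a bound PCI device; busaddr
 * receives the bus address to program into the hardware. */
static void *example_coherent_alloc(struct pci_dev *pdev, size_t size,
				    dma_addr_t *busaddr)
{
	return dma_alloc_coherent(&pdev->dev, size, busaddr,
				  GFP_KERNEL | __GFP_COMP);
}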
@@ -49,6 +49,8 @@ static int drm_queues_info(char *buf, char **start, off_t offset,
			   int request, int *eof, void *data);
static int drm_bufs_info(char *buf, char **start, off_t offset,
			 int request, int *eof, void *data);
+static int drm_objects_info(char *buf, char **start, off_t offset,
+			    int request, int *eof, void *data);
#if DRM_DEBUG_CODE
static int drm_vma_info(char *buf, char **start, off_t offset,
			int request, int *eof, void *data);

@@ -67,6 +69,7 @@ static struct drm_proc_list {
	{"clients", drm_clients_info},
	{"queues", drm_queues_info},
	{"bufs", drm_bufs_info},
+	{"objects", drm_objects_info},
#if DRM_DEBUG_CODE
	{"vma", drm_vma_info},
#endif

@@ -238,10 +241,11 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request,
			type = "??";
		else
			type = types[map->type];
-		DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08x ",
+		DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ",
			       i,
			       map->offset,
-			       map->size, type, map->flags, r_list->user_token);
+			       map->size, type, map->flags,
+			       (unsigned long) r_list->user_token);

		if (map->mtrr < 0) {
			DRM_PROC_PRINT("none\n");

@@ -258,7 +262,7 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request,
}

/**
- * Simply calls _vm_info() while holding the drm_device::struct_sem lock.
+ * Simply calls _vm_info() while holding the drm_device::struct_mutex lock.
 */
static int drm_vm_info(char *buf, char **start, off_t offset, int request,
		       int *eof, void *data)

@@ -331,7 +335,7 @@ static int drm__queues_info(char *buf, char **start, off_t offset,
}

/**
- * Simply calls _queues_info() while holding the drm_device::struct_sem lock.
+ * Simply calls _queues_info() while holding the drm_device::struct_mutex lock.
 */
static int drm_queues_info(char *buf, char **start, off_t offset, int request,
			   int *eof, void *data)

@@ -403,7 +407,7 @@ static int drm__bufs_info(char *buf, char **start, off_t offset, int request,
}

/**
- * Simply calls _bufs_info() while holding the drm_device::struct_sem lock.
+ * Simply calls _bufs_info() while holding the drm_device::struct_mutex lock.
 */
static int drm_bufs_info(char *buf, char **start, off_t offset, int request,
			 int *eof, void *data)

@@ -417,6 +421,89 @@ static int drm_bufs_info(char *buf, char **start, off_t offset, int request,
	return ret;
}

+/**
+ * Called when "/proc/dri/.../objects" is read.
+ *
+ * \param buf output buffer.
+ * \param start start of output data.
+ * \param offset requested start offset.
+ * \param request requested number of bytes.
+ * \param eof whether there is no more data to return.
+ * \param data private data.
+ * \return number of written bytes.
+ */
+static int drm__objects_info(char *buf, char **start, off_t offset, int request,
+			     int *eof, void *data)
+{
+	drm_device_t *dev = (drm_device_t *) data;
+	int len = 0;
+	drm_buffer_manager_t *bm = &dev->bm;
+	drm_fence_manager_t *fm = &dev->fm;
+	drm_u64_t used_mem;
+	drm_u64_t low_mem;
+	drm_u64_t high_mem;
+
+
+	if (offset > DRM_PROC_LIMIT) {
+		*eof = 1;
+		return 0;
+	}
+
+	*start = &buf[offset];
+	*eof = 0;
+
+	if (fm->initialized) {
+		DRM_PROC_PRINT("Number of active fence objects: %d.\n\n",
+			       atomic_read(&fm->count));
+	} else {
+		DRM_PROC_PRINT("Fence objects are not supported by this driver\n\n");
+	}
+
+	if (bm->initialized) {
+		DRM_PROC_PRINT("Number of active buffer objects: %d.\n\n",
+			       atomic_read(&bm->count));
+		DRM_PROC_PRINT("Number of locked GATT pages: %lu.\n", bm->cur_pages);
+	} else {
+		DRM_PROC_PRINT("Buffer objects are not supported by this driver.\n\n");
+	}
+
+	drm_query_memctl(&used_mem, &low_mem, &high_mem);
+
+	if (used_mem > 16*PAGE_SIZE) {
+		DRM_PROC_PRINT("Used object memory is %lu pages.\n",
+			       (unsigned long) (used_mem >> PAGE_SHIFT));
+	} else {
+		DRM_PROC_PRINT("Used object memory is %lu bytes.\n",
+			       (unsigned long) used_mem);
+	}
+	DRM_PROC_PRINT("Soft object memory usage threshold is %lu pages.\n",
+		       (unsigned long) (low_mem >> PAGE_SHIFT));
+	DRM_PROC_PRINT("Hard object memory usage threshold is %lu pages.\n",
+		       (unsigned long) (high_mem >> PAGE_SHIFT));
+
+	DRM_PROC_PRINT("\n");
+
+	if (len > request + offset)
+		return request;
+	*eof = 1;
+	return len - offset;
+}
+
+/**
+ * Simply calls _objects_info() while holding the drm_device::struct_mutex lock.
+ */
+static int drm_objects_info(char *buf, char **start, off_t offset, int request,
+			    int *eof, void *data)
+{
+	drm_device_t *dev = (drm_device_t *) data;
+	int ret;
+
+	mutex_lock(&dev->struct_mutex);
+	ret = drm__objects_info(buf, start, offset, request, eof, data);
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
/**
 * Called when "/proc/dri/.../clients" is read.
 *

@@ -459,7 +546,7 @@ static int drm__clients_info(char *buf, char **start, off_t offset,
}

/**
- * Simply calls _clients_info() while holding the drm_device::struct_sem lock.
+ * Simply calls _clients_info() while holding the drm_device::struct_mutex lock.
 */
static int drm_clients_info(char *buf, char **start, off_t offset,
			    int request, int *eof, void *data)

@@ -500,7 +587,7 @@ static int drm__vma_info(char *buf, char **start, off_t offset, int request,
	for (pt = dev->vmalist; pt; pt = pt->next) {
		if (!(vma = pt->vma))
			continue;
-		DRM_PROC_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx",
+		DRM_PROC_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
			       pt->pid,
			       vma->vm_start,
			       vma->vm_end,

@@ -510,7 +597,7 @@ static int drm__vma_info(char *buf, char **start, off_t offset, int request,
			       vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
			       vma->vm_flags & VM_LOCKED ? 'l' : '-',
			       vma->vm_flags & VM_IO ? 'i' : '-',
-			       VM_OFFSET(vma));
+			       vma->vm_pgoff);

#if defined(__i386__)
		pgprot = pgprot_val(vma->vm_page_prot);
@@ -31,7 +31,6 @@
 * DEALINGS IN THE SOFTWARE.
 */

-#include <linux/config.h>
#include <linux/vmalloc.h>
#include "drmP.h"
@@ -101,10 +101,9 @@ static void *drm_sman_mm_allocate(void *private, unsigned long size,

static void drm_sman_mm_free(void *private, void *ref)
{
-	drm_mm_t *mm = (drm_mm_t *) private;
	drm_mm_node_t *node = (drm_mm_node_t *) ref;

-	drm_mm_put_block(mm, node);
+	drm_mm_put_block(node);
}

static void drm_sman_mm_destroy(void *private)

@@ -114,7 +113,7 @@ static void drm_sman_mm_destroy(void *private)
	drm_free(mm, sizeof(*mm), DRM_MEM_MM);
}

-unsigned long drm_sman_mm_offset(void *private, void *ref)
+static unsigned long drm_sman_mm_offset(void *private, void *ref)
{
	drm_mm_node_t *node = (drm_mm_node_t *) ref;
	return node->start;
@@ -54,6 +54,11 @@ drm_head_t **drm_heads;
struct drm_sysfs_class *drm_class;
struct proc_dir_entry *drm_proc_root;

+drm_cache_t drm_cache =
+{ .mm = NULL,
+  .fence_object = NULL
+};
+
static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
			   const struct pci_device_id *ent,
			   struct drm_driver *driver)

@@ -61,31 +66,44 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
	int retcode;

	spin_lock_init(&dev->count_lock);
+	spin_lock_init(&dev->drw_lock);
+	spin_lock_init(&dev->tasklet_lock);
	init_timer(&dev->timer);
	mutex_init(&dev->struct_mutex);
	mutex_init(&dev->ctxlist_mutex);
+	mutex_init(&dev->bm.init_mutex);

	dev->pdev = pdev;
	dev->pci_device = pdev->device;
	dev->pci_vendor = pdev->vendor;

#ifdef __alpha__
	dev->hose = pdev->sysdata;
	dev->pci_domain = dev->hose->bus->number;
#else
	dev->pci_domain = 0;
#endif
	dev->pci_bus = pdev->bus->number;
	dev->pci_slot = PCI_SLOT(pdev->devfn);
	dev->pci_func = PCI_FUNC(pdev->devfn);
	dev->irq = pdev->irq;

+	if (drm_ht_create(&dev->map_hash, DRM_MAP_HASH_ORDER)) {
+		drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
+		return -ENOMEM;
+	}
+	if (drm_mm_init(&dev->offset_manager, DRM_FILE_PAGE_OFFSET_START,
+			DRM_FILE_PAGE_OFFSET_SIZE)) {
+		drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
+		drm_ht_remove(&dev->map_hash);
+		return -ENOMEM;
+	}
+
+	if (drm_ht_create(&dev->object_hash, DRM_OBJECT_HASH_ORDER)) {
+		drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
+		drm_ht_remove(&dev->map_hash);
+		drm_mm_takedown(&dev->offset_manager);
+		return -ENOMEM;
+	}
+
	dev->maplist = drm_calloc(1, sizeof(*dev->maplist), DRM_MEM_MAPS);
	if (dev->maplist == NULL)
		return -ENOMEM;
	INIT_LIST_HEAD(&dev->maplist->head);
-	if (drm_ht_create(&dev->map_hash, 12)) {
-		drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
-		return -ENOMEM;
-	}

	/* the DRM has 6 counters */
	dev->counters = 6;

@@ -127,6 +145,7 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
		goto error_out_unreg;
	}

+	drm_fence_manager_init(dev);
	return 0;

      error_out_unreg:

@@ -11,7 +11,6 @@
 *
 */

-#include <linux/config.h>
#include <linux/device.h>
#include <linux/kdev_t.h>
#include <linux/err.h>
@@ -0,0 +1,519 @@
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 **************************************************************************/

#include "drmP.h"

static void drm_ttm_ipi_handler(void *null)
{
	flush_agp_cache();
}

static void drm_ttm_cache_flush(void)
{
	if (on_each_cpu(drm_ttm_ipi_handler, NULL, 1, 1) != 0)
		DRM_ERROR("Timed out waiting for drm cache flush.\n");
}


/*
 * Use kmalloc if possible. Otherwise fall back to vmalloc.
 */

static void *ttm_alloc(unsigned long size, int type)
{
	void *ret = NULL;

	if (drm_alloc_memctl(size))
		return NULL;
	if (size <= PAGE_SIZE) {
		ret = drm_alloc(size, type);
	}
	if (!ret) {
		ret = vmalloc(size);
	}
	if (!ret) {
		drm_free_memctl(size);
	}
	return ret;
}

static void ttm_free(void *pointer, unsigned long size, int type)
{

	if ((unsigned long)pointer >= VMALLOC_START &&
	    (unsigned long)pointer <= VMALLOC_END) {
		vfree(pointer);
	} else {
		drm_free(pointer, size, type);
	}
	drm_free_memctl(size);
}
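Note the contract between ttm_alloc() and ttm_free(): the caller never records which allocator was used, because ttm_free() re-derives it from the pointer, vmalloc() addresses falling inside [VMALLOC_START, VMALLOC_END]. A hedged within-file sketch of the pairing (the function is illustrative, not part of the commit):

#include "drmP.h"

/* Allocate and release a page-pointer table with the helpers above;
 * the same size must be passed to ttm_free() so the memctl accounting
 * and the slab/vmalloc choice stay consistent. */
static int example_ttm_table(unsigned long npages)
{
	void *table = ttm_alloc(npages * sizeof(struct page *), DRM_MEM_TTM);

	if (!table)
		return -ENOMEM;	/* memctl threshold or allocator failure */

	/* ... use table ... */

	ttm_free(table, npages * sizeof(struct page *), DRM_MEM_TTM);
	return 0;
}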
/*
 * Unmap all vma pages from vmas mapping this ttm.
 */

static int unmap_vma_pages(drm_ttm_t * ttm)
{
	drm_device_t *dev = ttm->dev;
	loff_t offset = ((loff_t) ttm->mapping_offset) << PAGE_SHIFT;
	loff_t holelen = ((loff_t) ttm->num_pages) << PAGE_SHIFT;

#ifdef DRM_ODD_MM_COMPAT
	int ret;
	ret = drm_ttm_lock_mm(ttm);
	if (ret)
		return ret;
#endif
	unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
#ifdef DRM_ODD_MM_COMPAT
	drm_ttm_finish_unmap(ttm);
#endif
	return 0;
}

/*
 * Change caching policy for the linear kernel map
 * for range of pages in a ttm.
 */

static int drm_set_caching(drm_ttm_t * ttm, int noncached)
{
	int i;
	struct page **cur_page;
	int do_tlbflush = 0;

	if ((ttm->page_flags & DRM_TTM_PAGE_UNCACHED) == noncached)
		return 0;

	if (noncached)
		drm_ttm_cache_flush();

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages + i;
		if (*cur_page) {
			if (!PageHighMem(*cur_page)) {
				if (noncached) {
					map_page_into_agp(*cur_page);
				} else {
					unmap_page_from_agp(*cur_page);
				}
				do_tlbflush = 1;
			}
		}
	}
	if (do_tlbflush)
		flush_agp_mappings();

	DRM_MASK_VAL(ttm->page_flags, DRM_TTM_PAGE_UNCACHED, noncached);

	return 0;
}

/*
 * Free all resources associated with a ttm.
 */

int drm_destroy_ttm(drm_ttm_t * ttm)
{

	int i;
	struct page **cur_page;
	drm_ttm_backend_t *be;

	if (!ttm)
		return 0;

	if (atomic_read(&ttm->vma_count) > 0) {
		ttm->destroy = 1;
		DRM_ERROR("VMAs are still alive. Skipping destruction.\n");
		return -EBUSY;
	}

	DRM_DEBUG("Destroying a ttm\n");

#ifdef DRM_TTM_ODD_COMPAT
	BUG_ON(!list_empty(&ttm->vma_list));
	BUG_ON(!list_empty(&ttm->p_mm_list));
#endif
	be = ttm->be;
	if (be) {
		be->destroy(be);
		ttm->be = NULL;
	}

	if (ttm->pages) {
		drm_buffer_manager_t *bm = &ttm->dev->bm;
		if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED)
			drm_set_caching(ttm, 0);

		for (i = 0; i < ttm->num_pages; ++i) {
			cur_page = ttm->pages + i;
			if (*cur_page) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
				unlock_page(*cur_page);
#else
				ClearPageReserved(*cur_page);
#endif
				if (page_count(*cur_page) != 1) {
					DRM_ERROR("Erroneous page count. "
						  "Leaking pages.\n");
				}
				if (page_mapped(*cur_page)) {
					DRM_ERROR("Erroneous map count. "
						  "Leaking page mappings.\n");
				}

				/*
				 * End debugging.
				 */

				drm_free_gatt_pages(*cur_page, 0);
				drm_free_memctl(PAGE_SIZE);
				--bm->cur_pages;
			}
		}
		ttm_free(ttm->pages, ttm->num_pages * sizeof(*ttm->pages),
			 DRM_MEM_TTM);
		ttm->pages = NULL;
	}

	drm_ctl_free(ttm, sizeof(*ttm), DRM_MEM_TTM);
	return 0;
}

static int drm_ttm_populate(drm_ttm_t * ttm)
{
	struct page *page;
	unsigned long i;
	drm_buffer_manager_t *bm;
	drm_ttm_backend_t *be;

	if (ttm->state != ttm_unpopulated)
		return 0;

	bm = &ttm->dev->bm;
	be = ttm->be;
	for (i = 0; i < ttm->num_pages; ++i) {
		page = ttm->pages[i];
		if (!page) {
			if (drm_alloc_memctl(PAGE_SIZE)) {
				return -ENOMEM;
			}
			page = drm_alloc_gatt_pages(0);
			if (!page) {
				drm_free_memctl(PAGE_SIZE);
				return -ENOMEM;
			}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
			SetPageLocked(page);
#else
			SetPageReserved(page);
#endif
			ttm->pages[i] = page;
			++bm->cur_pages;
		}
	}
	be->populate(be, ttm->num_pages, ttm->pages);
	ttm->state = ttm_unbound;
	return 0;
}

/*
 * Initialize a ttm.
 */

static drm_ttm_t *drm_init_ttm(struct drm_device *dev, unsigned long size)
{
	drm_bo_driver_t *bo_driver = dev->driver->bo_driver;
	drm_ttm_t *ttm;

	if (!bo_driver)
		return NULL;

	ttm = drm_ctl_calloc(1, sizeof(*ttm), DRM_MEM_TTM);
	if (!ttm)
		return NULL;

#ifdef DRM_ODD_MM_COMPAT
	INIT_LIST_HEAD(&ttm->p_mm_list);
	INIT_LIST_HEAD(&ttm->vma_list);
#endif

	ttm->dev = dev;
	atomic_set(&ttm->vma_count, 0);

	ttm->destroy = 0;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	ttm->page_flags = 0;

	/*
	 * Account also for AGP module memory usage.
	 */

	ttm->pages = ttm_alloc(ttm->num_pages * sizeof(*ttm->pages),
			       DRM_MEM_TTM);
	if (!ttm->pages) {
		drm_destroy_ttm(ttm);
		DRM_ERROR("Failed allocating page table\n");
		return NULL;
	}
	memset(ttm->pages, 0, ttm->num_pages * sizeof(*ttm->pages));
	ttm->be = bo_driver->create_ttm_backend_entry(dev);
	if (!ttm->be) {
		drm_destroy_ttm(ttm);
		DRM_ERROR("Failed creating ttm backend entry\n");
		return NULL;
	}
	ttm->state = ttm_unpopulated;
	return ttm;
}

/*
 * Unbind a ttm region from the aperture.
 */

int drm_evict_ttm(drm_ttm_t * ttm)
{
	drm_ttm_backend_t *be = ttm->be;
	int ret;

	switch (ttm->state) {
	case ttm_bound:
		if (be->needs_ub_cache_adjust(be)) {
			ret = unmap_vma_pages(ttm);
			if (ret) {
				return ret;
			}
		}
		be->unbind(be);
		break;
	default:
		break;
	}
	ttm->state = ttm_evicted;
	return 0;
}

void drm_fixup_ttm_caching(drm_ttm_t * ttm)
{

	if (ttm->state == ttm_evicted) {
		drm_ttm_backend_t *be = ttm->be;
		if (be->needs_ub_cache_adjust(be)) {
			drm_set_caching(ttm, 0);
		}
		ttm->state = ttm_unbound;
	}
}

int drm_unbind_ttm(drm_ttm_t * ttm)
{
	int ret = 0;

	if (ttm->state == ttm_bound)
		ret = drm_evict_ttm(ttm);

	if (ret)
		return ret;

	drm_fixup_ttm_caching(ttm);
	return 0;
}

int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset)
{

	int ret = 0;
	drm_ttm_backend_t *be;

	if (!ttm)
		return -EINVAL;
	if (ttm->state == ttm_bound)
		return 0;

	be = ttm->be;

	ret = drm_ttm_populate(ttm);
|
||||
if (ret)
|
||||
return ret;
|
||||
if (ttm->state == ttm_unbound && !cached) {
|
||||
ret = unmap_vma_pages(ttm);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
drm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED);
|
||||
}
|
||||
#ifdef DRM_ODD_MM_COMPAT
|
||||
else if (ttm->state == ttm_evicted && !cached) {
|
||||
ret = drm_ttm_lock_mm(ttm);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
#endif
|
||||
if ((ret = be->bind(be, aper_offset, cached))) {
|
||||
ttm->state = ttm_evicted;
|
||||
#ifdef DRM_ODD_MM_COMPAT
|
||||
if (be->needs_ub_cache_adjust(be))
|
||||
drm_ttm_unlock_mm(ttm);
|
||||
#endif
|
||||
DRM_ERROR("Couldn't bind backend.\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
ttm->aper_offset = aper_offset;
|
||||
ttm->state = ttm_bound;
|
||||
|
||||
#ifdef DRM_ODD_MM_COMPAT
|
||||
if (be->needs_ub_cache_adjust(be)) {
|
||||
ret = drm_ttm_remap_bound(ttm);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* dev->struct_mutex locked.
|
||||
*/
|
||||
static void drm_ttm_object_remove(drm_device_t * dev, drm_ttm_object_t * object)
|
||||
{
|
||||
drm_map_list_t *list = &object->map_list;
|
||||
drm_local_map_t *map;
|
||||
|
||||
if (list->user_token)
|
||||
drm_ht_remove_item(&dev->map_hash, &list->hash);
|
||||
|
||||
if (list->file_offset_node) {
|
||||
drm_mm_put_block(list->file_offset_node);
|
||||
list->file_offset_node = NULL;
|
||||
}
|
||||
|
||||
map = list->map;
|
||||
|
||||
if (map) {
|
||||
drm_ttm_t *ttm = (drm_ttm_t *) map->offset;
|
||||
if (ttm) {
|
||||
if (drm_destroy_ttm(ttm) != -EBUSY) {
|
||||
drm_ctl_free(map, sizeof(*map), DRM_MEM_TTM);
|
||||
}
|
||||
} else {
|
||||
drm_ctl_free(map, sizeof(*map), DRM_MEM_TTM);
|
||||
}
|
||||
}
|
||||
|
||||
drm_ctl_free(object, sizeof(*object), DRM_MEM_TTM);
|
||||
}
|
||||
|
||||
void drm_ttm_object_deref_locked(drm_device_t * dev, drm_ttm_object_t * to)
|
||||
{
|
||||
if (atomic_dec_and_test(&to->usage)) {
|
||||
drm_ttm_object_remove(dev, to);
|
||||
}
|
||||
}
|
||||
|
||||
void drm_ttm_object_deref_unlocked(drm_device_t * dev, drm_ttm_object_t * to)
|
||||
{
|
||||
if (atomic_dec_and_test(&to->usage)) {
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
if (atomic_read(&to->usage) == 0)
|
||||
drm_ttm_object_remove(dev, to);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Create a ttm and add it to the drm book-keeping.
|
||||
* dev->struct_mutex locked.
|
||||
*/
|
||||
|
||||
int drm_ttm_object_create(drm_device_t * dev, unsigned long size,
|
||||
uint32_t flags, drm_ttm_object_t ** ttm_object)
|
||||
{
|
||||
drm_ttm_object_t *object;
|
||||
drm_map_list_t *list;
|
||||
drm_local_map_t *map;
|
||||
drm_ttm_t *ttm;
|
||||
|
||||
object = drm_ctl_calloc(1, sizeof(*object), DRM_MEM_TTM);
|
||||
if (!object)
|
||||
return -ENOMEM;
|
||||
object->flags = flags;
|
||||
list = &object->map_list;
|
||||
|
||||
list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_TTM);
|
||||
if (!list->map) {
|
||||
drm_ttm_object_remove(dev, object);
|
||||
return -ENOMEM;
|
||||
}
|
||||
map = list->map;
|
||||
|
||||
ttm = drm_init_ttm(dev, size);
|
||||
if (!ttm) {
|
||||
DRM_ERROR("Could not create ttm\n");
|
||||
drm_ttm_object_remove(dev, object);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
map->offset = (unsigned long)ttm;
|
||||
map->type = _DRM_TTM;
|
||||
map->flags = _DRM_REMOVABLE;
|
||||
map->size = ttm->num_pages * PAGE_SIZE;
|
||||
map->handle = (void *)object;
|
||||
|
||||
/*
|
||||
* Add a one-page "hole" to the block size to avoid the mm subsystem
|
||||
* merging vmas.
|
||||
* FIXME: Is this really needed?
|
||||
*/
|
||||
|
||||
list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
|
||||
ttm->num_pages + 1, 0, 0);
|
||||
if (!list->file_offset_node) {
|
||||
drm_ttm_object_remove(dev, object);
|
||||
return -ENOMEM;
|
||||
}
|
||||
list->file_offset_node = drm_mm_get_block(list->file_offset_node,
|
||||
ttm->num_pages + 1, 0);
|
||||
|
||||
list->hash.key = list->file_offset_node->start;
|
||||
|
||||
if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
|
||||
drm_ttm_object_remove(dev, object);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
list->user_token = ((drm_u64_t) list->hash.key) << PAGE_SHIFT;
|
||||
ttm->mapping_offset = list->hash.key;
|
||||
atomic_set(&object->usage, 1);
|
||||
*ttm_object = object;
|
||||
return 0;
|
||||
}
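/*
 * Illustrative sketch (not part of this commit): a driver-side walk through
 * the lifecycle implemented above. The device pointer, buffer size and
 * aperture page offset are hypothetical, and error handling is abbreviated.
 *
 *	drm_ttm_object_t *to;
 *	drm_ttm_t *ttm;
 *
 *	if (drm_ttm_object_create(dev, 64 * 1024, 0, &to))	(16 pages)
 *		return -ENOMEM;
 *	ttm = drm_ttm_from_object(to);
 *	if (drm_bind_ttm(ttm, 0, aper_page_offset) == 0) {	(uncached bind)
 *		... use the bound region ...
 *		drm_unbind_ttm(ttm);
 *	}
 *	drm_ttm_object_deref_unlocked(dev, to);		(drops the last ref)
 */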

@ -0,0 +1,145 @@
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#ifndef _DRM_TTM_H
#define _DRM_TTM_H
#define DRM_HAS_TTM

/*
 * The backend GART interface (in our case AGP). Any similar type of device
 * (PCIE?) needs only to implement these functions to be usable with the TTM
 * interface. The AGP backend implementation lives in drm_agpsupport.c and
 * basically maps these calls to the available functions in agpgart. Each drm
 * device driver gets an additional function pointer that creates these
 * types, so that the device can choose the correct aperture (multiple AGP
 * apertures, etc.). Most device drivers will let this point to the standard
 * AGP implementation.
 */

#define DRM_BE_FLAG_NEEDS_FREE   0x00000001
#define DRM_BE_FLAG_BOUND_CACHED 0x00000002
#define DRM_BE_FLAG_CBA          0x00000004

typedef struct drm_ttm_backend {
	unsigned long aperture_base;
	void *private;
	uint32_t flags;
	uint32_t drm_map_type;
	int (*needs_ub_cache_adjust) (struct drm_ttm_backend * backend);
	int (*populate) (struct drm_ttm_backend * backend,
			 unsigned long num_pages, struct page ** pages);
	void (*clear) (struct drm_ttm_backend * backend);
	int (*bind) (struct drm_ttm_backend * backend,
		     unsigned long offset, int cached);
	int (*unbind) (struct drm_ttm_backend * backend);
	void (*destroy) (struct drm_ttm_backend * backend);
} drm_ttm_backend_t;
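/*
 * Illustrative sketch (not part of this commit): how a driver could wire up
 * a backend. The my_* functions are hypothetical device-specific
 * implementations of the hooks above; a real driver hands such an object
 * back from its create_ttm_backend_entry() hook.
 *
 *	static drm_ttm_backend_t my_backend = {
 *		.aperture_base = 0,
 *		.flags = DRM_BE_FLAG_NEEDS_FREE,
 *		.drm_map_type = _DRM_AGP,
 *		.needs_ub_cache_adjust = my_needs_ub_cache_adjust,
 *		.populate = my_populate,
 *		.clear = my_clear,
 *		.bind = my_bind,
 *		.unbind = my_unbind,
 *		.destroy = my_destroy,
 *	};
 */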

typedef struct drm_ttm {
	struct page **pages;
	uint32_t page_flags;
	unsigned long num_pages;
	unsigned long aper_offset;
	atomic_t vma_count;
	struct drm_device *dev;
	int destroy;
	uint32_t mapping_offset;
	drm_ttm_backend_t *be;
	enum {
		ttm_bound,
		ttm_evicted,
		ttm_unbound,
		ttm_unpopulated,
	} state;
#ifdef DRM_ODD_MM_COMPAT
	struct list_head vma_list;
	struct list_head p_mm_list;
#endif

} drm_ttm_t;

typedef struct drm_ttm_object {
	atomic_t usage;
	uint32_t flags;
	drm_map_list_t map_list;
} drm_ttm_object_t;

extern int drm_ttm_object_create(struct drm_device *dev, unsigned long size,
				 uint32_t flags,
				 drm_ttm_object_t ** ttm_object);
extern void drm_ttm_object_deref_locked(struct drm_device *dev,
					drm_ttm_object_t * to);
extern void drm_ttm_object_deref_unlocked(struct drm_device *dev,
					  drm_ttm_object_t * to);
extern drm_ttm_object_t *drm_lookup_ttm_object(drm_file_t * priv,
					       uint32_t handle,
					       int check_owner);
extern int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset);

extern int drm_unbind_ttm(drm_ttm_t * ttm);

/*
 * Evict a ttm region. Keeps aperture caching policy.
 */

extern int drm_evict_ttm(drm_ttm_t * ttm);
extern void drm_fixup_ttm_caching(drm_ttm_t * ttm);

/*
 * Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do
 * this, which calls this function iff there are no vmas referencing it
 * anymore. Otherwise it is called when the last vma exits.
 */

extern int drm_destroy_ttm(drm_ttm_t * ttm);
extern int drm_ttm_ioctl(DRM_IOCTL_ARGS);

static __inline__ drm_ttm_t *drm_ttm_from_object(drm_ttm_object_t * to)
{
	return (drm_ttm_t *) to->map_list.map->offset;
}

#define DRM_MASK_VAL(dest, mask, val) \
	(dest) = ((dest) & ~(mask)) | ((val) & (mask));

#define DRM_TTM_MASK_FLAGS ((1 << PAGE_SHIFT) - 1)
#define DRM_TTM_MASK_PFN (0xFFFFFFFFU - DRM_TTM_MASK_FLAGS)
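/*
 * Illustrative note (not part of this commit): DRM_MASK_VAL replaces only
 * the masked bits of `dest` with the corresponding bits of `val`:
 *
 *	uint32_t v = 0x19;
 *	DRM_MASK_VAL(v, 0x0F, 0x02);	(v is now 0x12)
 *
 * This is how drm_set_caching() toggles DRM_TTM_PAGE_UNCACHED without
 * disturbing the other page flags.
 */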

/*
 * Page flags.
 */

#define DRM_TTM_PAGE_UNCACHED 0x01
#define DRM_TTM_PAGE_USED     0x02
#define DRM_TTM_PAGE_BOUND    0x04
#define DRM_TTM_PAGE_PRESENT  0x08

#endif

@ -34,12 +34,42 @@
 */

#include "drmP.h"

#if defined(__ia64__)
#include <linux/efi.h>
#endif

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);
static void drm_vm_ttm_close(struct vm_area_struct *vma);
static int drm_vm_ttm_open(struct vm_area_struct *vma);
static void drm_vm_ttm_open_wrapper(struct vm_area_struct *vma);


pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__i386__) || defined(__x86_64__)
	if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
		pgprot_val(tmp) |= _PAGE_PCD;
		pgprot_val(tmp) &= ~_PAGE_PWT;
	}
#elif defined(__powerpc__)
	pgprot_val(tmp) |= _PAGE_NO_CACHE;
	if (map_type == _DRM_REGISTERS)
		pgprot_val(tmp) |= _PAGE_GUARDED;
#endif
#if defined(__ia64__)
	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
			    vma->vm_start))
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}


/**
 * \c nopage method for AGP virtual memory.

@ -59,7 +89,7 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
	drm_device_t *dev = priv->head->dev;
	drm_map_t *map = NULL;
	drm_map_list_t *r_list;
	drm_hash_item_t *hash;

	/*
	 * Find the right map

@ -70,10 +100,10 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
	if (!dev->agp || !dev->agp->cant_use_aperture)
		goto vm_nopage_error;

	if (drm_ht_find_item(&dev->map_hash, VM_OFFSET(vma), &hash))
		goto vm_nopage_error;
	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
		goto vm_nopage_error;

	r_list = drm_hash_entry(hash, drm_map_list_t, hash);
	map = r_list->map;

	if (map && map->type == _DRM_AGP) {

@ -129,6 +159,95 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
}
#endif /* __OS_HAS_AGP */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) || \
     LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
static
#endif
struct page *drm_vm_ttm_fault(struct vm_area_struct *vma,
			      struct fault_data *data)
{
	unsigned long address = data->address;
	drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data;
	unsigned long page_offset;
	struct page *page;
	drm_ttm_t *ttm;
	drm_buffer_manager_t *bm;
	drm_device_t *dev;
	unsigned long pfn;
	int err;
	pgprot_t pgprot;

	if (!map) {
		data->type = VM_FAULT_OOM;
		return NULL;
	}

	if (address > vma->vm_end) {
		data->type = VM_FAULT_SIGBUS;
		return NULL;
	}

	ttm = (drm_ttm_t *) map->offset;

	dev = ttm->dev;

	/*
	 * Perhaps retry here?
	 */

	mutex_lock(&dev->struct_mutex);
	drm_fixup_ttm_caching(ttm);

	bm = &dev->bm;
	page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
	page = ttm->pages[page_offset];

	if (!page) {
		if (drm_alloc_memctl(PAGE_SIZE)) {
			data->type = VM_FAULT_OOM;
			goto out;
		}
		page = ttm->pages[page_offset] = drm_alloc_gatt_pages(0);
		if (!page) {
			drm_free_memctl(PAGE_SIZE);
			data->type = VM_FAULT_OOM;
			goto out;
		}
		++bm->cur_pages;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
		SetPageLocked(page);
#else
		SetPageReserved(page);
#endif
	}

	if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED) {

		/*
		 * FIXME: Check can't map aperture flag.
		 */

		pfn = ttm->aper_offset + page_offset +
		      (ttm->be->aperture_base >> PAGE_SHIFT);
		pgprot = drm_io_prot(ttm->be->drm_map_type, vma);
	} else {
		pfn = page_to_pfn(page);
		pgprot = vma->vm_page_prot;
	}

	err = vm_insert_pfn(vma, address, pfn, pgprot);

	if (!err || err == -EBUSY)
		data->type = VM_FAULT_MINOR;
	else
		data->type = VM_FAULT_OOM;
out:
	mutex_unlock(&dev->struct_mutex);
	return NULL;
}
#endif

/**
 * \c nopage method for shared virtual memory.
 *

@ -198,7 +317,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
		} else {
			dev->vmalist = pt->next;
		}
		drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
		drm_ctl_free(pt, sizeof(*pt), DRM_MEM_VMAS);
	} else {
		prev = pt;
	}

@ -243,6 +362,9 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
			dmah.size = map->size;
			__drm_pci_free(dev, &dmah);
			break;
		case _DRM_TTM:
			BUG_ON(1);
			break;
		}
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
	}

@ -358,6 +480,7 @@ static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
	return drm_do_vm_sg_nopage(vma, address);
}


#else /* LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,0) */

static struct page *drm_vm_nopage(struct vm_area_struct *vma,

@ -414,6 +537,20 @@ static struct vm_operations_struct drm_vm_sg_ops = {
	.close = drm_vm_close,
};

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
static struct vm_operations_struct drm_vm_ttm_ops = {
	.nopage = drm_vm_ttm_nopage,
	.open = drm_vm_ttm_open_wrapper,
	.close = drm_vm_ttm_close,
};
#else
static struct vm_operations_struct drm_vm_ttm_ops = {
	.fault = drm_vm_ttm_fault,
	.open = drm_vm_ttm_open_wrapper,
	.close = drm_vm_ttm_close,
};
#endif

/**
 * \c open method for shared virtual memory.
 *

@ -432,7 +569,7 @@ static void drm_vm_open(struct vm_area_struct *vma)
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_inc(&dev->vma_count);

	vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
	vma_entry = drm_ctl_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
	if (vma_entry) {
		mutex_lock(&dev->struct_mutex);
		vma_entry->vma = vma;

@ -443,6 +580,29 @@ static void drm_vm_open(struct vm_area_struct *vma)
	}
}

static int drm_vm_ttm_open(struct vm_area_struct *vma) {

	drm_local_map_t *map = (drm_local_map_t *)vma->vm_private_data;
	drm_ttm_t *ttm;
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->head->dev;

	drm_vm_open(vma);
	mutex_lock(&dev->struct_mutex);
	ttm = (drm_ttm_t *) map->offset;
	atomic_inc(&ttm->vma_count);
#ifdef DRM_ODD_MM_COMPAT
	drm_ttm_add_vma(ttm, vma);
#endif
	mutex_unlock(&dev->struct_mutex);
	return 0;
}

static void drm_vm_ttm_open_wrapper(struct vm_area_struct *vma)
{
	drm_vm_ttm_open(vma);
}

/**
 * \c close method for all virtual memory types.
 *

@ -469,13 +629,42 @@ static void drm_vm_close(struct vm_area_struct *vma)
			} else {
				dev->vmalist = pt->next;
			}
			drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
			drm_ctl_free(pt, sizeof(*pt), DRM_MEM_VMAS);
			break;
		}
	}
	mutex_unlock(&dev->struct_mutex);
}


static void drm_vm_ttm_close(struct vm_area_struct *vma)
{
	drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data;
	drm_ttm_t *ttm;
	drm_device_t *dev;
	int ret;

	drm_vm_close(vma);
	if (map) {
		ttm = (drm_ttm_t *) map->offset;
		dev = ttm->dev;
		mutex_lock(&dev->struct_mutex);
#ifdef DRM_ODD_MM_COMPAT
		drm_ttm_delete_vma(ttm, vma);
#endif
		if (atomic_dec_and_test(&ttm->vma_count)) {
			if (ttm->destroy) {
				ret = drm_destroy_ttm(ttm);
				BUG_ON(ret);
				drm_ctl_free(map, sizeof(*map), DRM_MEM_TTM);
			}
		}
		mutex_unlock(&dev->struct_mutex);
	}
	return;
}


/**
 * mmap DMA memory.
 *

@ -496,8 +685,8 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
	lock_kernel();
	dev = priv->head->dev;
	dma = dev->dma;
	DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, VM_OFFSET(vma));
	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	/* Length must match exact page count */
	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {

@ -506,6 +695,22 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
	}
	unlock_kernel();

	if (!capable(CAP_SYS_ADMIN) &&
	    (dma->flags & _DRM_DMA_USE_PCI_RO)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	vma->vm_ops = &drm_vm_dma_ops;

#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */

@ -554,10 +759,10 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
	drm_device_t *dev = priv->head->dev;
	drm_map_t *map = NULL;
	unsigned long offset = 0;
	drm_hash_item_t *hash;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, VM_OFFSET(vma));
	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	if (!priv->authenticated)
		return -EACCES;

@ -566,7 +771,7 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
	 * the AGP mapped at physical address 0
	 * --BenH.
	 */
	if (!VM_OFFSET(vma)
	if (!vma->vm_pgoff
#if __OS_HAS_AGP
	    && (!dev->agp
		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)

@ -574,11 +779,11 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
	    )
		return drm_mmap_dma(filp, vma);

	if (drm_ht_find_item(&dev->map_hash, VM_OFFSET(vma), &hash)) {
		DRM_ERROR("Could not find map\n");
	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
		DRM_ERROR("Could not find map\n");
		return -EINVAL;
	}

	map = drm_hash_entry(hash, drm_map_list_t, hash)->map;

	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))

@ -620,27 +825,9 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
		/* fall through to _DRM_FRAME_BUFFER... */
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
#if defined(__i386__) || defined(__x86_64__)
		if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) {
			pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
			pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
		}
#elif defined(__powerpc__)
		pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
		if (map->type == _DRM_REGISTERS)
			pgprot_val(vma->vm_page_prot) |= _PAGE_GUARDED;
#endif
		vma->vm_flags |= VM_IO;	/* not in core dump */
#if defined(__ia64__)
		if (efi_range_is_wc(vma->vm_start, vma->vm_end -
				    vma->vm_start))
			vma->vm_page_prot =
			    pgprot_writecombine(vma->vm_page_prot);
		else
			vma->vm_page_prot =
			    pgprot_noncached(vma->vm_page_prot);
#endif
		offset = dev->driver->get_reg_ofs(dev);
		vma->vm_flags |= VM_IO;	/* not in core dump */
		vma->vm_page_prot = drm_io_prot(map->type, vma);
#ifdef __sparc__
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (map->offset + offset) >> PAGE_SHIFT,

@ -687,6 +874,20 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
		vma->vm_flags |= VM_RESERVED;
#endif
		break;
	case _DRM_TTM: {
		vma->vm_ops = &drm_vm_ttm_ops;
		vma->vm_private_data = (void *) map;
		vma->vm_file = filp;
		vma->vm_flags |= VM_RESERVED | VM_IO;
#ifdef DRM_ODD_MM_COMPAT
		mutex_lock(&dev->struct_mutex);
		drm_ttm_map_bound(vma);
		mutex_unlock(&dev->struct_mutex);
#endif
		if (drm_vm_ttm_open(vma))
			return -EAGAIN;
		return 0;
	}
	default:
		return -EINVAL;	/* This should never happen. */
	}


@ -4,7 +4,6 @@
 * Copyright (C) 2000 David S. Miller (davem@redhat.com)
 */

#include <linux/config.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <asm/shmparam.h>

@ -151,7 +151,7 @@ static int i810_map_buffer(drm_buf_t * buf, struct file *filp)
	drm_device_t *dev = priv->head->dev;
	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
	drm_i810_private_t *dev_priv = dev->dev_private;
	struct file_operations *old_fops;
	const struct file_operations *old_fops;
	int retcode = 0;

	if (buf_priv->currently_mapped == I810_BUF_MAPPED)

@ -166,10 +166,10 @@ static int i810_map_buffer(drm_buf_t * buf, struct file *filp)
				    MAP_SHARED, buf->bus_address);
	dev_priv->mmap_buffer = NULL;
	filp->f_op = old_fops;
	if ((unsigned long)buf_priv->virtual > -1024UL) {
	if (IS_ERR(buf_priv->virtual)) {
		/* Real error */
		DRM_ERROR("mmap error\n");
		retcode = (signed int)buf_priv->virtual;
		retcode = PTR_ERR(buf_priv->virtual);
		buf_priv->virtual = NULL;
	}
	up_write(&current->mm->mmap_sem);

@ -833,7 +833,7 @@ static void i810_dma_dispatch_vertex(drm_device_t * dev,
		    ((GFX_OP_PRIMITIVE | prim | ((used / 4) - 2)));

	if (used & 4) {
		*(u32 *) ((u32) buf_priv->kernel_virtual + used) = 0;
		*(u32 *) ((char *) buf_priv->kernel_virtual + used) = 0;
		used += 4;
	}


@ -1191,7 +1191,7 @@ static void i810_dma_dispatch_mc(drm_device_t * dev, drm_buf_t * buf, int used,

	if (buf_priv->currently_mapped == I810_BUF_MAPPED) {
		if (used & 4) {
			*(u32 *) ((u32) buf_priv->virtual + used) = 0;
			*(u32 *) ((char *) buf_priv->virtual + used) = 0;
			used += 4;
		}


@ -30,7 +30,6 @@
 * Gareth Hughes <gareth@valinux.com>
 */

#include <linux/config.h>
#include "drmP.h"
#include "drm.h"
#include "i810_drm.h"

@ -141,8 +141,8 @@ extern int i810_max_ioctl;
	volatile char *virt;

#define BEGIN_LP_RING(n) do {					\
	if (I810_VERBOSE)					\
		DRM_DEBUG("BEGIN_LP_RING(%d) in %s\n", n, __FUNCTION__);\
	if (dev_priv->ring.space < n*4)				\
		i810_wait_ring(dev, n*4);			\
	dev_priv->ring.space -= n*4;				\

@ -151,17 +151,17 @@ extern int i810_max_ioctl;
	virt = dev_priv->ring.virtual_start;			\
} while (0)

#define ADVANCE_LP_RING() do {					\
	if (I810_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING\n");	\
	dev_priv->ring.tail = outring;				\
	I810_WRITE(LP_RING + RING_TAIL, outring);		\
} while(0)

#define OUT_RING(n) do {					\
	if (I810_VERBOSE) DRM_DEBUG("   OUT_RING %x\n", (int)(n)); \
	*(volatile unsigned int *)(virt + outring) = n;		\
	outring += 4;						\
	outring &= ringmask;					\
} while (0)

#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
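/*
 * Illustrative usage (not part of this commit): the ring macros above are
 * used in matched groups, where the argument to BEGIN_LP_RING is the number
 * of dwords the following OUT_RING calls will emit:
 *
 *	BEGIN_LP_RING(2);
 *	OUT_RING(GFX_OP_USER_INTERRUPT);
 *	OUT_RING(0);
 *	ADVANCE_LP_RING();
 */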

@ -193,7 +193,7 @@ extern int i810_max_ioctl;
#define HEAD_WRAP_ONE		0x00200000
#define HEAD_ADDR		0x001FFFFC
#define RING_START		0x08
#define START_ADDR		0x00FFFFF8
#define RING_LEN		0x0C
#define RING_NR_PAGES		0x000FF000
#define RING_REPORT_MASK	0x00000006

@ -137,7 +137,7 @@ static int i830_map_buffer(drm_buf_t * buf, struct file *filp)
	drm_device_t *dev = priv->head->dev;
	drm_i830_buf_priv_t *buf_priv = buf->dev_private;
	drm_i830_private_t *dev_priv = dev->dev_private;
	struct file_operations *old_fops;
	const struct file_operations *old_fops;
	unsigned long virtual;
	int retcode = 0;


@ -155,7 +155,7 @@ static int i830_map_buffer(drm_buf_t * buf, struct file *filp)
	if (IS_ERR((void *)virtual)) {	/* ugh */
		/* Real error */
		DRM_ERROR("mmap error\n");
		retcode = virtual;
		retcode = PTR_ERR((void *)virtual);
		buf_priv->virtual = NULL;
	} else {
		buf_priv->virtual = (void __user *)virtual;

@ -32,8 +32,6 @@
 * Keith Whitwell <keith@tungstengraphics.com>
 */

#include <linux/config.h>

#include "drmP.h"
#include "drm.h"
#include "i830_drm.h"

@ -0,0 +1,66 @@
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"
#include "i915_drm.h"
#include "i915_drv.h"


drm_ttm_backend_t *i915_create_ttm_backend_entry(drm_device_t * dev)
{
	return drm_agp_init_ttm(dev, NULL);
}

int i915_fence_types(uint32_t buffer_flags, uint32_t * class, uint32_t * type)
{
	*class = 0;
	if (buffer_flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
		*type = 3;
	else
		*type = 1;
	return 0;
}

int i915_invalidate_caches(drm_device_t * dev, uint32_t flags)
{
	/*
	 * FIXME: Only emit once per batchbuffer submission.
	 */

	uint32_t flush_cmd = MI_NO_WRITE_FLUSH;

	if (flags & DRM_BO_FLAG_READ)
		flush_cmd |= MI_READ_FLUSH;
	if (flags & DRM_BO_FLAG_EXE)
		flush_cmd |= MI_EXE_FLUSH;

	return i915_emit_mi_flush(dev, flush_cmd);
}

@ -38,6 +38,27 @@ static struct pci_device_id pciidlist[] = {
	i915_PCI_IDS
};

#ifdef I915_HAVE_FENCE
static drm_fence_driver_t i915_fence_driver = {
	.no_types = 2,
	.wrap_diff = (1 << 30),
	.flush_diff = (1 << 29),
	.sequence_mask = 0xffffffffU,
	.lazy_capable = 1,
	.emit = i915_fence_emit_sequence,
	.poke_flush = i915_poke_flush,
};
#endif
#ifdef I915_HAVE_BUFFER
static drm_bo_driver_t i915_bo_driver = {
	.iomap = {NULL, NULL},
	.cached = {1, 1},
	.create_ttm_backend_entry = i915_create_ttm_backend_entry,
	.fence_type = i915_fence_types,
	.invalidate_caches = i915_invalidate_caches
};
#endif

static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static struct drm_driver driver = {
	/* don't use mtrr's here, the Xserver or user space app should

@ -45,12 +66,14 @@ static struct drm_driver driver = {
	 */
	.driver_features =
	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR | */
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL,
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL |
	    DRIVER_IRQ_VBL2,
	.load = i915_driver_load,
	.lastclose = i915_driver_lastclose,
	.preclose = i915_driver_preclose,
	.device_is_agp = i915_driver_device_is_agp,
	.vblank_wait = i915_driver_vblank_wait,
	.vblank_wait2 = i915_driver_vblank_wait2,
	.irq_preinstall = i915_driver_irq_preinstall,
	.irq_postinstall = i915_driver_irq_postinstall,
	.irq_uninstall = i915_driver_irq_uninstall,

@ -77,7 +100,12 @@ static struct drm_driver driver = {
		.probe = probe,
		.remove = __devexit_p(drm_cleanup_pci),
	},

#ifdef I915_HAVE_FENCE
	.fence_driver = &i915_fence_driver,
#endif
#ifdef I915_HAVE_BUFFER
	.bo_driver = &i915_bo_driver,
#endif
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,

@ -0,0 +1,146 @@
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"

/*
 * Implements an intel sync flush operation.
 */

static void i915_perform_flush(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	drm_fence_manager_t *fm = &dev->fm;
	drm_fence_driver_t *driver = dev->driver->fence_driver;
	uint32_t flush_flags = 0;
	uint32_t flush_sequence = 0;
	uint32_t i_status;
	uint32_t diff;
	uint32_t sequence;

	if (!dev_priv)
		return;

	if (fm->pending_exe_flush) {
		sequence = READ_BREADCRUMB(dev_priv);

		/*
		 * First update fences with the current breadcrumb.
		 */

		diff = sequence - fm->last_exe_flush;
		if (diff < driver->wrap_diff && diff != 0) {
			drm_fence_handler(dev, sequence, DRM_FENCE_TYPE_EXE);
		}

		diff = sequence - fm->exe_flush_sequence;
		if (diff < driver->wrap_diff) {
			fm->pending_exe_flush = 0;
			if (dev_priv->fence_irq_on) {
				i915_user_irq_off(dev_priv);
				dev_priv->fence_irq_on = 0;
			}
		} else if (!dev_priv->fence_irq_on) {
			i915_user_irq_on(dev_priv);
			dev_priv->fence_irq_on = 1;
		}
	}

	if (dev_priv->flush_pending) {
		i_status = READ_HWSP(dev_priv, 0);
		if ((i_status & (1 << 12)) !=
		    (dev_priv->saved_flush_status & (1 << 12))) {
			flush_flags = dev_priv->flush_flags;
			flush_sequence = dev_priv->flush_sequence;
			dev_priv->flush_pending = 0;
			drm_fence_handler(dev, flush_sequence, flush_flags);
		}
	}

	if (fm->pending_flush && !dev_priv->flush_pending) {
		dev_priv->flush_sequence = (uint32_t) READ_BREADCRUMB(dev_priv);
		dev_priv->flush_flags = fm->pending_flush;
		dev_priv->saved_flush_status = READ_HWSP(dev_priv, 0);
		I915_WRITE(I915REG_INSTPM, (1 << 5) | (1 << 21));
		dev_priv->flush_pending = 1;
		fm->pending_flush = 0;
	}

	if (dev_priv->flush_pending) {
		i_status = READ_HWSP(dev_priv, 0);
		if ((i_status & (1 << 12)) !=
		    (dev_priv->saved_flush_status & (1 << 12))) {
			flush_flags = dev_priv->flush_flags;
			flush_sequence = dev_priv->flush_sequence;
			dev_priv->flush_pending = 0;
			drm_fence_handler(dev, flush_sequence, flush_flags);
		}
	}

}

void i915_poke_flush(drm_device_t * dev)
{
	drm_fence_manager_t *fm = &dev->fm;
	unsigned long flags;

	write_lock_irqsave(&fm->lock, flags);
	i915_perform_flush(dev);
	write_unlock_irqrestore(&fm->lock, flags);
}

int i915_fence_emit_sequence(drm_device_t * dev, uint32_t flags,
			     uint32_t * sequence, uint32_t * native_type)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	if (!dev_priv)
		return -EINVAL;

	i915_emit_irq(dev);
	*sequence = (uint32_t) dev_priv->counter;
	*native_type = DRM_FENCE_TYPE_EXE;
	if (flags & DRM_I915_FENCE_FLAG_FLUSHED)
		*native_type |= DRM_I915_FENCE_TYPE_RW;

	return 0;
}

void i915_fence_handler(drm_device_t * dev)
{
	drm_fence_manager_t *fm = &dev->fm;

	write_lock(&fm->lock);
	i915_perform_flush(dev);
	write_unlock(&fm->lock);
}

@ -22,7 +22,6 @@

/* derived from tdfx_drv.c */

#include <linux/config.h>
#include "drmP.h"
#include "imagine_drv.h"


@ -27,7 +27,6 @@
 * Leif Delgass <ldelgass@retinalburn.net>
 */

#include <linux/config.h>
#include "drmP.h"
#include "drm.h"
#include "mach64_drm.h"

@ -29,7 +29,6 @@
 * Gareth Hughes <gareth@valinux.com>
 */

#include <linux/config.h>
#include "drmP.h"
#include "drm.h"
#include "mga_drm.h"

@ -49,6 +48,7 @@ static struct drm_driver driver = {
	    DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA |
	    DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
	    DRIVER_IRQ_VBL,
	.dev_priv_size = sizeof(drm_mga_buf_priv_t),
	.load = mga_driver_load,
	.unload = mga_driver_unload,
	.lastclose = mga_driver_lastclose,

@ -32,7 +32,6 @@
 * Lars Knoll <lars@trolltech.com>
 */

#include <linux/config.h>
#include "drmP.h"
#include "nv_drv.h"


@ -29,7 +29,6 @@
 * Gareth Hughes <gareth@valinux.com>
 */

#include <linux/config.h>
#include "drmP.h"
#include "drm.h"
#include "r128_drm.h"

@ -29,7 +29,6 @@
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/config.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"

@ -45,7 +44,7 @@ module_param_named(no_wb, radeon_no_wb, int, 0444);
static int dri_library_name(struct drm_device * dev, char * buf)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	int family = dev_priv->flags & CHIP_FAMILY_MASK;
	int family = dev_priv->flags & RADEON_FAMILY_MASK;

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(family < CHIP_R200) ? "radeon" :

@ -23,7 +23,6 @@
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/config.h>
#include "drmP.h"
#include "savage_drm.h"
#include "savage_drv.h"

@ -25,7 +25,6 @@
 *
 */

#include <linux/config.h>
#include "drmP.h"
#include "sis_drm.h"
#include "sis_drv.h"

@ -40,15 +39,15 @@ static struct pci_device_id pciidlist[] = {
static int sis_driver_load(drm_device_t *dev, unsigned long chipset)
{
	drm_sis_private_t *dev_priv;
	int ret;

	dev_priv = drm_calloc(1, sizeof(drm_sis_private_t), DRM_MEM_DRIVER);
	if (dev_priv == NULL)
		return DRM_ERR(ENOMEM);

	dev->dev_private = (void *)dev_priv;
	dev_priv->chipset = chipset;
	ret = drm_sman_init(&dev_priv->sman, 2, 12, 8);
	if (ret) {
		drm_free(dev_priv, sizeof(dev_priv), DRM_MEM_DRIVER);
	}

@ -60,7 +59,7 @@ static int sis_driver_unload(drm_device_t *dev)
{
	drm_sis_private_t *dev_priv = dev->dev_private;

	drm_sman_takedown(&dev_priv->sman);
	drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);

	return 0;

@ -70,10 +69,10 @@ static int sis_driver_unload(drm_device_t *dev)
static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static struct drm_driver driver = {
	.driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR,
	.load = sis_driver_load,
	.unload = sis_driver_unload,
	.context_dtor = NULL,
	.dma_quiescent = sis_idle,
	.reclaim_buffers = NULL,
	.reclaim_buffers_locked = sis_reclaim_buffers_locked,
	.lastclose = sis_lastclose,

@ -30,7 +30,6 @@
 * Gareth Hughes <gareth@valinux.com>
 */

#include <linux/config.h>
#include "drmP.h"
#include "tdfx_drv.h"


@ -121,19 +121,18 @@ via_map_blit_for_device(struct pci_dev *pdev,

	while (line_len > 0) {

		remaining_len = min(PAGE_SIZE - VIA_PGOFF(cur_mem), line_len);
		line_len -= remaining_len;

		if (mode == 1) {
			desc_ptr->mem_addr = dma_map_page(&pdev->dev,
							  vsg->pages[VIA_PFN(cur_mem) -
								     VIA_PFN(first_addr)],
							  VIA_PGOFF(cur_mem), remaining_len,
							  vsg->direction);
			desc_ptr->dev_addr = cur_fb;

			desc_ptr->size = remaining_len;
			desc_ptr->next = (uint32_t) next;
			next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
					      DMA_TO_DEVICE);

@ -167,7 +166,7 @@ via_map_blit_for_device(struct pci_dev *pdev,
 */


void
static void
via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
{
	struct page *page;

@ -648,13 +647,13 @@ via_build_sg_info(drm_device_t *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *
	if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) ||
	    ((xfer->num_lines > 1) && ((xfer->mem_stride & 3) != (xfer->fb_stride & 3)))) {
		DRM_ERROR("Invalid DRM bitblt alignment.\n");
		return DRM_ERR(EINVAL);
	}
#else
	if ((((unsigned long)xfer->mem_addr & 15) || ((unsigned long)xfer->fb_addr & 3)) ||
	    ((xfer->num_lines > 1) && ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) {
		DRM_ERROR("Invalid DRM bitblt alignment.\n");
		return DRM_ERR(EINVAL);
	}
#endif


@ -732,7 +731,7 @@ via_dmablit(drm_device_t *dev, drm_via_dmablit_t *xfer)
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_sg_info_t *vsg;
	drm_via_blitq_t *blitq;
	int ret;
	int engine;
	unsigned long irqsave;


@ -30,6 +30,8 @@
#ifndef _VIA_DMABLIT_H
#define _VIA_DMABLIT_H

#include <linux/dma-mapping.h>

#define VIA_NUM_BLIT_ENGINES 2
#define VIA_NUM_BLIT_SLOTS 8


@ -43,12 +45,12 @@ typedef struct _drm_via_sg_info {
	int num_desc;
	enum dma_data_direction direction;
	unsigned char *bounce_buffer;
	dma_addr_t chain_start;
	uint32_t free_on_sequence;
	unsigned int descriptors_per_page;
	int aborted;
	enum {
		dr_via_device_mapped,
		dr_via_desc_pages_alloc,
		dr_via_pages_locked,
		dr_via_pages_alloc,

@ -66,7 +68,7 @@ typedef struct _drm_via_blitq {
	unsigned num_free;
	unsigned num_outstanding;
	unsigned long end;
	int aborting;
	int is_active;
	drm_via_sg_info_t *blits[VIA_NUM_BLIT_SLOTS];
	spinlock_t blit_lock;


@ -69,9 +69,6 @@
#endif

#if defined(__linux__)
#if defined(__KERNEL__)
#include <linux/config.h>
#endif
#include <asm/ioctl.h>		/* For _IO* macros */
#define DRM_IOCTL_NR(n)		_IOC_NR(n)
#define DRM_IOC_VOID		_IOC_NONE

@ -134,8 +131,16 @@
#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))

#if defined(__linux__)
#if defined(__KERNEL__)
typedef __u64 drm_u64_t;
#else
typedef unsigned long long drm_u64_t;
#endif

typedef unsigned int drm_handle_t;
#else
#include <sys/types.h>
typedef u_int64_t drm_u64_t;
typedef unsigned long drm_handle_t;	/**< To mapped regions */
#endif
typedef unsigned int drm_context_t;	/**< GLXContext handle */

@ -158,6 +163,14 @@ typedef struct drm_clip_rect {
	unsigned short y2;
} drm_clip_rect_t;

/**
 * Drawable information.
 */
typedef struct drm_drawable_info {
	unsigned int num_rects;
	drm_clip_rect_t *rects;
} drm_drawable_info_t;

/**
 * Texture region,
 */

@ -259,7 +272,8 @@ typedef enum drm_map_type {
	_DRM_SHM = 2,		/**< shared, cached */
	_DRM_AGP = 3,		/**< AGP/GART */
	_DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */
	_DRM_CONSISTENT = 5	/**< Consistent memory for PCI DMA */
	_DRM_CONSISTENT = 5,	/**< Consistent memory for PCI DMA */
	_DRM_TTM = 6
} drm_map_type_t;

/**

@ -408,7 +422,8 @@ typedef struct drm_buf_desc {
	_DRM_PAGE_ALIGN = 0x01,	/**< Align on page boundaries for DMA */
	_DRM_AGP_BUFFER = 0x02,	/**< Buffer is in AGP space */
	_DRM_SG_BUFFER = 0x04,	/**< Scatter/gather memory buffer */
	_DRM_FB_BUFFER = 0x08	/**< Buffer is in frame buffer */
	_DRM_FB_BUFFER = 0x08,	/**< Buffer is in frame buffer */
	_DRM_PCI_BUFFER_RO = 0x10 /**< Map PCI DMA buffer read-only */
	} flags;
	unsigned long agp_start; /**<
				  * Start address of where the AGP buffers are

@ -507,6 +522,20 @@ typedef struct drm_draw {
	drm_drawable_t handle;
} drm_draw_t;

/**
 * DRM_IOCTL_UPDATE_DRAW ioctl argument type.
 */
typedef enum {
	DRM_DRAWABLE_CLIPRECTS,
} drm_drawable_info_type_t;

typedef struct drm_update_draw {
	drm_drawable_t handle;
	unsigned int type;
	unsigned int num;
	unsigned long long data;
} drm_update_draw_t;
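/*
 * Illustrative user-space sketch (not part of this header): attaching
 * cliprects to a drawable with DRM_IOCTL_UPDATE_DRAW (defined further down).
 * The fd, handle and rects array are hypothetical; the assumption here is
 * that `data` carries a pointer widened to 64 bits.
 *
 *	drm_update_draw_t upd;
 *	upd.handle = draw_handle;
 *	upd.type = DRM_DRAWABLE_CLIPRECTS;
 *	upd.num = num_rects;
 *	upd.data = (unsigned long long)(unsigned long)rects;
 *	ioctl(fd, DRM_IOCTL_UPDATE_DRAW, &upd);
 */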

/**
 * DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
 */

@ -529,10 +558,14 @@ typedef struct drm_irq_busid {
typedef enum {
	_DRM_VBLANK_ABSOLUTE = 0x0,	/**< Wait for specific vblank sequence number */
	_DRM_VBLANK_RELATIVE = 0x1,	/**< Wait for given number of vblanks */
	_DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */
	_DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */
	_DRM_VBLANK_SIGNAL = 0x40000000	/**< Send signal instead of blocking */
} drm_vblank_seq_type_t;

#define _DRM_VBLANK_FLAGS_MASK _DRM_VBLANK_SIGNAL
#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_SIGNAL | _DRM_VBLANK_SECONDARY | \
				_DRM_VBLANK_NEXTONMISS)

struct drm_wait_vblank_request {
	drm_vblank_seq_type_t type;

@ -629,6 +662,190 @@ typedef struct drm_set_version {
	int drm_dd_minor;
} drm_set_version_t;


#define DRM_FENCE_FLAG_EMIT                0x00000001
#define DRM_FENCE_FLAG_SHAREABLE           0x00000002
#define DRM_FENCE_FLAG_WAIT_LAZY           0x00000004
#define DRM_FENCE_FLAG_WAIT_IGNORE_SIGNALS 0x00000008

/* Reserved for driver use */
#define DRM_FENCE_MASK_DRIVER              0xFF000000

#define DRM_FENCE_TYPE_EXE                 0x00000001

typedef struct drm_fence_arg {
	unsigned handle;
	int class;
	unsigned type;
	unsigned flags;
	unsigned signaled;
	unsigned expand_pad[4];	/* Future expansion */
	enum {
		drm_fence_create,
		drm_fence_destroy,
		drm_fence_reference,
		drm_fence_unreference,
		drm_fence_signaled,
		drm_fence_flush,
		drm_fence_wait,
		drm_fence_emit,
		drm_fence_buffers
	} op;
} drm_fence_arg_t;
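/*
 * Illustrative user-space sketch (not part of this header): creating and
 * emitting a fence via the DRM_IOCTL_FENCE ioctl defined near the end of
 * this file. `fd` is assumed to be an open, authenticated DRM node.
 *
 *	drm_fence_arg_t arg;
 *	memset(&arg, 0, sizeof(arg));
 *	arg.op = drm_fence_create;
 *	arg.type = DRM_FENCE_TYPE_EXE;
 *	arg.flags = DRM_FENCE_FLAG_EMIT;
 *	if (ioctl(fd, DRM_IOCTL_FENCE, &arg) == 0)
 *		printf("fence handle %u, signaled 0x%x\n",
 *		       arg.handle, arg.signaled);
 */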

/* Buffer permissions, referring to how the GPU uses the buffers. These
   translate to fence types used for the buffers. Typically a texture buffer
   is read, a destination buffer is write and a command (batch-) buffer is
   exe. They can be or-ed together; see the sketch after these definitions. */

#define DRM_BO_FLAG_READ  0x00000001
#define DRM_BO_FLAG_WRITE 0x00000002
#define DRM_BO_FLAG_EXE   0x00000004
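/*
 * Illustrative note (not part of this header): typical combinations a
 * client would pass when validating buffers.
 *
 *	uint32_t texture_flags = DRM_BO_FLAG_READ;
 *	uint32_t render_target = DRM_BO_FLAG_WRITE;
 *	uint32_t batch_flags   = DRM_BO_FLAG_READ | DRM_BO_FLAG_EXE;
 */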
||||
/*
|
||||
* Status flags. Can be read to determine the actual state of a buffer.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Cannot evict this buffer. Not even with force. This type of buffer should
|
||||
* only be available for root, and must be manually removed before buffer
|
||||
* manager shutdown or swapout.
|
||||
*/
|
||||
#define DRM_BO_FLAG_NO_EVICT 0x00000010
|
||||
/* Always keep a system memory shadow to a vram buffer */
|
||||
#define DRM_BO_FLAG_SHADOW_VRAM 0x00000020
|
||||
/* The buffer is shareable with other processes */
|
||||
#define DRM_BO_FLAG_SHAREABLE 0x00000040
|
||||
/* The buffer is currently cached */
|
||||
#define DRM_BO_FLAG_CACHED 0x00000080
|
||||
/* Make sure that every time this buffer is validated, it ends up on the same
|
||||
* location. The buffer will also not be evicted when claiming space for
|
||||
* other buffers. Basically a pinned buffer but it may be thrown out as
|
||||
* part of buffer manager shutdown or swapout. Not supported yet.*/
|
||||
#define DRM_BO_FLAG_NO_MOVE 0x00000100
|
||||
|
||||
/* Make sure the buffer is in cached memory when mapped for reading */
|
||||
#define DRM_BO_FLAG_READ_CACHED 0x00080000
|
||||
/* When there is a choice between VRAM and TT, prefer VRAM.
|
||||
The default behaviour is to prefer TT. */
|
||||
#define DRM_BO_FLAG_PREFER_VRAM 0x00040000
|
||||
/* Bind this buffer cached if the hardware supports it. */
|
||||
#define DRM_BO_FLAG_BIND_CACHED 0x0002000
|
||||
|
||||
/* System Memory */
#define DRM_BO_FLAG_MEM_LOCAL	0x01000000
/* Translation table memory */
#define DRM_BO_FLAG_MEM_TT	0x02000000
/* Vram memory */
#define DRM_BO_FLAG_MEM_VRAM	0x04000000
/* Unmappable Vram memory */
#define DRM_BO_FLAG_MEM_VRAM_NM	0x08000000
/* Memory flag mask */
#define DRM_BO_MASK_MEM		0xFF000000

/* When creating a buffer, avoid system storage even if allowed */
#define DRM_BO_HINT_AVOID_LOCAL	0x00000001
/* Don't block on validate and map */
#define DRM_BO_HINT_DONT_BLOCK	0x00000002
/* Don't place this buffer on the unfenced list. */
#define DRM_BO_HINT_DONT_FENCE	0x00000004
#define DRM_BO_HINT_WAIT_LAZY	0x00000008
#define DRM_BO_HINT_ALLOW_UNFENCED_MAP 0x00000010

/* Driver specific flags. Could be for example rendering engine */
#define DRM_BO_MASK_DRIVER	0x00F00000

typedef enum {
	drm_bo_type_dc,
	drm_bo_type_user,
	drm_bo_type_fake
} drm_bo_type_t;

typedef struct drm_bo_arg_request {
	unsigned handle;	/* User space handle */
	unsigned mask;
	unsigned hint;
	drm_u64_t size;
	drm_bo_type_t type;
	unsigned arg_handle;
	drm_u64_t buffer_start;
	unsigned page_alignment;
	unsigned expand_pad[4];	/* Future expansion */
	enum {
		drm_bo_create,
		drm_bo_validate,
		drm_bo_map,
		drm_bo_unmap,
		drm_bo_fence,
		drm_bo_destroy,
		drm_bo_reference,
		drm_bo_unreference,
		drm_bo_info,
		drm_bo_wait_idle,
		drm_bo_ref_fence
	} op;
} drm_bo_arg_request_t;

/*
 * Reply flags
 */

#define DRM_BO_REP_BUSY	0x00000001

typedef struct drm_bo_arg_reply {
	int ret;
	unsigned handle;
	unsigned flags;
	drm_u64_t size;
	drm_u64_t offset;
	drm_u64_t arg_handle;
	unsigned mask;
	drm_u64_t buffer_start;
	unsigned fence_flags;
	unsigned rep_flags;
	unsigned page_alignment;
	unsigned expand_pad[4];	/* Future expansion */
} drm_bo_arg_reply_t;

typedef struct drm_bo_arg {
	int handled;
	drm_u64_t next;
	union {
		drm_bo_arg_request_t req;
		drm_bo_arg_reply_t rep;
	} d;
} drm_bo_arg_t;

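/*
 * Illustrative sketch, not part of the commit: one drm_bo_arg_t carries the
 * request in d.req and comes back overwritten with the reply in d.rep;
 * "next" can point at a further chained argument.  The "fd" variable, the
 * ioctl() call, and the chosen size/mask values are assumptions;
 * DRM_IOCTL_BUFOBJ is defined a few lines below.
 */
drm_bo_arg_t bo_arg;
memset(&bo_arg, 0, sizeof(bo_arg));
bo_arg.next = 0;			/* single request, no chaining */
bo_arg.d.req.op = drm_bo_create;
bo_arg.d.req.type = drm_bo_type_dc;
bo_arg.d.req.size = 1024 * 1024;	/* 1 MiB */
bo_arg.d.req.mask = DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_MEM_TT;
if (ioctl(fd, DRM_IOCTL_BUFOBJ, &bo_arg) == 0 && bo_arg.handled) {
	/* bo_arg.d.rep.handle is the kernel-assigned buffer handle */
}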
#define DRM_BO_MEM_LOCAL	0
#define DRM_BO_MEM_TT		1
#define DRM_BO_MEM_VRAM		2
#define DRM_BO_MEM_VRAM_NM	3
#define DRM_BO_MEM_TYPES	2	/* For now. */

typedef union drm_mm_init_arg {
	struct {
		enum {
			mm_init,
			mm_takedown,
			mm_query,
			mm_lock,
			mm_unlock
		} op;
		drm_u64_t p_offset;
		drm_u64_t p_size;
		unsigned mem_type;
		unsigned expand_pad[8];	/* Future expansion */
	} req;
	struct {
		drm_handle_t mm_sarea;
		unsigned expand_pad[8];	/* Future expansion */
	} rep;
} drm_mm_init_arg_t;

/**
 * \name Ioctls Definitions
 */

@ -694,15 +911,23 @@ typedef struct drm_set_version {

#define DRM_IOCTL_WAIT_VBLANK	DRM_IOWR(0x3a, drm_wait_vblank_t)

#define DRM_IOCTL_FENCE		DRM_IOWR(0x3b, drm_fence_arg_t)
#define DRM_IOCTL_BUFOBJ	DRM_IOWR(0x3d, drm_bo_arg_t)
#define DRM_IOCTL_MM_INIT	DRM_IOWR(0x3e, drm_mm_init_arg_t)

#define DRM_IOCTL_UPDATE_DRAW	DRM_IOW(0x3f, drm_update_draw_t)

/*@}*/

/**
 * Device specific ioctls should only be in their respective headers
 * The device specific ioctl range is from 0x40 to 0x79.
 * The device specific ioctl range is from 0x40 to 0x99.
 * Generic IOCTLS restart at 0xA0.
 *
 * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
 * drmCommandReadWrite().
 */
#define DRM_COMMAND_BASE	0x40
#define DRM_COMMAND_END		0xA0

#endif
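/*
 * Illustrative sketch, not part of the commit: bringing up the buffer
 * manager for one memory type via DRM_IOCTL_MM_INIT above.  The "fd"
 * variable and the offset/size values are assumptions (whether p_offset and
 * p_size are expressed in pages is not shown in this hunk).
 */
drm_mm_init_arg_t mm_arg;
memset(&mm_arg, 0, sizeof(mm_arg));
mm_arg.req.op = mm_init;
mm_arg.req.mem_type = DRM_BO_MEM_TT;
mm_arg.req.p_offset = 0;
mm_arg.req.p_size = 65536;
if (ioctl(fd, DRM_IOCTL_MM_INIT, &mm_arg) == 0) {
	/* mm_arg.rep.mm_sarea identifies the manager's shared area */
}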
@ -0,0 +1,330 @@
/**
 * \file drm_drawable.c
 * IOCTLs for drawables
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 * \author Michel Dänzer <michel@tungstengraphics.com>
 */

/*
 * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, North Dakota.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"

/**
 * Allocate drawable ID and memory to store information about it.
 */
int drm_adddraw(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	unsigned long irqflags;
	int i, j;
	u32 *bitfield = dev->drw_bitfield;
	unsigned int bitfield_length = dev->drw_bitfield_length;
	drm_drawable_info_t **info = dev->drw_info;
	unsigned int info_length = dev->drw_info_length;
	drm_draw_t draw;

	for (i = 0, j = 0; i < bitfield_length; i++) {
		if (bitfield[i] == ~0)
			continue;

		for (; j < 8 * sizeof(*bitfield); j++)
			if (!(bitfield[i] & (1 << j)))
				goto done;
	}
done:

	if (i == bitfield_length) {
		bitfield_length++;

		bitfield = drm_alloc(bitfield_length * sizeof(*bitfield),
				     DRM_MEM_BUFS);

		if (!bitfield) {
			DRM_ERROR("Failed to allocate new drawable bitfield\n");
			return DRM_ERR(ENOMEM);
		}

		if (8 * sizeof(*bitfield) * bitfield_length > info_length) {
			info_length += 8 * sizeof(*bitfield);

			info = drm_alloc(info_length * sizeof(*info),
					 DRM_MEM_BUFS);

			if (!info) {
				DRM_ERROR("Failed to allocate new drawable info"
					  " array\n");

				drm_free(bitfield,
					 bitfield_length * sizeof(*bitfield),
					 DRM_MEM_BUFS);
				return DRM_ERR(ENOMEM);
			}
		}

		bitfield[i] = 0;
	}

	draw.handle = i * 8 * sizeof(*bitfield) + j + 1;
	DRM_DEBUG("%d\n", draw.handle);

	spin_lock_irqsave(&dev->drw_lock, irqflags);

	bitfield[i] |= 1 << j;
	info[draw.handle - 1] = NULL;

	if (bitfield != dev->drw_bitfield) {
		memcpy(bitfield, dev->drw_bitfield, dev->drw_bitfield_length *
		       sizeof(*bitfield));
		drm_free(dev->drw_bitfield, sizeof(*bitfield) *
			 dev->drw_bitfield_length, DRM_MEM_BUFS);
		dev->drw_bitfield = bitfield;
		dev->drw_bitfield_length = bitfield_length;
	}

	if (info != dev->drw_info) {
		memcpy(info, dev->drw_info, dev->drw_info_length *
		       sizeof(*info));
		drm_free(dev->drw_info, sizeof(*info) * dev->drw_info_length,
			 DRM_MEM_BUFS);
		dev->drw_info = info;
		dev->drw_info_length = info_length;
	}

	spin_unlock_irqrestore(&dev->drw_lock, irqflags);

	DRM_COPY_TO_USER_IOCTL((drm_draw_t __user *)data, draw, sizeof(draw));

	return 0;
}

/**
 * Free drawable ID and memory to store information about it.
 */
int drm_rmdraw(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_draw_t draw;
	int id, idx;
	unsigned int shift;
	unsigned long irqflags;
	u32 *bitfield = dev->drw_bitfield;
	unsigned int bitfield_length = dev->drw_bitfield_length;
	drm_drawable_info_t **info = dev->drw_info;
	unsigned int info_length = dev->drw_info_length;

	DRM_COPY_FROM_USER_IOCTL(draw, (drm_draw_t __user *) data,
				 sizeof(draw));

	id = draw.handle - 1;
	idx = id / (8 * sizeof(*bitfield));
	shift = id % (8 * sizeof(*bitfield));

	if (idx < 0 || idx >= bitfield_length ||
	    !(bitfield[idx] & (1 << shift))) {
		DRM_DEBUG("No such drawable %d\n", draw.handle);
		return 0;
	}

	spin_lock_irqsave(&dev->drw_lock, irqflags);

	bitfield[idx] &= ~(1 << shift);

	spin_unlock_irqrestore(&dev->drw_lock, irqflags);

	if (info[id]) {
		drm_free(info[id]->rects, info[id]->num_rects *
			 sizeof(drm_clip_rect_t), DRM_MEM_BUFS);
		drm_free(info[id], sizeof(**info), DRM_MEM_BUFS);
	}

	/* Can we shrink the arrays? */
	if (idx == bitfield_length - 1) {
		while (idx >= 0 && !bitfield[idx])
			--idx;

		bitfield_length = idx + 1;

		if (idx != id / (8 * sizeof(*bitfield)))
			bitfield = drm_alloc(bitfield_length *
					     sizeof(*bitfield), DRM_MEM_BUFS);

		if (!bitfield && bitfield_length) {
			bitfield = dev->drw_bitfield;
			bitfield_length = dev->drw_bitfield_length;
		}
	}

	if (bitfield != dev->drw_bitfield) {
		info_length = 8 * sizeof(*bitfield) * bitfield_length;

		info = drm_alloc(info_length * sizeof(*info), DRM_MEM_BUFS);

		if (!info && info_length) {
			info = dev->drw_info;
			info_length = dev->drw_info_length;
		}

		spin_lock_irqsave(&dev->drw_lock, irqflags);

		memcpy(bitfield, dev->drw_bitfield, bitfield_length *
		       sizeof(*bitfield));
		drm_free(dev->drw_bitfield, sizeof(*bitfield) *
			 dev->drw_bitfield_length, DRM_MEM_BUFS);
		dev->drw_bitfield = bitfield;
		dev->drw_bitfield_length = bitfield_length;

		if (info != dev->drw_info) {
			memcpy(info, dev->drw_info, info_length *
			       sizeof(*info));
			drm_free(dev->drw_info, sizeof(*info) *
				 dev->drw_info_length, DRM_MEM_BUFS);
			dev->drw_info = info;
			dev->drw_info_length = info_length;
		}

		spin_unlock_irqrestore(&dev->drw_lock, irqflags);
	}

	DRM_DEBUG("%d\n", draw.handle);
	return 0;
}

int drm_update_drawable_info(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_update_draw_t update;
	unsigned int id, idx, shift, bitfield_length = dev->drw_bitfield_length;
	u32 *bitfield = dev->drw_bitfield;
	unsigned long irqflags;
	drm_drawable_info_t *info;
	drm_clip_rect_t *rects;
	int err;

	DRM_COPY_FROM_USER_IOCTL(update, (drm_update_draw_t __user *) data,
				 sizeof(update));

	id = update.handle - 1;
	idx = id / (8 * sizeof(*bitfield));
	shift = id % (8 * sizeof(*bitfield));

	if (idx < 0 || idx >= bitfield_length ||
	    !(bitfield[idx] & (1 << shift))) {
		DRM_ERROR("No such drawable %d\n", update.handle);
		return DRM_ERR(EINVAL);
	}

	info = dev->drw_info[id];

	if (!info) {
		info = drm_calloc(1, sizeof(drm_drawable_info_t), DRM_MEM_BUFS);

		if (!info) {
			DRM_ERROR("Failed to allocate drawable info memory\n");
			return DRM_ERR(ENOMEM);
		}
	}

	switch (update.type) {
	case DRM_DRAWABLE_CLIPRECTS:
		if (update.num != info->num_rects) {
			rects = drm_alloc(update.num * sizeof(drm_clip_rect_t),
					  DRM_MEM_BUFS);
		} else
			rects = info->rects;

		if (update.num && !rects) {
			DRM_ERROR("Failed to allocate cliprect memory\n");
			err = DRM_ERR(ENOMEM);
			goto error;
		}

		if (update.num && DRM_COPY_FROM_USER(rects,
						     (drm_clip_rect_t __user *)
						     (unsigned long)update.data,
						     update.num *
						     sizeof(*rects))) {
			DRM_ERROR("Failed to copy cliprects from userspace\n");
			err = DRM_ERR(EFAULT);
			goto error;
		}

		spin_lock_irqsave(&dev->drw_lock, irqflags);

		if (rects != info->rects) {
			drm_free(info->rects, info->num_rects *
				 sizeof(drm_clip_rect_t), DRM_MEM_BUFS);
		}

		info->rects = rects;
		info->num_rects = update.num;
		dev->drw_info[id] = info;

		spin_unlock_irqrestore(&dev->drw_lock, irqflags);

		DRM_DEBUG("Updated %d cliprects for drawable %d\n",
			  info->num_rects, id);
		break;
	default:
		DRM_ERROR("Invalid update type %d\n", update.type);
		return DRM_ERR(EINVAL);
	}

	return 0;

error:
	if (!dev->drw_info[id])
		drm_free(info, sizeof(*info), DRM_MEM_BUFS);
	else if (rects != dev->drw_info[id]->rects)
		drm_free(rects, update.num *
			 sizeof(drm_clip_rect_t), DRM_MEM_BUFS);

	return err;
}

/**
 * Caller must hold the drawable spinlock!
 */
drm_drawable_info_t *drm_get_drawable_info(drm_device_t *dev, drm_drawable_t id)
{
	u32 *bitfield = dev->drw_bitfield;
	unsigned int idx, shift;

	id--;
	idx = id / (8 * sizeof(*bitfield));
	shift = id % (8 * sizeof(*bitfield));

	if (idx < 0 || idx >= dev->drw_bitfield_length ||
	    !(bitfield[idx] & (1 << shift))) {
		DRM_DEBUG("No such drawable %d\n", id);
		return NULL;
	}

	return dev->drw_info[id];
}
EXPORT_SYMBOL(drm_get_drawable_info);

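/*
 * Illustrative sketch, not part of the commit: a driver consuming drawable
 * information under the locking rule documented above.  The surrounding
 * variables and the emit_blit_for_rect() helper are assumptions; the i915
 * vblank tasklet added later in this commit follows the same pattern.
 */
spin_lock_irqsave(&dev->drw_lock, irqflags);
drw = drm_get_drawable_info(dev, drw_id);
if (drw) {
	for (i = 0; i < drw->num_rects; i++)
		emit_blit_for_rect(dev, &drw->rects[i]);
}
spin_unlock_irqrestore(&dev->drw_lock, irqflags);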
@ -1,11 +1,11 @@
[radeon]
0x1002 0x3150 CHIP_RV380|CHIP_IS_MOBILITY "ATI Radeon Mobility X600 M24"
0x1002 0x3152 CHIP_RV380|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP "ATI Radeon Mobility X300 M24"
0x1002 0x3154 CHIP_RV380|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP "ATI FireGL M24 GL"
0x1002 0x3E50 CHIP_RV380|CHIP_NEW_MEMMAP "ATI Radeon RV380 X600"
0x1002 0x3E54 CHIP_RV380|CHIP_NEW_MEMMAP "ATI FireGL V3200 RV380"
0x1002 0x4136 CHIP_RS100|CHIP_IS_IGP "ATI Radeon RS100 IGP 320"
0x1002 0x4137 CHIP_RS200|CHIP_IS_IGP "ATI Radeon RS200 IGP 340"
0x1002 0x3150 CHIP_RV380|RADEON_IS_MOBILITY "ATI Radeon Mobility X600 M24"
0x1002 0x3152 CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Radeon Mobility X300 M24"
0x1002 0x3154 CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI FireGL M24 GL"
0x1002 0x3E50 CHIP_RV380|RADEON_NEW_MEMMAP "ATI Radeon RV380 X600"
0x1002 0x3E54 CHIP_RV380|RADEON_NEW_MEMMAP "ATI FireGL V3200 RV380"
0x1002 0x4136 CHIP_RS100|RADEON_IS_IGP "ATI Radeon RS100 IGP 320"
0x1002 0x4137 CHIP_RS200|RADEON_IS_IGP "ATI Radeon RS200 IGP 340"
0x1002 0x4144 CHIP_R300 "ATI Radeon AD 9500"
0x1002 0x4145 CHIP_R300 "ATI Radeon AE 9700 Pro"
0x1002 0x4146 CHIP_R300 "ATI Radeon AF R300 9600TX"

@ -21,35 +21,35 @@
0x1002 0x4154 CHIP_RV350 "ATI FireGL AT T2"
0x1002 0x4155 CHIP_RV350 "ATI Radeon 9650"
0x1002 0x4156 CHIP_RV350 "ATI FireGL AV RV360 T2"
0x1002 0x4237 CHIP_RS200|CHIP_IS_IGP "ATI Radeon RS250 IGP"
0x1002 0x4237 CHIP_RS200|RADEON_IS_IGP "ATI Radeon RS250 IGP"
0x1002 0x4242 CHIP_R200 "ATI Radeon BB R200 AIW 8500DV"
0x1002 0x4243 CHIP_R200 "ATI Radeon BC R200"
0x1002 0x4336 CHIP_RS100|CHIP_IS_IGP|CHIP_IS_MOBILITY "ATI Radeon RS100 Mobility U1"
0x1002 0x4337 CHIP_RS200|CHIP_IS_IGP|CHIP_IS_MOBILITY "ATI Radeon RS200 Mobility IGP 340M"
0x1002 0x4437 CHIP_RS200|CHIP_IS_IGP|CHIP_IS_MOBILITY "ATI Radeon RS250 Mobility IGP"
0x1002 0x4336 CHIP_RS100|RADEON_IS_IGP|RADEON_IS_MOBILITY "ATI Radeon RS100 Mobility U1"
0x1002 0x4337 CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY "ATI Radeon RS200 Mobility IGP 340M"
0x1002 0x4437 CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY "ATI Radeon RS250 Mobility IGP"
0x1002 0x4966 CHIP_RV250 "ATI Radeon If RV250 9000"
0x1002 0x4967 CHIP_RV250 "ATI Radeon Ig RV250 9000"
0x1002 0x4A48 CHIP_R420|CHIP_NEW_MEMMAP "ATI Radeon JH R420 X800"
0x1002 0x4A49 CHIP_R420|CHIP_NEW_MEMMAP "ATI Radeon JI R420 X800 Pro"
0x1002 0x4A4A CHIP_R420|CHIP_NEW_MEMMAP "ATI Radeon JJ R420 X800 SE"
0x1002 0x4A4B CHIP_R420|CHIP_NEW_MEMMAP "ATI Radeon JK R420 X800 XT"
0x1002 0x4A4C CHIP_R420|CHIP_NEW_MEMMAP "ATI Radeon JL R420 X800"
0x1002 0x4A4D CHIP_R420|CHIP_NEW_MEMMAP "ATI FireGL JM X3-256"
0x1002 0x4A4E CHIP_R420|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP "ATI Radeon JN R420 Mobility M18"
0x1002 0x4A4F CHIP_R420|CHIP_NEW_MEMMAP "ATI Radeon JO R420 X800 SE"
0x1002 0x4A50 CHIP_R420|CHIP_NEW_MEMMAP "ATI Radeon JP R420 X800 XT PE"
0x1002 0x4A54 CHIP_R420|CHIP_NEW_MEMMAP "ATI Radeon JT R420 AIW X800 VE"
0x1002 0x4B49 CHIP_R420|CHIP_NEW_MEMMAP "ATI Radeon R481 X850 XT"
0x1002 0x4B4A CHIP_R420|CHIP_NEW_MEMMAP "ATI Radeon R481 X850 SE"
0x1002 0x4B4B CHIP_R420|CHIP_NEW_MEMMAP "ATI Radeon R481 X850 Pro"
0x1002 0x4B4C CHIP_R420|CHIP_NEW_MEMMAP "ATI Radeon R481 X850 XT PE"
0x1002 0x4C57 CHIP_RV200|CHIP_IS_MOBILITY "ATI Radeon LW RV200 Mobility 7500 M7"
0x1002 0x4C58 CHIP_RV200|CHIP_IS_MOBILITY "ATI Radeon LX RV200 Mobility FireGL 7800 M7"
0x1002 0x4C59 CHIP_RV100|CHIP_IS_MOBILITY "ATI Radeon LY RV100 Mobility M6"
0x1002 0x4C5A CHIP_RV100|CHIP_IS_MOBILITY "ATI Radeon LZ RV100 Mobility M6"
0x1002 0x4C64 CHIP_RV250|CHIP_IS_MOBILITY "ATI Radeon Ld RV250 Mobility 9000 M9"
0x1002 0x4A48 CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon JH R420 X800"
0x1002 0x4A49 CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon JI R420 X800 Pro"
0x1002 0x4A4A CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon JJ R420 X800 SE"
0x1002 0x4A4B CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon JK R420 X800 XT"
0x1002 0x4A4C CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon JL R420 X800"
0x1002 0x4A4D CHIP_R420|RADEON_NEW_MEMMAP "ATI FireGL JM X3-256"
0x1002 0x4A4E CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Radeon JN R420 Mobility M18"
0x1002 0x4A4F CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon JO R420 X800 SE"
0x1002 0x4A50 CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon JP R420 X800 XT PE"
0x1002 0x4A54 CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon JT R420 AIW X800 VE"
0x1002 0x4B49 CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R481 X850 XT"
0x1002 0x4B4A CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R481 X850 SE"
0x1002 0x4B4B CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R481 X850 Pro"
0x1002 0x4B4C CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R481 X850 XT PE"
0x1002 0x4C57 CHIP_RV200|RADEON_IS_MOBILITY "ATI Radeon LW RV200 Mobility 7500 M7"
0x1002 0x4C58 CHIP_RV200|RADEON_IS_MOBILITY "ATI Radeon LX RV200 Mobility FireGL 7800 M7"
0x1002 0x4C59 CHIP_RV100|RADEON_IS_MOBILITY "ATI Radeon LY RV100 Mobility M6"
0x1002 0x4C5A CHIP_RV100|RADEON_IS_MOBILITY "ATI Radeon LZ RV100 Mobility M6"
0x1002 0x4C64 CHIP_RV250|RADEON_IS_MOBILITY "ATI Radeon Ld RV250 Mobility 9000 M9"
0x1002 0x4C66 CHIP_RV250 "ATI Radeon Lf RV250 Mobility 9000 M9 / FireMV 2400 PCI"
0x1002 0x4C67 CHIP_RV250|CHIP_IS_MOBILITY "ATI Radeon Lg RV250 Mobility 9000 M9"
0x1002 0x4C67 CHIP_RV250|RADEON_IS_MOBILITY "ATI Radeon Lg RV250 Mobility 9000 M9"
0x1002 0x4E44 CHIP_R300 "ATI Radeon ND R300 9700 Pro"
0x1002 0x4E45 CHIP_R300 "ATI Radeon NE R300 9500 Pro / 9700"
0x1002 0x4E46 CHIP_R300 "ATI Radeon NF R300 9600TX"

@ -58,16 +58,16 @@
0x1002 0x4E49 CHIP_R350 "ATI Radeon NI R350 9800"
0x1002 0x4E4A CHIP_R350 "ATI Radeon NJ R360 9800 XT"
0x1002 0x4E4B CHIP_R350 "ATI FireGL NK X2"
0x1002 0x4E50 CHIP_RV350|CHIP_IS_MOBILITY "ATI Radeon RV350 Mobility 9600 M10 NP"
0x1002 0x4E51 CHIP_RV350|CHIP_IS_MOBILITY "ATI Radeon RV350 Mobility 9600 M10 NQ"
0x1002 0x4E52 CHIP_RV350|CHIP_IS_MOBILITY "ATI Radeon RV350 Mobility 9600 M11 NR"
0x1002 0x4E53 CHIP_RV350|CHIP_IS_MOBILITY "ATI Radeon RV350 Mobility 9600 M10 NS"
0x1002 0x4E54 CHIP_RV350|CHIP_IS_MOBILITY "ATI FireGL T2/T2e"
0x1002 0x4E56 CHIP_RV350|CHIP_IS_MOBILITY "ATI Radeon Mobility 9550"
0x1002 0x5144 CHIP_R100|CHIP_SINGLE_CRTC "ATI Radeon QD R100"
0x1002 0x5145 CHIP_R100|CHIP_SINGLE_CRTC "ATI Radeon QE R100"
0x1002 0x5146 CHIP_R100|CHIP_SINGLE_CRTC "ATI Radeon QF R100"
0x1002 0x5147 CHIP_R100|CHIP_SINGLE_CRTC "ATI Radeon QG R100"
0x1002 0x4E50 CHIP_RV350|RADEON_IS_MOBILITY "ATI Radeon RV350 Mobility 9600 M10 NP"
0x1002 0x4E51 CHIP_RV350|RADEON_IS_MOBILITY "ATI Radeon RV350 Mobility 9600 M10 NQ"
0x1002 0x4E52 CHIP_RV350|RADEON_IS_MOBILITY "ATI Radeon RV350 Mobility 9600 M11 NR"
0x1002 0x4E53 CHIP_RV350|RADEON_IS_MOBILITY "ATI Radeon RV350 Mobility 9600 M10 NS"
0x1002 0x4E54 CHIP_RV350|RADEON_IS_MOBILITY "ATI FireGL T2/T2e"
0x1002 0x4E56 CHIP_RV350|RADEON_IS_MOBILITY "ATI Radeon Mobility 9550"
0x1002 0x5144 CHIP_R100|RADEON_SINGLE_CRTC "ATI Radeon QD R100"
0x1002 0x5145 CHIP_R100|RADEON_SINGLE_CRTC "ATI Radeon QE R100"
0x1002 0x5146 CHIP_R100|RADEON_SINGLE_CRTC "ATI Radeon QF R100"
0x1002 0x5147 CHIP_R100|RADEON_SINGLE_CRTC "ATI Radeon QG R100"
0x1002 0x5148 CHIP_R200 "ATI Radeon QH R200 8500"
0x1002 0x514C CHIP_R200 "ATI Radeon QL R200 8500 LE"
0x1002 0x514D CHIP_R200 "ATI Radeon QM R200 9100"

@ -76,59 +76,59 @@
0x1002 0x5159 CHIP_RV100 "ATI Radeon QY RV100 7000/VE"
0x1002 0x515A CHIP_RV100 "ATI Radeon QZ RV100 7000/VE"
0x1002 0x515E CHIP_RV100 "ATI ES1000 RN50"
0x1002 0x5460 CHIP_RV380|CHIP_IS_MOBILITY "ATI Radeon Mobility X300 M22"
0x1002 0x5462 CHIP_RV380|CHIP_IS_MOBILITY "ATI Radeon Mobility X600 SE M24C"
0x1002 0x5464 CHIP_RV380|CHIP_IS_MOBILITY "ATI FireGL M22 GL 5464"
0x1002 0x5548 CHIP_R420|CHIP_NEW_MEMMAP "ATI Radeon R423 X800"
0x1002 0x5549 CHIP_R420|CHIP_NEW_MEMMAP "ATI Radeon R423 X800 Pro"
0x1002 0x554A CHIP_R420|CHIP_NEW_MEMMAP "ATI Radeon R423 X800 XT PE"
0x1002 0x554B CHIP_R420|CHIP_NEW_MEMMAP "ATI Radeon R423 X800 SE"
0x1002 0x554C CHIP_R420|CHIP_NEW_MEMMAP "ATI Radeon R430 X800 XTP"
0x1002 0x554D CHIP_R420|CHIP_NEW_MEMMAP "ATI Radeon R430 X800 XL"
0x1002 0x554E CHIP_R420|CHIP_NEW_MEMMAP "ATI Radeon R430 X800 SE"
0x1002 0x554F CHIP_R420|CHIP_NEW_MEMMAP "ATI Radeon R430 X800"
0x1002 0x5550 CHIP_R420|CHIP_NEW_MEMMAP "ATI FireGL V7100 R423"
0x1002 0x5551 CHIP_R420|CHIP_NEW_MEMMAP "ATI FireGL V5100 R423 UQ"
0x1002 0x5552 CHIP_R420|CHIP_NEW_MEMMAP "ATI FireGL unknown R423 UR"
0x1002 0x5554 CHIP_R420|CHIP_NEW_MEMMAP "ATI FireGL unknown R423 UT"
0x1002 0x564A CHIP_RV410|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP "ATI Mobility FireGL V5000 M26"
0x1002 0x564B CHIP_RV410|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP "ATI Mobility FireGL V5000 M26"
0x1002 0x564F CHIP_RV410|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP "ATI Radeon Mobility X700 XL M26"
0x1002 0x5652 CHIP_RV410|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP "ATI Radeon Mobility X700 M26"
0x1002 0x5653 CHIP_RV410|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP "ATI Radeon Mobility X700 M26"
0x1002 0x5834 CHIP_RS300|CHIP_IS_IGP "ATI Radeon RS300 9100 IGP"
0x1002 0x5835 CHIP_RS300|CHIP_IS_IGP|CHIP_IS_MOBILITY "ATI Radeon RS300 Mobility IGP"
0x1002 0x5460 CHIP_RV380|RADEON_IS_MOBILITY "ATI Radeon Mobility X300 M22"
0x1002 0x5462 CHIP_RV380|RADEON_IS_MOBILITY "ATI Radeon Mobility X600 SE M24C"
0x1002 0x5464 CHIP_RV380|RADEON_IS_MOBILITY "ATI FireGL M22 GL 5464"
0x1002 0x5548 CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R423 X800"
0x1002 0x5549 CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R423 X800 Pro"
0x1002 0x554A CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R423 X800 XT PE"
0x1002 0x554B CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R423 X800 SE"
0x1002 0x554C CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R430 X800 XTP"
0x1002 0x554D CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R430 X800 XL"
0x1002 0x554E CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R430 X800 SE"
0x1002 0x554F CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R430 X800"
0x1002 0x5550 CHIP_R420|RADEON_NEW_MEMMAP "ATI FireGL V7100 R423"
0x1002 0x5551 CHIP_R420|RADEON_NEW_MEMMAP "ATI FireGL V5100 R423 UQ"
0x1002 0x5552 CHIP_R420|RADEON_NEW_MEMMAP "ATI FireGL unknown R423 UR"
0x1002 0x5554 CHIP_R420|RADEON_NEW_MEMMAP "ATI FireGL unknown R423 UT"
0x1002 0x564A CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility FireGL V5000 M26"
0x1002 0x564B CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility FireGL V5000 M26"
0x1002 0x564F CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Radeon Mobility X700 XL M26"
0x1002 0x5652 CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Radeon Mobility X700 M26"
0x1002 0x5653 CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Radeon Mobility X700 M26"
0x1002 0x5834 CHIP_RS300|RADEON_IS_IGP "ATI Radeon RS300 9100 IGP"
0x1002 0x5835 CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY "ATI Radeon RS300 Mobility IGP"
0x1002 0x5960 CHIP_RV280 "ATI Radeon RV280 9250"
0x1002 0x5961 CHIP_RV280 "ATI Radeon RV280 9200"
0x1002 0x5962 CHIP_RV280 "ATI Radeon RV280 9200"
0x1002 0x5964 CHIP_RV280 "ATI Radeon RV280 9200 SE"
0x1002 0x5965 CHIP_RV280 "ATI FireMV 2200 PCI"
0x1002 0x5969 CHIP_RV100 "ATI ES1000 RN50"
0x1002 0x5b60 CHIP_RV380|CHIP_NEW_MEMMAP "ATI Radeon RV370 X300 SE"
0x1002 0x5b62 CHIP_RV380|CHIP_NEW_MEMMAP "ATI Radeon RV370 X600 Pro"
0x1002 0x5b63 CHIP_RV380|CHIP_NEW_MEMMAP "ATI Radeon RV370 X550"
0x1002 0x5b64 CHIP_RV380|CHIP_NEW_MEMMAP "ATI FireGL V3100 (RV370) 5B64"
0x1002 0x5b65 CHIP_RV380|CHIP_NEW_MEMMAP "ATI FireMV 2200 PCIE (RV370) 5B65"
0x1002 0x5c61 CHIP_RV280|CHIP_IS_MOBILITY "ATI Radeon RV280 Mobility"
0x1002 0x5c63 CHIP_RV280|CHIP_IS_MOBILITY "ATI Radeon RV280 Mobility"
0x1002 0x5d48 CHIP_R420|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP "ATI Mobility Radeon X800 XT M28"
0x1002 0x5d49 CHIP_R420|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP "ATI Mobility FireGL V5100 M28"
0x1002 0x5d4a CHIP_R420|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP "ATI Mobility Radeon X800 M28"
0x1002 0x5d4c CHIP_R420|CHIP_NEW_MEMMAP "ATI Radeon R480 X850"
0x1002 0x5d4d CHIP_R420|CHIP_NEW_MEMMAP "ATI Radeon R480 X850 XT PE"
0x1002 0x5d4e CHIP_R420|CHIP_NEW_MEMMAP "ATI Radeon R480 X850 SE"
0x1002 0x5d4f CHIP_R420|CHIP_NEW_MEMMAP "ATI Radeon R480 X850 Pro"
0x1002 0x5d50 CHIP_R420|CHIP_NEW_MEMMAP "ATI unknown Radeon / FireGL R480"
0x1002 0x5d52 CHIP_R420|CHIP_NEW_MEMMAP "ATI Radeon R480 X850 XT"
0x1002 0x5d57 CHIP_R420|CHIP_NEW_MEMMAP "ATI Radeon R423 X800 XT"
0x1002 0x5e48 CHIP_RV410|CHIP_NEW_MEMMAP "ATI FireGL V5000 RV410"
0x1002 0x5e4a CHIP_RV410|CHIP_NEW_MEMMAP "ATI Radeon RV410 X700 XT"
0x1002 0x5e4b CHIP_RV410|CHIP_NEW_MEMMAP "ATI Radeon RV410 X700 Pro"
0x1002 0x5e4c CHIP_RV410|CHIP_NEW_MEMMAP "ATI Radeon RV410 X700 SE"
0x1002 0x5e4d CHIP_RV410|CHIP_NEW_MEMMAP "ATI Radeon RV410 X700"
0x1002 0x5e4f CHIP_RV410|CHIP_NEW_MEMMAP "ATI Radeon RV410 X700 SE"
0x1002 0x7834 CHIP_RS300|CHIP_IS_IGP|CHIP_NEW_MEMMAP "ATI Radeon RS350 9000/9100 IGP"
0x1002 0x7835 CHIP_RS300|CHIP_IS_IGP|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP "ATI Radeon RS350 Mobility IGP"
0x1002 0x5b60 CHIP_RV380|RADEON_NEW_MEMMAP "ATI Radeon RV370 X300 SE"
0x1002 0x5b62 CHIP_RV380|RADEON_NEW_MEMMAP "ATI Radeon RV370 X600 Pro"
0x1002 0x5b63 CHIP_RV380|RADEON_NEW_MEMMAP "ATI Radeon RV370 X550"
0x1002 0x5b64 CHIP_RV380|RADEON_NEW_MEMMAP "ATI FireGL V3100 (RV370) 5B64"
0x1002 0x5b65 CHIP_RV380|RADEON_NEW_MEMMAP "ATI FireMV 2200 PCIE (RV370) 5B65"
0x1002 0x5c61 CHIP_RV280|RADEON_IS_MOBILITY "ATI Radeon RV280 Mobility"
0x1002 0x5c63 CHIP_RV280|RADEON_IS_MOBILITY "ATI Radeon RV280 Mobility"
0x1002 0x5d48 CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon X800 XT M28"
0x1002 0x5d49 CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility FireGL V5100 M28"
0x1002 0x5d4a CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon X800 M28"
0x1002 0x5d4c CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R480 X850"
0x1002 0x5d4d CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R480 X850 XT PE"
0x1002 0x5d4e CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R480 X850 SE"
0x1002 0x5d4f CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R480 X850 Pro"
0x1002 0x5d50 CHIP_R420|RADEON_NEW_MEMMAP "ATI unknown Radeon / FireGL R480"
0x1002 0x5d52 CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R480 X850 XT"
0x1002 0x5d57 CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R423 X800 XT"
0x1002 0x5e48 CHIP_RV410|RADEON_NEW_MEMMAP "ATI FireGL V5000 RV410"
0x1002 0x5e4a CHIP_RV410|RADEON_NEW_MEMMAP "ATI Radeon RV410 X700 XT"
0x1002 0x5e4b CHIP_RV410|RADEON_NEW_MEMMAP "ATI Radeon RV410 X700 Pro"
0x1002 0x5e4c CHIP_RV410|RADEON_NEW_MEMMAP "ATI Radeon RV410 X700 SE"
0x1002 0x5e4d CHIP_RV410|RADEON_NEW_MEMMAP "ATI Radeon RV410 X700"
0x1002 0x5e4f CHIP_RV410|RADEON_NEW_MEMMAP "ATI Radeon RV410 X700 SE"
0x1002 0x7834 CHIP_RS300|RADEON_IS_IGP|RADEON_NEW_MEMMAP "ATI Radeon RS350 9000/9100 IGP"
0x1002 0x7835 CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Radeon RS350 Mobility IGP"

[r128]
0x1002 0x4c45 0 "ATI Rage 128 Mobility LE (PCI)"

@ -186,6 +186,7 @@
0x1002 0x4c51 0 "3D Rage LT Pro"
0x1002 0x4c42 0 "3D Rage LT Pro AGP-133"
0x1002 0x4c44 0 "3D Rage LT Pro AGP-66"
0x1002 0x4759 0 "Rage 3D IICATI 3D RAGE IIC AGP(A12/A13)"
0x1002 0x474c 0 "Rage XC"
0x1002 0x474f 0 "Rage XL"
0x1002 0x4752 0 "Rage XL"

@ -1,4 +1,4 @@
/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
/ i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.

@ -37,7 +37,6 @@
	     dev->pdev->device == 0x29A2 || \
	     dev->pdev->device == 0x2A02)

/* Really want an OS-independent resettable timer.  Would like to have
 * this loop run for (eg) 3 sec, but have the timer reset every time
 * the head pointer changes, so that EBUSY only happens if the ring

@ -164,6 +163,7 @@ static int i915_initialize(drm_device_t * dev,

	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;

	dev_priv->cpp = init->cpp;
	dev_priv->back_offset = init->back_offset;
	dev_priv->front_offset = init->front_offset;
	dev_priv->current_page = 0;

@ -196,9 +196,10 @@ static int i915_initialize(drm_device_t * dev,

	I915_WRITE(0x02080, dev_priv->dma_status_page);
	DRM_DEBUG("Enabled hardware status page\n");

	dev->dev_private = (void *)dev_priv;

#ifdef I915_HAVE_BUFFER
	drm_bo_driver_init(dev);
#endif
	return 0;
}

@ -435,17 +436,39 @@ static void i915_emit_breadcrumb(drm_device_t *dev)

	dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;

	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;

	BEGIN_LP_RING(4);
	OUT_RING(CMD_STORE_DWORD_IDX);
	OUT_RING(20);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	ADVANCE_LP_RING();
#ifdef I915_HAVE_FENCE
	drm_fence_flush_old(dev, dev_priv->counter);
#endif
}

int i915_emit_mi_flush(drm_device_t *dev, uint32_t flush)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t flush_cmd = CMD_MI_FLUSH;
	RING_LOCALS;

	flush_cmd |= flush;

	i915_kernel_lost_context(dev);

	BEGIN_LP_RING(4);
	OUT_RING(flush_cmd);
	OUT_RING(0);
	OUT_RING(0);
	OUT_RING(0);
	ADVANCE_LP_RING();

	return 0;
}

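/*
 * Illustrative sketch, not part of the commit: callers or the MI_FLUSH
 * modifier bits (MI_READ_FLUSH, MI_EXE_FLUSH, ..., defined in i915_drv.h in
 * this commit) on top of the base CMD_MI_FLUSH command, e.g. before
 * signalling a fence that promises a read-write flush.
 */
i915_emit_mi_flush(dev, MI_READ_FLUSH | MI_EXE_FLUSH);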
static int i915_dispatch_cmdbuffer(drm_device_t * dev,
				   drm_i915_cmdbuffer_t * cmd)
{

@ -566,7 +589,9 @@ static int i915_dispatch_flip(drm_device_t * dev)
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	ADVANCE_LP_RING();

#ifdef I915_HAVE_FENCE
	drm_fence_flush_old(dev, dev_priv->counter);
#endif
	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
	return 0;
}

@ -680,6 +705,7 @@ static int i915_flip_bufs(DRM_IOCTL_ARGS)
	return i915_dispatch_flip(dev);
}

static int i915_getparam(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;

@ -798,6 +824,7 @@ drm_ioctl_desc_t i915_ioctls[] = {
	[DRM_IOCTL_NR(DRM_I915_DESTROY_HEAP)] = { i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
	[DRM_IOCTL_NR(DRM_I915_SET_VBLANK_PIPE)] = { i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
	[DRM_IOCTL_NR(DRM_I915_GET_VBLANK_PIPE)] = { i915_vblank_pipe_get, DRM_AUTH },
	[DRM_IOCTL_NR(DRM_I915_VBLANK_SWAP)] = {i915_vblank_swap, DRM_AUTH},
};

int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);

@ -104,8 +104,27 @@ typedef struct _drm_i915_sarea {
	unsigned int depth_tiled;
	unsigned int rotated_tiled;
	unsigned int rotated2_tiled;

	int pipeA_x;
	int pipeA_y;
	int pipeA_w;
	int pipeA_h;
	int pipeB_x;
	int pipeB_y;
	int pipeB_w;
	int pipeB_h;
} drm_i915_sarea_t;

/* Driver specific fence types and classes.
 */

/* The only fence class we support */
#define DRM_I915_FENCE_CLASS_ACCEL 0
/* Fence type that guarantees read-write flush */
#define DRM_I915_FENCE_TYPE_RW 2
/* MI_FLUSH programmed just before the fence */
#define DRM_I915_FENCE_FLAG_FLUSHED 0x01000000

/* Flags for perf_boxes
 */
#define I915_BOX_RING_EMPTY	0x1

@ -132,6 +151,7 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_DESTROY_HEAP	0x0c
#define DRM_I915_SET_VBLANK_PIPE	0x0d
#define DRM_I915_GET_VBLANK_PIPE	0x0e
#define DRM_I915_VBLANK_SWAP	0x0f

#define DRM_IOCTL_I915_INIT	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH	DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)

@ -148,6 +168,7 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_DESTROY_HEAP	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
#define DRM_IOCTL_I915_SET_VBLANK_PIPE	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_GET_VBLANK_PIPE	DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_VBLANK_SWAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)

/* Allow drivers to submit batchbuffers directly to hardware, relying

@ -244,4 +265,12 @@ typedef struct drm_i915_vblank_pipe {
	int pipe;
} drm_i915_vblank_pipe_t;

/* Schedule buffer swap at given vertical blank:
 */
typedef struct drm_i915_vblank_swap {
	drm_drawable_t drawable;
	drm_vblank_seq_type_t seqtype;
	unsigned int sequence;
} drm_i915_vblank_swap_t;

#endif /* _I915_DRM_H_ */

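/*
 * Illustrative sketch, not part of the commit: scheduling a swap for a
 * drawable at the next vertical blank with the struct and ioctl added
 * above.  The "fd" and "drawable_id" variables are assumptions.
 */
drm_i915_vblank_swap_t swap;
memset(&swap, 0, sizeof(swap));
swap.drawable = drawable_id;
swap.seqtype = _DRM_VBLANK_RELATIVE;	/* sequence counts from "now" */
swap.sequence = 1;			/* i.e. the next vblank */
if (ioctl(fd, DRM_IOCTL_I915_VBLANK_SWAP, &swap) == 0) {
	/* swap.sequence is written back as the absolute target sequence */
}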
@ -35,9 +35,9 @@

#define DRIVER_AUTHOR	"Tungsten Graphics, Inc."

#define DRIVER_NAME	"i915"
#define DRIVER_NAME	"i915-mm"
#define DRIVER_DESC	"Intel Graphics"
#define DRIVER_DATE	"20060119"
#define DRIVER_DATE	"20060929"

/* Interface history:
 *

@ -46,11 +46,18 @@
 * 1.3: Add vblank support
 * 1.4: Fix cmdbuffer path, add heap destroy
 * 1.5: Add vblank pipe configuration
 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 *      - Support vertical blank on secondary display pipe
 */
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		5
#define DRIVER_MINOR		7
#define DRIVER_PATCHLEVEL	0

#if defined(__linux__)
#define I915_HAVE_FENCE
#define I915_HAVE_BUFFER
#endif

typedef struct _drm_i915_ring_buffer {
	int tail_mask;
	unsigned long Start;

@ -71,6 +78,13 @@ struct mem_block {
	DRMFILE filp;		/* 0: free, -1: heap, other: real files */
};

typedef struct _drm_i915_vbl_swap {
	struct list_head head;
	drm_drawable_t drw_id;
	unsigned int pipe;
	unsigned int sequence;
} drm_i915_vbl_swap_t;

typedef struct drm_i915_private {
	drm_local_map_t *sarea;
	drm_local_map_t *mmio_map;

@ -81,8 +95,9 @@ typedef struct drm_i915_private {
	drm_dma_handle_t *status_page_dmah;
	void *hw_status_page;
	dma_addr_t dma_status_page;
	unsigned long counter;
	uint32_t counter;

	unsigned int cpp;
	int back_offset;
	int front_offset;
	int current_page;

@ -98,6 +113,22 @@ typedef struct drm_i915_private {
	struct mem_block *agp_heap;
	unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
	int vblank_pipe;
	spinlock_t user_irq_lock;
	int user_irq_refcount;
	int fence_irq_on;
	uint32_t irq_enable_reg;
	int irq_enabled;

#ifdef I915_HAVE_FENCE
	uint32_t flush_sequence;
	uint32_t flush_flags;
	uint32_t flush_pending;
	uint32_t saved_flush_status;
#endif

	spinlock_t swaps_lock;
	drm_i915_vbl_swap_t vbl_swaps;
	unsigned int swaps_pending;
} drm_i915_private_t;

extern drm_ioctl_desc_t i915_ioctls[];

@ -111,18 +142,25 @@ extern void i915_driver_preclose(drm_device_t * dev, DRMFILE filp);
extern int i915_driver_device_is_agp(drm_device_t * dev);
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg);
extern int i915_emit_mi_flush(drm_device_t *dev, uint32_t flush);

/* i915_irq.c */
extern int i915_irq_emit(DRM_IOCTL_ARGS);
extern int i915_irq_wait(DRM_IOCTL_ARGS);

extern int i915_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence);
extern int i915_driver_vblank_wait2(drm_device_t *dev, unsigned int *sequence);
extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
extern void i915_driver_irq_preinstall(drm_device_t * dev);
extern void i915_driver_irq_postinstall(drm_device_t * dev);
extern void i915_driver_irq_uninstall(drm_device_t * dev);
extern int i915_vblank_pipe_set(DRM_IOCTL_ARGS);
extern int i915_vblank_pipe_get(DRM_IOCTL_ARGS);
extern int i915_emit_irq(drm_device_t * dev);
extern void i915_user_irq_on(drm_i915_private_t *dev_priv);
extern void i915_user_irq_off(drm_i915_private_t *dev_priv);
extern int i915_vblank_swap(DRM_IOCTL_ARGS);

/* i915_mem.c */
extern int i915_mem_alloc(DRM_IOCTL_ARGS);

@ -132,6 +170,23 @@ extern int i915_mem_destroy_heap(DRM_IOCTL_ARGS);
extern void i915_mem_takedown(struct mem_block **heap);
extern void i915_mem_release(drm_device_t * dev,
			     DRMFILE filp, struct mem_block *heap);
#ifdef I915_HAVE_FENCE
/* i915_fence.c */

extern void i915_fence_handler(drm_device_t *dev);
extern int i915_fence_emit_sequence(drm_device_t *dev, uint32_t flags,
				    uint32_t *sequence,
				    uint32_t *native_type);
extern void i915_poke_flush(drm_device_t *dev);
#endif

#ifdef I915_HAVE_BUFFER
/* i915_buffer.c */
extern drm_ttm_backend_t *i915_create_ttm_backend_entry(drm_device_t *dev);
extern int i915_fence_types(uint32_t buffer_flags, uint32_t *class, uint32_t *type);
extern int i915_invalidate_caches(drm_device_t *dev, uint32_t buffer_flags);
#endif

#define I915_READ(reg)		DRM_READ32(dev_priv->mmio_map, (reg))
#define I915_WRITE(reg,val)	DRM_WRITE32(dev_priv->mmio_map, (reg), (val))

@ -182,6 +237,11 @@ extern int i915_wait_ring(drm_device_t * dev, int n, const char *caller);
#define INST_OP_FLUSH		0x02000000
#define INST_FLUSH_MAP_CACHE	0x00000001

#define CMD_MI_FLUSH		(0x04 << 23)
#define MI_NO_WRITE_FLUSH	(1 << 2)
#define MI_READ_FLUSH		(1 << 0)
#define MI_EXE_FLUSH		(1 << 1)

#define BB1_START_ADDR_MASK	(~0x7)
#define BB1_PROTECTED		(1<<0)
#define BB1_UNPROTECTED		(0<<0)

@ -191,6 +251,7 @@ extern int i915_wait_ring(drm_device_t * dev, int n, const char *caller);
#define I915REG_INT_IDENTITY_R	0x020a4
#define I915REG_INT_MASK_R	0x020a8
#define I915REG_INT_ENABLE_R	0x020a0
#define I915REG_INSTPM		0x020c0

#define SRX_INDEX	0x3c4
#define SRX_DATA	0x3c5

@ -256,6 +317,10 @@ extern int i915_wait_ring(drm_device_t * dev, int n, const char *caller);

#define GFX_OP_DRAWRECT_INFO_I965	((0x7900<<16)|0x2)

#define XY_SRC_COPY_BLT_CMD		((2<<29)|(0x53<<22)|6)
#define XY_SRC_COPY_BLT_WRITE_ALPHA	(1<<21)
#define XY_SRC_COPY_BLT_WRITE_RGB	(1<<20)

#define MI_BATCH_BUFFER		((0x30<<23)|1)
#define MI_BATCH_BUFFER_START	(0x31<<23)
#define MI_BATCH_BUFFER_END	(0xA<<23)

@ -272,6 +337,6 @@ extern int i915_wait_ring(drm_device_t * dev, int n, const char *caller);

#define CMD_OP_DESTBUFFER_INFO	((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)

#define READ_BREADCRUMB(dev_priv)  (((u32*)(dev_priv->hw_status_page))[5])

#define READ_BREADCRUMB(dev_priv)  (((volatile u32*)(dev_priv->hw_status_page))[5])
#define READ_HWSP(dev_priv, reg)   (((volatile u32*)(dev_priv->hw_status_page))[reg])
#endif

@ -37,6 +37,99 @@

#define MAX_NOPID ((u32)~0)

/**
 * Emit blits for scheduled buffer swaps.
 *
 * This function will be called with the HW lock held.
 */
static void i915_vblank_tasklet(drm_device_t *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	struct list_head *list, *tmp;

	DRM_DEBUG("\n");

	spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);

	list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
		drm_i915_vbl_swap_t *vbl_swap =
			list_entry(list, drm_i915_vbl_swap_t, head);
		atomic_t *counter = vbl_swap->pipe ? &dev->vbl_received2 :
			&dev->vbl_received;

		if ((atomic_read(counter) - vbl_swap->sequence) <= (1<<23)) {
			drm_drawable_info_t *drw;

			spin_unlock(&dev_priv->swaps_lock);

			spin_lock(&dev->drw_lock);

			drw = drm_get_drawable_info(dev, vbl_swap->drw_id);

			if (drw) {
				int i, num_rects = drw->num_rects;
				drm_clip_rect_t *rect = drw->rects;
				drm_i915_sarea_t *sarea_priv =
					dev_priv->sarea_priv;
				u32 cpp = dev_priv->cpp;
				u32 cmd = (cpp == 4) ? (XY_SRC_COPY_BLT_CMD |
							XY_SRC_COPY_BLT_WRITE_ALPHA |
							XY_SRC_COPY_BLT_WRITE_RGB)
						     : XY_SRC_COPY_BLT_CMD;
				u32 pitchropcpp = (sarea_priv->pitch * cpp) |
						  (0xcc << 16) | (cpp << 23) |
						  (1 << 24);
				RING_LOCALS;

				i915_kernel_lost_context(dev);

				BEGIN_LP_RING(6);

				OUT_RING(GFX_OP_DRAWRECT_INFO);
				OUT_RING(0);
				OUT_RING(0);
				OUT_RING(sarea_priv->width |
					 sarea_priv->height << 16);
				OUT_RING(sarea_priv->width |
					 sarea_priv->height << 16);
				OUT_RING(0);

				ADVANCE_LP_RING();

				sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT;

				for (i = 0; i < num_rects; i++, rect++) {
					BEGIN_LP_RING(8);

					OUT_RING(cmd);
					OUT_RING(pitchropcpp);
					OUT_RING((rect->y1 << 16) | rect->x1);
					OUT_RING((rect->y2 << 16) | rect->x2);
					OUT_RING(sarea_priv->front_offset);
					OUT_RING((rect->y1 << 16) | rect->x1);
					OUT_RING(pitchropcpp & 0xffff);
					OUT_RING(sarea_priv->back_offset);

					ADVANCE_LP_RING();
				}
			}

			spin_unlock(&dev->drw_lock);

			spin_lock(&dev_priv->swaps_lock);

			list_del(list);

			drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);

			dev_priv->swaps_pending--;
		}
	}

	spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
}

irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
	drm_device_t *dev = (drm_device_t *) arg;

@ -45,10 +138,11 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)

	temp = I915_READ16(I915REG_INT_IDENTITY_R);

	temp &= (USER_INT_FLAG | VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG);
	temp &= (dev_priv->irq_enable_reg | USER_INT_FLAG);

#if 0
	DRM_DEBUG("%s flag=%08x\n", __FUNCTION__, temp);
#endif
	if (temp == 0)
		return IRQ_NONE;

@ -56,19 +150,40 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)

	dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

	if (temp & USER_INT_FLAG)
	if (temp & USER_INT_FLAG) {
		DRM_WAKEUP(&dev_priv->irq_queue);
#ifdef I915_HAVE_FENCE
		i915_fence_handler(dev);
#endif
	}

	if (temp & (VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG)) {
		atomic_inc(&dev->vbl_received);
		int vblank_pipe = dev_priv->vblank_pipe;

		if ((vblank_pipe &
		     (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B))
		    == (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B)) {
			if (temp & VSYNC_PIPEA_FLAG)
				atomic_inc(&dev->vbl_received);
			if (temp & VSYNC_PIPEB_FLAG)
				atomic_inc(&dev->vbl_received2);
		} else if (((temp & VSYNC_PIPEA_FLAG) &&
			    (vblank_pipe & DRM_I915_VBLANK_PIPE_A)) ||
			   ((temp & VSYNC_PIPEB_FLAG) &&
			    (vblank_pipe & DRM_I915_VBLANK_PIPE_B)))
			atomic_inc(&dev->vbl_received);

		DRM_WAKEUP(&dev->vbl_queue);
		drm_vbl_send_signals(dev);

		if (dev_priv->swaps_pending > 0)
			drm_locked_tasklet(dev, i915_vblank_tasklet);
	}

	return IRQ_HANDLED;
}

static int i915_emit_irq(drm_device_t * dev)
int i915_emit_irq(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

@ -98,6 +213,28 @@ static int i915_emit_irq(drm_device_t * dev)

}

void i915_user_irq_on(drm_i915_private_t *dev_priv)
{
	spin_lock(&dev_priv->user_irq_lock);
	if (dev_priv->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
		dev_priv->irq_enable_reg |= USER_INT_FLAG;
		I915_WRITE16(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg);
	}
	spin_unlock(&dev_priv->user_irq_lock);
}

void i915_user_irq_off(drm_i915_private_t *dev_priv)
{
	spin_lock(&dev_priv->user_irq_lock);
	if (dev_priv->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
		// dev_priv->irq_enable_reg &= ~USER_INT_FLAG;
		// I915_WRITE16(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg);
	}
	spin_unlock(&dev_priv->user_irq_lock);
}

static int i915_wait_irq(drm_device_t * dev, int irq_nr)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

@ -110,9 +247,11 @@ static int i915_wait_irq(drm_device_t * dev, int irq_nr)
		return 0;

	dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

	i915_user_irq_on(dev_priv);
	DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
		    READ_BREADCRUMB(dev_priv) >= irq_nr);
	i915_user_irq_off(dev_priv);

	if (ret == DRM_ERR(EBUSY)) {
		DRM_ERROR("%s: EBUSY -- rec: %d emitted: %d\n",

@ -124,7 +263,8 @@ static int i915_wait_irq(drm_device_t * dev, int irq_nr)
	return ret;
}

int i915_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence)
static int i915_driver_vblank_do_wait(drm_device_t *dev, unsigned int *sequence,
				      atomic_t *counter)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned int cur_vblank;

@ -136,7 +276,7 @@ int i915_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence)
	}

	DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
		    (((cur_vblank = atomic_read(&dev->vbl_received))
		    (((cur_vblank = atomic_read(counter))
		      - *sequence) <= (1<<23)));

	*sequence = cur_vblank;

@ -144,6 +284,16 @@ int i915_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence)
	return ret;
}

int i915_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence)
{
	return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received);
}

int i915_driver_vblank_wait2(drm_device_t *dev, unsigned int *sequence)
{
	return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received2);
}

/* Needs the lock as it touches the ring.
 */
int i915_irq_emit(DRM_IOCTL_ARGS)

@ -192,23 +342,18 @@ int i915_irq_wait(DRM_IOCTL_ARGS)
	return i915_wait_irq(dev, irqwait.irq_seq);
}

static int i915_enable_interrupt (drm_device_t *dev)
static void i915_enable_interrupt (drm_device_t *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u16 flag;

	flag = 0;
	dev_priv->irq_enable_reg = USER_INT_FLAG;
	if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A)
		flag |= VSYNC_PIPEA_FLAG;
		dev_priv->irq_enable_reg |= VSYNC_PIPEA_FLAG;
	if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)
		flag |= VSYNC_PIPEB_FLAG;
	if (dev_priv->vblank_pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) {
		DRM_ERROR("%s called with invalid pipe 0x%x\n",
			  __FUNCTION__, dev_priv->vblank_pipe);
		return DRM_ERR(EINVAL);
	}
	I915_WRITE16(I915REG_INT_ENABLE_R, USER_INT_FLAG | flag);
	return 0;
		dev_priv->irq_enable_reg |= VSYNC_PIPEB_FLAG;

	I915_WRITE16(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg);
	dev_priv->irq_enabled = 1;
}

/* Set the vblank monitor pipe

@ -227,8 +372,17 @@ int i915_vblank_pipe_set(DRM_IOCTL_ARGS)
	DRM_COPY_FROM_USER_IOCTL(pipe, (drm_i915_vblank_pipe_t __user *) data,
				 sizeof(pipe));

	if (pipe.pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) {
		DRM_ERROR("%s called with invalid pipe 0x%x\n",
			  __FUNCTION__, pipe.pipe);
		return DRM_ERR(EINVAL);
	}

	dev_priv->vblank_pipe = pipe.pipe;
	return i915_enable_interrupt (dev);

	i915_enable_interrupt (dev);

	return 0;
}

int i915_vblank_pipe_get(DRM_IOCTL_ARGS)

@ -254,13 +408,125 @@ int i915_vblank_pipe_get(DRM_IOCTL_ARGS)
	return 0;
}

/**
 * Schedule buffer swap at given vertical blank.
 */
int i915_vblank_swap(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_vblank_swap_t swap;
	drm_i915_vbl_swap_t *vbl_swap;
	unsigned int pipe, seqtype, curseq;
	unsigned long irqflags;
	struct list_head *list;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __func__);
		return DRM_ERR(EINVAL);
	}

	if (dev_priv->sarea_priv->rotation) {
		DRM_DEBUG("Rotation not supported\n");
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL(swap, (drm_i915_vblank_swap_t __user *) data,
				 sizeof(swap));

	if (swap.seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE |
			     _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)) {
		DRM_ERROR("Invalid sequence type 0x%x\n", swap.seqtype);
		return DRM_ERR(EINVAL);
	}

	pipe = (swap.seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;

	seqtype = swap.seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE);

	if (!(dev_priv->vblank_pipe & (1 << pipe))) {
		DRM_ERROR("Invalid pipe %d\n", pipe);
		return DRM_ERR(EINVAL);
	}

	spin_lock_irqsave(&dev->drw_lock, irqflags);

	if (!drm_get_drawable_info(dev, swap.drawable)) {
		spin_unlock_irqrestore(&dev->drw_lock, irqflags);
		DRM_ERROR("Invalid drawable ID %d\n", swap.drawable);
		return DRM_ERR(EINVAL);
	}

	spin_unlock_irqrestore(&dev->drw_lock, irqflags);

	curseq = atomic_read(pipe ? &dev->vbl_received2 : &dev->vbl_received);

	if (seqtype == _DRM_VBLANK_RELATIVE)
		swap.sequence += curseq;

	if ((curseq - swap.sequence) <= (1<<23)) {
		if (swap.seqtype & _DRM_VBLANK_NEXTONMISS) {
			swap.sequence = curseq + 1;
		} else {
			DRM_DEBUG("Missed target sequence\n");
			return DRM_ERR(EINVAL);
		}
	}

	spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);

	list_for_each(list, &dev_priv->vbl_swaps.head) {
		vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head);

		if (vbl_swap->drw_id == swap.drawable &&
		    vbl_swap->pipe == pipe &&
		    vbl_swap->sequence == swap.sequence) {
			spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
			DRM_DEBUG("Already scheduled\n");
			return 0;
		}
	}

	spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);

	if (dev_priv->swaps_pending >= 100) {
		DRM_DEBUG("Too many swaps queued\n");
		return DRM_ERR(EBUSY);
	}

	vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER);

	if (!vbl_swap) {
		DRM_ERROR("Failed to allocate memory to queue swap\n");
		return DRM_ERR(ENOMEM);
	}

	DRM_DEBUG("\n");

	vbl_swap->drw_id = swap.drawable;
	vbl_swap->pipe = pipe;
	vbl_swap->sequence = swap.sequence;

	spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);

	list_add_tail((struct list_head *)vbl_swap, &dev_priv->vbl_swaps.head);
	dev_priv->swaps_pending++;

	spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);

	DRM_COPY_TO_USER_IOCTL((drm_i915_vblank_swap_t __user *) data, swap,
			       sizeof(swap));

	return 0;
}

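/*
 * Illustrative note, not part of the commit: the "(curseq - swap.sequence)
 * <= (1<<23)" test above does its comparison in modular 32-bit arithmetic,
 * so a target still counts as "already passed" across a counter wrap:
 */
unsigned int curseq = 0x00000002;	/* counter wrapped a moment ago */
unsigned int target = 0xfffffffe;	/* scheduled just before the wrap */
/* curseq - target == 4 (mod 2^32), which is <= (1<<23): target missed. */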
/* drm_dma.h hooks
 */
void i915_driver_irq_preinstall(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	I915_WRITE16(I915REG_HWSTAM, 0xfffe);
	I915_WRITE16(I915REG_HWSTAM, 0xeffe);
	I915_WRITE16(I915REG_INT_MASK_R, 0x0);
	I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
}

@ -269,8 +535,30 @@ void i915_driver_irq_postinstall(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	dev_priv->swaps_lock = SPIN_LOCK_UNLOCKED;
	INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
	dev_priv->swaps_pending = 0;

	if (!dev_priv->vblank_pipe)
		dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;

	dev_priv->swaps_lock = SPIN_LOCK_UNLOCKED;
	INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
	dev_priv->swaps_pending = 0;

	dev_priv->user_irq_lock = SPIN_LOCK_UNLOCKED;
	dev_priv->user_irq_refcount = 0;

	if (!dev_priv->vblank_pipe)
		dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;
	i915_enable_interrupt(dev);
	DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);

	/*
	 * Initialize the hardware status page IRQ location.
	 */

	I915_WRITE(I915REG_INSTPM, (1 << 5) | (1 << 21));
}

void i915_driver_irq_uninstall(drm_device_t * dev)

@ -280,6 +568,7 @@ void i915_driver_irq_uninstall(drm_device_t * dev)
	if (!dev_priv)
		return;

	dev_priv->irq_enabled = 0;
	I915_WRITE16(I915REG_HWSTAM, 0xffff);
	I915_WRITE16(I915REG_INT_MASK_R, 0xffff);
	I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);

@ -815,17 +815,18 @@ static int mach64_do_dma_init(drm_device_t * dev, drm_mach64_init_t * init)
return DRM_ERR(EINVAL);
}

dev_priv->ring_map = drm_core_findmap(dev, init->ring_offset);
if (!dev_priv->ring_map) {
DRM_ERROR("can not find ring map!\n");
dev->dev_private = (void *)dev_priv;
mach64_do_cleanup_dma(dev);
return DRM_ERR(EINVAL);
}

dev_priv->sarea_priv = (drm_mach64_sarea_t *)
((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);

if (!dev_priv->is_pci) {
dev_priv->ring_map = drm_core_findmap(dev, init->ring_offset);
if (!dev_priv->ring_map) {
DRM_ERROR("can not find ring map!\n");
dev->dev_private = (void *)dev_priv;
mach64_do_cleanup_dma(dev);
return DRM_ERR(EINVAL);
}
drm_core_ioremap(dev_priv->ring_map, dev);
if (!dev_priv->ring_map->handle) {
DRM_ERROR("can not ioremap virtual address for"

@ -834,6 +835,7 @@ static int mach64_do_dma_init(drm_device_t * dev, drm_mach64_init_t * init)
mach64_do_cleanup_dma(dev);
return DRM_ERR(ENOMEM);
}
dev->agp_buffer_token = init->buffers_offset;
dev->agp_buffer_map =
drm_core_findmap(dev, init->buffers_offset);
if (!dev->agp_buffer_map) {

@ -890,27 +892,9 @@ static int mach64_do_dma_init(drm_device_t * dev, drm_mach64_init_t * init)
}
}

/* allocate descriptor memory from pci pool */
DRM_DEBUG("Allocating dma descriptor ring\n");
dev_priv->ring.size = 0x4000; /* 16KB */

if (dev_priv->is_pci) {
dev_priv->ring.dmah = drm_pci_alloc(dev, dev_priv->ring.size,
dev_priv->ring.size,
0xfffffffful);

if (!dev_priv->ring.dmah) {
DRM_ERROR("Allocating dma descriptor ring failed\n");
return DRM_ERR(ENOMEM);
} else {
dev_priv->ring.start = dev_priv->ring.dmah->vaddr;
dev_priv->ring.start_addr =
(u32) dev_priv->ring.dmah->busaddr;
}
} else {
dev_priv->ring.start = dev_priv->ring_map->handle;
dev_priv->ring.start_addr = (u32) dev_priv->ring_map->offset;
}
dev_priv->ring.start = dev_priv->ring_map->handle;
dev_priv->ring.start_addr = (u32) dev_priv->ring_map->offset;

memset(dev_priv->ring.start, 0, dev_priv->ring.size);
DRM_INFO("descriptor ring: cpu addr %p, bus addr: 0x%08x\n",

@ -1148,18 +1132,14 @@ int mach64_do_cleanup_dma(drm_device_t * dev)
if (dev->dev_private) {
drm_mach64_private_t *dev_priv = dev->dev_private;

if (dev_priv->is_pci) {
if (dev_priv->ring.dmah) {
drm_pci_free(dev, dev_priv->ring.dmah);
}
} else {
if (!dev_priv->is_pci) {
if (dev_priv->ring_map)
drm_core_ioremapfree(dev_priv->ring_map, dev);
}

if (dev->agp_buffer_map) {
drm_core_ioremapfree(dev->agp_buffer_map, dev);
dev->agp_buffer_map = NULL;
if (dev->agp_buffer_map) {
drm_core_ioremapfree(dev->agp_buffer_map, dev);
dev->agp_buffer_map = NULL;
}
}

mach64_destroy_freelist(dev);

@ -1328,17 +1308,88 @@ int mach64_do_release_used_buffers(drm_mach64_private_t * dev_priv)
return 0;
}

static int mach64_do_reclaim_completed(drm_mach64_private_t * dev_priv)
{
drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
struct list_head *ptr;
struct list_head *tmp;
drm_mach64_freelist_t *entry;
u32 head, tail, ofs;

mach64_ring_tick(dev_priv, ring);
head = ring->head;
tail = ring->tail;

if (head == tail) {
#if MACH64_EXTRA_CHECKING
if (MACH64_READ(MACH64_GUI_STAT) & MACH64_GUI_ACTIVE) {
DRM_ERROR("Empty ring with non-idle engine!\n");
mach64_dump_ring_info(dev_priv);
return -1;
}
#endif
/* last pass is complete, so release everything */
mach64_do_release_used_buffers(dev_priv);
DRM_DEBUG("%s: idle engine, freed all buffers.\n",
__FUNCTION__);
if (list_empty(&dev_priv->free_list)) {
DRM_ERROR("Freelist empty with idle engine\n");
return -1;
}
return 0;
}
/* Look for a completed buffer and bail out of the loop
* as soon as we find one -- don't waste time trying
* to free extra bufs here, leave that to do_release_used_buffers
*/
list_for_each_safe(ptr, tmp, &dev_priv->pending) {
entry = list_entry(ptr, drm_mach64_freelist_t, list);
ofs = entry->ring_ofs;
if (entry->discard &&
((head < tail && (ofs < head || ofs >= tail)) ||
(head > tail && (ofs < head && ofs >= tail)))) {
#if MACH64_EXTRA_CHECKING
int i;

for (i = head; i != tail; i = (i + 4) & ring->tail_mask)
{
u32 o1 = le32_to_cpu(((u32 *) ring->
start)[i + 1]);
u32 o2 = GETBUFADDR(entry->buf);

if (o1 == o2) {
DRM_ERROR
("Attempting to free used buffer: "
"i=%d buf=0x%08x\n",
i, o1);
mach64_dump_ring_info(dev_priv);
return -1;
}
}
#endif
/* found a processed buffer */
entry->buf->pending = 0;
list_del(ptr);
list_add_tail(ptr, &dev_priv->free_list);
DRM_DEBUG
("%s: freed processed buffer (head=%d tail=%d "
"buf ring ofs=%d).\n",
__FUNCTION__, head, tail, ofs);
return 0;
}
}

return 1;
}

drm_buf_t *mach64_freelist_get(drm_mach64_private_t * dev_priv)
{
drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
drm_mach64_freelist_t *entry;
struct list_head *ptr;
struct list_head *tmp;
int t;

if (list_empty(&dev_priv->free_list)) {
u32 head, tail, ofs;

if (list_empty(&dev_priv->pending)) {
DRM_ERROR
("Couldn't get buffer - pending and free lists empty\n");

@ -1350,81 +1401,15 @@ drm_buf_t *mach64_freelist_get(drm_mach64_private_t * dev_priv)
return NULL;
}

tail = ring->tail;
for (t = 0; t < dev_priv->usec_timeout; t++) {
mach64_ring_tick(dev_priv, ring);
head = ring->head;
int ret;

if (head == tail) {
#if MACH64_EXTRA_CHECKING
if (MACH64_READ(MACH64_GUI_STAT) &
MACH64_GUI_ACTIVE) {
DRM_ERROR
("Empty ring with non-idle engine!\n");
mach64_dump_ring_info(dev_priv);
return NULL;
}
#endif
/* last pass is complete, so release everything */
mach64_do_release_used_buffers(dev_priv);
DRM_DEBUG
("%s: idle engine, freed all buffers.\n",
__FUNCTION__);
if (list_empty(&dev_priv->free_list)) {
DRM_ERROR
("Freelist empty with idle engine\n");
return NULL;
}
ret = mach64_do_reclaim_completed(dev_priv);
if (ret == 0)
goto _freelist_entry_found;
}
/* Look for a completed buffer and bail out of the loop
* as soon as we find one -- don't waste time trying
* to free extra bufs here, leave that to do_release_used_buffers
*/
list_for_each_safe(ptr, tmp, &dev_priv->pending) {
entry =
list_entry(ptr, drm_mach64_freelist_t,
list);
ofs = entry->ring_ofs;
if (entry->discard &&
((head < tail
&& (ofs < head || ofs >= tail))
|| (head > tail
&& (ofs < head && ofs >= tail)))) {
#if MACH64_EXTRA_CHECKING
int i;
if (ret < 0)
return NULL;

for (i = head; i != tail;
i = (i + 4) & ring->tail_mask) {
u32 o1 =
le32_to_cpu(((u32 *) ring->
start)[i + 1]);
u32 o2 = GETBUFADDR(entry->buf);

if (o1 == o2) {
DRM_ERROR
("Attempting to free used buffer: "
"i=%d buf=0x%08x\n",
i, o1);
mach64_dump_ring_info
(dev_priv);
return NULL;
}
}
#endif
/* found a processed buffer */
entry->buf->pending = 0;
list_del(ptr);
entry->buf->used = 0;
list_add_tail(ptr,
&dev_priv->placeholders);
DRM_DEBUG
("%s: freed processed buffer (head=%d tail=%d "
"buf ring ofs=%d).\n",
__FUNCTION__, head, tail, ofs);
return entry->buf;
}
}
DRM_UDELAY(1);
}
mach64_dump_ring_info(dev_priv);

@ -1443,6 +1428,33 @@ drm_buf_t *mach64_freelist_get(drm_mach64_private_t * dev_priv)
return entry->buf;
}

int mach64_freelist_put(drm_mach64_private_t * dev_priv, drm_buf_t * copy_buf)
{
struct list_head *ptr;
drm_mach64_freelist_t *entry;

#if MACH64_EXTRA_CHECKING
list_for_each(ptr, &dev_priv->pending) {
entry = list_entry(ptr, drm_mach64_freelist_t, list);
if (copy_buf == entry->buf) {
DRM_ERROR("%s: Trying to release a pending buf\n",
__FUNCTION__);
return DRM_ERR(EFAULT);
}
}
#endif
ptr = dev_priv->placeholders.next;
entry = list_entry(ptr, drm_mach64_freelist_t, list);
copy_buf->pending = 0;
copy_buf->used = 0;
entry->buf = copy_buf;
entry->discard = 1;
list_del(ptr);
list_add_tail(ptr, &dev_priv->free_list);

return 0;
}

/*@}*/

@ -68,7 +68,7 @@
/* Max number of swaps allowed on the ring
* before the client must wait
*/
#define MACH64_MAX_QUEUED_FRAMES 3
#define MACH64_MAX_QUEUED_FRAMES 3U

/* Byte offsets for host blit buffer data
*/

@ -237,7 +237,7 @@ typedef struct drm_mach64_vertex {
} drm_mach64_vertex_t;

typedef struct drm_mach64_blit {
int idx;
void *buf;
int pitch;
int offset;
int format;

@ -42,9 +42,9 @@

#define DRIVER_NAME "mach64"
#define DRIVER_DESC "DRM module for the ATI Rage Pro"
#define DRIVER_DATE "20020904"
#define DRIVER_DATE "20060718"

#define DRIVER_MAJOR 1
#define DRIVER_MAJOR 2
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0

@ -61,7 +61,6 @@ typedef struct drm_mach64_freelist {
} drm_mach64_freelist_t;

typedef struct drm_mach64_descriptor_ring {
drm_dma_handle_t *dmah; /* Handle to pci dma memory */
void *start; /* write pointer (cpu address) to start of descriptor ring */
u32 start_addr; /* bus address of beginning of descriptor ring */
int size; /* size of ring in bytes */

@ -123,6 +122,8 @@ extern void mach64_driver_lastclose(drm_device_t * dev);
extern int mach64_init_freelist(drm_device_t * dev);
extern void mach64_destroy_freelist(drm_device_t * dev);
extern drm_buf_t *mach64_freelist_get(drm_mach64_private_t * dev_priv);
extern int mach64_freelist_put(drm_mach64_private_t * dev_priv,
drm_buf_t * copy_buf);

extern int mach64_do_wait_for_fifo(drm_mach64_private_t * dev_priv,
int entries);

@ -480,16 +480,16 @@ static int mach64_do_get_frames_queued(drm_mach64_private_t * dev_priv)
/* Copy and verify a client submited buffer.
* FIXME: Make an assembly optimized version
*/
static __inline__ int copy_and_verify_from_user(u32 *to,
const u32 __user *ufrom,
unsigned long bytes)
static __inline__ int copy_from_user_vertex(u32 *to,
const u32 __user *ufrom,
unsigned long bytes)
{
unsigned long n = bytes; /* dwords remaining in buffer */
u32 *from, *orig_from;

from = drm_alloc(bytes, DRM_MEM_DRIVER);
if (from == NULL)
return ENOMEM;
return DRM_ERR(ENOMEM);

if (DRM_COPY_FROM_USER(from, ufrom, bytes)) {
drm_free(from, bytes, DRM_MEM_DRIVER);

@ -546,12 +546,15 @@ static __inline__ int copy_and_verify_from_user(u32 *to,
}

static int mach64_dma_dispatch_vertex(DRMFILE filp, drm_device_t * dev,
int prim, void *buf, unsigned long used,
int discard)
drm_mach64_vertex_t * vertex)
{
drm_mach64_private_t *dev_priv = dev->dev_private;
drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_buf_t *copy_buf;
void *buf = vertex->buf;
unsigned long used = vertex->used;
int ret = 0;
int i = 0;
int done = 0;
int verify_ret = 0;
DMALOCALS;

@ -559,100 +562,92 @@ static int mach64_dma_dispatch_vertex(DRMFILE filp, drm_device_t * dev,
DRM_DEBUG("%s: buf=%p used=%lu nbox=%d\n",
__FUNCTION__, buf, used, sarea_priv->nbox);

if (used) {
int ret = 0;
int i = 0;
if (!used)
goto _vertex_done;

copy_buf = mach64_freelist_get(dev_priv);
if (copy_buf == NULL) {
DRM_ERROR("%s: couldn't get buffer in DMAGETPTR\n",
__FUNCTION__);
return DRM_ERR(EAGAIN);
}
copy_buf = mach64_freelist_get(dev_priv);
if (copy_buf == NULL) {
DRM_ERROR("%s: couldn't get buffer\n", __FUNCTION__);
return DRM_ERR(EAGAIN);
}

if ((verify_ret =
copy_and_verify_from_user(GETBUFPTR(copy_buf), buf,
used)) == 0) {
verify_ret = copy_from_user_vertex(GETBUFPTR(copy_buf), buf, used);

copy_buf->used = used;
if (verify_ret != 0) {
mach64_freelist_put(dev_priv, copy_buf);
goto _vertex_done;
}

DMASETPTR(copy_buf);
copy_buf->used = used;

if (sarea_priv->dirty & ~MACH64_UPLOAD_CLIPRECTS) {
ret = mach64_emit_state(filp, dev_priv);
if (ret < 0)
return ret;
DMASETPTR(copy_buf);

if (sarea_priv->dirty & ~MACH64_UPLOAD_CLIPRECTS) {
ret = mach64_emit_state(filp, dev_priv);
if (ret < 0)
return ret;
}

do {
/* Emit the next cliprect */
if (i < sarea_priv->nbox) {
ret = mach64_emit_cliprect(filp, dev_priv,
&sarea_priv->boxes[i]);
if (ret < 0) {
/* failed to get buffer */
return ret;
} else if (ret != 0) {
/* null intersection with scissor */
continue;
}

do {
/* Emit the next cliprect */
if (i < sarea_priv->nbox) {
ret =
mach64_emit_cliprect(filp, dev_priv,
&sarea_priv->
boxes[i]);
if (ret < 0) {
/* failed to get buffer */
return ret;
} else if (ret != 0) {
/* null intersection with scissor */
continue;
}
}
if ((i >= sarea_priv->nbox - 1))
done = 1;

/* Add the buffer to the DMA queue */
DMAADVANCE(dev_priv, done);

} while (++i < sarea_priv->nbox);
}
if ((i >= sarea_priv->nbox - 1))
done = 1;

if (copy_buf->pending && !done) {
/* Add the buffer to the DMA queue */
DMAADVANCE(dev_priv, done);

} while (++i < sarea_priv->nbox);

if (!done) {
if (copy_buf->pending) {
DMADISCARDBUF();
} else if (!done) {
/* This buffer wasn't used (no cliprects or verify failed), so place it back
* on the free list
} else {
/* This buffer wasn't used (no cliprects), so place it
* back on the free list
*/
struct list_head *ptr;
drm_mach64_freelist_t *entry;
#if MACH64_EXTRA_CHECKING
list_for_each(ptr, &dev_priv->pending) {
entry =
list_entry(ptr, drm_mach64_freelist_t,
list);
if (copy_buf == entry->buf) {
DRM_ERROR
("%s: Trying to release a pending buf\n",
__FUNCTION__);
return DRM_ERR(EFAULT);
}
}
#endif
ptr = dev_priv->placeholders.next;
entry = list_entry(ptr, drm_mach64_freelist_t, list);
copy_buf->pending = 0;
copy_buf->used = 0;
entry->buf = copy_buf;
entry->discard = 1;
list_del(ptr);
list_add_tail(ptr, &dev_priv->free_list);
mach64_freelist_put(dev_priv, copy_buf);
}
}

_vertex_done:
sarea_priv->dirty &= ~MACH64_UPLOAD_CLIPRECTS;
sarea_priv->nbox = 0;

return verify_ret;
}

static __inline__ int copy_from_user_blit(u32 *to,
const u32 __user *ufrom,
unsigned long bytes)
{
to = (u32 *)((char *)to + MACH64_HOSTDATA_BLIT_OFFSET);

if (DRM_COPY_FROM_USER(to, ufrom, bytes)) {
return DRM_ERR(EFAULT);
}

return 0;
}

static int mach64_dma_dispatch_blit(DRMFILE filp, drm_device_t * dev,
drm_mach64_blit_t * blit)
{
drm_mach64_private_t *dev_priv = dev->dev_private;
drm_device_dma_t *dma = dev->dma;
int dword_shift, dwords;
drm_buf_t *buf;
unsigned long used;
drm_buf_t *copy_buf;
int verify_ret = 0;
DMALOCALS;

/* The compiler won't optimize away a division by a variable,

@ -679,34 +674,34 @@ static int mach64_dma_dispatch_blit(DRMFILE filp, drm_device_t * dev,
return DRM_ERR(EINVAL);
}

/* Dispatch the blit buffer.
*/
buf = dma->buflist[blit->idx];

if (buf->filp != filp) {
DRM_ERROR("process %d (filp %p) using buffer with filp %p\n",
DRM_CURRENTPID, filp, buf->filp);
return DRM_ERR(EINVAL);
}

if (buf->pending) {
DRM_ERROR("sending pending buffer %d\n", blit->idx);
return DRM_ERR(EINVAL);
}

/* Set buf->used to the bytes of blit data based on the blit dimensions
* and verify the size. When the setup is emitted to the buffer with
* the DMA* macros below, buf->used is incremented to include the bytes
* used for setup as well as the blit data.
*/
dwords = (blit->width * blit->height) >> dword_shift;
buf->used = dwords << 2;
if (buf->used <= 0 ||
buf->used > MACH64_BUFFER_SIZE - MACH64_HOSTDATA_BLIT_OFFSET) {
DRM_ERROR("Invalid blit size: %d bytes\n", buf->used);
used = dwords << 2;
if (used <= 0 ||
used > MACH64_BUFFER_SIZE - MACH64_HOSTDATA_BLIT_OFFSET) {
DRM_ERROR("Invalid blit size: %lu bytes\n", used);
return DRM_ERR(EINVAL);
}

copy_buf = mach64_freelist_get(dev_priv);
if (copy_buf == NULL) {
DRM_ERROR("%s: couldn't get buffer\n", __FUNCTION__);
return DRM_ERR(EAGAIN);
}

verify_ret = copy_from_user_blit(GETBUFPTR(copy_buf), blit->buf, used);

if (verify_ret != 0) {
mach64_freelist_put(dev_priv, copy_buf);
goto _blit_done;
}

copy_buf->used = used;

/* FIXME: Use a last buffer flag and reduce the state emitted for subsequent,
* continuation buffers?
*/

@ -715,7 +710,7 @@ static int mach64_dma_dispatch_blit(DRMFILE filp, drm_device_t * dev,
* a register command every 16 dwords. State setup is added at the start of the
* buffer -- the client leaves space for this based on MACH64_HOSTDATA_BLIT_OFFSET
*/
DMASETPTR(buf);
DMASETPTR(copy_buf);

DMAOUTREG(MACH64_Z_CNTL, 0);
DMAOUTREG(MACH64_SCALE_3D_CNTL, 0);

@ -745,12 +740,13 @@ static int mach64_dma_dispatch_blit(DRMFILE filp, drm_device_t * dev,
DMAOUTREG(MACH64_DST_X_Y, (blit->y << 16) | blit->x);
DMAOUTREG(MACH64_DST_WIDTH_HEIGHT, (blit->height << 16) | blit->width);

DRM_DEBUG("%s: %d bytes\n", __FUNCTION__, buf->used);
DRM_DEBUG("%s: %lu bytes\n", __FUNCTION__, used);

/* Add the buffer to the queue */
DMAADVANCEHOSTDATA(dev_priv);

return 0;
_blit_done:
return verify_ret;
}

/* ================================================================

@ -842,14 +838,12 @@ int mach64_dma_vertex(DRM_IOCTL_ARGS)
if (sarea_priv->nbox > MACH64_NR_SAREA_CLIPRECTS)
sarea_priv->nbox = MACH64_NR_SAREA_CLIPRECTS;

return mach64_dma_dispatch_vertex(filp, dev, vertex.prim, vertex.buf,
vertex.used, vertex.discard);
return mach64_dma_dispatch_vertex(filp, dev, &vertex);
}

int mach64_dma_blit(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_device_dma_t *dma = dev->dma;
drm_mach64_private_t *dev_priv = dev->dev_private;
drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mach64_blit_t blit;

@ -860,15 +854,6 @@ int mach64_dma_blit(DRM_IOCTL_ARGS)
DRM_COPY_FROM_USER_IOCTL(blit, (drm_mach64_blit_t *) data,
sizeof(blit));

DRM_DEBUG("%s: pid=%d index=%d\n",
__FUNCTION__, DRM_CURRENTPID, blit.idx);

if (blit.idx < 0 || blit.idx >= dma->buf_count) {
DRM_ERROR("buffer index %d (of %d max)\n",
blit.idx, dma->buf_count - 1);
return DRM_ERR(EINVAL);
}

ret = mach64_dma_dispatch_blit(filp, dev, &blit);

/* Make sure we restore the 3D state next time.

@ -538,6 +538,36 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
return 0;
}

static __inline__ int r300_emit_indx_buffer(drm_radeon_private_t *dev_priv,
drm_radeon_kcmd_buffer_t *cmdbuf)
{
u32 *cmd = (u32 *) cmdbuf->buf;
int count, ret;
RING_LOCALS;

count=(cmd[0]>>16) & 0x3fff;

if ((cmd[1] & 0x8000ffff) != 0x80000810) {
DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]);
return DRM_ERR(EINVAL);
}
ret = r300_check_offset(dev_priv, cmd[2]);
if (ret) {
DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]);
return DRM_ERR(EINVAL);
}

BEGIN_RING(count+2);
OUT_RING(cmd[0]);
OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
ADVANCE_RING();

cmdbuf->buf += (count+2)*4;
cmdbuf->bufsz -= (count+2)*4;

return 0;
}

static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
drm_radeon_kcmd_buffer_t *cmdbuf)
{

@ -578,10 +608,11 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
case RADEON_CNTL_BITBLT_MULTI:
return r300_emit_bitblt_multi(dev_priv, cmdbuf);

case RADEON_CP_INDX_BUFFER: /* DRAW_INDX_2 without INDX_BUFFER seems to lock up the gpu */
return r300_emit_indx_buffer(dev_priv, cmdbuf);
case RADEON_CP_3D_DRAW_IMMD_2: /* triggers drawing using in-packet vertex data */
case RADEON_CP_3D_DRAW_VBUF_2: /* triggers drawing of vertex buffers setup elsewhere */
case RADEON_CP_3D_DRAW_INDX_2: /* triggers drawing using indices to vertex buffer */
case RADEON_CP_INDX_BUFFER: /* DRAW_INDX_2 without INDX_BUFFER seems to lock up the gpu */
case RADEON_WAIT_FOR_IDLE:
case RADEON_CP_NOP:
/* these packets are safe */

@ -1130,7 +1130,7 @@ static void radeon_cp_init_ring_buffer(drm_device_t * dev,
| (dev_priv->fb_location >> 16));

#if __OS_HAS_AGP
if (dev_priv->flags & CHIP_IS_AGP) {
if (dev_priv->flags & RADEON_IS_AGP) {
RADEON_WRITE(RADEON_AGP_BASE, (unsigned int)dev->agp->base);
RADEON_WRITE(RADEON_MC_AGP_LOCATION,
(((dev_priv->gart_vm_start - 1 +

@ -1158,7 +1158,7 @@ static void radeon_cp_init_ring_buffer(drm_device_t * dev,
dev_priv->ring.tail = cur_read_ptr;

#if __OS_HAS_AGP
if (dev_priv->flags & CHIP_IS_AGP) {
if (dev_priv->flags & RADEON_IS_AGP) {
RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR,
dev_priv->ring_rptr->offset
- dev->agp->base + dev_priv->gart_vm_start);

@ -1301,7 +1301,7 @@ static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
{
u32 tmp;

if (dev_priv->flags & CHIP_IS_PCIE) {
if (dev_priv->flags & RADEON_IS_PCIE) {
radeon_set_pciegart(dev_priv, on);
return;
}

@ -1339,26 +1339,26 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
DRM_DEBUG("\n");

/* if we require new memory map but we don't have it fail */
if ((dev_priv->flags & CHIP_NEW_MEMMAP) && !dev_priv->new_memmap)
if ((dev_priv->flags & RADEON_NEW_MEMMAP) && !dev_priv->new_memmap)
{
DRM_ERROR("Cannot initialise DRM on this card\nThis card requires a new X.org DDX for 3D\n");
radeon_do_cleanup_cp(dev);
return DRM_ERR(EINVAL);
}

if (init->is_pci && (dev_priv->flags & CHIP_IS_AGP))
if (init->is_pci && (dev_priv->flags & RADEON_IS_AGP))
{
DRM_DEBUG("Forcing AGP card to PCI mode\n");
dev_priv->flags &= ~CHIP_IS_AGP;
dev_priv->flags &= ~RADEON_IS_AGP;
}
else if (!(dev_priv->flags & (CHIP_IS_AGP | CHIP_IS_PCI | CHIP_IS_PCIE))
else if (!(dev_priv->flags & (RADEON_IS_AGP | RADEON_IS_PCI | RADEON_IS_PCIE))
&& !init->is_pci)
{
DRM_DEBUG("Restoring AGP flag\n");
dev_priv->flags |= CHIP_IS_AGP;
dev_priv->flags |= RADEON_IS_AGP;
}

if ((!(dev_priv->flags & CHIP_IS_AGP)) && !dev->sg) {
if ((!(dev_priv->flags & RADEON_IS_AGP)) && !dev->sg) {
DRM_ERROR("PCI GART memory not allocated!\n");
radeon_do_cleanup_cp(dev);
return DRM_ERR(EINVAL);

@ -1501,7 +1501,7 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
init->sarea_priv_offset);

#if __OS_HAS_AGP
if (dev_priv->flags & CHIP_IS_AGP) {
if (dev_priv->flags & RADEON_IS_AGP) {
drm_core_ioremap(dev_priv->cp_ring, dev);
drm_core_ioremap(dev_priv->ring_rptr, dev);
drm_core_ioremap(dev->agp_buffer_map, dev);

@ -1560,7 +1560,7 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
* align it down.
*/
#if __OS_HAS_AGP
if (dev_priv->flags & CHIP_IS_AGP) {
if (dev_priv->flags & RADEON_IS_AGP) {
base = dev->agp->base;
/* Check if valid */
if ((base + dev_priv->gart_size) > dev_priv->fb_location &&

@ -1590,7 +1590,7 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
}

#if __OS_HAS_AGP
if (dev_priv->flags & CHIP_IS_AGP)
if (dev_priv->flags & RADEON_IS_AGP)
dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
- dev->agp->base
+ dev_priv->gart_vm_start);

@ -1616,7 +1616,7 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;

#if __OS_HAS_AGP
if (dev_priv->flags & CHIP_IS_AGP) {
if (dev_priv->flags & RADEON_IS_AGP) {
/* Turn off PCI GART */
radeon_set_pcigart(dev_priv, 0);
} else

@ -1636,7 +1636,7 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
dev_priv->gart_info.mapping.handle;

dev_priv->gart_info.is_pcie =
!!(dev_priv->flags & CHIP_IS_PCIE);
!!(dev_priv->flags & RADEON_IS_PCIE);
dev_priv->gart_info.gart_table_location =
DRM_ATI_GART_FB;

@ -1648,7 +1648,7 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
DRM_ATI_GART_MAIN;
dev_priv->gart_info.addr = NULL;
dev_priv->gart_info.bus_addr = 0;
if (dev_priv->flags & CHIP_IS_PCIE) {
if (dev_priv->flags & RADEON_IS_PCIE) {
DRM_ERROR
("Cannot use PCI Express without GART in FB memory\n");
radeon_do_cleanup_cp(dev);

@ -1690,7 +1690,7 @@ static int radeon_do_cleanup_cp(drm_device_t * dev)
drm_irq_uninstall(dev);

#if __OS_HAS_AGP
if (dev_priv->flags & CHIP_IS_AGP) {
if (dev_priv->flags & RADEON_IS_AGP) {
if (dev_priv->cp_ring != NULL) {
drm_core_ioremapfree(dev_priv->cp_ring, dev);
dev_priv->cp_ring = NULL;

@ -1745,7 +1745,7 @@ static int radeon_do_resume_cp(drm_device_t * dev)
DRM_DEBUG("Starting radeon_do_resume_cp()\n");

#if __OS_HAS_AGP
if (dev_priv->flags & CHIP_IS_AGP) {
if (dev_priv->flags & RADEON_IS_AGP) {
/* Turn off PCI GART */
radeon_set_pcigart(dev_priv, 0);
} else

@ -2194,7 +2194,7 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)
dev->dev_private = (void *)dev_priv;
dev_priv->flags = flags;

switch (flags & CHIP_FAMILY_MASK) {
switch (flags & RADEON_FAMILY_MASK) {
case CHIP_R100:
case CHIP_RV200:
case CHIP_R200:

@ -2202,7 +2202,7 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)
case CHIP_R350:
case CHIP_R420:
case CHIP_RV410:
dev_priv->flags |= CHIP_HAS_HIERZ;
dev_priv->flags |= RADEON_HAS_HIERZ;
break;
default:
/* all other chips have no hierarchical z buffer */

@ -2210,14 +2210,14 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)
}

if (drm_device_is_agp(dev))
dev_priv->flags |= CHIP_IS_AGP;
dev_priv->flags |= RADEON_IS_AGP;
else if (drm_device_is_pcie(dev))
dev_priv->flags |= CHIP_IS_PCIE;
dev_priv->flags |= RADEON_IS_PCIE;
else
dev_priv->flags |= CHIP_IS_PCI;
dev_priv->flags |= RADEON_IS_PCI;

DRM_DEBUG("%s card detected\n",
((dev_priv->flags & CHIP_IS_AGP) ? "AGP" : (((dev_priv->flags & CHIP_IS_PCIE) ? "PCIE" : "PCI"))));
((dev_priv->flags & RADEON_IS_AGP) ? "AGP" : (((dev_priv->flags & RADEON_IS_PCIE) ? "PCIE" : "PCI"))));
return ret;
}

@ -134,16 +134,16 @@ enum radeon_cp_microcode_version {
* Chip flags
*/
enum radeon_chip_flags {
CHIP_FAMILY_MASK = 0x0000ffffUL,
CHIP_FLAGS_MASK = 0xffff0000UL,
CHIP_IS_MOBILITY = 0x00010000UL,
CHIP_IS_IGP = 0x00020000UL,
CHIP_SINGLE_CRTC = 0x00040000UL,
CHIP_IS_AGP = 0x00080000UL,
CHIP_HAS_HIERZ = 0x00100000UL,
CHIP_IS_PCIE = 0x00200000UL,
CHIP_NEW_MEMMAP = 0x00400000UL,
CHIP_IS_PCI = 0x00800000UL,
RADEON_FAMILY_MASK = 0x0000ffffUL,
RADEON_FLAGS_MASK = 0xffff0000UL,
RADEON_IS_MOBILITY = 0x00010000UL,
RADEON_IS_IGP = 0x00020000UL,
RADEON_SINGLE_CRTC = 0x00040000UL,
RADEON_IS_AGP = 0x00080000UL,
RADEON_HAS_HIERZ = 0x00100000UL,
RADEON_IS_PCIE = 0x00200000UL,
RADEON_NEW_MEMMAP = 0x00400000UL,
RADEON_IS_PCI = 0x00800000UL,
};

#define GET_RING_HEAD(dev_priv) (dev_priv->writeback_works ? \

@ -423,6 +423,8 @@ extern int r300_do_cp_cmdbuf(drm_device_t *dev, DRMFILE filp,
#define RADEON_RB3D_COLOROFFSET 0x1c40
#define RADEON_RB3D_COLORPITCH 0x1c48

#define RADEON_SRC_X_Y 0x1590

#define RADEON_DP_GUI_MASTER_CNTL 0x146c
# define RADEON_GMC_SRC_PITCH_OFFSET_CNTL (1 << 0)
# define RADEON_GMC_DST_PITCH_OFFSET_CNTL (1 << 1)

@ -440,6 +442,7 @@ extern int r300_do_cp_cmdbuf(drm_device_t *dev, DRMFILE filp,
# define RADEON_ROP3_S 0x00cc0000
# define RADEON_ROP3_P 0x00f00000
#define RADEON_DP_WRITE_MASK 0x16cc
#define RADEON_SRC_PITCH_OFFSET 0x1428
#define RADEON_DST_PITCH_OFFSET 0x142c
#define RADEON_DST_PITCH_OFFSET_C 0x1c80
# define RADEON_DST_TILE_LINEAR (0 << 30)

@ -1095,7 +1098,7 @@ do { \
n, __FUNCTION__ ); \
} \
if ( dev_priv->ring.space <= (n) * sizeof(u32) ) { \
COMMIT_RING(); \
COMMIT_RING(); \
radeon_wait_ring( dev_priv, (n) * sizeof(u32) ); \
} \
_nr = n; dev_priv->ring.space -= (n) * sizeof(u32); \

@ -42,7 +42,11 @@ static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t *
drm_file_t * filp_priv,
u32 * offset)
{
u32 off = *offset;
u64 off = *offset;
u32 fb_start = dev_priv->fb_location;
u32 fb_end = fb_start + dev_priv->fb_size - 1;
u32 gart_start = dev_priv->gart_vm_start;
u32 gart_end = gart_start + dev_priv->gart_size - 1;
struct drm_radeon_driver_file_fields *radeon_priv;

/* Hrm ... the story of the offset ... So this function converts

@ -62,10 +66,8 @@ static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t *
/* First, the best case, the offset already lands in either the
* framebuffer or the GART mapped space
*/
if ((off >= dev_priv->fb_location &&
off < (dev_priv->fb_location + dev_priv->fb_size)) ||
(off >= dev_priv->gart_vm_start &&
off < (dev_priv->gart_vm_start + dev_priv->gart_size)))
if ((off >= fb_start && off <= fb_end) ||
(off >= gart_start && off <= gart_end))
return 0;

/* Ok, that didn't happen... now check if we have a zero based

@ -78,16 +80,13 @@ static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t *
}

/* Finally, assume we aimed at a GART offset if beyond the fb */
if (off > (dev_priv->fb_location + dev_priv->fb_size))
off = off - (dev_priv->fb_location + dev_priv->fb_size) +
dev_priv->gart_vm_start;
if (off > fb_end)
off = off - fb_end - 1 + gart_start;

/* Now recheck and fail if out of bounds */
if ((off >= dev_priv->fb_location &&
off < (dev_priv->fb_location + dev_priv->fb_size)) ||
(off >= dev_priv->gart_vm_start &&
off < (dev_priv->gart_vm_start + dev_priv->gart_size))) {
DRM_DEBUG("offset fixed up to 0x%x\n", off);
if ((off >= fb_start && off <= fb_end) ||
(off >= gart_start && off <= gart_end)) {
DRM_DEBUG("offset fixed up to 0x%x\n", (unsigned int)off);
*offset = off;
return 0;
}

@ -175,6 +174,14 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
}
break;

case R200_EMIT_VAP_CTL: {
RING_LOCALS;
BEGIN_RING(2);
OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0);
ADVANCE_RING();
}
break;

case RADEON_EMIT_RB3D_COLORPITCH:
case RADEON_EMIT_RE_LINE_PATTERN:
case RADEON_EMIT_SE_LINE_WIDTH:

@ -202,7 +209,6 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
case R200_EMIT_TCL_LIGHT_MODEL_CTL_0:
case R200_EMIT_TFACTOR_0:
case R200_EMIT_VTX_FMT_0:
case R200_EMIT_VAP_CTL:
case R200_EMIT_MATRIX_SELECT_0:
case R200_EMIT_TEX_PROC_CTL_2:
case R200_EMIT_TCL_UCP_VERT_BLEND_CTL:

@ -269,6 +275,8 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
unsigned int *cmdsz)
{
u32 *cmd = (u32 *) cmdbuf->buf;
u32 offset, narrays;
int count, i, k;

*cmdsz = 2 + ((cmd[0] & RADEON_CP_PACKET_COUNT_MASK) >> 16);

@ -282,10 +290,106 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
return DRM_ERR(EINVAL);
}

/* Check client state and fix it up if necessary */
if (cmd[0] & 0x8000) { /* MSB of opcode: next DWORD GUI_CNTL */
u32 offset;
switch(cmd[0] & 0xff00) {
/* XXX Are there old drivers needing other packets? */

case RADEON_3D_DRAW_IMMD:
case RADEON_3D_DRAW_VBUF:
case RADEON_3D_DRAW_INDX:
case RADEON_WAIT_FOR_IDLE:
case RADEON_CP_NOP:
case RADEON_3D_CLEAR_ZMASK:
/* case RADEON_CP_NEXT_CHAR:
case RADEON_CP_PLY_NEXTSCAN:
case RADEON_CP_SET_SCISSORS: */ /* probably safe but will never need them? */
/* these packets are safe */
break;

case RADEON_CP_3D_DRAW_IMMD_2:
case RADEON_CP_3D_DRAW_VBUF_2:
case RADEON_CP_3D_DRAW_INDX_2:
case RADEON_3D_CLEAR_HIZ:
/* safe but r200 only */
if (dev_priv->microcode_version != UCODE_R200) {
DRM_ERROR("Invalid 3d packet for r100-class chip\n");
return DRM_ERR(EINVAL);
}
break;

case RADEON_3D_LOAD_VBPNTR:
count = (cmd[0] >> 16) & 0x3fff;

if (count > 18) { /* 12 arrays max */
DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
count);
return DRM_ERR(EINVAL);
}

/* carefully check packet contents */
narrays = cmd[1] & ~0xc000;
k = 0;
i = 2;
while ((k < narrays) && (i < (count + 2))) {
i++; /* skip attribute field */
if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &cmd[i])) {
DRM_ERROR
("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n",
k, i);
return DRM_ERR(EINVAL);
}
k++;
i++;
if (k == narrays)
break;
/* have one more to process, they come in pairs */
if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &cmd[i])) {
DRM_ERROR
("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n",
k, i);
return DRM_ERR(EINVAL);
}
k++;
i++;
}
/* do the counts match what we expect ? */
if ((k != narrays) || (i != (count + 2))) {
DRM_ERROR
("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n",
k, i, narrays, count + 1);
return DRM_ERR(EINVAL);
}
break;

case RADEON_3D_RNDR_GEN_INDX_PRIM:
if (dev_priv->microcode_version != UCODE_R100) {
DRM_ERROR("Invalid 3d packet for r200-class chip\n");
return DRM_ERR(EINVAL);
}
if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &cmd[1])) {
DRM_ERROR("Invalid rndr_gen_indx offset\n");
return DRM_ERR(EINVAL);
}
break;

case RADEON_CP_INDX_BUFFER:
if (dev_priv->microcode_version != UCODE_R200) {
DRM_ERROR("Invalid 3d packet for r100-class chip\n");
return DRM_ERR(EINVAL);
}
if ((cmd[1] & 0x8000ffff) != 0x80000810) {
DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]);
return DRM_ERR(EINVAL);
}
if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &cmd[2])) {
DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]);
return DRM_ERR(EINVAL);
}
break;

case RADEON_CNTL_HOSTDATA_BLT:
case RADEON_CNTL_PAINT_MULTI:
case RADEON_CNTL_BITBLT_MULTI:
/* MSB of opcode: next DWORD GUI_CNTL */
if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
| RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
offset = cmd[2] << 10;

@ -307,6 +411,11 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
}
cmd[3] = (cmd[3] & 0xffc00000) | offset >> 10;
}
break;

default:
DRM_ERROR("Invalid packet type %x\n", cmd[0] & 0xff00);
return DRM_ERR(EINVAL);
}

return 0;

@ -862,7 +971,7 @@ static void radeon_cp_dispatch_clear(drm_device_t * dev,
*/
dev_priv->sarea_priv->ctx_owner = 0;

if ((dev_priv->flags & CHIP_HAS_HIERZ)
if ((dev_priv->flags & RADEON_HAS_HIERZ)
&& (flags & RADEON_USE_HIERZ)) {
/* FIXME : reverse engineer that for Rx00 cards */
/* FIXME : the mask supposedly contains low-res z values. So can't set

@ -907,7 +1016,7 @@ static void radeon_cp_dispatch_clear(drm_device_t * dev,
for (i = 0; i < nbox; i++) {
int tileoffset, nrtilesx, nrtilesy, j;
/* it looks like r200 needs rv-style clears, at least if hierz is not enabled? */
if ((dev_priv->flags & CHIP_HAS_HIERZ)
if ((dev_priv->flags & RADEON_HAS_HIERZ)
&& !(dev_priv->microcode_version == UCODE_R200)) {
/* FIXME : figure this out for r200 (when hierz is enabled). Or
maybe r200 actually doesn't need to put the low-res z value into

@ -991,7 +1100,7 @@ static void radeon_cp_dispatch_clear(drm_device_t * dev,
}

/* TODO don't always clear all hi-level z tiles */
if ((dev_priv->flags & CHIP_HAS_HIERZ)
if ((dev_priv->flags & RADEON_HAS_HIERZ)
&& (dev_priv->microcode_version == UCODE_R200)
&& (flags & RADEON_USE_HIERZ))
/* r100 and cards without hierarchical z-buffer have no high-level z-buffer */

@ -1263,9 +1372,9 @@ static void radeon_cp_dispatch_swap(drm_device_t * dev)

DRM_DEBUG("dispatch swap %d,%d-%d,%d\n", x, y, w, h);

BEGIN_RING(7);
BEGIN_RING(9);

OUT_RING(CP_PACKET3(RADEON_CNTL_BITBLT_MULTI, 5));
OUT_RING(CP_PACKET0(RADEON_DP_GUI_MASTER_CNTL, 0));
OUT_RING(RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
RADEON_GMC_DST_PITCH_OFFSET_CNTL |
RADEON_GMC_BRUSH_NONE |

@ -1277,6 +1386,7 @@ static void radeon_cp_dispatch_swap(drm_device_t * dev)

/* Make this work even if front & back are flipped:
*/
OUT_RING(CP_PACKET0(RADEON_SRC_PITCH_OFFSET, 1));
if (dev_priv->current_page == 0) {
OUT_RING(dev_priv->back_pitch_offset);
OUT_RING(dev_priv->front_pitch_offset);

@ -1285,6 +1395,7 @@ static void radeon_cp_dispatch_swap(drm_device_t * dev)
OUT_RING(dev_priv->back_pitch_offset);
}

OUT_RING(CP_PACKET0(RADEON_SRC_X_Y, 2));
OUT_RING((x << 16) | y);
OUT_RING((x << 16) | y);
OUT_RING((w << 16) | h);

@ -2653,10 +2764,10 @@ static __inline__ int radeon_emit_veclinear(drm_radeon_private_t *dev_priv,
int start = header.veclinear.addr_lo | (header.veclinear.addr_hi << 8);
RING_LOCALS;

if (!sz)
return 0;
if (sz * 4 > cmdbuf->bufsz)
return DRM_ERR(EINVAL);
if (!sz)
return 0;
if (sz * 4 > cmdbuf->bufsz)
return DRM_ERR(EINVAL);

BEGIN_RING(5 + sz);
OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0);

@ -3032,9 +3143,9 @@ static int radeon_cp_getparam(DRM_IOCTL_ARGS)
break;

case RADEON_PARAM_CARD_TYPE:
if (dev_priv->flags & CHIP_IS_PCIE)
if (dev_priv->flags & RADEON_IS_PCIE)
value = RADEON_CARD_PCIE;
else if (dev_priv->flags & CHIP_IS_AGP)
else if (dev_priv->flags & RADEON_IS_AGP)
value = RADEON_CARD_AGP;
else
value = RADEON_CARD_PCI;

@ -725,6 +725,7 @@ static int savage_do_init_bci(drm_device_t *dev, drm_savage_init_t *init)
dev_priv->status = NULL;
}
if (dev_priv->dma_type == SAVAGE_DMA_AGP && init->buffers_offset) {
dev->agp_buffer_token = init->buffers_offset;
dev->agp_buffer_map = drm_core_findmap(dev,
init->buffers_offset);
if (!dev->agp_buffer_map) {

@ -40,8 +40,8 @@
#define DRIVER_PATCHLEVEL 1

enum sis_family {
SIS_OTHER = 0,
SIS_CHIP_315 = 1,
SIS_OTHER = 0,
SIS_CHIP_315 = 1,
};

#if defined(__linux__)

@ -52,18 +52,18 @@ enum sis_family {
#include "drm_sman.h"

#define SIS_BASE (dev_priv->mmio)
#define SIS_READ(reg) DRM_READ32(SIS_BASE, reg);
#define SIS_READ(reg) DRM_READ32(SIS_BASE, reg);
#define SIS_WRITE(reg, val) DRM_WRITE32(SIS_BASE, reg, val);

typedef struct drm_sis_private {
drm_local_map_t *mmio;
unsigned idle_fault;
drm_sman_t sman;
unsigned long chipset;
int vram_initialized;
int agp_initialized;
unsigned long vram_offset;
unsigned long agp_offset;
drm_local_map_t *mmio;
unsigned idle_fault;
drm_sman_t sman;
unsigned long chipset;
int vram_initialized;
int agp_initialized;
unsigned long vram_offset;
unsigned long agp_offset;
} drm_sis_private_t;

extern int sis_idle(drm_device_t *dev);

@ -44,14 +44,14 @@
#define CMDBUF_ALIGNMENT_MASK (0x0ff)

/* defines for VIA 3D registers */
#define VIA_REG_STATUS 0x400
#define VIA_REG_TRANSET 0x43C
#define VIA_REG_STATUS 0x400
#define VIA_REG_TRANSET 0x43C
#define VIA_REG_TRANSPACE 0x440

/* VIA_REG_STATUS(0x400): Engine Status */
#define VIA_CMD_RGTR_BUSY 0x00000080 /* Command Regulator is busy */
#define VIA_2D_ENG_BUSY 0x00000001 /* 2D Engine is busy */
#define VIA_3D_ENG_BUSY 0x00000002 /* 3D Engine is busy */
#define VIA_2D_ENG_BUSY 0x00000001 /* 2D Engine is busy */
#define VIA_3D_ENG_BUSY 0x00000002 /* 3D Engine is busy */
#define VIA_VR_QUEUE_BUSY 0x00020000 /* Virtual Queue is busy */

#define SetReg2DAGP(nReg, nData) { \

@ -120,7 +120,7 @@ via_cmdbuf_wait(drm_via_private_t * dev_priv, unsigned int size)
next_addr = cur_addr + size + 512*1024;
count = 1000000;
do {
hw_addr = *hw_addr_ptr - agp_base;
hw_addr = *hw_addr_ptr - agp_base;
if (count-- == 0) {
DRM_ERROR
("via_cmdbuf_wait timed out hw %x cur_addr %x next_addr %x\n",

@ -247,10 +247,10 @@ static int via_dma_init(DRM_IOCTL_ARGS)
else
retcode = via_dma_cleanup(dev);
break;
case VIA_DMA_INITIALIZED:
case VIA_DMA_INITIALIZED:
retcode = (dev_priv->ring.virtual_start != NULL) ?
0: DRM_ERR( EFAULT );
break;
break;
default:
retcode = DRM_ERR(EINVAL);
break;

@ -406,7 +406,7 @@ static int via_pci_cmdbuffer(DRM_IOCTL_ARGS)
static inline uint32_t *via_align_buffer(drm_via_private_t * dev_priv,
uint32_t * vb, int qw_count)
{
for (; qw_count > 0; --qw_count) {
for (; qw_count > 0; --qw_count) {
VIA_OUT_RING_QW(HC_DUMMY, HC_DUMMY);
}
return vb;

@ -471,7 +471,7 @@ static int via_hook_segment(drm_via_private_t *dev_priv,
}

if (paused && !no_pci_fire) {
uint32_t rgtr,ptr;
uint32_t rgtr,ptr;
uint32_t ptr_low;

count = 1000000;

@ -47,12 +47,12 @@
#define VIA_DRM_DRIVER_MAJOR 2
#define VIA_DRM_DRIVER_MINOR 10
#define VIA_DRM_DRIVER_PATCHLEVEL 2
#define VIA_DRM_DRIVER_VERSION (((VIA_DRM_DRIVER_MAJOR) << 16) | (VIA_DRM_DRIVER_MINOR))
#define VIA_DRM_DRIVER_VERSION (((VIA_DRM_DRIVER_MAJOR) << 16) | (VIA_DRM_DRIVER_MINOR))

#define VIA_NR_SAREA_CLIPRECTS 8
#define VIA_NR_XVMC_PORTS 10
#define VIA_NR_XVMC_LOCKS 5
#define VIA_MAX_CACHELINE_SIZE 64
#define VIA_NR_XVMC_PORTS 10
#define VIA_NR_XVMC_LOCKS 5
#define VIA_MAX_CACHELINE_SIZE 64
#define XVMCLOCKPTR(saPriv,lockNo) \
((volatile drm_hw_lock_t *)(((((unsigned long) (saPriv)->XvMCLockArea) + \
(VIA_MAX_CACHELINE_SIZE - 1)) & \

@ -67,29 +67,29 @@

#define VIA_UPLOAD_TEX0IMAGE 0x1 /* handled clientside */
#define VIA_UPLOAD_TEX1IMAGE 0x2 /* handled clientside */
#define VIA_UPLOAD_CTX 0x4
#define VIA_UPLOAD_CTX 0x4
#define VIA_UPLOAD_BUFFERS 0x8
#define VIA_UPLOAD_TEX0 0x10
#define VIA_UPLOAD_TEX1 0x20
#define VIA_UPLOAD_CLIPRECTS 0x40
#define VIA_UPLOAD_ALL 0xff
#define VIA_UPLOAD_ALL 0xff

/* VIA specific ioctls */
#define DRM_VIA_ALLOCMEM 0x00
#define DRM_VIA_FREEMEM 0x01
#define DRM_VIA_FREEMEM 0x01
#define DRM_VIA_AGP_INIT 0x02
#define DRM_VIA_FB_INIT 0x03
#define DRM_VIA_FB_INIT 0x03
#define DRM_VIA_MAP_INIT 0x04
#define DRM_VIA_DEC_FUTEX 0x05
#define NOT_USED
#define DRM_VIA_DMA_INIT 0x07
#define DRM_VIA_CMDBUFFER 0x08
#define DRM_VIA_FLUSH 0x09
#define DRM_VIA_PCICMD 0x0a
#define DRM_VIA_FLUSH 0x09
#define DRM_VIA_PCICMD 0x0a
#define DRM_VIA_CMDBUF_SIZE 0x0b
#define NOT_USED
#define DRM_VIA_WAIT_IRQ 0x0d
#define DRM_VIA_DMA_BLIT 0x0e
#define DRM_VIA_WAIT_IRQ 0x0d
#define DRM_VIA_DMA_BLIT 0x0e
#define DRM_VIA_BLIT_SYNC 0x0f

#define DRM_IOCTL_VIA_ALLOCMEM DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_ALLOCMEM, drm_via_mem_t)

@ -172,7 +172,7 @@ typedef struct _drm_via_dma_init {
enum {
VIA_INIT_DMA = 0x01,
VIA_CLEANUP_DMA = 0x02,
VIA_DMA_INITIALIZED = 0x03
VIA_DMA_INITIALIZED = 0x03
} func;

unsigned long offset;

@ -217,7 +217,7 @@ typedef struct _drm_via_sarea {

/* Used by the 3d driver only at this point, for pageflipping:
*/
unsigned int pfCurrentOffset;
unsigned int pfCurrentOffset;
} drm_via_sarea_t;

typedef struct _drm_via_cmdbuf_size {

@ -273,17 +273,17 @@ typedef struct drm_via_blitsync {
*/

typedef struct drm_via_dmablit {
uint32_t num_lines;
uint32_t line_length;
uint32_t num_lines;
uint32_t line_length;

uint32_t fb_addr;
uint32_t fb_stride;
uint32_t fb_addr;
uint32_t fb_stride;

unsigned char *mem_addr;
uint32_t mem_stride;
unsigned char *mem_addr;
uint32_t mem_stride;

uint32_t flags;
int to_fb;
int to_fb;

drm_via_blitsync_t sync;
} drm_via_dmablit_t;

@ -22,7 +22,6 @@
* DEALINGS IN THE SOFTWARE.
*/

#include <linux/config.h>
#include "drmP.h"
#include "via_drm.h"
#include "via_drv.h"

@ -76,9 +76,9 @@ typedef struct drm_via_private {
volatile uint32_t *last_pause_ptr;
volatile uint32_t *hw_addr_ptr;
drm_via_ring_buffer_t ring;
struct timeval last_vblank;
int last_vblank_valid;
unsigned usec_per_vblank;
struct timeval last_vblank;
int last_vblank_valid;
unsigned usec_per_vblank;
drm_via_state_t hc_state;
char pci_buf[VIA_PCI_BUF_SIZE];
const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];

@ -90,14 +90,14 @@ typedef struct drm_via_private {
uint32_t irq_enable_mask;
uint32_t irq_pending_mask;
int *irq_map;
/* Memory manager stuff */
/* Memory manager stuff */
#ifdef VIA_HAVE_CORE_MM
unsigned idle_fault;
unsigned int idle_fault;
drm_sman_t sman;
int vram_initialized;
int agp_initialized;
unsigned long vram_offset;
unsigned long agp_offset;
unsigned long vram_offset;
unsigned long agp_offset;
#endif
#ifdef VIA_HAVE_DMABLIT
drm_via_blitq_t blit_queues[VIA_NUM_BLIT_ENGINES];

@ -148,8 +148,6 @@ extern int via_driver_dma_quiescent(drm_device_t * dev);
extern void via_init_futex(drm_via_private_t *dev_priv);
extern void via_cleanup_futex(drm_via_private_t *dev_priv);
extern void via_release_futex(drm_via_private_t *dev_priv, int context);
extern int via_driver_irq_wait(drm_device_t * dev, unsigned int irq,
int force_sequence, unsigned int *sequence);

#ifdef VIA_HAVE_CORE_MM
extern void via_reclaim_buffers_locked(drm_device_t *dev, struct file *filp);

@ -43,7 +43,7 @@
#define VIA_REG_INTERRUPT 0x200

/* VIA_REG_INTERRUPT */
#define VIA_IRQ_GLOBAL (1 << 31)
#define VIA_IRQ_GLOBAL (1 << 31)
#define VIA_IRQ_VBLANK_ENABLE (1 << 19)
#define VIA_IRQ_VBLANK_PENDING (1 << 3)
#define VIA_IRQ_HQV0_ENABLE (1 << 11)

@ -93,8 +93,8 @@ static int via_irqmap_unichrome[] = {-1, -1, -1, 0, -1, 1};
static unsigned time_diff(struct timeval *now,struct timeval *then)
{
return (now->tv_usec >= then->tv_usec) ?
now->tv_usec - then->tv_usec :
1000000 - (then->tv_usec - now->tv_usec);
now->tv_usec - then->tv_usec :
1000000 - (then->tv_usec - now->tv_usec);
}

irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)

@ -110,21 +110,21 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
status = VIA_READ(VIA_REG_INTERRUPT);
if (status & VIA_IRQ_VBLANK_PENDING) {
atomic_inc(&dev->vbl_received);
if (!(atomic_read(&dev->vbl_received) & 0x0F)) {
if (!(atomic_read(&dev->vbl_received) & 0x0F)) {
#ifdef __linux__
do_gettimeofday(&cur_vblank);
#else
microtime(&cur_vblank);
#endif
if (dev_priv->last_vblank_valid) {
if (dev_priv->last_vblank_valid) {
dev_priv->usec_per_vblank =
time_diff(&cur_vblank,
&dev_priv->last_vblank) >> 4;
}
dev_priv->last_vblank = cur_vblank;
dev_priv->last_vblank_valid = 1;
}
if (!(atomic_read(&dev->vbl_received) & 0xFF)) {
}
if (!(atomic_read(&dev->vbl_received) & 0xFF)) {
DRM_DEBUG("US per vblank is: %u\n",
dev_priv->usec_per_vblank);
}

@ -198,13 +198,13 @@ int via_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence)
return ret;
}

int
static int
via_driver_irq_wait(drm_device_t * dev, unsigned int irq, int force_sequence,
unsigned int *sequence)
{
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
unsigned int cur_irq_sequence;
drm_via_irq_t *cur_irq = dev_priv->via_irqs;
drm_via_irq_t *cur_irq;
int ret = 0;
maskarray_t *masks;
int real_irq;

@ -231,7 +231,7 @@ via_driver_irq_wait(drm_device_t * dev, unsigned int irq, int force_sequence,
}

masks = dev_priv->irq_masks;
cur_irq += real_irq;
cur_irq = dev_priv->via_irqs + real_irq;

if (masks[real_irq][2] && !force_sequence) {
DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,

@ -257,11 +257,12 @@ void via_driver_irq_preinstall(drm_device_t * dev)
{
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
u32 status;
drm_via_irq_t *cur_irq = dev_priv->via_irqs;
drm_via_irq_t *cur_irq;
int i;

DRM_DEBUG("driver_irq_preinstall: dev_priv: %p\n", dev_priv);
if (dev_priv) {
cur_irq = dev_priv->via_irqs;

dev_priv->irq_enable_mask = VIA_IRQ_VBLANK_ENABLE;
dev_priv->irq_pending_mask = VIA_IRQ_VBLANK_PENDING;

@ -285,7 +286,7 @@ void via_driver_irq_preinstall(drm_device_t * dev)
DRM_DEBUG("Initializing IRQ %d\n", i);
}

dev_priv->last_vblank_valid = 0;
dev_priv->last_vblank_valid = 0;

/* Clear VSync interrupt regs */
status = VIA_READ(VIA_REG_INTERRUPT);

@ -26,20 +26,20 @@
#ifndef _VIA_VERIFIER_H_
#define _VIA_VERIFIER_H_

typedef enum{
typedef enum {
no_sequence = 0,
z_address,
dest_address,
tex_address
}drm_via_sequence_t;
} drm_via_sequence_t;


typedef struct{
typedef struct {
unsigned texture;
uint32_t z_addr;
uint32_t d_addr;
uint32_t t_addr[2][10];
uint32_t t_addr[2][10];
uint32_t pitch[2][10];
uint32_t height[2][10];
uint32_t tex_level_lo[2];