commit c5aaf7648d
Merge branch 'master' into crestline

Conflicts:
	shared-core/i915_drm.h

Whitespace change only.
@@ -1,54 +1,55 @@
*-core/linux
*-core/drm.h
*-core/drm_sarea.h
*-core/i915_dma.c
*-core/i915_drm.h
*-core/i915_drv.h
*-core/i915_irq.c
*-core/i915_mem.c
*-core/mach64_dma.c
*-core/mach64_drm.h
*-core/mach64_drv.h
*-core/mach64_irq.c
*-core/mach64_state.c
*-core/mga_dma.c
*-core/mga_drm.h
*-core/mga_drv.h
*-core/mga_irq.c
*-core/mga_state.c
*-core/mga_ucode.h
*-core/mga_warp.c
*-core/nv_drv.h
*-core/r128_cce.c
*-core/r128_drm.h
*-core/r128_drv.h
*-core/r128_irq.c
*-core/r128_state.c
*-core/r300_cmdbuf.c
*-core/r300_reg.h
*-core/radeon_cp.c
*-core/radeon_drm.h
*-core/radeon_drv.h
*-core/radeon_irq.c
*-core/radeon_mem.c
*-core/radeon_state.c
*-core/savage_bci.c
*-core/savage_drm.h
*-core/savage_drv.h
*-core/savage_state.c
*-core/sis_drm.h
*-core/sis_drv.h
*-core/tdfx_drv.h
*-core/via_3d_reg.h
*-core/via_dma.c
*-core/via_drm.h
*-core/via_drv.c
*-core/via_drv.h
*-core/via_irq.c
*-core/via_map.c
*-core/via_verifier.c
*-core/via_verifier.h
*-core/via_video.c
bsd-core/linux
bsd-core/drm.h
bsd-core/drm_sarea.h
bsd-core/i915_dma.c
bsd-core/i915_drm.h
bsd-core/i915_drv.h
bsd-core/i915_irq.c
bsd-core/i915_mem.c
bsd-core/mach64_dma.c
bsd-core/mach64_drm.h
bsd-core/mach64_drv.h
bsd-core/mach64_irq.c
bsd-core/mach64_state.c
bsd-core/mga_dma.c
bsd-core/mga_drm.h
bsd-core/mga_drv.h
bsd-core/mga_irq.c
bsd-core/mga_state.c
bsd-core/mga_ucode.h
bsd-core/mga_warp.c
bsd-core/nv_drv.h
bsd-core/r128_cce.c
bsd-core/r128_drm.h
bsd-core/r128_drv.h
bsd-core/r128_irq.c
bsd-core/r128_state.c
bsd-core/r300_cmdbuf.c
bsd-core/r300_reg.h
bsd-core/radeon_cp.c
bsd-core/radeon_drm.h
bsd-core/radeon_drv.h
bsd-core/radeon_irq.c
bsd-core/radeon_mem.c
bsd-core/radeon_state.c
bsd-core/savage_bci.c
bsd-core/savage_drm.h
bsd-core/savage_drv.h
bsd-core/savage_state.c
bsd-core/sis_drm.h
bsd-core/sis_drv.h
bsd-core/tdfx_drv.h
bsd-core/via_3d_reg.h
bsd-core/via_dma.c
bsd-core/via_drm.h
bsd-core/via_drv.c
bsd-core/via_drv.h
bsd-core/via_irq.c
bsd-core/via_map.c
bsd-core/via_verifier.c
bsd-core/via_verifier.h
bsd-core/via_video.c
*~
*.flags
*.ko
*.ko.cmd
@@ -74,6 +75,7 @@ config.log
config.status
config.sub
configure
cscope.*
depcomp
device_if.h
drm.kld
@@ -316,6 +316,9 @@ void drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
	case _DRM_CONSISTENT:
		drm_pci_free(dev, map->dmah);
		break;
	default:
		DRM_ERROR("Bad map type %d\n", map->type);
		break;
	}

	if (map->bsr != NULL) {
@@ -1 +0,0 @@
../shared-core/drm_drawable.c
@@ -0,0 +1,51 @@
/* drm_drawable.h -- IOCTLs for drawables -*- linux-c -*-
 * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
 */
/*-
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *
 */

#include "drmP.h"

int drm_adddraw(DRM_IOCTL_ARGS)
{
	drm_draw_t draw;

	draw.handle = 0;	/* NOOP */
	DRM_DEBUG("%d\n", draw.handle);

	DRM_COPY_TO_USER_IOCTL( (drm_draw_t *)data, draw, sizeof(draw) );

	return 0;
}

int drm_rmdraw(DRM_IOCTL_ARGS)
{
	return 0;	/* NOOP */
}
@@ -0,0 +1 @@
Module*.symvers
@@ -58,7 +58,7 @@ endif

# Modules for all architectures
MODULE_LIST := drm.o tdfx.o r128.o radeon.o mga.o sis.o savage.o via.o \
		mach64.o nv.o
		mach64.o nv.o nouveau.o

# Modules only for ix86 architectures
ifneq (,$(findstring 86,$(MACHINE)))
@@ -75,45 +75,27 @@ DRM_MODULES ?= $(MODULE_LIST)

# These definitions are for handling dependencies in the out of kernel build.

DRMSHARED = drm.h drm_sarea.h drm_drawable.c
DRMHEADERS = drmP.h drm_compat.h drm_os_linux.h drm.h drm_sarea.h
COREHEADERS = drm_core.h drm_sman.h drm_hashtab.h

TDFXHEADERS = tdfx_drv.h $(DRMHEADERS)
TDFXSHARED = tdfx_drv.h
R128HEADERS = r128_drv.h r128_drm.h $(DRMHEADERS)
R128SHARED = r128_drv.h r128_drm.h r128_cce.c r128_state.c r128_irq.c
RADEONHEADERS = radeon_drv.h radeon_drm.h r300_reg.h $(DRMHEADERS)
RADEONSHARED = radeon_drv.h radeon_drm.h radeon_cp.c radeon_irq.c \
	radeon_mem.c radeon_state.c r300_cmdbuf.c r300_reg.h
MGAHEADERS = mga_drv.h mga_drm.h mga_ucode.h $(DRMHEADERS)
MGASHARED = mga_dma.c mga_drm.h mga_drv.h mga_irq.c mga_state.c \
	mga_ucode.h mga_warp.c
I810HEADERS = i810_drv.h i810_drm.h $(DRMHEADERS)
I830HEADERS = i830_drv.h i830_drm.h $(DRMHEADERS)
I915HEADERS = i915_drv.h i915_drm.h $(DRMHEADERS)
I915SHARED = i915_drv.h i915_drm.h i915_irq.c i915_mem.c i915_dma.c
SISHEADERS= sis_drv.h sis_drm.h drm_hashtab.h drm_sman.h $(DRMHEADERS)
SISSHARED= sis_drv.h sis_drm.h
SAVAGEHEADERS= savage_drv.h savage_drm.h $(DRMHEADERS)
SAVAGESHARED= savage_drv.h savage_drm.h savage_bci.c savage_state.c
VIAHEADERS = via_drm.h via_drv.h via_3d_reg.h via_verifier.h $(DRMHEADERS)
VIASHARED = via_drm.h via_drv.h via_3d_reg.h via_drv.c via_irq.c via_map.c \
	via_dma.c via_verifier.c via_verifier.h via_video.c
MACH64HEADERS = mach64_drv.h mach64_drm.h $(DRMHEADERS)
MACH64SHARED = mach64_drv.h mach64_drm.h mach64_dma.c \
	mach64_irq.c mach64_state.c
NVHEADERS = nv_drv.h $(DRMHEADERS)
NVSHARED = nv_drv.h
FFBHEADERS = ffb_drv.h $(DRMHEADERS)

SHAREDSRC = $(DRMSHARED) $(MGASHARED) $(R128SHARED) $(RADEONSHARED) \
	$(SISSHARED) $(TDFXSHARED) $(VIASHARED) $(MACH64SHARED) \
	$(I915SHARED) $(SAVAGESHARED) $(NVSHARED)
NOUVEAUHEADERS = nouveau_drv.h nouveau_drm.h nouveau_reg.h $(DRMHEADERS)

PROGS = dristat drmstat

CLEANFILES = *.o *.ko $(PROGS) .depend .*.flags .*.d .*.cmd *.mod.c linux drm_pciids.h .tmp_versions
CLEANFILES = *.o *.ko $(PROGS) .depend .*.flags .*.d .*.cmd *.mod.c drm_pciids.h .tmp_versions

# VERSION is not defined from the initial invocation. It is defined when
# this Makefile is invoked from the kernel's root Makefile.
@@ -226,27 +208,13 @@ endif

SHAREDDIR := ../shared-core

HASSHARED := $(shell if [ -d $(SHAREDDIR) ]; then echo y; fi)

ifeq ($(HASSHARED),y)
includes:: $(SHAREDSRC) drm_pciids.h
ifeq ($(shell if [ -d $(SHAREDDIR) ]; then echo y; fi),y)
includes:: drm_pciids.h

drm_pciids.h: $(SHAREDDIR)/drm_pciids.txt
	sh ../scripts/create_linux_pci_lists.sh < $(SHAREDDIR)/drm_pciids.txt

$(SHAREDSRC):
	@if [ -r $(SHAREDDIR)/$@ ]; then \
	(rm -f $@; set -x; ln -s $(SHAREDDIR)/$@ $@); fi

CLEANFILES += $(SHAREDSRC)
endif

includes:: linux

linux:
	rm -f linux
	ln -s . linux

clean cleandir:
	rm -rf $(CLEANFILES)
@@ -274,11 +242,11 @@ else

# Check for kernel versions that we don't support.

BELOW24 := $(shell if [ $(VERSION) -lt 2 -o $(PATCHLEVEL) -lt 4 ]; then \
BELOW26 := $(shell if [ $(VERSION) -lt 2 -o $(PATCHLEVEL) -lt 6 ]; then \
	echo y; fi)

ifeq ($(BELOW24),y)
$(error Only 2.4.x and later kernels are supported \
ifeq ($(BELOW26),y)
$(error Only 2.6.x and later kernels are supported \
	($(VERSION).$(PATCHLEVEL).$(SUBLEVEL)))
endif
@@ -291,30 +259,6 @@ endif
# This needs to go before all other include paths.
CC += -I$(DRMSRCDIR)

# Check for Red Hat's 4-argument do_munmap().
DOMUNMAP := $(shell grep do_munmap $(LINUXDIR)/include/linux/mm.h | \
	grep -c acct)

ifneq ($(DOMUNMAP),0)
EXTRA_CFLAGS += -DDO_MUNMAP_4_ARGS
endif

# Check for 5-argument remap_page_range() in RH9 kernel, and 2.5.x kernels
RPR := $(shell grep remap_page_range $(LINUXDIR)/include/linux/mm.h | \
	grep -c vma)

ifneq ($(RPR),0)
EXTRA_CFLAGS += -DREMAP_PAGE_RANGE_5_ARGS
endif

# Check for 4-argument vmap() in some 2.5.x and 2.4.x kernels
VMAP := $(shell grep -A1 'vmap.*count,$$' $(LINUXDIR)/include/linux/vmalloc.h | \
	grep -c prot)

ifneq ($(VMAP),0)
EXTRA_CFLAGS += -DVMAP_4_ARGS
endif

# Check for PAGE_AGP definition
PAGE_AGP := $(shell cat $(LINUXDIR)/include/asm/agp.h 2>/dev/null | \
	grep -c PAGE_AGP)
@@ -323,7 +267,6 @@ ifneq ($(PAGE_AGP),0)
EXTRA_CFLAGS += -DHAVE_PAGE_AGP
endif


# Start with all modules turned off.
CONFIG_DRM_GAMMA := n
CONFIG_DRM_TDFX := n
@@ -372,6 +315,9 @@ endif
ifneq (,$(findstring nv,$(DRM_MODULES)))
CONFIG_DRM_NV := m
endif
ifneq (,$(findstring nouveau,$(DRM_MODULES)))
CONFIG_DRM_NOUVEAU := m
endif

# These require AGP support

@@ -402,6 +348,7 @@ $(savage-objs): $(SAVAGEHEADERS)
$(via-objs): $(VIAHEADERS)
$(mach64-objs): $(MACH64HEADERS)
$(nv-objs): $(NVHEADERS)
$(nouveau-objs): $(NOUVEAUHEADERS)

endif

@@ -20,7 +20,9 @@ mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
i810-objs := i810_drv.o i810_dma.o
i830-objs := i830_drv.o i830_dma.o i830_irq.o
i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \
		i915_buffer.o
		i915_buffer.o
nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \
		nouveau_object.o nouveau_irq.o nv40_graph.o
radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o
sis-objs := sis_drv.o sis_mm.o
ffb-objs := ffb_drv.o ffb_context.o
@@ -36,6 +38,7 @@ radeon-objs += radeon_ioc32.o
mga-objs += mga_ioc32.o
r128-objs += r128_ioc32.o
i915-objs += i915_ioc32.o
nouveau-objs += nouveau_ioc32.o
endif

obj-m += drm.o
@@ -52,3 +55,4 @@ obj-$(CONFIG_DRM_SAVAGE)+= savage.o
obj-$(CONFIG_DRM_VIA) += via.o
obj-$(CONFIG_DRM_MACH64)+= mach64.o
obj-$(CONFIG_DRM_NV) += nv.o
obj-$(CONFIG_DRM_NOUVEAU) += nouveau.o
@@ -0,0 +1 @@
../shared-core/drm.h
@@ -67,19 +67,11 @@
#include <asm/mtrr.h>
#endif
#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
#include <asm/agp.h>
#include <linux/types.h>
#include <linux/agp_backend.h>
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,41)
#define HAS_WORKQUEUE 0
#else
#define HAS_WORKQUEUE 1
#endif
#if !HAS_WORKQUEUE
#include <linux/tqueue.h>
#else
#include <linux/workqueue.h>
#endif
#include <linux/poll.h>
#include <asm/pgalloc.h>
#include "drm.h"
@@ -553,7 +545,8 @@ typedef struct drm_mm_node {
} drm_mm_node_t;

typedef struct drm_mm {
	drm_mm_node_t root_node;
	struct list_head fl_entry;
	struct list_head ml_entry;
} drm_mm_t;

@@ -755,17 +748,6 @@ typedef struct drm_head {
	struct class_device *dev_class;
} drm_head_t;

typedef struct drm_cache {

	/*
	 * Memory caches
	 */

	kmem_cache_t *mm;
	kmem_cache_t *fence_object;
} drm_cache_t;


typedef struct drm_fence_driver{
	int no_types;
@@ -812,7 +794,11 @@ typedef struct drm_buffer_manager{
	struct list_head pinned[DRM_BO_MEM_TYPES];
	struct list_head unfenced;
	struct list_head ddestroy;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
	struct work_struct wq;
#else
	struct delayed_work wq;
#endif
	uint32_t fence_type;
	unsigned long cur_pages;
	atomic_t count;
@@ -908,11 +894,8 @@ typedef struct drm_device {
	unsigned long last_switch;	/**< jiffies at last context switch */
	/*@} */

#if !HAS_WORKQUEUE
	struct tq_struct tq;
#else
	struct work_struct work;
#endif

	/** \name VBLANK IRQ support */
	/*@{ */

@@ -940,11 +923,7 @@ typedef struct drm_device {
	int pci_vendor;		/**< PCI vendor id */
	int pci_device;		/**< PCI device id */
#ifdef __alpha__
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3)
	struct pci_controler *hose;
#else
	struct pci_controller *hose;
#endif
#endif
	drm_sg_mem_t *sg;	/**< Scatter gather memory */
	unsigned long *ctx_bitmap;	/**< context bitmap */
@@ -1094,6 +1073,7 @@ static inline int drm_mtrr_del(int handle, unsigned long offset,
}

#define drm_core_has_MTRR(dev) (0)
#define DRM_MTRR_WC 0
#endif

@@ -1318,7 +1298,6 @@ extern int drm_put_head(drm_head_t * head);
extern unsigned int drm_debug;	/* 1 to enable debug output */
extern unsigned int drm_cards_limit;
extern drm_head_t **drm_heads;
extern drm_cache_t drm_cache;
extern struct drm_sysfs_class *drm_class;
extern struct proc_dir_entry *drm_proc_root;

@@ -1478,26 +1457,8 @@ extern int drm_fence_buffer_objects(drm_file_t * priv,
				    drm_fence_object_t *fence,
				    drm_fence_object_t **used_fence);

/* Inline replacements for DRM_IOREMAP macros */
static __inline__ void drm_core_ioremap(struct drm_map *map,
					struct drm_device *dev)
{
	map->handle = drm_ioremap(map->offset, map->size, dev);
}

static __inline__ void drm_core_ioremap_nocache(struct drm_map *map,
						struct drm_device *dev)
{
	map->handle = drm_ioremap_nocache(map->offset, map->size, dev);
}

static __inline__ void drm_core_ioremapfree(struct drm_map *map,
					    struct drm_device *dev)
{
	if (map->handle && map->size)
		drm_ioremapfree(map->handle, map->size, dev);
}
extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev);
extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev);

static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev,
						    unsigned int token)
@@ -1581,25 +1542,6 @@ static inline void drm_ctl_free(void *pt, size_t size, int area)
	drm_free_memctl(size);
}

static inline void *drm_ctl_cache_alloc(kmem_cache_t *cache, size_t size,
					int flags)
{
	void *ret;
	if (drm_alloc_memctl(size))
		return NULL;
	ret = kmem_cache_alloc(cache, flags);
	if (!ret)
		drm_free_memctl(size);
	return ret;
}

static inline void drm_ctl_cache_free(kmem_cache_t *cache, size_t size,
				      void *obj)
{
	kmem_cache_free(cache, obj);
	drm_free_memctl(size);
}

/*@}*/

#endif				/* __KERNEL__ */
@ -106,10 +106,6 @@ int drm_agp_acquire(drm_device_t * dev)
|
|||
return -ENODEV;
|
||||
if (dev->agp->acquired)
|
||||
return -EBUSY;
|
||||
#ifndef VMAP_4_ARGS
|
||||
if (dev->agp->cant_use_aperture)
|
||||
return -EINVAL;
|
||||
#endif
|
||||
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
|
||||
if ((retcode = agp_backend_acquire()))
|
||||
return retcode;
|
||||
|
@ -563,6 +559,8 @@ int drm_agp_unbind_memory(DRM_AGP_MEM * handle)
|
|||
#define AGP_USER_MEMORY (AGP_USER_TYPES)
|
||||
#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)
|
||||
#endif
|
||||
#define AGP_REQUIRED_MAJOR 0
|
||||
#define AGP_REQUIRED_MINOR 102
|
||||
|
||||
static int drm_agp_needs_unbind_cache_adjust(drm_ttm_backend_t *backend) {
|
||||
return ((backend->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1);
|
||||
|
@ -673,6 +671,24 @@ drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev,
|
|||
|
||||
drm_ttm_backend_t *agp_be;
|
||||
drm_agp_ttm_priv *agp_priv;
|
||||
struct agp_kern_info *info;
|
||||
|
||||
if (!dev->agp) {
|
||||
DRM_ERROR("AGP is not initialized.\n");
|
||||
return NULL;
|
||||
}
|
||||
info = &dev->agp->agp_info;
|
||||
|
||||
if (info->version.major != AGP_REQUIRED_MAJOR ||
|
||||
info->version.minor < AGP_REQUIRED_MINOR) {
|
||||
DRM_ERROR("Wrong agpgart version %d.%d\n"
|
||||
"\tYou need at least version %d.%d.\n",
|
||||
info->version.major,
|
||||
info->version.minor,
|
||||
AGP_REQUIRED_MAJOR,
|
||||
AGP_REQUIRED_MINOR);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
agp_be = (backend != NULL) ? backend:
|
||||
drm_ctl_calloc(1, sizeof(*agp_be), DRM_MEM_MAPPINGS);
|
||||
|
@ -687,6 +703,7 @@ drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev,
|
|||
return NULL;
|
||||
}
|
||||
|
||||
|
||||
agp_priv->mem = NULL;
|
||||
agp_priv->alloc_type = AGP_USER_MEMORY;
|
||||
agp_priv->cached_type = AGP_USER_CACHED_MEMORY;
|
||||
|
|
|
@ -352,10 +352,20 @@ static void drm_bo_delayed_delete(drm_device_t * dev, int remove_all)
|
|||
|
||||
}
|
||||
|
||||
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
|
||||
static void drm_bo_delayed_workqueue(void *data)
|
||||
#else
|
||||
static void drm_bo_delayed_workqueue(struct work_struct *work)
|
||||
#endif
|
||||
{
|
||||
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
|
||||
drm_device_t *dev = (drm_device_t *) data;
|
||||
drm_buffer_manager_t *bm = &dev->bm;
|
||||
#else
|
||||
drm_buffer_manager_t *bm = container_of(work, drm_buffer_manager_t, wq.work);
|
||||
drm_device_t *dev = container_of(bm, drm_device_t, bm);
|
||||
#endif
|
||||
|
||||
|
||||
DRM_DEBUG("Delayed delete Worker\n");
|
||||
|
||||
|
@ -1904,7 +1914,11 @@ int drm_bo_driver_init(drm_device_t * dev)
|
|||
if (ret)
|
||||
goto out_unlock;
|
||||
|
||||
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
|
||||
INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev);
|
||||
#else
|
||||
INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue);
|
||||
#endif
|
||||
bm->initialized = 1;
|
||||
bm->nice_mode = 1;
|
||||
atomic_set(&bm->count, 0);
|
||||
|
|
|
@ -179,7 +179,7 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
|
|||
}
|
||||
}
|
||||
if (map->type == _DRM_REGISTERS)
|
||||
map->handle = drm_ioremap(map->offset, map->size, dev);
|
||||
map->handle = ioremap(map->offset, map->size);
|
||||
break;
|
||||
case _DRM_SHM:
|
||||
list = drm_find_matching_map(dev, map);
|
||||
|
@ -195,7 +195,7 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
|
|||
*maplist = list;
|
||||
return 0;
|
||||
}
|
||||
map->handle = vmalloc_32(map->size);
|
||||
map->handle = vmalloc_user(map->size);
|
||||
DRM_DEBUG("%lu %d %p\n",
|
||||
map->size, drm_order(map->size), map->handle);
|
||||
if (!map->handle) {
|
||||
|
@ -279,6 +279,8 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
|
|||
|
||||
list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
|
||||
if (!list) {
|
||||
if (map->type == _DRM_REGISTERS)
|
||||
iounmap(map->handle);
|
||||
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
@ -295,6 +297,8 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
|
|||
ret = drm_map_handle(dev, &list->hash, user_token, 0);
|
||||
|
||||
if (ret) {
|
||||
if (map->type == _DRM_REGISTERS)
|
||||
iounmap(map->handle);
|
||||
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
|
||||
drm_free(list, sizeof(*list), DRM_MEM_MAPS);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
@ -402,7 +406,7 @@ int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
|
|||
|
||||
switch (map->type) {
|
||||
case _DRM_REGISTERS:
|
||||
drm_ioremapfree(map->handle, map->size, dev);
|
||||
iounmap(map->handle);
|
||||
/* FALLTHROUGH */
|
||||
case _DRM_FRAME_BUFFER:
|
||||
if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
|
||||
|
|
|
@ -251,7 +251,8 @@ struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
|
|||
page = NOPAGE_OOM;
|
||||
goto out;
|
||||
}
|
||||
page = ttm->pages[page_offset] = drm_alloc_gatt_pages(0);
|
||||
page = ttm->pages[page_offset] =
|
||||
alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
|
||||
if (!page) {
|
||||
drm_free_memctl(PAGE_SIZE);
|
||||
page = NOPAGE_OOM;
|
||||
|
|
|
@ -31,7 +31,6 @@
|
|||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <asm/agp.h>
|
||||
#ifndef _DRM_COMPAT_H_
|
||||
#define _DRM_COMPAT_H_
|
||||
|
||||
|
@ -57,6 +56,12 @@
|
|||
#define module_param(name, type, perm)
|
||||
#endif
|
||||
|
||||
/* older kernels had different irq args */
|
||||
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
|
||||
#undef DRM_IRQ_ARGS
|
||||
#define DRM_IRQ_ARGS int irq, void *arg, struct pt_regs *regs
|
||||
#endif
|
||||
|
||||
#ifndef list_for_each_safe
|
||||
#define list_for_each_safe(pos, n, head) \
|
||||
for (pos = (head)->next, n = pos->next; pos != (head); \
|
||||
|
@ -80,92 +85,6 @@
|
|||
pos = n, n = list_entry(n->member.next, typeof(*n), member))
|
||||
#endif
|
||||
|
||||
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,19)
|
||||
static inline struct page *vmalloc_to_page(void *vmalloc_addr)
|
||||
{
|
||||
unsigned long addr = (unsigned long)vmalloc_addr;
|
||||
struct page *page = NULL;
|
||||
pgd_t *pgd = pgd_offset_k(addr);
|
||||
pmd_t *pmd;
|
||||
pte_t *ptep, pte;
|
||||
|
||||
if (!pgd_none(*pgd)) {
|
||||
pmd = pmd_offset(pgd, addr);
|
||||
if (!pmd_none(*pmd)) {
|
||||
preempt_disable();
|
||||
ptep = pte_offset_map(pmd, addr);
|
||||
pte = *ptep;
|
||||
if (pte_present(pte))
|
||||
page = pte_page(pte);
|
||||
pte_unmap(ptep);
|
||||
preempt_enable();
|
||||
}
|
||||
}
|
||||
return page;
|
||||
}
|
||||
#endif
|
||||
|
||||
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,4,2)
|
||||
#define down_write down
|
||||
#define up_write up
|
||||
#endif
|
||||
|
||||
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
|
||||
#define DRM_PCI_DEV(pdev) &pdev->dev
|
||||
#else
|
||||
#define DRM_PCI_DEV(pdev) NULL
|
||||
#endif
|
||||
|
||||
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
|
||||
static inline unsigned iminor(struct inode *inode)
|
||||
{
|
||||
return MINOR(inode->i_rdev);
|
||||
}
|
||||
|
||||
#define old_encode_dev(x) (x)
|
||||
|
||||
struct drm_sysfs_class;
|
||||
struct class_simple;
|
||||
struct device;
|
||||
|
||||
#define pci_dev_put(x) do {} while (0)
|
||||
#define pci_get_subsys pci_find_subsys
|
||||
|
||||
static inline struct class_device *DRM(sysfs_device_add) (struct drm_sysfs_class
|
||||
* cs, dev_t dev,
|
||||
struct device *
|
||||
device,
|
||||
const char *fmt,
|
||||
...) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline void DRM(sysfs_device_remove) (dev_t dev) {
|
||||
}
|
||||
|
||||
static inline void DRM(sysfs_destroy) (struct drm_sysfs_class * cs) {
|
||||
}
|
||||
|
||||
static inline struct drm_sysfs_class *DRM(sysfs_create) (struct module * owner,
|
||||
char *name) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
#ifndef pci_pretty_name
|
||||
#define pci_pretty_name(x) x->name
|
||||
#endif
|
||||
|
||||
struct drm_device;
|
||||
static inline int radeon_create_i2c_busses(struct drm_device *dev)
|
||||
{
|
||||
return 0;
|
||||
};
|
||||
static inline void radeon_delete_i2c_busses(struct drm_device *dev)
|
||||
{
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
#ifndef __user
|
||||
#define __user
|
||||
#endif
|
||||
|
@ -178,22 +97,27 @@ static inline void radeon_delete_i2c_busses(struct drm_device *dev)
|
|||
#define __GFP_COMP 0
|
||||
#endif
|
||||
|
||||
#ifndef REMAP_PAGE_RANGE_5_ARGS
|
||||
#define DRM_RPR_ARG(vma)
|
||||
#else
|
||||
#define DRM_RPR_ARG(vma) vma,
|
||||
#endif
|
||||
|
||||
#define VM_OFFSET(vma) ((vma)->vm_pgoff << PAGE_SHIFT)
|
||||
|
||||
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
|
||||
static inline int remap_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t pgprot)
|
||||
{
|
||||
return remap_page_range(DRM_RPR_ARG(vma) from,
|
||||
return remap_page_range(vma, from,
|
||||
pfn << PAGE_SHIFT,
|
||||
size,
|
||||
pgprot);
|
||||
}
|
||||
|
||||
static __inline__ void *kcalloc(size_t nmemb, size_t size, int flags)
|
||||
{
|
||||
void *addr;
|
||||
|
||||
addr = kmalloc(size * nmemb, flags);
|
||||
if (addr != NULL)
|
||||
memset((void *)addr, 0, size * nmemb);
|
||||
|
||||
return addr;
|
||||
}
|
||||
#endif
|
||||
|
||||
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
|
||||
|
@ -215,10 +139,6 @@ static inline int remap_pfn_range(struct vm_area_struct *vma, unsigned long from
|
|||
#define __x86_64__
|
||||
#endif
|
||||
|
||||
#ifndef pci_pretty_name
|
||||
#define pci_pretty_name(dev) ""
|
||||
#endif
|
||||
|
||||
/* sysfs __ATTR macro */
|
||||
#ifndef __ATTR
|
||||
#define __ATTR(_name,_mode,_show,_store) { \
|
||||
|
@ -228,10 +148,17 @@ static inline int remap_pfn_range(struct vm_area_struct *vma, unsigned long from
|
|||
}
|
||||
#endif
|
||||
|
||||
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
|
||||
#define vmalloc_user(_size) ({void * tmp = vmalloc(_size); \
|
||||
if (tmp) memset(tmp, 0, size); \
|
||||
(tmp);})
|
||||
#endif
|
||||
|
||||
|
||||
#include <linux/mm.h>
|
||||
#include <asm/page.h>
|
||||
|
||||
#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)) && \
|
||||
#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) && \
|
||||
(LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)))
|
||||
#define DRM_ODD_MM_COMPAT
|
||||
#endif
|
||||
|
@ -253,16 +180,9 @@ extern void drm_clear_vma(struct vm_area_struct *vma,
|
|||
|
||||
extern pgprot_t vm_get_page_prot(unsigned long vm_flags);
|
||||
|
||||
/*
|
||||
* These are similar to the current kernel gatt pages allocator, only that we
|
||||
* want a struct page pointer instead of a virtual address. This allows for pages
|
||||
* that are not in the kernel linear map.
|
||||
*/
|
||||
|
||||
#define drm_alloc_gatt_pages(order) ({ \
|
||||
void *_virt = alloc_gatt_pages(order); \
|
||||
((_virt) ? virt_to_page(_virt) : NULL);})
|
||||
#define drm_free_gatt_pages(pages, order) free_gatt_pages(page_address(pages), order)
|
||||
#ifndef GFP_DMA32
|
||||
#define GFP_DMA32 0
|
||||
#endif
|
||||
|
||||
#if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
|
||||
|
||||
|
@ -288,7 +208,7 @@ extern struct page *drm_vm_ttm_fault(struct vm_area_struct *vma,
|
|||
#endif
|
||||
|
||||
|
||||
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
|
||||
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21))
|
||||
|
||||
/*
|
||||
* Hopefully, real NOPAGE_RETRY functionality will be in 2.6.19.
|
||||
|
|
|
@ -446,52 +446,6 @@ static struct file_operations drm_stub_fops = {
|
|||
.open = drm_stub_open
|
||||
};
|
||||
|
||||
static int drm_create_memory_caches(void)
|
||||
{
|
||||
drm_cache.mm = kmem_cache_create("drm_mm_node_t",
|
||||
sizeof(drm_mm_node_t),
|
||||
0,
|
||||
SLAB_HWCACHE_ALIGN,
|
||||
NULL,NULL);
|
||||
if (!drm_cache.mm)
|
||||
return -ENOMEM;
|
||||
|
||||
drm_cache.fence_object= kmem_cache_create("drm_fence_object_t",
|
||||
sizeof(drm_fence_object_t),
|
||||
0,
|
||||
SLAB_HWCACHE_ALIGN,
|
||||
NULL,NULL);
|
||||
if (!drm_cache.fence_object)
|
||||
return -ENOMEM;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void drm_free_mem_cache(kmem_cache_t *cache,
|
||||
const char *name)
|
||||
{
|
||||
if (!cache)
|
||||
return;
|
||||
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
|
||||
if (kmem_cache_destroy(cache)) {
|
||||
DRM_ERROR("Warning! DRM is leaking %s memory.\n",
|
||||
name);
|
||||
}
|
||||
#else
|
||||
kmem_cache_destroy(cache);
|
||||
#endif
|
||||
}
|
||||
|
||||
static void drm_free_memory_caches(void )
|
||||
{
|
||||
|
||||
drm_free_mem_cache(drm_cache.fence_object, "fence object");
|
||||
drm_cache.fence_object = NULL;
|
||||
drm_free_mem_cache(drm_cache.mm, "memory manager block");
|
||||
drm_cache.mm = NULL;
|
||||
}
|
||||
|
||||
|
||||
static int __init drm_core_init(void)
|
||||
{
|
||||
int ret;
|
||||
|
@ -499,9 +453,6 @@ static int __init drm_core_init(void)
|
|||
|
||||
si_meminfo(&si);
|
||||
drm_init_memctl(si.totalram/2, si.totalram*3/4);
|
||||
ret = drm_create_memory_caches();
|
||||
if (ret)
|
||||
goto err_p1;
|
||||
|
||||
ret = -ENOMEM;
|
||||
drm_cards_limit =
|
||||
|
@ -539,13 +490,11 @@ err_p2:
|
|||
unregister_chrdev(DRM_MAJOR, "drm");
|
||||
drm_free(drm_heads, sizeof(*drm_heads) * drm_cards_limit, DRM_MEM_STUB);
|
||||
err_p1:
|
||||
drm_free_memory_caches();
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void __exit drm_core_exit(void)
|
||||
{
|
||||
drm_free_memory_caches();
|
||||
remove_proc_entry("dri", NULL);
|
||||
drm_sysfs_destroy(drm_class);
|
||||
|
||||
|
|
|
@ -117,8 +117,7 @@ void drm_fence_usage_deref_locked(drm_device_t * dev,
|
|||
DRM_DEBUG("Destroyed a fence object 0x%08lx\n",
|
||||
fence->base.hash.key);
|
||||
atomic_dec(&fm->count);
|
||||
drm_ctl_cache_free(drm_cache.fence_object, sizeof(*fence),
|
||||
fence);
|
||||
drm_ctl_free(fence, sizeof(*fence), DRM_MEM_FENCE);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -132,8 +131,7 @@ void drm_fence_usage_deref_unlocked(drm_device_t * dev,
|
|||
if (atomic_read(&fence->usage) == 0) {
|
||||
drm_fence_unring(dev, &fence->ring);
|
||||
atomic_dec(&fm->count);
|
||||
drm_ctl_cache_free(drm_cache.fence_object,
|
||||
sizeof(*fence), fence);
|
||||
drm_ctl_free(fence, sizeof(*fence), DRM_MEM_FENCE);
|
||||
}
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
}
|
||||
|
@ -439,8 +437,7 @@ int drm_fence_object_create(drm_device_t * dev, uint32_t type,
|
|||
int ret;
|
||||
drm_fence_manager_t *fm = &dev->fm;
|
||||
|
||||
fence = drm_ctl_cache_alloc(drm_cache.fence_object,
|
||||
sizeof(*fence), GFP_KERNEL);
|
||||
fence = drm_ctl_alloc(sizeof(*fence), DRM_MEM_FENCE);
|
||||
if (!fence)
|
||||
return -ENOMEM;
|
||||
ret = drm_fence_object_init(dev, type, flags, fence);
|
||||
|
|
|
@ -46,7 +46,7 @@ static int drm_setup(drm_device_t * dev)
|
|||
drm_local_map_t *map;
|
||||
int i;
|
||||
int ret;
|
||||
|
||||
int sareapage;
|
||||
|
||||
if (dev->driver->firstopen) {
|
||||
ret = dev->driver->firstopen(dev);
|
||||
|
@ -57,8 +57,8 @@ static int drm_setup(drm_device_t * dev)
|
|||
dev->magicfree.next = NULL;
|
||||
|
||||
/* prebuild the SAREA */
|
||||
|
||||
i = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM, _DRM_CONTAINS_LOCK, &map);
|
||||
sareapage = max(SAREA_MAX, PAGE_SIZE);
|
||||
i = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK, &map);
|
||||
if (i != 0)
|
||||
return i;
|
||||
|
||||
|
@ -426,7 +426,7 @@ int drm_release(struct inode *inode, struct file *filp)
|
|||
current->pid, (long)old_encode_dev(priv->head->device),
|
||||
dev->open_count);
|
||||
|
||||
if (dev->driver->reclaim_buffers_locked) {
|
||||
if (dev->driver->reclaim_buffers_locked && dev->lock.hw_lock) {
|
||||
unsigned long _end = jiffies + DRM_HZ*3;
|
||||
|
||||
do {
|
||||
|
@ -446,12 +446,12 @@ int drm_release(struct inode *inode, struct file *filp)
|
|||
* holds the lock. Then we can run reclaim buffers locked anyway.
|
||||
*/
|
||||
|
||||
DRM_ERROR("Reclaim buffers locked deadlock.\n");
|
||||
DRM_ERROR("This is probably a single thread having multiple\n");
|
||||
DRM_ERROR("DRM file descriptors open either dying or "
|
||||
"closing file descriptors\n");
|
||||
DRM_ERROR("while having the lock. I will not reclaim buffers.\n");
|
||||
DRM_ERROR("Locking context is 0x%08x\n",
|
||||
DRM_ERROR("Reclaim buffers locked deadlock.\n"
|
||||
"\tThis is probably a single thread having multiple\n"
|
||||
"\tDRM file descriptors open either dying or"
|
||||
" closing file descriptors\n"
|
||||
"\twhile having the lock. I will not reclaim buffers.\n"
|
||||
"\tLocking context is 0x%08x\n",
|
||||
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
|
||||
}
|
||||
} else if (drm_i_have_hw_lock(filp)) {
|
||||
|
|
|
@ -337,7 +337,7 @@ int drm_setversion(DRM_IOCTL_ARGS)
|
|||
retv.drm_dd_major = dev->driver->major;
|
||||
retv.drm_dd_minor = dev->driver->minor;
|
||||
|
||||
if (copy_to_user(argp, &retv, sizeof(sv)))
|
||||
if (copy_to_user(argp, &retv, sizeof(retv)))
|
||||
return -EFAULT;
|
||||
|
||||
if (sv.drm_di_major != -1) {
|
||||
|
|
|
@ -134,13 +134,7 @@ int drm_mem_info(char *buf, char **start, off_t offset,
|
|||
/** Wrapper around kmalloc() */
|
||||
void *drm_calloc(size_t nmemb, size_t size, int area)
|
||||
{
|
||||
void *addr;
|
||||
|
||||
addr = kmalloc(size * nmemb, GFP_KERNEL);
|
||||
if (addr != NULL)
|
||||
memset((void *)addr, 0, size * nmemb);
|
||||
|
||||
return addr;
|
||||
return kcalloc(nmemb, size, GFP_KERNEL);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_calloc);
|
||||
|
||||
|
@ -250,3 +244,26 @@ int drm_unbind_agp(DRM_AGP_MEM * handle)
|
|||
}
|
||||
#endif /* agp */
|
||||
#endif /* debug_memory */
|
||||
|
||||
void drm_core_ioremap(struct drm_map *map, struct drm_device *dev)
|
||||
{
|
||||
if (drm_core_has_AGP(dev) &&
|
||||
dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
|
||||
map->handle = agp_remap(map->offset, map->size, dev);
|
||||
else
|
||||
map->handle = ioremap(map->offset, map->size);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(drm_core_ioremap);
|
||||
|
||||
void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev)
|
||||
{
|
||||
if (!map->handle || !map->size)
|
||||
return;
|
||||
|
||||
if (drm_core_has_AGP(dev) &&
|
||||
dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
|
||||
vunmap(map->handle);
|
||||
else
|
||||
iounmap(map->handle);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(drm_core_ioremapfree);
|
||||
|
|
|
@ -43,7 +43,7 @@
|
|||
*/
|
||||
|
||||
/* Need the 4-argument version of vmap(). */
|
||||
#if __OS_HAS_AGP && defined(VMAP_4_ARGS)
|
||||
#if __OS_HAS_AGP
|
||||
|
||||
#include <linux/vmalloc.h>
|
||||
|
||||
|
@ -57,18 +57,6 @@
|
|||
# endif
|
||||
#endif
|
||||
|
||||
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
|
||||
#ifndef pte_offset_kernel
|
||||
# define pte_offset_kernel(dir, address) pte_offset(dir, address)
|
||||
#endif
|
||||
#ifndef pte_pfn
|
||||
# define pte_pfn(pte) (pte_page(pte) - mem_map)
|
||||
#endif
|
||||
#ifndef pfn_to_page
|
||||
# define pfn_to_page(pfn) (mem_map + (pfn))
|
||||
#endif
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Find the drm_map that covers the range [offset, offset+size).
|
||||
*/
|
||||
|
@ -134,19 +122,6 @@ static inline void *agp_remap(unsigned long offset, unsigned long size,
|
|||
return addr;
|
||||
}
|
||||
|
||||
static inline unsigned long drm_follow_page(void *vaddr)
|
||||
{
|
||||
pgd_t *pgd = pgd_offset_k((unsigned long) vaddr);
|
||||
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,10)
|
||||
pmd_t *pmd = pmd_offset(pgd, (unsigned long)vaddr);
|
||||
#else
|
||||
pud_t *pud = pud_offset(pgd, (unsigned long) vaddr);
|
||||
pmd_t *pmd = pmd_offset(pud, (unsigned long) vaddr);
|
||||
#endif
|
||||
pte_t *ptep = pte_offset_kernel(pmd, (unsigned long) vaddr);
|
||||
return pte_pfn(*ptep) << PAGE_SHIFT;
|
||||
}
|
||||
|
||||
#else /* __OS_HAS_AGP */
|
||||
|
||||
static inline drm_map_t *drm_lookup_map(unsigned long offset,
|
||||
|
@ -161,73 +136,4 @@ static inline void *agp_remap(unsigned long offset, unsigned long size,
|
|||
return NULL;
|
||||
}
|
||||
|
||||
static inline unsigned long drm_follow_page(void *vaddr)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef DEBUG_MEMORY
|
||||
static inline void *drm_ioremap(unsigned long offset, unsigned long size,
|
||||
drm_device_t * dev)
|
||||
{
|
||||
#if defined(VMAP_4_ARGS)
|
||||
if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture) {
|
||||
drm_map_t *map = drm_lookup_map(offset, size, dev);
|
||||
|
||||
if (map && map->type == _DRM_AGP)
|
||||
return agp_remap(offset, size, dev);
|
||||
}
|
||||
#endif
|
||||
|
||||
return ioremap(offset, size);
|
||||
}
|
||||
|
||||
static inline void *drm_ioremap_nocache(unsigned long offset,
|
||||
unsigned long size, drm_device_t * dev)
|
||||
{
|
||||
#if defined(VMAP_4_ARGS)
|
||||
if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture) {
|
||||
drm_map_t *map = drm_lookup_map(offset, size, dev);
|
||||
|
||||
if (map && map->type == _DRM_AGP)
|
||||
return agp_remap(offset, size, dev);
|
||||
}
|
||||
#endif
|
||||
|
||||
return ioremap_nocache(offset, size);
|
||||
}
|
||||
|
||||
static inline void drm_ioremapfree(void *pt, unsigned long size,
|
||||
drm_device_t * dev)
|
||||
{
|
||||
#if defined(VMAP_4_ARGS)
|
||||
/*
|
||||
* This is a bit ugly. It would be much cleaner if the DRM API would use separate
|
||||
* routines for handling mappings in the AGP space. Hopefully this can be done in
|
||||
* a future revision of the interface...
|
||||
*/
|
||||
if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture
|
||||
&& ((unsigned long)pt >= VMALLOC_START
|
||||
&& (unsigned long)pt < VMALLOC_END)) {
|
||||
unsigned long offset;
|
||||
drm_map_t *map;
|
||||
|
||||
offset = drm_follow_page(pt) | ((unsigned long)pt & ~PAGE_MASK);
|
||||
map = drm_lookup_map(offset, size, dev);
|
||||
if (map && map->type == _DRM_AGP) {
|
||||
vunmap(pt);
|
||||
return;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
iounmap(pt);
|
||||
}
|
||||
#else
|
||||
extern void *drm_ioremap(unsigned long offset, unsigned long size,
|
||||
drm_device_t * dev);
|
||||
extern void *drm_ioremap_nocache(unsigned long offset,
|
||||
unsigned long size, drm_device_t * dev);
|
||||
extern void drm_ioremapfree(void *pt, unsigned long size,
|
||||
drm_device_t * dev);
|
||||
#endif
|
||||
|
|
|
@ -289,79 +289,6 @@ void drm_free_pages(unsigned long address, int order, int area)
|
|||
}
|
||||
}
|
||||
|
||||
void *drm_ioremap(unsigned long offset, unsigned long size, drm_device_t * dev)
|
||||
{
|
||||
void *pt;
|
||||
|
||||
if (!size) {
|
||||
DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
|
||||
"Mapping 0 bytes at 0x%08lx\n", offset);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (!(pt = drm_ioremap(offset, size, dev))) {
|
||||
spin_lock(&drm_mem_lock);
|
||||
++drm_mem_stats[DRM_MEM_MAPPINGS].fail_count;
|
||||
spin_unlock(&drm_mem_lock);
|
||||
return NULL;
|
||||
}
|
||||
spin_lock(&drm_mem_lock);
|
||||
++drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
|
||||
drm_mem_stats[DRM_MEM_MAPPINGS].bytes_allocated += size;
|
||||
spin_unlock(&drm_mem_lock);
|
||||
return pt;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_ioremap);
|
||||
|
||||
void *drm_ioremap_nocache(unsigned long offset, unsigned long size,
|
||||
drm_device_t * dev)
|
||||
{
|
||||
void *pt;
|
||||
|
||||
if (!size) {
|
||||
DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
|
||||
"Mapping 0 bytes at 0x%08lx\n", offset);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (!(pt = drm_ioremap_nocache(offset, size, dev))) {
|
||||
spin_lock(&drm_mem_lock);
|
||||
++drm_mem_stats[DRM_MEM_MAPPINGS].fail_count;
|
||||
spin_unlock(&drm_mem_lock);
|
||||
return NULL;
|
||||
}
|
||||
spin_lock(&drm_mem_lock);
|
||||
++drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
|
||||
drm_mem_stats[DRM_MEM_MAPPINGS].bytes_allocated += size;
|
||||
spin_unlock(&drm_mem_lock);
|
||||
return pt;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_ioremap_nocache);
|
||||
|
||||
void drm_ioremapfree(void *pt, unsigned long size, drm_device_t * dev)
|
||||
{
|
||||
int alloc_count;
|
||||
int free_count;
|
||||
|
||||
if (!pt)
|
||||
DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
|
||||
"Attempt to free NULL pointer\n");
|
||||
else
|
||||
drm_ioremapfree(pt, size, dev);
|
||||
|
||||
spin_lock(&drm_mem_lock);
|
||||
drm_mem_stats[DRM_MEM_MAPPINGS].bytes_freed += size;
|
||||
free_count = ++drm_mem_stats[DRM_MEM_MAPPINGS].free_count;
|
||||
alloc_count = drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
|
||||
spin_unlock(&drm_mem_lock);
|
||||
if (free_count > alloc_count) {
|
||||
DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
|
||||
"Excess frees: %d frees, %d allocs\n",
|
||||
free_count, alloc_count);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(drm_ioremapfree);
|
||||
|
||||
#if __OS_HAS_AGP
|
||||
|
||||
DRM_AGP_MEM *drm_alloc_agp(drm_device_t *dev, int pages, u32 type)
|
||||
|
|
|
@ -275,74 +275,6 @@ void drm_free_pages (unsigned long address, int order, int area) {
|
|||
}
|
||||
}
|
||||
|
||||
void *drm_ioremap (unsigned long offset, unsigned long size,
|
||||
drm_device_t * dev) {
|
||||
void *pt;
|
||||
|
||||
if (!size) {
|
||||
DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
|
||||
"Mapping 0 bytes at 0x%08lx\n", offset);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (!(pt = drm_ioremap(offset, size, dev))) {
|
||||
spin_lock(&drm_mem_lock);
|
||||
++drm_mem_stats[DRM_MEM_MAPPINGS].fail_count;
|
||||
spin_unlock(&drm_mem_lock);
|
||||
return NULL;
|
||||
}
|
||||
spin_lock(&drm_mem_lock);
|
||||
++drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
|
||||
drm_mem_stats[DRM_MEM_MAPPINGS].bytes_allocated += size;
|
||||
spin_unlock(&drm_mem_lock);
|
||||
return pt;
|
||||
}
|
||||
|
||||
void *drm_ioremap_nocache (unsigned long offset, unsigned long size,
|
||||
drm_device_t * dev) {
|
||||
void *pt;
|
||||
|
||||
if (!size) {
|
||||
DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
|
||||
"Mapping 0 bytes at 0x%08lx\n", offset);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (!(pt = drm_ioremap_nocache(offset, size, dev))) {
|
||||
spin_lock(&drm_mem_lock);
|
||||
++drm_mem_stats[DRM_MEM_MAPPINGS].fail_count;
|
||||
spin_unlock(&drm_mem_lock);
|
||||
return NULL;
|
||||
}
|
||||
spin_lock(&drm_mem_lock);
|
||||
++drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
|
||||
drm_mem_stats[DRM_MEM_MAPPINGS].bytes_allocated += size;
|
||||
spin_unlock(&drm_mem_lock);
|
||||
return pt;
|
||||
}
|
||||
|
||||
void drm_ioremapfree (void *pt, unsigned long size, drm_device_t * dev) {
|
||||
int alloc_count;
|
||||
int free_count;
|
||||
|
||||
if (!pt)
|
||||
DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
|
||||
"Attempt to free NULL pointer\n");
|
||||
else
|
||||
drm_ioremapfree(pt, size, dev);
|
||||
|
||||
spin_lock(&drm_mem_lock);
|
||||
drm_mem_stats[DRM_MEM_MAPPINGS].bytes_freed += size;
|
||||
free_count = ++drm_mem_stats[DRM_MEM_MAPPINGS].free_count;
|
||||
alloc_count = drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
|
||||
spin_unlock(&drm_mem_lock);
|
||||
if (free_count > alloc_count) {
|
||||
DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
|
||||
"Excess frees: %d frees, %d allocs\n",
|
||||
free_count, alloc_count);
|
||||
}
|
||||
}
|
||||
|
||||
#if __OS_HAS_AGP
|
||||
|
||||
DRM_AGP_MEM *drm_alloc_agp (drm_device_t *dev, int pages, u32 type) {
|
||||
|
|
|
@ -49,7 +49,7 @@ unsigned long drm_mm_tail_space(drm_mm_t *mm)
|
|||
struct list_head *tail_node;
|
||||
drm_mm_node_t *entry;
|
||||
|
||||
tail_node = mm->root_node.ml_entry.prev;
|
||||
tail_node = mm->ml_entry.prev;
|
||||
entry = list_entry(tail_node, drm_mm_node_t, ml_entry);
|
||||
if (!entry->free)
|
||||
return 0;
|
||||
|
@ -62,7 +62,7 @@ int drm_mm_remove_space_from_tail(drm_mm_t *mm, unsigned long size)
|
|||
struct list_head *tail_node;
|
||||
drm_mm_node_t *entry;
|
||||
|
||||
tail_node = mm->root_node.ml_entry.prev;
|
||||
tail_node = mm->ml_entry.prev;
|
||||
entry = list_entry(tail_node, drm_mm_node_t, ml_entry);
|
||||
if (!entry->free)
|
||||
return -ENOMEM;
|
||||
|
@ -82,8 +82,7 @@ static int drm_mm_create_tail_node(drm_mm_t *mm,
|
|||
drm_mm_node_t *child;
|
||||
|
||||
child = (drm_mm_node_t *)
|
||||
drm_ctl_cache_alloc(drm_cache.mm, sizeof(*child),
|
||||
GFP_KERNEL);
|
||||
drm_ctl_alloc(sizeof(*child), DRM_MEM_MM);
|
||||
if (!child)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -92,8 +91,8 @@ static int drm_mm_create_tail_node(drm_mm_t *mm,
|
|||
child->start = start;
|
||||
child->mm = mm;
|
||||
|
||||
list_add_tail(&child->ml_entry, &mm->root_node.ml_entry);
|
||||
list_add_tail(&child->fl_entry, &mm->root_node.fl_entry);
|
||||
list_add_tail(&child->ml_entry, &mm->ml_entry);
|
||||
list_add_tail(&child->fl_entry, &mm->fl_entry);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -104,7 +103,7 @@ int drm_mm_add_space_to_tail(drm_mm_t *mm, unsigned long size)
|
|||
struct list_head *tail_node;
|
||||
drm_mm_node_t *entry;
|
||||
|
||||
tail_node = mm->root_node.ml_entry.prev;
|
||||
tail_node = mm->ml_entry.prev;
|
||||
entry = list_entry(tail_node, drm_mm_node_t, ml_entry);
|
||||
if (!entry->free) {
|
||||
return drm_mm_create_tail_node(mm, entry->start + entry->size, size);
|
||||
|
@ -119,8 +118,7 @@ static drm_mm_node_t *drm_mm_split_at_start(drm_mm_node_t *parent,
|
|||
drm_mm_node_t *child;
|
||||
|
||||
child = (drm_mm_node_t *)
|
||||
drm_ctl_cache_alloc(drm_cache.mm, sizeof(*child),
|
||||
GFP_KERNEL);
|
||||
drm_ctl_alloc(sizeof(*child), DRM_MEM_MM);
|
||||
if (!child)
|
||||
return NULL;
|
||||
|
||||
|
@ -150,7 +148,7 @@ drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent,
|
|||
unsigned tmp = 0;
|
||||
|
||||
if (alignment)
|
||||
tmp = size % alignment;
|
||||
tmp = parent->start % alignment;
|
||||
|
||||
if (tmp) {
|
||||
align_splitoff = drm_mm_split_at_start(parent, alignment - tmp);
|
||||
|
@ -164,12 +162,8 @@ drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent,
|
|||
return parent;
|
||||
} else {
|
||||
child = drm_mm_split_at_start(parent, size);
|
||||
if (!child) {
|
||||
if (align_splitoff)
|
||||
drm_mm_put_block(align_splitoff);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
if (align_splitoff)
|
||||
drm_mm_put_block(align_splitoff);
|
||||
|
||||
|
@ -185,9 +179,8 @@ void drm_mm_put_block(drm_mm_node_t * cur)
|
|||
{
|
||||
|
||||
drm_mm_t *mm = cur->mm;
|
||||
drm_mm_node_t *list_root = &mm->root_node;
|
||||
struct list_head *cur_head = &cur->ml_entry;
|
||||
struct list_head *root_head = &list_root->ml_entry;
|
||||
struct list_head *root_head = &mm->ml_entry;
|
||||
drm_mm_node_t *prev_node = NULL;
|
||||
drm_mm_node_t *next_node;
|
||||
|
||||
|
@ -207,9 +200,8 @@ void drm_mm_put_block(drm_mm_node_t * cur)
|
|||
prev_node->size += next_node->size;
|
||||
list_del(&next_node->ml_entry);
|
||||
list_del(&next_node->fl_entry);
|
||||
drm_ctl_cache_free(drm_cache.mm,
|
||||
sizeof(*next_node),
|
||||
next_node);
|
||||
drm_ctl_free(next_node, sizeof(*next_node),
|
||||
DRM_MEM_MM);
|
||||
} else {
|
||||
next_node->size += cur->size;
|
||||
next_node->start = cur->start;
|
||||
|
@ -219,10 +211,10 @@ void drm_mm_put_block(drm_mm_node_t * cur)
|
|||
}
|
||||
if (!merged) {
|
||||
cur->free = 1;
|
||||
list_add(&cur->fl_entry, &list_root->fl_entry);
|
||||
list_add(&cur->fl_entry, &mm->fl_entry);
|
||||
} else {
|
||||
list_del(&cur->ml_entry);
|
||||
drm_ctl_cache_free(drm_cache.mm, sizeof(*cur), cur);
|
||||
drm_ctl_free(cur, sizeof(*cur), DRM_MEM_MM);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -231,7 +223,7 @@ drm_mm_node_t *drm_mm_search_free(const drm_mm_t * mm,
|
|||
unsigned alignment, int best_match)
|
||||
{
|
||||
struct list_head *list;
|
||||
const struct list_head *free_stack = &mm->root_node.fl_entry;
|
||||
const struct list_head *free_stack = &mm->fl_entry;
|
||||
drm_mm_node_t *entry;
|
||||
drm_mm_node_t *best;
|
||||
unsigned long best_size;
|
||||
|
@ -244,8 +236,11 @@ drm_mm_node_t *drm_mm_search_free(const drm_mm_t * mm,
|
|||
entry = list_entry(list, drm_mm_node_t, fl_entry);
|
||||
wasted = 0;
|
||||
|
||||
if (entry->size < size)
|
||||
continue;
|
||||
|
||||
if (alignment) {
|
||||
register unsigned tmp = size % alignment;
|
||||
register unsigned tmp = entry->start % alignment;
|
||||
if (tmp)
|
||||
wasted += alignment - tmp;
|
||||
}
|
||||
|
@ -266,15 +261,15 @@ drm_mm_node_t *drm_mm_search_free(const drm_mm_t * mm,
|
|||
|
||||
int drm_mm_clean(drm_mm_t * mm)
|
||||
{
|
||||
struct list_head *head = &mm->root_node.ml_entry;
|
||||
struct list_head *head = &mm->ml_entry;
|
||||
|
||||
return (head->next->next == head);
|
||||
}
|
||||
|
||||
int drm_mm_init(drm_mm_t * mm, unsigned long start, unsigned long size)
|
||||
{
|
||||
INIT_LIST_HEAD(&mm->root_node.ml_entry);
|
||||
INIT_LIST_HEAD(&mm->root_node.fl_entry);
|
||||
INIT_LIST_HEAD(&mm->ml_entry);
|
||||
INIT_LIST_HEAD(&mm->fl_entry);
|
||||
|
||||
return drm_mm_create_tail_node(mm, start, size);
|
||||
}
|
||||
|
@ -283,20 +278,20 @@ EXPORT_SYMBOL(drm_mm_init);
|
|||
|
||||
void drm_mm_takedown(drm_mm_t * mm)
|
||||
{
|
||||
struct list_head *bnode = mm->root_node.fl_entry.next;
|
||||
struct list_head *bnode = mm->fl_entry.next;
|
||||
drm_mm_node_t *entry;
|
||||
|
||||
entry = list_entry(bnode, drm_mm_node_t, fl_entry);
|
||||
|
||||
if (entry->ml_entry.next != &mm->root_node.ml_entry ||
|
||||
entry->fl_entry.next != &mm->root_node.fl_entry) {
|
||||
if (entry->ml_entry.next != &mm->ml_entry ||
|
||||
entry->fl_entry.next != &mm->fl_entry) {
|
||||
DRM_ERROR("Memory manager not clean. Delaying takedown\n");
|
||||
return;
|
||||
}
|
||||
|
||||
list_del(&entry->fl_entry);
|
||||
list_del(&entry->ml_entry);
|
||||
drm_ctl_cache_free(drm_cache.mm, sizeof(*entry), entry);
|
||||
drm_ctl_free(entry, sizeof(*entry), DRM_MEM_MM);
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(drm_mm_takedown);
|
||||
|
|
|
@ -56,7 +56,7 @@
|
|||
drm_device_t *dev = priv->head->dev
|
||||
|
||||
/** IRQ handler arguments and return type and values */
|
||||
#define DRM_IRQ_ARGS int irq, void *arg, struct pt_regs *regs
|
||||
#define DRM_IRQ_ARGS int irq, void *arg
|
||||
/** backwards compatibility with old irq return values */
|
||||
#ifndef IRQ_HANDLED
|
||||
typedef void irqreturn_t;
|
||||
|
@ -66,13 +66,8 @@ typedef void irqreturn_t;
|
|||
|
||||
/** AGP types */
|
||||
#if __OS_HAS_AGP
|
||||
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,70)
|
||||
#define DRM_AGP_MEM agp_memory
|
||||
#define DRM_AGP_KERN agp_kern_info
|
||||
#else
|
||||
#define DRM_AGP_MEM struct agp_memory
|
||||
#define DRM_AGP_KERN struct agp_kern_info
|
||||
#endif
|
||||
#else
|
||||
/* define some dummy types for non AGP supporting kernels */
|
||||
struct no_agp_kern {
|
||||
|
@ -98,9 +93,6 @@ static __inline__ int mtrr_del(int reg, unsigned long base, unsigned long size)
|
|||
#define MTRR_TYPE_WRCOMB 1
|
||||
#endif
|
||||
|
||||
/** Task queue handler arguments */
|
||||
#define DRM_TASKQUEUE_ARGS void *arg
|
||||
|
||||
/** For data going into the kernel through the ioctl argument */
|
||||
#define DRM_COPY_FROM_USER_IOCTL(arg1, arg2, arg3) \
|
||||
if ( copy_from_user(&arg1, arg2, arg3) ) \
|
||||
|
|
|
@ -452,19 +452,23 @@ static int drm__objects_info(char *buf, char **start, off_t offset, int request,
|
|||
*start = &buf[offset];
|
||||
*eof = 0;
|
||||
|
||||
DRM_PROC_PRINT("Object accounting:\n\n");
|
||||
if (fm->initialized) {
|
||||
DRM_PROC_PRINT("Number of active fence objects: %d.\n\n",
|
||||
DRM_PROC_PRINT("Number of active fence objects: %d.\n",
|
||||
atomic_read(&fm->count));
|
||||
} else {
|
||||
DRM_PROC_PRINT("Fence objects are not supported by this driver\n\n");
|
||||
DRM_PROC_PRINT("Fence objects are not supported by this driver\n");
|
||||
}
|
||||
|
||||
if (bm->initialized) {
|
||||
DRM_PROC_PRINT("Number of active buffer objects: %d.\n\n",
|
||||
atomic_read(&bm->count));
|
||||
}
|
||||
DRM_PROC_PRINT("Memory accounting:\n\n");
|
||||
if (bm->initialized) {
|
||||
DRM_PROC_PRINT("Number of locked GATT pages: %lu.\n", bm->cur_pages);
|
||||
} else {
|
||||
DRM_PROC_PRINT("Buffer objects are not supported by this driver.\n\n");
|
||||
DRM_PROC_PRINT("Buffer objects are not supported by this driver.\n");
|
||||
}
|
||||
|
||||
drm_query_memctl(&used_mem, &low_mem, &high_mem);
|
||||
|
|
|
@ -0,0 +1 @@
|
|||
../shared-core/drm_sarea.h
|
|
@ -47,18 +47,13 @@ MODULE_LICENSE("GPL and additional rights");
|
|||
MODULE_PARM_DESC(cards_limit, "Maximum number of graphics cards");
|
||||
MODULE_PARM_DESC(debug, "Enable debug output");
|
||||
|
||||
module_param_named(cards_limit, drm_cards_limit, int, S_IRUGO);
|
||||
module_param_named(debug, drm_debug, int, S_IRUGO|S_IWUGO);
|
||||
module_param_named(cards_limit, drm_cards_limit, int, 0444);
|
||||
module_param_named(debug, drm_debug, int, 0600);
|
||||
|
||||
drm_head_t **drm_heads;
|
||||
struct drm_sysfs_class *drm_class;
|
||||
struct proc_dir_entry *drm_proc_root;
|
||||
|
||||
drm_cache_t drm_cache =
|
||||
{ .mm = NULL,
|
||||
.fence_object = NULL
|
||||
};
|
||||
|
||||
static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
|
||||
const struct pci_device_id *ent,
|
||||
struct drm_driver *driver)
|
||||
|
@ -249,9 +244,9 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
|
|||
if ((ret = drm_get_head(dev, &dev->primary)))
|
||||
goto err_g1;
|
||||
|
||||
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
|
||||
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
|
||||
driver->name, driver->major, driver->minor, driver->patchlevel,
|
||||
driver->date, dev->primary.minor, pci_pretty_name(dev->pdev));
|
||||
driver->date, dev->primary.minor);
|
||||
|
||||
return 0;
|
||||
|
||||
|
|
|
@ -162,7 +162,7 @@ struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs,
|
|||
memset(s_dev, 0x00, sizeof(*s_dev));
|
||||
|
||||
s_dev->dev = MKDEV(DRM_MAJOR, head->minor);
|
||||
s_dev->class_dev.dev = DRM_PCI_DEV(head->dev->pdev);
|
||||
s_dev->class_dev.dev = &head->dev->pdev->dev;
|
||||
s_dev->class_dev.class = &cs->class;
|
||||
|
||||
snprintf(s_dev->class_dev.class_id, BUS_ID_SIZE, "card%d", head->minor);
|
||||
|
|
|
@ -193,7 +193,7 @@ int drm_destroy_ttm(drm_ttm_t * ttm)
|
|||
* End debugging.
|
||||
*/
|
||||
|
||||
drm_free_gatt_pages(*cur_page, 0);
|
||||
__free_page(*cur_page);
|
||||
drm_free_memctl(PAGE_SIZE);
|
||||
--bm->cur_pages;
|
||||
}
|
||||
|
@ -225,7 +225,7 @@ static int drm_ttm_populate(drm_ttm_t * ttm)
|
|||
if (drm_alloc_memctl(PAGE_SIZE)) {
|
||||
return -ENOMEM;
|
||||
}
|
||||
page = drm_alloc_gatt_pages(0);
|
||||
page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
|
||||
if (!page) {
|
||||
drm_free_memctl(PAGE_SIZE);
|
||||
return -ENOMEM;
|
||||
|
|
|
@ -159,9 +159,9 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
|
|||
}
|
||||
#endif /* __OS_HAS_AGP */
|
||||
|
||||
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) || \
|
||||
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21) || \
|
||||
LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
|
||||
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
|
||||
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21))
|
||||
static
|
||||
#endif
|
||||
struct page *drm_vm_ttm_fault(struct vm_area_struct *vma,
|
||||
|
@@ -208,7 +208,8 @@ struct page *drm_vm_ttm_fault(struct vm_area_struct *vma,
data->type = VM_FAULT_OOM;
goto out;
}
page = ttm->pages[page_offset] = drm_alloc_gatt_pages(0);
page = ttm->pages[page_offset] =
alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
if (!page) {
drm_free_memctl(PAGE_SIZE);
data->type = VM_FAULT_OOM;
@@ -269,13 +270,13 @@ static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
if (address > vma->vm_end)
return NOPAGE_SIGBUS; /* Disallow mremap */
if (!map)
return NOPAGE_OOM; /* Nothing allocated */
return NOPAGE_SIGBUS; /* Nothing allocated */

offset = address - vma->vm_start;
i = (unsigned long)map->handle + offset;
page = vmalloc_to_page((void *)i);
if (!page)
return NOPAGE_OOM;
return NOPAGE_SIGBUS;
get_page(page);

DRM_DEBUG("shm_nopage 0x%lx\n", address);
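This hunk (and the matching drm_do_vm_dma_nopage()/drm_do_vm_sg_nopage() hunks below) makes the handlers return NOPAGE_SIGBUS instead of NOPAGE_OOM when nothing is mapped at the faulting address, so a stray access simply faults the offending process instead of being treated as an out-of-memory condition. For reference, a minimal handler in the same pre-2.6.23 ->nopage style; my_map and my_vm_shm_nopage are illustrative names, not code from the tree:

#include <linux/mm.h>
#include <linux/vmalloc.h>

struct my_map {
    void *handle;               /* vmalloc()ed backing store */
};

static struct page *my_vm_shm_nopage(struct vm_area_struct *vma,
                                     unsigned long address, int *type)
{
    struct my_map *map = vma->vm_private_data;
    unsigned long kva;
    struct page *page;

    if (address > vma->vm_end)
        return NOPAGE_SIGBUS;           /* disallow mremap */
    if (!map)
        return NOPAGE_SIGBUS;           /* nothing mapped here */

    kva = (unsigned long)map->handle + (address - vma->vm_start);
    page = vmalloc_to_page((void *)kva);
    if (!page)
        return NOPAGE_SIGBUS;

    get_page(page);                     /* the caller expects a reference */
    if (type)
        *type = VM_FAULT_MINOR;
    return page;
}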
@ -348,7 +349,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
|
|||
map->size);
|
||||
DRM_DEBUG("mtrr_del = %d\n", retcode);
|
||||
}
|
||||
drm_ioremapfree(map->handle, map->size, dev);
|
||||
iounmap(map->handle);
|
||||
break;
|
||||
case _DRM_SHM:
|
||||
vfree(map->handle);
|
||||
|
@ -396,7 +397,7 @@ static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
|
|||
if (address > vma->vm_end)
|
||||
return NOPAGE_SIGBUS; /* Disallow mremap */
|
||||
if (!dma->pagelist)
|
||||
return NOPAGE_OOM; /* Nothing allocated */
|
||||
return NOPAGE_SIGBUS; /* Nothing allocated */
|
||||
|
||||
offset = address - vma->vm_start; /* vm_[pg]off[set] should be 0 */
|
||||
page_nr = offset >> PAGE_SHIFT;
|
||||
|
@ -435,7 +436,7 @@ static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
|
|||
if (address > vma->vm_end)
|
||||
return NOPAGE_SIGBUS; /* Disallow mremap */
|
||||
if (!entry->pagelist)
|
||||
return NOPAGE_OOM; /* Nothing allocated */
|
||||
return NOPAGE_SIGBUS; /* Nothing allocated */
|
||||
|
||||
offset = address - vma->vm_start;
|
||||
map_offset = map->offset - (unsigned long)dev->sg->virtual;
|
||||
|
@ -446,8 +447,6 @@ static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
|
|||
return page;
|
||||
}
|
||||
|
||||
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
|
||||
|
||||
static struct page *drm_vm_nopage(struct vm_area_struct *vma,
|
||||
unsigned long address, int *type)
|
||||
{
|
||||
|
@ -481,34 +480,6 @@ static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
|
|||
}
|
||||
|
||||
|
||||
#else /* LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,0) */
|
||||
|
||||
static struct page *drm_vm_nopage(struct vm_area_struct *vma,
|
||||
unsigned long address, int unused)
|
||||
{
|
||||
return drm_do_vm_nopage(vma, address);
|
||||
}
|
||||
|
||||
static struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
|
||||
unsigned long address, int unused)
|
||||
{
|
||||
return drm_do_vm_shm_nopage(vma, address);
|
||||
}
|
||||
|
||||
static struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
|
||||
unsigned long address, int unused)
|
||||
{
|
||||
return drm_do_vm_dma_nopage(vma, address);
|
||||
}
|
||||
|
||||
static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
|
||||
unsigned long address, int unused)
|
||||
{
|
||||
return drm_do_vm_sg_nopage(vma, address);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
/** AGP virtual memory operations */
|
||||
static struct vm_operations_struct drm_vm_ops = {
|
||||
.nopage = drm_vm_nopage,
|
||||
|
@ -537,7 +508,7 @@ static struct vm_operations_struct drm_vm_sg_ops = {
|
|||
.close = drm_vm_close,
|
||||
};
|
||||
|
||||
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
|
||||
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21))
|
||||
static struct vm_operations_struct drm_vm_ttm_ops = {
|
||||
.nopage = drm_vm_ttm_nopage,
|
||||
.open = drm_vm_ttm_open_wrapper,
|
||||
|
@ -712,12 +683,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
|
|||
}
|
||||
|
||||
vma->vm_ops = &drm_vm_dma_ops;
|
||||
|
||||
#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
|
||||
vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
|
||||
#else
|
||||
vma->vm_flags |= VM_RESERVED; /* Don't swap */
|
||||
#endif
|
||||
|
||||
vma->vm_file = filp; /* Needed for drm_vm_open() */
|
||||
drm_vm_open(vma);
|
||||
|
@ -829,6 +795,7 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
|
|||
vma->vm_flags |= VM_IO; /* not in core dump */
|
||||
vma->vm_page_prot = drm_io_prot(map->type, vma);
|
||||
#ifdef __sparc__
|
||||
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
|
||||
if (io_remap_pfn_range(vma, vma->vm_start,
|
||||
(map->offset + offset) >>PAGE_SHIFT,
|
||||
vma->vm_end - vma->vm_start,
|
||||
|
@ -859,20 +826,12 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
|
|||
vma->vm_private_data = (void *)map;
|
||||
/* Don't let this area swap. Change when
|
||||
DRM_KERNEL advisory is supported. */
|
||||
#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
|
||||
vma->vm_flags |= VM_LOCKED;
|
||||
#else
|
||||
vma->vm_flags |= VM_RESERVED;
|
||||
#endif
|
||||
break;
|
||||
case _DRM_SCATTER_GATHER:
|
||||
vma->vm_ops = &drm_vm_sg_ops;
|
||||
vma->vm_private_data = (void *)map;
|
||||
#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
|
||||
vma->vm_flags |= VM_LOCKED;
|
||||
#else
|
||||
vma->vm_flags |= VM_RESERVED;
|
||||
#endif
|
||||
break;
|
||||
case _DRM_TTM: {
|
||||
vma->vm_ops = &drm_vm_ttm_ops;
|
||||
|
@ -891,11 +850,7 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
|
|||
default:
|
||||
return -EINVAL; /* This should never happen. */
|
||||
}
|
||||
#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
|
||||
vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
|
||||
#else
|
||||
vma->vm_flags |= VM_RESERVED; /* Don't swap */
|
||||
#endif
|
||||
|
||||
vma->vm_file = filp; /* Needed for drm_vm_open() */
|
||||
drm_vm_open(vma);
|
||||
|
|
|
@ -39,12 +39,6 @@
|
|||
#include "i810_drm.h"
|
||||
#include "i810_drv.h"
|
||||
|
||||
#ifdef DO_MUNMAP_4_ARGS
|
||||
#define DO_MUNMAP(m, a, l) do_munmap(m, a, l, 1)
|
||||
#else
|
||||
#define DO_MUNMAP(m, a, l) do_munmap(m, a, l)
|
||||
#endif
|
||||
|
||||
#define I810_BUF_FREE 2
|
||||
#define I810_BUF_CLIENT 1
|
||||
#define I810_BUF_HARDWARE 0
|
||||
|
@ -186,7 +180,7 @@ static int i810_unmap_buffer(drm_buf_t * buf)
|
|||
return -EINVAL;
|
||||
|
||||
down_write(¤t->mm->mmap_sem);
|
||||
retcode = DO_MUNMAP(current->mm,
|
||||
retcode = do_munmap(current->mm,
|
||||
(unsigned long)buf_priv->virtual,
|
||||
(size_t) buf->total);
|
||||
up_write(¤t->mm->mmap_sem);
|
||||
|
@ -244,8 +238,7 @@ static int i810_dma_cleanup(drm_device_t * dev)
|
|||
(drm_i810_private_t *) dev->dev_private;
|
||||
|
||||
if (dev_priv->ring.virtual_start) {
|
||||
drm_ioremapfree((void *)dev_priv->ring.virtual_start,
|
||||
dev_priv->ring.Size, dev);
|
||||
drm_core_ioremapfree(&dev_priv->ring.map, dev);
|
||||
}
|
||||
if (dev_priv->hw_status_page) {
|
||||
pci_free_consistent(dev->pdev, PAGE_SIZE,
|
||||
|
@ -261,9 +254,9 @@ static int i810_dma_cleanup(drm_device_t * dev)
|
|||
for (i = 0; i < dma->buf_count; i++) {
|
||||
drm_buf_t *buf = dma->buflist[i];
|
||||
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
|
||||
|
||||
if (buf_priv->kernel_virtual && buf->total)
|
||||
drm_ioremapfree(buf_priv->kernel_virtual,
|
||||
buf->total, dev);
|
||||
drm_core_ioremapfree(&buf_priv->map, dev);
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
|
@ -336,8 +329,15 @@ static int i810_freelist_init(drm_device_t * dev, drm_i810_private_t * dev_priv)
|
|||
|
||||
*buf_priv->in_use = I810_BUF_FREE;
|
||||
|
||||
buf_priv->kernel_virtual = drm_ioremap(buf->bus_address,
|
||||
buf->total, dev);
|
||||
buf_priv->map.offset = buf->bus_address;
|
||||
buf_priv->map.size = buf->total;
|
||||
buf_priv->map.type = _DRM_AGP;
|
||||
buf_priv->map.flags = 0;
|
||||
buf_priv->map.mtrr = 0;
|
||||
|
||||
drm_core_ioremap(&buf_priv->map, dev);
|
||||
buf_priv->kernel_virtual = buf_priv->map.handle;
|
||||
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@@ -388,18 +388,24 @@ static int i810_dma_initialize(drm_device_t * dev,
dev_priv->ring.End = init->ring_end;
dev_priv->ring.Size = init->ring_size;

dev_priv->ring.virtual_start = drm_ioremap(dev->agp->base +
init->ring_start,
init->ring_size, dev);
dev_priv->ring.map.offset = dev->agp->base + init->ring_start;
dev_priv->ring.map.size = init->ring_size;
dev_priv->ring.map.type = _DRM_AGP;
dev_priv->ring.map.flags = 0;
dev_priv->ring.map.mtrr = 0;

if (dev_priv->ring.virtual_start == NULL) {
drm_core_ioremap(&dev_priv->ring.map, dev);

if (dev_priv->ring.map.handle == NULL) {
dev->dev_private = (void *)dev_priv;
i810_dma_cleanup(dev);
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
return -ENOMEM;
return DRM_ERR(ENOMEM);
}

dev_priv->ring.virtual_start = dev_priv->ring.map.handle;

dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;

dev_priv->w = init->w;
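The i810 hunks above (and the i830 hunks below) replace the raw drm_ioremap()/drm_ioremapfree() calls with a drm_local_map_t embedded in the buffer and ring structures: the map is filled in, passed to drm_core_ioremap(), the resulting map.handle becomes the kernel virtual address, and cleanup later goes through drm_core_ioremapfree() on the same map. Condensed into a sketch with made-up helper names; the field assignments mirror the hunks:

#include "drmP.h"

/* Map an AGP range through a drm_local_map_t, as the converted code does. */
static void *example_map_agp_range(drm_device_t *dev, drm_local_map_t *map,
                                   unsigned long offset, unsigned long size)
{
    map->offset = offset;       /* bus address of the range */
    map->size = size;
    map->type = _DRM_AGP;
    map->flags = 0;
    map->mtrr = 0;

    drm_core_ioremap(map, dev); /* sets map->handle on success */
    return map->handle;         /* NULL means the ioremap failed */
}

/* ...and the matching teardown, as in i810_dma_cleanup(). */
static void example_unmap_agp_range(drm_device_t *dev, drm_local_map_t *map)
{
    if (map->handle)
        drm_core_ioremapfree(map, dev);
}

Keeping the map around is what lets the cleanup path free the mapping without remembering the original offset and size separately.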
@ -61,6 +61,7 @@ typedef struct drm_i810_buf_priv {
|
|||
int currently_mapped;
|
||||
void *virtual;
|
||||
void *kernel_virtual;
|
||||
drm_local_map_t map;
|
||||
} drm_i810_buf_priv_t;
|
||||
|
||||
typedef struct _drm_i810_ring_buffer {
|
||||
|
@ -72,6 +73,7 @@ typedef struct _drm_i810_ring_buffer {
|
|||
int head;
|
||||
int tail;
|
||||
int space;
|
||||
drm_local_map_t map;
|
||||
} drm_i810_ring_buffer_t;
|
||||
|
||||
typedef struct drm_i810_private {
|
||||
|
|
|
@ -41,12 +41,6 @@
|
|||
#include "i830_drm.h"
|
||||
#include "i830_drv.h"
|
||||
|
||||
#ifdef DO_MUNMAP_4_ARGS
|
||||
#define DO_MUNMAP(m, a, l) do_munmap(m, a, l, 1)
|
||||
#else
|
||||
#define DO_MUNMAP(m, a, l) do_munmap(m, a, l)
|
||||
#endif
|
||||
|
||||
#define I830_BUF_FREE 2
|
||||
#define I830_BUF_CLIENT 1
|
||||
#define I830_BUF_HARDWARE 0
|
||||
|
@ -174,7 +168,7 @@ static int i830_unmap_buffer(drm_buf_t * buf)
|
|||
return -EINVAL;
|
||||
|
||||
down_write(¤t->mm->mmap_sem);
|
||||
retcode = DO_MUNMAP(current->mm,
|
||||
retcode = do_munmap(current->mm,
|
||||
(unsigned long)buf_priv->virtual,
|
||||
(size_t) buf->total);
|
||||
up_write(¤t->mm->mmap_sem);
|
||||
|
@ -232,8 +226,7 @@ static int i830_dma_cleanup(drm_device_t * dev)
|
|||
(drm_i830_private_t *) dev->dev_private;
|
||||
|
||||
if (dev_priv->ring.virtual_start) {
|
||||
drm_ioremapfree((void *)dev_priv->ring.virtual_start,
|
||||
dev_priv->ring.Size, dev);
|
||||
drm_core_ioremapfree(&dev_priv->ring.map, dev);
|
||||
}
|
||||
if (dev_priv->hw_status_page) {
|
||||
pci_free_consistent(dev->pdev, PAGE_SIZE,
|
||||
|
@ -251,8 +244,7 @@ static int i830_dma_cleanup(drm_device_t * dev)
|
|||
drm_buf_t *buf = dma->buflist[i];
|
||||
drm_i830_buf_priv_t *buf_priv = buf->dev_private;
|
||||
if (buf_priv->kernel_virtual && buf->total)
|
||||
drm_ioremapfree(buf_priv->kernel_virtual,
|
||||
buf->total, dev);
|
||||
drm_core_ioremapfree(&buf_priv->map, dev);
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
|
@ -329,8 +321,14 @@ static int i830_freelist_init(drm_device_t * dev, drm_i830_private_t * dev_priv)
|
|||
|
||||
*buf_priv->in_use = I830_BUF_FREE;
|
||||
|
||||
buf_priv->kernel_virtual = drm_ioremap(buf->bus_address,
|
||||
buf->total, dev);
|
||||
buf_priv->map.offset = buf->bus_address;
|
||||
buf_priv->map.size = buf->total;
|
||||
buf_priv->map.type = _DRM_AGP;
|
||||
buf_priv->map.flags = 0;
|
||||
buf_priv->map.mtrr = 0;
|
||||
|
||||
drm_core_ioremap(&buf_priv->map, dev);
|
||||
buf_priv->kernel_virtual = buf_priv->map.handle;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@ -382,18 +380,24 @@ static int i830_dma_initialize(drm_device_t * dev,
|
|||
dev_priv->ring.End = init->ring_end;
|
||||
dev_priv->ring.Size = init->ring_size;
|
||||
|
||||
dev_priv->ring.virtual_start = drm_ioremap(dev->agp->base +
|
||||
init->ring_start,
|
||||
init->ring_size, dev);
|
||||
dev_priv->ring.map.offset = dev->agp->base + init->ring_start;
|
||||
dev_priv->ring.map.size = init->ring_size;
|
||||
dev_priv->ring.map.type = _DRM_AGP;
|
||||
dev_priv->ring.map.flags = 0;
|
||||
dev_priv->ring.map.mtrr = 0;
|
||||
|
||||
if (dev_priv->ring.virtual_start == NULL) {
|
||||
drm_core_ioremap(&dev_priv->ring.map, dev);
|
||||
|
||||
if (dev_priv->ring.map.handle == NULL) {
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
i830_dma_cleanup(dev);
|
||||
DRM_ERROR("can not ioremap virtual address for"
|
||||
" ring buffer\n");
|
||||
return -ENOMEM;
|
||||
return DRM_ERR(ENOMEM);
|
||||
}
|
||||
|
||||
dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
|
||||
|
||||
dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
|
||||
|
||||
dev_priv->w = init->w;
|
||||
|
|
|
@ -68,6 +68,7 @@ typedef struct drm_i830_buf_priv {
|
|||
int currently_mapped;
|
||||
void __user *virtual;
|
||||
void *kernel_virtual;
|
||||
drm_local_map_t map;
|
||||
} drm_i830_buf_priv_t;
|
||||
|
||||
typedef struct _drm_i830_ring_buffer {
|
||||
|
@ -79,6 +80,7 @@ typedef struct _drm_i830_ring_buffer {
|
|||
int head;
|
||||
int tail;
|
||||
int space;
|
||||
drm_local_map_t map;
|
||||
} drm_i830_ring_buffer_t;
|
||||
|
||||
typedef struct drm_i830_private {
|
||||
|
|
|
@@ -0,0 +1 @@
../shared-core/i915_dma.c
@@ -0,0 +1 @@
../shared-core/i915_drm.h
@@ -0,0 +1 @@
../shared-core/i915_drv.h
@@ -0,0 +1 @@
../shared-core/i915_irq.c
@@ -0,0 +1 @@
../shared-core/i915_mem.c
@@ -0,0 +1 @@
.
@@ -0,0 +1 @@
../shared-core/mach64_dma.c
@@ -0,0 +1 @@
../shared-core/mach64_drm.h
@@ -0,0 +1 @@
../shared-core/mach64_drv.h
@@ -0,0 +1 @@
../shared-core/mach64_irq.c
@@ -0,0 +1 @@
../shared-core/mach64_state.c
@@ -0,0 +1 @@
../shared-core/mga_dma.c
@@ -0,0 +1 @@
../shared-core/mga_drm.h
@@ -0,0 +1 @@
../shared-core/mga_drv.h
@@ -0,0 +1 @@
../shared-core/mga_irq.c
@@ -0,0 +1 @@
../shared-core/mga_state.c
@@ -0,0 +1 @@
../shared-core/mga_ucode.h
@@ -0,0 +1 @@
../shared-core/mga_warp.c
@@ -0,0 +1 @@
../shared-core/nouveau_drm.h
@ -0,0 +1,104 @@
|
|||
/*
|
||||
* Copyright 2005 Stephane Marchesin.
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "drmP.h"
|
||||
#include "drm.h"
|
||||
#include "nouveau_drv.h"
|
||||
|
||||
#include "drm_pciids.h"
|
||||
|
||||
static struct pci_device_id pciidlist[] = {
|
||||
nouveau_PCI_IDS
|
||||
};
|
||||
|
||||
extern drm_ioctl_desc_t nouveau_ioctls[];
|
||||
extern int nouveau_max_ioctl;
|
||||
|
||||
static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
|
||||
static struct drm_driver driver = {
|
||||
.driver_features =
|
||||
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
|
||||
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
|
||||
.load = nouveau_load,
|
||||
.firstopen = nouveau_firstopen,
|
||||
.lastclose = nouveau_lastclose,
|
||||
.unload = nouveau_unload,
|
||||
.preclose = nouveau_preclose,
|
||||
.irq_preinstall = nouveau_irq_preinstall,
|
||||
.irq_postinstall = nouveau_irq_postinstall,
|
||||
.irq_uninstall = nouveau_irq_uninstall,
|
||||
.irq_handler = nouveau_irq_handler,
|
||||
.reclaim_buffers = drm_core_reclaim_buffers,
|
||||
.get_map_ofs = drm_core_get_map_ofs,
|
||||
.get_reg_ofs = drm_core_get_reg_ofs,
|
||||
.ioctls = nouveau_ioctls,
|
||||
.fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = drm_open,
|
||||
.release = drm_release,
|
||||
.ioctl = drm_ioctl,
|
||||
.mmap = drm_mmap,
|
||||
.poll = drm_poll,
|
||||
.fasync = drm_fasync,
|
||||
#if defined(CONFIG_COMPAT) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9)
|
||||
.compat_ioctl = nouveau_compat_ioctl,
|
||||
#endif
|
||||
},
|
||||
.pci_driver = {
|
||||
.name = DRIVER_NAME,
|
||||
.id_table = pciidlist,
|
||||
.probe = probe,
|
||||
.remove = __devexit_p(drm_cleanup_pci),
|
||||
},
|
||||
|
||||
.name = DRIVER_NAME,
|
||||
.desc = DRIVER_DESC,
|
||||
.date = DRIVER_DATE,
|
||||
.major = DRIVER_MAJOR,
|
||||
.minor = DRIVER_MINOR,
|
||||
.patchlevel = DRIVER_PATCHLEVEL,
|
||||
};
|
||||
|
||||
static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||
{
|
||||
return drm_get_dev(pdev, ent, &driver);
|
||||
}
|
||||
|
||||
static int __init nouveau_init(void)
|
||||
{
|
||||
driver.num_ioctls = nouveau_max_ioctl;
|
||||
return drm_init(&driver, pciidlist);
|
||||
}
|
||||
|
||||
static void __exit nouveau_exit(void)
|
||||
{
|
||||
drm_exit(&driver);
|
||||
}
|
||||
|
||||
module_init(nouveau_init);
|
||||
module_exit(nouveau_exit);
|
||||
|
||||
MODULE_AUTHOR(DRIVER_AUTHOR);
|
||||
MODULE_DESCRIPTION(DRIVER_DESC);
|
||||
MODULE_LICENSE("GPL and additional rights");
|
|
@@ -0,0 +1 @@
../shared-core/nouveau_drv.h
@@ -0,0 +1 @@
../shared-core/nouveau_fifo.c
@ -0,0 +1,73 @@
|
|||
/**
|
||||
* \file mga_ioc32.c
|
||||
*
|
||||
* 32-bit ioctl compatibility routines for the MGA DRM.
|
||||
*
|
||||
* \author Dave Airlie <airlied@linux.ie> with code from patches by Egbert Eich
|
||||
*
|
||||
*
|
||||
* Copyright (C) Paul Mackerras 2005
|
||||
* Copyright (C) Egbert Eich 2003,2004
|
||||
* Copyright (C) Dave Airlie 2005
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
|
||||
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
* IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <linux/compat.h>
|
||||
#include <linux/ioctl32.h>
|
||||
|
||||
#include "drmP.h"
|
||||
#include "drm.h"
|
||||
|
||||
#include "nouveau_drm.h"
|
||||
|
||||
/**
|
||||
* Called whenever a 32-bit process running under a 64-bit kernel
|
||||
* performs an ioctl on /dev/dri/card<n>.
|
||||
*
|
||||
* \param filp file pointer.
|
||||
* \param cmd command.
|
||||
* \param arg user argument.
|
||||
* \return zero on success or negative number on failure.
|
||||
*/
|
||||
long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
|
||||
unsigned long arg)
|
||||
{
|
||||
unsigned int nr = DRM_IOCTL_NR(cmd);
|
||||
drm_ioctl_compat_t *fn = NULL;
|
||||
int ret;
|
||||
|
||||
if (nr < DRM_COMMAND_BASE)
|
||||
return drm_compat_ioctl(filp, cmd, arg);
|
||||
|
||||
#if 0
|
||||
if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
|
||||
fn = nouveau_compat_ioctls[nr - DRM_COMMAND_BASE];
|
||||
#endif
|
||||
lock_kernel(); /* XXX for now */
|
||||
if (fn != NULL)
|
||||
ret = (*fn)(filp, cmd, arg);
|
||||
else
|
||||
ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
|
||||
unlock_kernel();
|
||||
|
||||
return ret;
|
||||
}
|
|
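nouveau_ioc32.c above currently forwards every driver-range ioctl through the generic path: the table lookup is under #if 0 and still references mga_compat_ioctls, a leftover from the file it was adapted from. If per-ioctl translation is added later, it would presumably be wired up like the other *_ioc32.c files; a purely hypothetical sketch (DRM_NOUVEAU_EXAMPLE and compat_nouveau_example() do not exist in the tree):

#include <linux/compat.h>
#include "drmP.h"
#include "drm.h"

#define DRM_NOUVEAU_EXAMPLE 0x00        /* made-up ioctl number */

static int compat_nouveau_example(struct file *file, unsigned int cmd,
                                  unsigned long arg)
{
    /* A real handler would repack the 32-bit argument struct into its
     * 64-bit layout before forwarding; this placeholder just forwards. */
    return drm_ioctl(file->f_dentry->d_inode, file, cmd, arg);
}

drm_ioctl_compat_t *nouveau_compat_ioctls[] = {
    [DRM_NOUVEAU_EXAMPLE] = compat_nouveau_example,
};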
@@ -0,0 +1 @@
../shared-core/nouveau_irq.c
@@ -0,0 +1 @@
../shared-core/nouveau_mem.c
@@ -0,0 +1 @@
../shared-core/nouveau_object.c
@@ -0,0 +1 @@
../shared-core/nouveau_reg.h
@@ -0,0 +1 @@
../shared-core/nouveau_state.c
@@ -0,0 +1 @@
../shared-core/nv40_graph.c
@@ -0,0 +1 @@
../shared-core/nv_drv.h
@@ -0,0 +1 @@
../shared-core/r128_cce.c
@@ -0,0 +1 @@
../shared-core/r128_drm.h
@@ -0,0 +1 @@
../shared-core/r128_drv.h
@@ -0,0 +1 @@
../shared-core/r128_irq.c
@@ -0,0 +1 @@
../shared-core/r128_state.c
@@ -0,0 +1 @@
../shared-core/r300_cmdbuf.c
@@ -0,0 +1 @@
../shared-core/r300_reg.h
@@ -0,0 +1 @@
../shared-core/radeon_cp.c
@@ -0,0 +1 @@
../shared-core/radeon_drm.h
@@ -0,0 +1 @@
../shared-core/radeon_drv.h
@@ -0,0 +1 @@
../shared-core/radeon_irq.c
@@ -0,0 +1 @@
../shared-core/radeon_mem.c
@@ -0,0 +1 @@
../shared-core/radeon_state.c
@@ -0,0 +1 @@
../shared-core/savage_bci.c
@@ -0,0 +1 @@
../shared-core/savage_drm.h
@@ -0,0 +1 @@
../shared-core/savage_drv.h
@@ -0,0 +1 @@
../shared-core/savage_state.c
@@ -0,0 +1 @@
../shared-core/sis_drm.h
@@ -0,0 +1 @@
../shared-core/sis_drv.h
@@ -36,11 +36,7 @@
#include "sis_drv.h"

#if defined(__linux__)
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
#include <video/sisfb.h>
#else
#include <linux/sisfb.h>
#endif
#endif

#define VIDEO_TYPE 0
@@ -0,0 +1 @@
../shared-core/tdfx_drv.h
@@ -0,0 +1 @@
../shared-core/via_3d_reg.h
@@ -0,0 +1 @@
../shared-core/via_dma.c
@@ -217,7 +217,9 @@ via_fire_dmablit(drm_device_t *dev, drm_via_sg_info_t *vsg, int engine)
VIA_WRITE(VIA_PCI_DMA_MR0 + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
VIA_WRITE(VIA_PCI_DMA_BCR0 + engine*0x10, 0);
VIA_WRITE(VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start);
DRM_WRITEMEMORYBARRIER();
VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04);
}

/*
@@ -496,10 +498,18 @@ via_dmablit_timer(unsigned long data)


static void
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
via_dmablit_workqueue(void *data)
#else
via_dmablit_workqueue(struct work_struct *work)
#endif
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
drm_via_blitq_t *blitq = (drm_via_blitq_t *) data;
drm_device_t *dev = blitq->dev;
#else
drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq);
#endif
drm_device_t *dev = blitq->dev;
unsigned long irqsave;
drm_via_sg_info_t *cur_sg;
int cur_released;

@@ -562,12 +572,16 @@ via_init_dmablit(drm_device_t *dev)
blitq->num_outstanding = 0;
blitq->is_active = 0;
blitq->aborting = 0;
blitq->blit_lock = SPIN_LOCK_UNLOCKED;
spin_lock_init(&blitq->blit_lock);
for (j=0; j<VIA_NUM_BLIT_SLOTS; ++j) {
DRM_INIT_WAITQUEUE(blitq->blit_queue + j);
}
DRM_INIT_WAITQUEUE(&blitq->busy_queue);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
INIT_WORK(&blitq->wq, via_dmablit_workqueue, blitq);
#else
INIT_WORK(&blitq->wq, via_dmablit_workqueue);
#endif
init_timer(&blitq->poll_timer);
blitq->poll_timer.function = &via_dmablit_timer;
blitq->poll_timer.data = (unsigned long) blitq;
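These two hunks adapt the blit workqueue to the 2.6.20 workqueue API, where the callback receives the struct work_struct itself and recovers its context with container_of() instead of taking a void * data argument from the three-argument INIT_WORK(); the init hunk also swaps the SPIN_LOCK_UNLOCKED initializer for spin_lock_init(), the form lockdep-enabled kernels require. The same pattern in isolation, with made-up names (my_ctx, my_work_fn):

#include <linux/version.h>
#include <linux/workqueue.h>

struct my_ctx {
    int pending;
    struct work_struct wq;      /* embedded work item, as in drm_via_blitq_t */
};

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
static void my_work_fn(void *data)
{
    struct my_ctx *ctx = data;  /* context arrives via INIT_WORK's third argument */
    ctx->pending = 0;
}
#else
static void my_work_fn(struct work_struct *work)
{
    /* context is recovered from the embedded work_struct */
    struct my_ctx *ctx = container_of(work, struct my_ctx, wq);
    ctx->pending = 0;
}
#endif

static void my_ctx_init(struct my_ctx *ctx)
{
    ctx->pending = 1;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
    INIT_WORK(&ctx->wq, my_work_fn, ctx);       /* old three-argument form */
#else
    INIT_WORK(&ctx->wq, my_work_fn);            /* data pointer dropped in 2.6.20 */
#endif
}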
@@ -0,0 +1 @@
../shared-core/via_drm.h
@@ -0,0 +1 @@
../shared-core/via_drv.c
@@ -0,0 +1 @@
../shared-core/via_drv.h
@@ -0,0 +1 @@
../shared-core/via_irq.c
@@ -0,0 +1 @@
../shared-core/via_map.c
@@ -0,0 +1 @@
../shared-core/via_verifier.c
@@ -0,0 +1 @@
../shared-core/via_verifier.h
@@ -0,0 +1 @@
../shared-core/via_video.c
@ -27,3 +27,25 @@ cp linux-core/Makefile.kernel $OUTDIR/Makefile
|
|||
echo "Copying 2.6 Kernel files"
|
||||
cp linux-core/Kconfig $OUTDIR/
|
||||
|
||||
cd $OUTDIR
|
||||
|
||||
rm via_ds.[ch]
|
||||
for i in via*.[ch]
|
||||
do
|
||||
unifdef -D__linux__ -DVIA_HAVE_DMABLIT -DVIA_HAVE_CORE_MM $i > $i.tmp
|
||||
mv $i.tmp $i
|
||||
done
|
||||
|
||||
rm sis_ds.[ch]
|
||||
for i in sis*.[ch]
|
||||
do
|
||||
unifdef -D__linux__ -DVIA_HAVE_DMABLIT -DSIS_HAVE_CORE_MM $i > $i.tmp
|
||||
mv $i.tmp $i
|
||||
done
|
||||
|
||||
for i in i915*.[ch]
|
||||
do
|
||||
unifdef -D__linux__ -DI915_HAVE_FENCE -DI915_HAVE_BUFFER $i > $i.tmp
|
||||
mv $i.tmp $i
|
||||
done
|
||||
cd -
|
||||
|
|
|
@ -29,6 +29,7 @@ klibdrminclude_HEADERS = \
|
|||
i915_drm.h \
|
||||
mach64_drm.h \
|
||||
mga_drm.h \
|
||||
nouveau_drm.h \
|
||||
r128_drm.h \
|
||||
radeon_drm.h \
|
||||
savage_drm.h \
|
||||
|
|
|
@ -219,7 +219,9 @@
|
|||
0x1106 0x3122 0 "VIA CLE266"
|
||||
0x1106 0x7205 0 "VIA KM400"
|
||||
0x1106 0x3108 0 "VIA K8M800"
|
||||
0x1106 0x3344 0 "VIA P4VM800PRO"
|
||||
0x1106 0x3344 0 "VIA CN700 / VM800 / P4M800Pro"
|
||||
0x1106 0x3343 0 "VIA P4M890"
|
||||
0x1106 0x3230 VIA_DX9_0 "VIA K8M890"
|
||||
|
||||
[i810]
|
||||
0x8086 0x7121 0 "Intel i810 GMCH"
|
||||
|
@ -463,3 +465,233 @@
|
|||
0x10DE 0x009C NV40 "NVidia 0x009C"
|
||||
0x10DE 0x009D NV40 "NVidia Quadro FX 4500"
|
||||
0x10DE 0x009E NV40 "NVidia 0x009E"
|
||||
|
||||
[nouveau]
|
||||
0x10de 0x0008 NV_03 "EDGE 3D"
|
||||
0x10de 0x0009 NV_03 "EDGE 3D"
|
||||
0x10de 0x0010 NV_03 "Mutara V08"
|
||||
0x10de 0x0020 NV_04 "RIVA TNT"
|
||||
0x10de 0x0028 NV_04 "RIVA TNT2/TNT2 Pro"
|
||||
0x10de 0x0029 NV_04 "RIVA TNT2 Ultra"
|
||||
0x10de 0x002a NV_04 "Riva TnT2"
|
||||
0x10de 0x002b NV_04 "Riva TnT2"
|
||||
0x10de 0x002c NV_04 "Vanta/Vanta LT"
|
||||
0x10de 0x002d NV_04 "RIVA TNT2 Model 64/Model 64 Pro"
|
||||
0x10de 0x002e NV_04 "Vanta"
|
||||
0x10de 0x002f NV_04 "Vanta"
|
||||
0x10de 0x0040 NV_40 "GeForce 6800 Ultra"
|
||||
0x10de 0x0041 NV_40 "GeForce 6800"
|
||||
0x10de 0x0042 NV_40 "GeForce 6800 LE"
|
||||
0x10de 0x0043 NV_40 "NV40.3"
|
||||
0x10de 0x0044 NV_40 "GeForce 6800 XT"
|
||||
0x10de 0x0045 NV_40 "GeForce 6800 GT"
|
||||
0x10de 0x0046 NV_40 "GeForce 6800 GT"
|
||||
0x10de 0x0047 NV_40 "GeForce 6800 GS"
|
||||
0x10de 0x0048 NV_40 "GeForce 6800 XT"
|
||||
0x10de 0x0049 NV_40 "NV40GL"
|
||||
0x10de 0x004d NV_40 "Quadro FX 4000"
|
||||
0x10de 0x004e NV_40 "Quadro FX 4000"
|
||||
0x10de 0x0090 NV_40 "GeForce 7800 GTX"
|
||||
0x10de 0x0091 NV_40 "GeForce 7800 GTX"
|
||||
0x10de 0x0092 NV_40 "GeForce 7800 GT"
|
||||
0x10de 0x0093 NV_40 "GeForce 7800 GS"
|
||||
0x10de 0x0098 NV_40 "GeForce Go 7800"
|
||||
0x10de 0x0099 NV_40 "GE Force Go 7800 GTX"
|
||||
0x10de 0x009d NV_40 "Quadro FX4500"
|
||||
0x10de 0x00a0 NV_04 "Aladdin TNT2"
|
||||
0x10de 0x00c0 NV_40 "GeForce 6800 GS"
|
||||
0x10de 0x00c1 NV_40 "GeForce 6800"
|
||||
0x10de 0x00c2 NV_40 "GeForce 6800 LE"
|
||||
0x10de 0x00c3 NV_40 "Geforce 6800 XT"
|
||||
0x10de 0x00c8 NV_40 "GeForce Go 6800"
|
||||
0x10de 0x00c9 NV_40 "GeForce Go 6800 Ultra"
|
||||
0x10de 0x00cc NV_40 "Quadro FX Go1400"
|
||||
0x10de 0x00cd NV_40 "Quadro FX 3450/4000 SDI"
|
||||
0x10de 0x00ce NV_40 "Quadro FX 1400"
|
||||
0x10de 0x00f0 NV_40 "GeForce 6800/GeForce 6800 Ultra"
|
||||
0x10de 0x00f1 NV_40 "GeForce 6600/GeForce 6600 GT"
|
||||
0x10de 0x00f2 NV_40 "GeForce 6600/GeForce 6600 GT"
|
||||
0x10de 0x00f3 NV_40 "GeForce 6200"
|
||||
0x10de 0x00f4 NV_40 "GeForce 6600 LE"
|
||||
0x10de 0x00f5 NV_40 "GeForce 7800 GS"
|
||||
0x10de 0x00f6 NV_40 "GeForce 6600 GS"
|
||||
0x10de 0x00f8 NV_40 "Quadro FX 3400/4400"
|
||||
0x10de 0x00f9 NV_40 "GeForce 6800 Ultra/GeForce 6800 GT"
|
||||
0x10de 0x00fa NV_30 "GeForce PCX 5750"
|
||||
0x10de 0x00fb NV_30 "GeForce PCX 5900"
|
||||
0x10de 0x00fc NV_30 "Quadro FX 330/GeForce PCX 5300"
|
||||
0x10de 0x00fd NV_30 "Quadro FX 330/Quadro NVS280"
|
||||
0x10de 0x00fe NV_30 "Quadro FX 1300"
|
||||
0x10de 0x00ff NV_17 "GeForce PCX 4300"
|
||||
0x10de 0x0100 NV_10 "GeForce 256 SDR"
|
||||
0x10de 0x0101 NV_10 "GeForce 256 DDR"
|
||||
0x10de 0x0103 NV_10 "Quadro"
|
||||
0x10de 0x0110 NV_11 "GeForce2 MX/MX 400"
|
||||
0x10de 0x0111 NV_11 "GeForce2 MX 100 DDR/200 DDR"
|
||||
0x10de 0x0112 NV_11 "GeForce2 Go"
|
||||
0x10de 0x0113 NV_11 "Quadro2 MXR/EX/Go"
|
||||
0x10de 0x0140 NV_40 "GeForce 6600 GT"
|
||||
0x10de 0x0141 NV_40 "GeForce 6600"
|
||||
0x10de 0x0142 NV_40 "GeForce 6600 PCIe"
|
||||
0x10de 0x0144 NV_40 "GeForce Go 6600"
|
||||
0x10de 0x0145 NV_40 "GeForce 6610 XL"
|
||||
0x10de 0x0146 NV_40 "Geforce Go 6600TE/6200TE"
|
||||
0x10de 0x0148 NV_40 "GeForce Go 6600"
|
||||
0x10de 0x0149 NV_40 "GeForce Go 6600 GT"
|
||||
0x10de 0x014a NV_40 "Quadro NVS 440"
|
||||
0x10de 0x014d NV_17 "Quadro FX 550"
|
||||
0x10de 0x014e NV_40 "Quadro FX 540"
|
||||
0x10de 0x014f NV_40 "GeForce 6200"
|
||||
0x10de 0x0150 NV_15 "GeForce2 GTS/Pro"
|
||||
0x10de 0x0151 NV_15 "GeForce2 Ti"
|
||||
0x10de 0x0152 NV_15 "GeForce2 Ultra, Bladerunner"
|
||||
0x10de 0x0153 NV_15 "Quadro2 Pro"
|
||||
0x10de 0x0161 NV_44 "GeForce 6200 TurboCache(TM)"
|
||||
0x10de 0x0162 NV_44 "GeForce 6200 SE TurboCache (TM)"
|
||||
0x10de 0x0163 NV_44 "GeForce 6200 LE"
|
||||
0x10de 0x0164 NV_44 "GeForce Go 6200"
|
||||
0x10de 0x0165 NV_44 "Quadro NVS 285"
|
||||
0x10de 0x0166 NV_44 "GeForce Go 6400"
|
||||
0x10de 0x0167 NV_44 "GeForce Go 6200 TurboCache"
|
||||
0x10de 0x0168 NV_44 "GeForce Go 6200 TurboCache"
|
||||
0x10de 0x0170 NV_17 "GeForce4 MX 460"
|
||||
0x10de 0x0171 NV_17 "GeForce4 MX 440"
|
||||
0x10de 0x0172 NV_17 "GeForce4 MX 420"
|
||||
0x10de 0x0173 NV_17 "GeForce4 MX 440-SE"
|
||||
0x10de 0x0174 NV_17 "GeForce4 440 Go"
|
||||
0x10de 0x0175 NV_17 "GeForce4 420 Go"
|
||||
0x10de 0x0176 NV_17 "GeForce4 420 Go 32M"
|
||||
0x10de 0x0177 NV_17 "GeForce4 460 Go"
|
||||
0x10de 0x0178 NV_17 "Quadro4 550 XGL"
|
||||
0x10de 0x0179 NV_17 "GeForce4 420 Go 32M"
|
||||
0x10de 0x017a NV_17 "Quadro4 200/400 NVS"
|
||||
0x10de 0x017b NV_17 "Quadro4 550 XGL"
|
||||
0x10de 0x017c NV_17 "Quadro4 500 GoGL"
|
||||
0x10de 0x017d NV_17 "GeForce4 410 Go 16M"
|
||||
0x10de 0x0181 NV_17 "GeForce4 MX 440 AGP 8x"
|
||||
0x10de 0x0182 NV_17 "GeForce4 MX 440SE AGP 8x"
|
||||
0x10de 0x0183 NV_17 "GeForce4 MX 420 AGP 8x"
|
||||
0x10de 0x0185 NV_17 "GeForce4 MX 4000 AGP 8x"
|
||||
0x10de 0x0186 NV_17 "GeForce4 448 Go"
|
||||
0x10de 0x0187 NV_17 "GeForce4 488 Go"
|
||||
0x10de 0x0188 NV_17 "Quadro4 580 XGL"
|
||||
0x10de 0x018a NV_17 "Quadro4 NVS AGP 8x"
|
||||
0x10de 0x018b NV_17 "Quadro4 380 XGL"
|
||||
0x10de 0x018c NV_17 "Quadro NVS 50 PCI"
|
||||
0x10de 0x018d NV_17 "GeForce4 448 Go"
|
||||
0x10de 0x0191 NV_50 "GeForce 8800 GTX"
|
||||
0x10de 0x0193 NV_50 "GeForce 8800 GTS"
|
||||
0x10de 0x01a0 NV_11|NV_NFORCE "GeForce2 MX Integrated Graphics"
|
||||
0x10de 0x01d1 NV_44 "GeForce 7300 LE"
|
||||
0x10de 0x01d6 NV_44 "GeForce Go 7200"
|
||||
0x10de 0x01d7 NV_44 "Quadro NVS 110M / GeForce Go 7300"
|
||||
0x10de 0x01d8 NV_44 "GeForce Go 7400"
|
||||
0x10de 0x01da NV_44 "Quadro NVS 110M"
|
||||
0x10de 0x01df NV_44 "GeForce 7300 GS"
|
||||
0x10de 0x01f0 NV_17|NV_NFORCE2 "GeForce4 MX - nForce GPU"
|
||||
0x10de 0x0200 NV_20 "GeForce3"
|
||||
0x10de 0x0201 NV_20 "GeForce3 Ti 200"
|
||||
0x10de 0x0202 NV_20 "GeForce3 Ti 500"
|
||||
0x10de 0x0203 NV_20 "Quadro DCC"
|
||||
0x10de 0x0211 NV_40 "GeForce 6800"
|
||||
0x10de 0x0212 NV_40 "GeForce 6800 LE"
|
||||
0x10de 0x0215 NV_40 "GeForce 6800 GT"
|
||||
0x10de 0x0218 NV_40 "GeForce 6800 XT"
|
||||
0x10de 0x0221 NV_44 "GeForce 6200"
|
||||
0x10de 0x0240 NV_44 "GeForce 6150"
|
||||
0x10de 0x0242 NV_44 "GeForce 6100"
|
||||
0x10de 0x0250 NV_25 "GeForce4 Ti 4600"
|
||||
0x10de 0x0251 NV_25 "GeForce4 Ti 4400"
|
||||
0x10de 0x0252 NV_25 "GeForce4 Ti"
|
||||
0x10de 0x0253 NV_25 "GeForce4 Ti 4200"
|
||||
0x10de 0x0258 NV_25 "Quadro4 900 XGL"
|
||||
0x10de 0x0259 NV_25 "Quadro4 750 XGL"
|
||||
0x10de 0x025b NV_25 "Quadro4 700 XGL"
|
||||
0x10de 0x0280 NV_25 "GeForce4 Ti 4800"
|
||||
0x10de 0x0281 NV_25 "GeForce4 Ti 4200 AGP 8x"
|
||||
0x10de 0x0282 NV_25 "GeForce4 Ti 4800 SE"
|
||||
0x10de 0x0286 NV_25 "GeForce4 Ti 4200 Go AGP 8x"
|
||||
0x10de 0x0288 NV_25 "Quadro4 980 XGL"
|
||||
0x10de 0x0289 NV_25 "Quadro4 780 XGL"
|
||||
0x10de 0x028c NV_25 "Quadro4 700 GoGL"
|
||||
0x10de 0x0290 NV_40 "GeForce 7900 GTX"
|
||||
0x10de 0x0291 NV_40 "GeForce 7900 GT"
|
||||
0x10de 0x0292 NV_40 "GeForce 7900 GS"
|
||||
0x10de 0x0298 NV_40 "GeForce Go 7900 GS"
|
||||
0x10de 0x0299 NV_40 "GeForce Go 7900 GTX"
|
||||
0x10de 0x029a NV_40 "Quadro FX 2500M"
|
||||
0x10de 0x029b NV_40 "Quadro FX 1500M"
|
||||
0x10de 0x029c NV_40 "Quadro FX 5500"
|
||||
0x10de 0x029d NV_40 "Quadro FX 3500"
|
||||
0x10de 0x029e NV_40 "Quadro FX 1500"
|
||||
0x10de 0x029f NV_40 "Quadro FX 4500 X2"
|
||||
0x10de 0x02a0 NV_20 "XGPU"
|
||||
0x10de 0x02e1 NV_40 "GeForce 7600 GS"
|
||||
0x10de 0x0300 NV_30 "GeForce FX"
|
||||
0x10de 0x0301 NV_30 "GeForce FX 5800 Ultra"
|
||||
0x10de 0x0302 NV_30 "GeForce FX 5800"
|
||||
0x10de 0x0308 NV_30 "Quadro FX 2000"
|
||||
0x10de 0x0309 NV_30 "Quadro FX 1000"
|
||||
0x10de 0x0311 NV_30 "GeForce FX 5600 Ultra"
|
||||
0x10de 0x0312 NV_30 "GeForce FX 5600"
|
||||
0x10de 0x0313 NV_30 "NV31"
|
||||
0x10de 0x0314 NV_30 "GeForce FX 5600XT"
|
||||
0x10de 0x0316 NV_30 "NV31M"
|
||||
0x10de 0x0317 NV_30 "NV31M Pro"
|
||||
0x10de 0x031a NV_30 "GeForce FX Go5600"
|
||||
0x10de 0x031b NV_30 "GeForce FX Go5650"
|
||||
0x10de 0x031d NV_30 "NV31GLM"
|
||||
0x10de 0x031e NV_30 "NV31GLM Pro"
|
||||
0x10de 0x031f NV_30 "NV31GLM Pro"
|
||||
0x10de 0x0320 NV_34 "GeForce FX 5200"
|
||||
0x10de 0x0321 NV_34 "GeForce FX 5200 Ultra"
|
||||
0x10de 0x0322 NV_34 "GeForce FX 5200"
|
||||
0x10de 0x0323 NV_34 "GeForce FX 5200LE"
|
||||
0x10de 0x0324 NV_34 "GeForce FX Go5200"
|
||||
0x10de 0x0325 NV_34 "GeForce FX Go5250"
|
||||
0x10de 0x0326 NV_34 "GeForce FX 5500"
|
||||
0x10de 0x0327 NV_34 "GeForce FX 5100"
|
||||
0x10de 0x0328 NV_34 "GeForce FX Go5200 32M/64M"
|
||||
0x10de 0x0329 NV_34 "GeForce FX Go5200"
|
||||
0x10de 0x032a NV_34 "Quadro NVS 280 PCI"
|
||||
0x10de 0x032b NV_34 "Quadro FX 500/600 PCI"
|
||||
0x10de 0x032c NV_34 "GeForce FX Go 5300"
|
||||
0x10de 0x032d NV_34 "GeForce FX Go5100"
|
||||
0x10de 0x032f NV_34 "NV34GL"
|
||||
0x10de 0x0330 NV_30 "GeForce FX 5900 Ultra"
|
||||
0x10de 0x0331 NV_30 "GeForce FX 5900"
|
||||
0x10de 0x0332 NV_30 "GeForce FX 5900XT"
|
||||
0x10de 0x0333 NV_30 "GeForce FX 5950 Ultra"
|
||||
0x10de 0x0334 NV_30 "GeForce FX 5900ZT"
|
||||
0x10de 0x0338 NV_30 "Quadro FX 3000"
|
||||
0x10de 0x033f NV_30 "Quadro FX 700"
|
||||
0x10de 0x0341 NV_30 "GeForce FX 5700 Ultra"
|
||||
0x10de 0x0342 NV_30 "GeForce FX 5700"
|
||||
0x10de 0x0343 NV_30 "GeForce FX 5700LE"
|
||||
0x10de 0x0344 NV_30 "GeForce FX 5700VE"
|
||||
0x10de 0x0345 NV_30 "NV36.5"
|
||||
0x10de 0x0347 NV_30 "GeForce FX Go5700"
|
||||
0x10de 0x0348 NV_30 "GeForce FX Go5700"
|
||||
0x10de 0x0349 NV_30 "NV36M Pro"
|
||||
0x10de 0x034b NV_30 "NV36MAP"
|
||||
0x10de 0x034c NV_30 "Quadro FX Go1000"
|
||||
0x10de 0x034e NV_30 "Quadro FX 1100"
|
||||
0x10de 0x034f NV_30 "NV36GL"
|
||||
0x10de 0x0391 NV_40 "GeForce 7600 GT"
|
||||
0x10de 0x0392 NV_40 "GeForce 7600 GS"
|
||||
0x10de 0x0393 NV_40 "GeForce 7300 GT"
|
||||
0x10de 0x0398 NV_40 "GeForce Go 7600"
|
||||
0x10de 0x03d0 NV_44 "GeForce 6100 nForce 430"
|
||||
0x10de 0x03d1 NV_44 "GeForce 6100 nForce 405"
|
||||
0x10de 0x03d2 NV_44 "GeForce 6100 nForce 400"
|
||||
0x10de 0x03d5 NV_44 "GeForce 6100 nForce 420"
|
||||
0x12d2 0x0008 NV_03 "NV1"
|
||||
0x12d2 0x0009 NV_03 "DAC64"
|
||||
0x12d2 0x0018 NV_03 "Riva128"
|
||||
0x12d2 0x0019 NV_03 "Riva128ZX"
|
||||
0x12d2 0x0020 NV_04 "TNT"
|
||||
0x12d2 0x0028 NV_04 "TNT2"
|
||||
0x12d2 0x0029 NV_04 "UTNT2"
|
||||
0x12d2 0x002c NV_04 "VTNT2"
|
||||
0x12d2 0x00a0 NV_04 "ITNT2"
|
||||
|
||||
|
|
Some files were not shown because too many files have changed in this diff.