Merge branch 'origin' into bo-set-pin

main
Eric Anholt 2007-09-19 15:55:58 -07:00
commit 3d3a96ad4e
67 changed files with 6927 additions and 857 deletions

3
.gitignore vendored
View File

@ -55,6 +55,9 @@ tests/auth
tests/dristat
tests/drmstat
tests/getclient
tests/getstats
tests/getversion
tests/lock
tests/openclose
tests/setversion
tests/updatedraw

View File

@ -1,6 +1,3 @@
/* ati_pcigart.h -- ATI PCI GART support -*- linux-c -*-
* Created: Wed Dec 13 21:52:19 2000 by gareth@valinux.com
*/
/*-
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
@ -29,6 +26,11 @@
*
*/
/** @file ati_pcigart.c
* Implementation of ATI's PCIGART, which provides an aperture in card virtual
* address space with addresses remapped to system memory.
*/
#include "drmP.h"
#define ATI_PCIGART_PAGE_SIZE 4096 /* PCI GART page size */

View File

@ -1,6 +1,3 @@
/* drm_agpsupport.h -- DRM support for AGP/GART backend -*- linux-c -*-
* Created: Mon Dec 13 09:56:45 1999 by faith@precisioninsight.com
*/
/*-
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
@ -31,6 +28,11 @@
*
*/
/** @file drm_agpsupport.c
* Support code for tying the kernel AGP support to DRM drivers and
* the DRM's AGP ioctls.
*/
#include "drmP.h"
#ifdef __FreeBSD__
@ -182,7 +184,6 @@ int drm_agp_enable(drm_device_t *dev, drm_agp_mode_t mode)
dev->agp->mode = mode.mode;
agp_enable(dev->agp->agpdev, mode.mode);
dev->agp->base = dev->agp->info.ai_aperture_base;
dev->agp->enabled = 1;
return 0;
}
@ -403,6 +404,7 @@ drm_agp_head_t *drm_agp_init(void)
return NULL;
head->agpdev = agpdev;
agp_get_info(agpdev, &head->info);
head->base = head->info.ai_aperture_base;
head->memory = NULL;
DRM_INFO("AGP at 0x%08lx %dMB\n",
(long)head->info.ai_aperture_base,

View File

@ -1,6 +1,3 @@
/* drm_auth.c -- IOCTLs for authentication -*- linux-c -*-
* Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
*/
/*-
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
@ -31,6 +28,11 @@
*
*/
/** @file drm_auth.c
* Implementation of the get/authmagic ioctls implementing the authentication
* scheme between the master and clients.
*/
#include "drmP.h"
static int drm_hash_magic(drm_magic_t magic)

View File

@ -1,6 +1,3 @@
/* drm_bufs.h -- Generic buffer template -*- linux-c -*-
* Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
*/
/*-
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
@ -31,6 +28,10 @@
*
*/
/** @file drm_bufs.c
* Implementation of the ioctls for setup of DRM mappings and DMA buffers.
*/
#include "dev/pci/pcireg.h"
#include "drmP.h"
@ -190,7 +191,17 @@ int drm_addmap(drm_device_t * dev, unsigned long offset, unsigned long size,
break;
case _DRM_AGP:
/*valid = 0;*/
map->offset += dev->agp->base;
/* In some cases (i810 driver), user space may have already
* added the AGP base itself, because dev->agp->base previously
* only got set during AGP enable. So, only add the base
* address if the map's offset isn't already within the
* aperture.
*/
if (map->offset < dev->agp->base ||
map->offset > dev->agp->base +
dev->agp->info.ai_aperture_size - 1) {
map->offset += dev->agp->base;
}
map->mtrr = dev->agp->mtrr; /* for getmap */
/*for (entry = dev->agp->memory; entry; entry = entry->next) {
if ((map->offset >= entry->bound) &&

View File

@ -1,6 +1,3 @@
/* drm_context.h -- IOCTLs for generic contexts -*- linux-c -*-
* Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com
*/
/*-
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
@ -31,6 +28,10 @@
*
*/
/** @file drm_context.c
* Implementation of the context management ioctls.
*/
#include "drmP.h"
/* ================================================================

View File

@ -1,6 +1,3 @@
/* drm_dma.c -- DMA IOCTL and function support -*- linux-c -*-
* Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
*/
/*-
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
@ -31,6 +28,14 @@
*
*/
/** @file drm_dma.c
* Support code for DMA buffer management.
*
* The implementation used to be significantly more complicated, but the
* complexity has been moved into the drivers as different buffer management
* schemes evolved.
*/
#include "drmP.h"
int drm_dma_setup(drm_device_t *dev)

View File

@ -1,6 +1,3 @@
/* drm_drawable.h -- IOCTLs for drawables -*- linux-c -*-
* Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
*/
/*-
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
@ -31,6 +28,11 @@
*
*/
/** @file drm_drawable.c
* This file implements ioctls to store information along with DRM drawables,
* such as the current set of cliprects for vblank-synced buffer swaps.
*/
#include "drmP.h"
struct bsd_drm_drawable_info {

View File

@ -1,6 +1,3 @@
/* drm_drv.h -- Generic driver template -*- linux-c -*-
* Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
*/
/*-
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
@ -31,6 +28,12 @@
*
*/
/** @file drm_drv.c
* The catch-all file for DRM device support, including module setup/teardown,
* open/close, and ioctl dispatch.
*/
#include <sys/limits.h>
#include "drmP.h"
#include "drm.h"
@ -818,14 +821,7 @@ int drm_close(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p)
int drm_ioctl(struct cdev *kdev, u_long cmd, caddr_t data, int flags,
DRM_STRUCTPROC *p)
{
#ifdef __FreeBSD__
drm_device_t *dev = kdev->si_drv1;
#elif defined(__NetBSD__)
drm_device_t *dev = device_lookup(&drm_cd, minor(kdev));
#else
drm_device_t *dev = device_lookup(&drm_cd,
minor(kdev)))->dv_cfdata->cf_driver->cd_devs[minor(kdev)];
#endif
drm_device_t *dev = drm_get_device_from_kdev(kdev);
int retcode = 0;
drm_ioctl_desc_t *ioctl;
int (*func)(drm_device_t *dev, void *data, struct drm_file *file_priv);
@ -912,15 +908,13 @@ int drm_ioctl(struct cdev *kdev, u_long cmd, caddr_t data, int flags,
((ioctl->flags & DRM_MASTER) && !file_priv->master))
return EACCES;
if (is_driver_ioctl)
DRM_LOCK();
retcode = func(dev, data, file_priv);
if (is_driver_ioctl) {
DRM_LOCK();
/* shared code returns -errno */
retcode = -func(dev, data, file_priv);
DRM_UNLOCK();
/* Driver ioctls in shared code follow the linux convention of
* returning -errno instead of errno.
*/
retcode = -retcode;
} else {
retcode = func(dev, data, file_priv);
}
if (retcode != 0)

View File

@ -1,6 +1,3 @@
/* drm_fops.h -- File operations for DRM -*- linux-c -*-
* Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
*/
/*-
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
@ -32,6 +29,11 @@
*
*/
/** @file drm_fops.c
* Support code for dealing with the file privates associated with each
* open of the DRM device.
*/
#include "drmP.h"
drm_file_t *drm_find_file_by_proc(drm_device_t *dev, DRM_STRUCTPROC *p)

View File

@ -1,6 +1,3 @@
/* drm_ioctl.h -- IOCTL processing for DRM -*- linux-c -*-
* Created: Fri Jan 8 09:01:26 1999 by faith@valinux.com
*/
/*-
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
@ -31,6 +28,11 @@
*
*/
/** @file drm_ioctl.c
* Various minor DRM ioctls not applicable to other files, such as versioning
* information and reporting DRM information to userland.
*/
#include "drmP.h"
/*
@ -203,7 +205,7 @@ int drm_getstats(drm_device_t *dev, void *data, struct drm_file *file_priv)
drm_stats_t *stats = data;
int i;
memset(&stats, 0, sizeof(stats));
memset(stats, 0, sizeof(drm_stats_t));
DRM_LOCK();
@ -230,23 +232,27 @@ int drm_getstats(drm_device_t *dev, void *data, struct drm_file *file_priv)
int drm_setversion(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
drm_set_version_t *sv = data;
drm_set_version_t retv;
drm_set_version_t ver;
int if_version;
retv.drm_di_major = DRM_IF_MAJOR;
retv.drm_di_minor = DRM_IF_MINOR;
retv.drm_dd_major = dev->driver.major;
retv.drm_dd_minor = dev->driver.minor;
/* Save the incoming data, and set the response before continuing
* any further.
*/
ver = *sv;
sv->drm_di_major = DRM_IF_MAJOR;
sv->drm_di_minor = DRM_IF_MINOR;
sv->drm_dd_major = dev->driver.major;
sv->drm_dd_minor = dev->driver.minor;
if (sv->drm_di_major != -1) {
if (sv->drm_di_major != DRM_IF_MAJOR ||
sv->drm_di_minor < 0 || sv->drm_di_minor > DRM_IF_MINOR) {
if (ver.drm_di_major != -1) {
if (ver.drm_di_major != DRM_IF_MAJOR ||
ver.drm_di_minor < 0 || ver.drm_di_minor > DRM_IF_MINOR) {
return EINVAL;
}
if_version = DRM_IF_VERSION(sv->drm_di_major,
sv->drm_dd_minor);
if_version = DRM_IF_VERSION(ver.drm_di_major,
ver.drm_dd_minor);
dev->if_version = DRM_MAX(if_version, dev->if_version);
if (sv->drm_di_minor >= 1) {
if (ver.drm_di_minor >= 1) {
/*
* Version 1.1 includes tying of DRM to specific device
*/
@ -254,10 +260,10 @@ int drm_setversion(drm_device_t *dev, void *data, struct drm_file *file_priv)
}
}
if (sv->drm_dd_major != -1) {
if (sv->drm_dd_major != dev->driver.major ||
sv->drm_dd_minor < 0 ||
sv->drm_dd_minor > dev->driver.minor)
if (ver.drm_dd_major != -1) {
if (ver.drm_dd_major != dev->driver.major ||
ver.drm_dd_minor < 0 ||
ver.drm_dd_minor > dev->driver.minor)
{
return EINVAL;
}

View File

@ -1,6 +1,3 @@
/* drm_irq.c -- IRQ IOCTL and function support
* Created: Fri Oct 18 2003 by anholt@FreeBSD.org
*/
/*-
* Copyright 2003 Eric Anholt
* All Rights Reserved.
@ -28,6 +25,11 @@
*
*/
/** @file drm_irq.c
* Support code for handling setup/teardown of interrupt handlers and
* handing interrupt handlers off to the drivers.
*/
#include "drmP.h"
#include "drm.h"

View File

@ -1,6 +1,3 @@
/* lock.c -- IOCTLs for locking -*- linux-c -*-
* Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
*/
/*-
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
@ -31,6 +28,25 @@
*
*/
/** @file drm_lock.c
* Implementation of the ioctls and other support code for dealing with the
* hardware lock.
*
* The DRM hardware lock is a shared structure between the kernel and userland.
*
* On uncontended access where the new context was the last context, the
* client may take the lock without dropping down into the kernel, using atomic
* compare-and-set.
*
* If the client finds during compare-and-set that it was not the last owner
* of the lock, it calls the DRM lock ioctl, which may sleep waiting for the
* lock, and may have side-effects of kernel-managed context switching.
*
* When the client releases the lock, if the lock is marked as being contended
* by another client, then the DRM unlock ioctl is called so that the
* contending client may be woken up.
*/
#include "drmP.h"
int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
@ -157,6 +173,12 @@ int drm_unlock(drm_device_t *dev, void *data, struct drm_file *file_priv)
DRM_CURRENTPID, lock->context);
return EINVAL;
}
/* Check that the context unlock being requested actually matches
* who currently holds the lock.
*/
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) ||
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) != lock->context)
return EINVAL;
atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);

View File

@ -1,6 +1,3 @@
/* drm_memory.h -- Memory management wrappers for DRM -*- linux-c -*-
* Created: Thu Feb 4 14:00:34 1999 by faith@valinux.com
*/
/*-
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
@ -31,6 +28,14 @@
*
*/
/** @file drm_memory.c
* Wrappers for kernel memory allocation routines, and MTRR management support.
*
* This file previously implemented a memory consumption tracking system using
* the "area" argument for various different types of allocations, but that
* has been stripped out for now.
*/
#include "drmP.h"
MALLOC_DEFINE(M_DRM, "drm", "DRM Data Structures");

View File

@ -1,10 +1,3 @@
/**
* \file drm_pci.h
* \brief PCI consistent, DMA-accessible memory functions.
*
* \author Eric Anholt <anholt@FreeBSD.org>
*/
/*-
* Copyright 2003 Eric Anholt.
* All Rights Reserved.
@ -28,6 +21,13 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/**
* \file drm_pci.h
* \brief PCI consistent, DMA-accessible memory allocation.
*
* \author Eric Anholt <anholt@FreeBSD.org>
*/
#include "drmP.h"
/**********************************************************************/

View File

@ -1,5 +1,3 @@
/* drm_scatter.h -- IOCTLs to manage scatter/gather memory -*- linux-c -*-
* Created: Mon Dec 18 23:20:54 2000 by gareth@valinux.com */
/*-
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
@ -29,6 +27,13 @@
*
*/
/** @file drm_scatter.c
* Allocation of memory for scatter-gather mappings by the graphics chip.
*
* The memory allocated here is then made into an aperture in the card
* by drm_ati_pcigart_init().
*/
#include "drmP.h"
#define DEBUG_SCATTER 0

View File

@ -21,6 +21,11 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/** @file drm_sysctl.c
* Implementation of various sysctls for controlling DRM behavior and reporting
* debug information.
*/
#include "drmP.h"
#include "drm.h"

View File

@ -21,6 +21,10 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/** @file drm_vm.c
* Support code for mmaping of DRM maps.
*/
#include "drmP.h"
#include "drm.h"

View File

@ -58,7 +58,7 @@ endif
# Modules for all architectures
MODULE_LIST := drm.o tdfx.o r128.o radeon.o mga.o sis.o savage.o via.o \
mach64.o nv.o nouveau.o
mach64.o nv.o nouveau.o xgi.o
# Modules only for ix86 architectures
ifneq (,$(findstring 86,$(MACHINE)))
@ -91,6 +91,7 @@ MACH64HEADERS = mach64_drv.h mach64_drm.h $(DRMHEADERS)
NVHEADERS = nv_drv.h $(DRMHEADERS)
FFBHEADERS = ffb_drv.h $(DRMHEADERS)
NOUVEAUHEADERS = nouveau_drv.h nouveau_drm.h nouveau_reg.h $(DRMHEADERS)
XGIHEADERS = xgi_cmdlist.h xgi_drv.h xgi_misc.h xgi_regs.h $(DRMHEADERS)
PROGS = dristat drmstat
@ -285,6 +286,7 @@ CONFIG_DRM_VIA := n
CONFIG_DRM_MACH64 := n
CONFIG_DRM_NV := n
CONFIG_DRM_NOUVEAU := n
CONFIG_DRM_XGI := n
# Enable module builds for the modules requested/supported.
@ -321,6 +323,9 @@ endif
ifneq (,$(findstring nouveau,$(DRM_MODULES)))
CONFIG_DRM_NOUVEAU := m
endif
ifneq (,$(findstring xgi,$(DRM_MODULES)))
CONFIG_DRM_XGI := m
endif
# These require AGP support
@ -348,6 +353,7 @@ $(via-objs): $(VIAHEADERS)
$(mach64-objs): $(MACH64HEADERS)
$(nv-objs): $(NVHEADERS)
$(nouveau-objs): $(NOUVEAUHEADERS)
$(xgi-objs): $(XGIHEADERS)
endif

View File

@ -38,6 +38,8 @@ via-objs := via_irq.o via_drv.o via_map.o via_mm.o via_dma.o via_verifier.o \
via_video.o via_dmablit.o via_fence.o via_buffer.o
mach64-objs := mach64_drv.o mach64_dma.o mach64_irq.o mach64_state.o
nv-objs := nv_drv.o
xgi-objs := xgi_cmdlist.o xgi_drv.o xgi_fb.o xgi_misc.o xgi_pcie.o \
xgi_fence.o
ifeq ($(CONFIG_COMPAT),y)
drm-objs += drm_ioc32.o
@ -46,6 +48,7 @@ mga-objs += mga_ioc32.o
r128-objs += r128_ioc32.o
i915-objs += i915_ioc32.o
nouveau-objs += nouveau_ioc32.o
xgi-objs += xgi_ioc32.o
endif
obj-m += drm.o
@ -62,3 +65,4 @@ obj-$(CONFIG_DRM_VIA) += via.o
obj-$(CONFIG_DRM_MACH64)+= mach64.o
obj-$(CONFIG_DRM_NV) += nv.o
obj-$(CONFIG_DRM_NOUVEAU) += nouveau.o
obj-$(CONFIG_DRM_XGI) += xgi.o

View File

@ -231,7 +231,7 @@ static int drm_addmap_core(struct drm_device *dev, unsigned int offset,
*/
if (map->offset < dev->agp->base ||
map->offset > dev->agp->base +
dev->agp->agp_info.aper_size * 1024 * 1024) {
dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
map->offset += dev->agp->base;
}
map->mtrr = dev->agp->agp_mtrr; /* for getmap */

View File

@ -678,4 +678,51 @@ void idr_remove_all(struct idr *idp)
idp->layers = 0;
}
EXPORT_SYMBOL(idr_remove_all);
#endif /* DRM_IDR_COMPAT_FN */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18))
/**
* idr_replace - replace pointer for given id
* @idp: idr handle
* @ptr: pointer you want associated with the id
* @id: lookup key
*
* Replace the pointer registered with an id and return the old value.
* A -ENOENT return indicates that @id was not found.
* A -EINVAL return indicates that @id was not within valid constraints.
*
* The caller must serialize vs idr_find(), idr_get_new(), and idr_remove().
*/
void *idr_replace(struct idr *idp, void *ptr, int id)
{
	int n;
	struct idr_layer *p, *old_p;

	/* Number of id bits covered by the current depth of the tree. */
	n = idp->layers * IDR_BITS;
	p = idp->top;

	id &= MAX_ID_MASK;

	/* An id outside the range representable at this depth cannot exist. */
	if (id >= (1 << n))
		return ERR_PTR(-EINVAL);

	/* Walk down the radix tree, consuming IDR_BITS of the id per level,
	 * stopping at the leaf layer (n == 0) or a missing interior node.
	 */
	n -= IDR_BITS;
	while ((n > 0) && p) {
		p = p->ary[(id >> n) & IDR_MASK];
		n -= IDR_BITS;
	}

	n = id & IDR_MASK;
	/* The leaf slot must exist and be marked allocated in the bitmap. */
	if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
		return ERR_PTR(-ENOENT);

	/* Swap in the new pointer and hand the old one back to the caller. */
	old_p = p->ary[n];
	p->ary[n] = ptr;

	return (void *)old_p;
}
#endif

View File

@ -316,4 +316,13 @@ int idr_for_each(struct idr *idp,
void idr_remove_all(struct idr *idp);
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18))
void *idr_replace(struct idr *idp, void *ptr, int id);
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
typedef _Bool bool;
#endif
#endif

View File

@ -1051,8 +1051,13 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
drm_ioctl_compat_t *fn;
int ret;
/* Assume that ioctls without an explicit compat routine will "just
* work". This may not always be a good assumption, but it's better
* than always failing.
*/
if (nr >= DRM_ARRAY_SIZE(drm_compat_ioctls))
return -ENOTTY;
return drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
fn = drm_compat_ioctls[nr];

View File

@ -102,13 +102,8 @@ typedef enum _drm_i810_init_func {
/* This is the init structure after v1.2 */
typedef struct _drm_i810_init {
drm_i810_init_func_t func;
#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0)
int ring_map_idx;
int buffer_map_idx;
#else
unsigned int mmio_offset;
unsigned int buffers_offset;
#endif
int sarea_priv_offset;
unsigned int ring_start;
unsigned int ring_end;

View File

@ -316,3 +316,20 @@ nouveau_sgdma_nottm_hack_takedown(struct drm_device *dev)
{
}
/* Look up the bus address backing @offset within the SGDMA aperture by
 * reading the corresponding page table entry out of the context DMA object.
 * Returns 0 on success with *page filled in, or -EINVAL on NV50+ cards
 * where this lookup is not implemented.
 */
int
nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	int pte;

	/* One PTE per GART page; "pte + 2" below presumably skips ctxdma
	 * header words at the start of the object — TODO confirm.
	 */
	pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
	if (dev_priv->card_type < NV_50) {
		/* Mask off the low control bits to recover the page address. */
		*page = INSTANCE_RD(gpuobj, (pte + 2)) & ~NV_CTXDMA_PAGE_MASK;
		return 0;
	}

	DRM_ERROR("Unimplemented on NV50\n");
	return -EINVAL;
}

322
linux-core/xgi_cmdlist.c Normal file
View File

@ -0,0 +1,322 @@
/****************************************************************************
* Copyright (C) 2003-2006 by XGI Technology, Taiwan.
*
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation on the rights to use, copy, modify, merge,
* publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
***************************************************************************/
#include "xgi_drv.h"
#include "xgi_regs.h"
#include "xgi_misc.h"
#include "xgi_cmdlist.h"
static void xgi_emit_flush(struct xgi_info * info, bool stop);
static void xgi_emit_nop(struct xgi_info * info);
static unsigned int get_batch_command(enum xgi_batch_type type);
static void triggerHWCommandList(struct xgi_info * info);
static void xgi_cmdlist_reset(struct xgi_info * info);
/**
 * Graphic engine register (2d/3d) accessing interface.
 *
 * Writes @data to MMIO register @addr via the mapped register aperture,
 * optionally logging the write when XGI_MMIO_DEBUG is defined.
 */
static inline void dwWriteReg(struct drm_map * map, u32 addr, u32 data)
{
#ifdef XGI_MMIO_DEBUG
	DRM_INFO("mmio_map->handle = 0x%p, addr = 0x%x, data = 0x%x\n",
		 map->handle, addr, data);
#endif
	DRM_WRITE32(map, addr, data);
}
/* Allocate the ring buffer used to feed command lists to the hardware and
 * record its kernel virtual address, hardware address and size in
 * info->cmdring.  Returns 0 on success or the error from xgi_alloc().
 */
int xgi_cmdlist_initialize(struct xgi_info * info, size_t size,
			   struct drm_file * filp)
{
	/* The ring lives in non-local (PCI-E GART) memory. */
	struct xgi_mem_alloc mem_alloc = {
		.location = XGI_MEMLOC_NON_LOCAL,
		.size = size,
	};
	int err;

	err = xgi_alloc(info, &mem_alloc, filp);
	if (err) {
		return err;
	}

	info->cmdring.ptr = xgi_find_pcie_virt(info, mem_alloc.hw_addr);
	info->cmdring.size = mem_alloc.size;
	info->cmdring.ring_hw_base = mem_alloc.hw_addr;
	/* No batch has been submitted yet, so there is nothing to link from. */
	info->cmdring.last_ptr = NULL;
	info->cmdring.ring_offset = 0;

	return 0;
}
/**
 * get_batch_command - Get the command ID for the current begin type.
 * @type: Type of the current batch
 *
 * See section 3.2.2 "Begin" (page 15) of the 3D SPG.
 *
 * This function assumes that @type is on the range [0,3].
 */
static unsigned int get_batch_command(enum xgi_batch_type type)
{
	/* Port offsets, in dwords, for each of the four batch types. */
	static const unsigned int ports[4] = {
		0x30 >> 2, 0x40 >> 2, 0x50 >> 2, 0x20 >> 2
	};

	return ports[type];
}
/* Ioctl handler: submit a user-built command batch to the hardware.
 *
 * If no batch has been submitted since the last reset, the engine is kicked
 * directly through MMIO; otherwise the previous batch's linkage dwords are
 * rewritten to chain to this one.
 */
int xgi_submit_cmdlist(struct drm_device * dev, void * data,
		       struct drm_file * filp)
{
	struct xgi_info *const info = dev->dev_private;
	const struct xgi_cmd_info *const pCmdInfo =
		(struct xgi_cmd_info *) data;
	const unsigned int cmd = get_batch_command(pCmdInfo->type);
	u32 begin[4];

	/* Build the 4-dword BEGIN packet: command + sequence id, link
	 * enable + batch size, hardware address of the batch, reserved.
	 */
	begin[0] = (cmd << 24) | BEGIN_VALID_MASK
		| (BEGIN_BEGIN_IDENTIFICATION_MASK & info->next_sequence);
	begin[1] = BEGIN_LINK_ENABLE_MASK | pCmdInfo->size;
	begin[2] = pCmdInfo->hw_addr >> 4;
	begin[3] = 0;

	if (info->cmdring.last_ptr == NULL) {
		/* First batch since reset: start the engine via MMIO. */
		const unsigned int portOffset = BASE_3D_ENG + (cmd << 2);

		/* Enable PCI Trigger Mode
		 */
		dwWriteReg(info->mmio_map,
			   BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS,
			   (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) |
			   M2REG_CLEAR_COUNTERS_MASK | 0x08 |
			   M2REG_PCI_TRIGGER_MODE_MASK);

		dwWriteReg(info->mmio_map,
			   BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS,
			   (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | 0x08 |
			   M2REG_PCI_TRIGGER_MODE_MASK);

		/* Send PCI begin command
		 */
		dwWriteReg(info->mmio_map, portOffset, begin[0]);
		dwWriteReg(info->mmio_map, portOffset + 4, begin[1]);
		dwWriteReg(info->mmio_map, portOffset + 8, begin[2]);
		dwWriteReg(info->mmio_map, portOffset + 12, begin[3]);
	} else {
		DRM_DEBUG("info->cmdring.last_ptr != NULL\n");

		if (pCmdInfo->type == BTYPE_3D) {
			xgi_emit_flush(info, FALSE);
		}

		/* Fill in the previous batch's link dwords last-to-first,
		 * with a write barrier before the dword that makes the link
		 * valid, so the hardware never sees a half-written link.
		 */
		info->cmdring.last_ptr[1] = begin[1];
		info->cmdring.last_ptr[2] = begin[2];
		info->cmdring.last_ptr[3] = begin[3];
		DRM_WRITEMEMORYBARRIER();
		info->cmdring.last_ptr[0] = begin[0];

		triggerHWCommandList(info);
	}

	/* Remember where this batch lives so the next submit can link to it. */
	info->cmdring.last_ptr = xgi_find_pcie_virt(info, pCmdInfo->hw_addr);
	drm_fence_flush_old(info->dev, 0, info->next_sequence);
	return 0;
}
/*
    state: 0 - console
           1 - graphic
           2 - fb
           3 - logout
*/
/* Handle a VT / session state transition reported by userland.  The only
 * transition with a side effect is console -> graphic, which resets the
 * command list bookkeeping.  Returns 0, or -EINVAL for an unrecognized
 * transition.
 */
int xgi_state_change(struct xgi_info * info, unsigned int to,
		     unsigned int from)
{
#define STATE_CONSOLE 0
#define STATE_GRAPHIC 1
#define STATE_FBTERM 2
#define STATE_LOGOUT 3
#define STATE_REBOOT 4
#define STATE_SHUTDOWN 5

	if ((from == STATE_GRAPHIC) && (to == STATE_CONSOLE)) {
		DRM_INFO("Leaving graphical mode (probably VT switch)\n");
	} else if ((from == STATE_CONSOLE) && (to == STATE_GRAPHIC)) {
		DRM_INFO("Entering graphical mode (probably VT switch)\n");
		/* Forget any previously linked batches; the ring restarts. */
		xgi_cmdlist_reset(info);
	} else if ((from == STATE_GRAPHIC)
		   && ((to == STATE_LOGOUT)
		       || (to == STATE_REBOOT)
		       || (to == STATE_SHUTDOWN))) {
		DRM_INFO("Leaving graphical mode (probably X shutting down)\n");
	} else {
		DRM_ERROR("Invalid state change.\n");
		return -EINVAL;
	}

	return 0;
}
int xgi_state_change_ioctl(struct drm_device * dev, void * data,
struct drm_file * filp)
{
struct xgi_state_info *const state =
(struct xgi_state_info *) data;
struct xgi_info *info = dev->dev_private;
return xgi_state_change(info, state->_toState, state->_fromState);
}
/* Reset the command ring bookkeeping: no outstanding batch to link from,
 * and the next flush batch is written at the start of the ring.
 */
void xgi_cmdlist_reset(struct xgi_info * info)
{
	info->cmdring.last_ptr = NULL;
	info->cmdring.ring_offset = 0;
}
/* Tear down the command ring.  Any in-flight command chain is terminated
 * with a flush and a final unlinked nop, the engine is waited idle, and the
 * ring bookkeeping is zeroed.
 */
void xgi_cmdlist_cleanup(struct xgi_info * info)
{
	if (info->cmdring.ring_hw_base != 0) {
		/* If command lists have been issued, terminate the command
		 * list chain with a flush command.
		 */
		if (info->cmdring.last_ptr != NULL) {
			xgi_emit_flush(info, FALSE);
			xgi_emit_nop(info);
		}

		xgi_waitfor_pci_idle(info);

		(void) memset(&info->cmdring, 0, sizeof(info->cmdring));
	}
}
/* Poke the PCI trigger register to make the hardware fetch the newly
 * linked command list.  The incrementing trigger id makes each write
 * distinct.  NOTE(review): s_triggerID is unsynchronized static state —
 * presumably all callers run under the DRM lock; confirm.
 */
static void triggerHWCommandList(struct xgi_info * info)
{
	static unsigned int s_triggerID = 1;

	dwWriteReg(info->mmio_map,
		   BASE_3D_ENG + M2REG_PCI_TRIGGER_REGISTER_ADDRESS,
		   0x05000000 + (0x0ffff & s_triggerID++));
}
/**
 * Emit a flush to the CRTL command stream.
 * @info XGI info structure
 * @stop when true, the flush batch also stores the current pointer
 *       (BEGIN_STOP_STORE_CURRENT_POINTER_MASK).
 *
 * This function assumes info->cmdring.ptr and info->cmdring.last_ptr are
 * non-NULL: the flush batch is written into the ring and the previous
 * batch is re-linked to point at it.
 */
void xgi_emit_flush(struct xgi_info * info, bool stop)
{
	/* 8-dword CTRL batch: a BEGIN header (4 dwords) followed by four
	 * 2D-engine flush commands.
	 */
	const u32 flush_command[8] = {
		((0x10 << 24)
		 | (BEGIN_BEGIN_IDENTIFICATION_MASK & info->next_sequence)),
		BEGIN_LINK_ENABLE_MASK | (0x00004),
		0x00000000, 0x00000000,

		/* Flush the 2D engine with the default 32 clock delay.
		 */
		M2REG_FLUSH_ENGINE_COMMAND | M2REG_FLUSH_2D_ENGINE_MASK,
		M2REG_FLUSH_ENGINE_COMMAND | M2REG_FLUSH_2D_ENGINE_MASK,
		M2REG_FLUSH_ENGINE_COMMAND | M2REG_FLUSH_2D_ENGINE_MASK,
		M2REG_FLUSH_ENGINE_COMMAND | M2REG_FLUSH_2D_ENGINE_MASK,
	};
	const unsigned int flush_size = sizeof(flush_command);
	u32 *batch_addr;
	u32 hw_addr;

	/* check buf is large enough to contain a new flush batch */
	if ((info->cmdring.ring_offset + flush_size) >= info->cmdring.size) {
		info->cmdring.ring_offset = 0;
	}

	/* Hardware and kernel-virtual addresses of the new batch's slot. */
	hw_addr = info->cmdring.ring_hw_base
		+ info->cmdring.ring_offset;
	batch_addr = info->cmdring.ptr
		+ (info->cmdring.ring_offset / 4);

	(void) memcpy(batch_addr, flush_command, flush_size);

	if (stop) {
		*batch_addr |= BEGIN_STOP_STORE_CURRENT_POINTER_MASK;
	}

	/* Link the previous batch to the flush batch: write the link dwords
	 * first, barrier, then the dword that makes the link valid.
	 */
	info->cmdring.last_ptr[1] = BEGIN_LINK_ENABLE_MASK | (flush_size / 4);
	info->cmdring.last_ptr[2] = hw_addr >> 4;
	info->cmdring.last_ptr[3] = 0;
	DRM_WRITEMEMORYBARRIER();
	info->cmdring.last_ptr[0] = (get_batch_command(BTYPE_CTRL) << 24)
		| (BEGIN_VALID_MASK);

	triggerHWCommandList(info);

	info->cmdring.ring_offset += flush_size;
	info->cmdring.last_ptr = batch_addr;
}
/**
 * Emit an empty command to the CRTL command stream.
 * @info XGI info structure
 *
 * This function assumes info->cmdring.last_ptr is non-NULL.  In addition,
 * since this function emits a command that does not have linkage
 * information, it sets info->cmdring.last_ptr to NULL, terminating the
 * chain.
 */
void xgi_emit_nop(struct xgi_info * info)
{
	/* Rewrite the previous batch's link dwords to an empty CTRL begin:
	 * payload dwords first, barrier, then the validating header dword.
	 */
	info->cmdring.last_ptr[1] = BEGIN_LINK_ENABLE_MASK
		| (BEGIN_BEGIN_IDENTIFICATION_MASK & info->next_sequence);
	info->cmdring.last_ptr[2] = 0;
	info->cmdring.last_ptr[3] = 0;
	DRM_WRITEMEMORYBARRIER();
	info->cmdring.last_ptr[0] = (get_batch_command(BTYPE_CTRL) << 24)
		| (BEGIN_VALID_MASK);

	triggerHWCommandList(info);

	info->cmdring.last_ptr = NULL;
}
/* Emit a stopping flush so the hardware stores the current pointer; a
 * no-op if no command chain is outstanding.
 */
void xgi_emit_irq(struct xgi_info * info)
{
	if (info->cmdring.last_ptr == NULL)
		return;

	xgi_emit_flush(info, TRUE);
}

66
linux-core/xgi_cmdlist.h Normal file
View File

@ -0,0 +1,66 @@
/****************************************************************************
* Copyright (C) 2003-2006 by XGI Technology, Taiwan.
*
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation on the rights to use, copy, modify, merge,
* publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
***************************************************************************/
#ifndef _XGI_CMDLIST_H_
#define _XGI_CMDLIST_H_

/* Bookkeeping for the ring buffer that command-list batches are written
 * into and chained through.
 */
struct xgi_cmdring_info {
	/**
	 * Kernel space pointer to the base of the command ring.
	 */
	u32 * ptr;

	/**
	 * Size, in bytes, of the command ring.
	 */
	unsigned int size;

	/**
	 * Base address of the command ring from the hardware's PoV.
	 */
	unsigned int ring_hw_base;

	/**
	 * Kernel space pointer to the most recently submitted batch, whose
	 * leading dwords get rewritten to link the next batch in; NULL when
	 * no batch is outstanding.
	 */
	u32 * last_ptr;

	/**
	 * Offset, in bytes, from the start of the ring to the next available
	 * location to store a command.
	 */
	unsigned int ring_offset;
};

struct xgi_info;

extern int xgi_cmdlist_initialize(struct xgi_info * info, size_t size,
	struct drm_file * filp);

extern int xgi_state_change(struct xgi_info * info, unsigned int to,
	unsigned int from);

extern void xgi_cmdlist_cleanup(struct xgi_info * info);

extern void xgi_emit_irq(struct xgi_info * info);

#endif /* _XGI_CMDLIST_H_ */

1
linux-core/xgi_drm.h Symbolic link
View File

@ -0,0 +1 @@
../shared-core/xgi_drm.h

431
linux-core/xgi_drv.c Normal file
View File

@ -0,0 +1,431 @@
/****************************************************************************
* Copyright (C) 2003-2006 by XGI Technology, Taiwan.
*
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation on the rights to use, copy, modify, merge,
* publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
***************************************************************************/
#include "drmP.h"
#include "drm.h"
#include "xgi_drv.h"
#include "xgi_regs.h"
#include "xgi_misc.h"
#include "xgi_cmdlist.h"
#include "drm_pciids.h"
/* PCI IDs this driver binds to, pulled from the shared drm_pciids.h table. */
static struct pci_device_id pciidlist[] = {
	xgi_PCI_IDS
};
/* Fence driver hooks: sequence numbers wrap within the batch
 * identification mask used by the command-list BEGIN packets.
 */
static struct drm_fence_driver xgi_fence_driver = {
	.num_classes = 1,
	.wrap_diff = BEGIN_BEGIN_IDENTIFICATION_MASK,
	.flush_diff = BEGIN_BEGIN_IDENTIFICATION_MASK - 1,
	.sequence_mask = BEGIN_BEGIN_IDENTIFICATION_MASK,
	.lazy_capable = 1,
	.emit = xgi_fence_emit_sequence,
	.poke_flush = xgi_poke_flush,
	.has_irq = xgi_fence_has_irq
};
int xgi_bootstrap(struct drm_device *, void *, struct drm_file *);
/* Driver-private ioctl table; bootstrap is restricted to the root master,
 * state changes to the master, everything else to authenticated clients.
 */
static struct drm_ioctl_desc xgi_ioctls[] = {
	DRM_IOCTL_DEF(DRM_XGI_BOOTSTRAP, xgi_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_XGI_ALLOC, xgi_alloc_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_XGI_FREE, xgi_free_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_XGI_SUBMIT_CMDLIST, xgi_submit_cmdlist, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_XGI_STATE_CHANGE, xgi_state_change_ioctl, DRM_AUTH|DRM_MASTER),
};
static const int xgi_max_ioctl = DRM_ARRAY_SIZE(xgi_ioctls);
static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static int xgi_driver_load(struct drm_device *dev, unsigned long flags);
static int xgi_driver_unload(struct drm_device *dev);
static void xgi_driver_lastclose(struct drm_device * dev);
static void xgi_reclaim_buffers_locked(struct drm_device * dev,
struct drm_file * filp);
static irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS);
/* Main DRM driver description.  IRQ install/uninstall hooks are left NULL:
 * the shared IRQ is claimed by the DRM core and serviced by xgi_kern_isr. */
static struct drm_driver driver = {
	.driver_features =
	    DRIVER_PCI_DMA | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ |
	    DRIVER_IRQ_SHARED | DRIVER_SG,
	.dev_priv_size = sizeof(struct xgi_info),
	.load = xgi_driver_load,
	.unload = xgi_driver_unload,
	.lastclose = xgi_driver_lastclose,
	.dma_quiescent = NULL,
	.irq_preinstall = NULL,
	.irq_postinstall = NULL,
	.irq_uninstall = NULL,
	.irq_handler = xgi_kern_isr,
	.reclaim_buffers = drm_core_reclaim_buffers,
	.reclaim_buffers_idlelocked = xgi_reclaim_buffers_locked,
	.get_map_ofs = drm_core_get_map_ofs,
	.get_reg_ofs = drm_core_get_reg_ofs,
	.ioctls = xgi_ioctls,
	.dma_ioctl = NULL,

	.fops = {
		.owner = THIS_MODULE,
		.open = drm_open,
		.release = drm_release,
		.ioctl = drm_ioctl,
		.mmap = drm_mmap,
		.poll = drm_poll,
		.fasync = drm_fasync,
		/* 32-bit compat path only exists on 64-bit kernels new
		 * enough to route compat_ioctl through the fops. */
#if defined(CONFIG_COMPAT) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9)
		.compat_ioctl = xgi_compat_ioctl,
#endif
	},

	.pci_driver = {
		.name = DRIVER_NAME,
		.id_table = pciidlist,
		.probe = probe,
		.remove = __devexit_p(drm_cleanup_pci),
	},

	.fence_driver = &xgi_fence_driver,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};
/* PCI probe callback: hand the device to the DRM core, which will in turn
 * invoke xgi_driver_load for device-specific setup. */
static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_dev(pdev, ent, &driver);
}
/* Module entry point.  num_ioctls is filled in at runtime so the table
 * size stays in one place (xgi_max_ioctl). */
static int __init xgi_init(void)
{
	driver.num_ioctls = xgi_max_ioctl;
	return drm_init(&driver, pciidlist);
}
/* Module exit point: unregister from the DRM core. */
static void __exit xgi_exit(void)
{
	drm_exit(&driver);
}

module_init(xgi_init);
module_exit(xgi_exit);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
/* One-time graphics engine setup, called from xgi_bootstrap after MMIO is
 * enabled.  Programs a sequence of indexed VGA-style registers (3C5/3X5)
 * through the MMIO aperture; the register numbers and bit meanings come
 * from XGI's documentation and the order is intentional. */
void xgi_engine_init(struct xgi_info * info)
{
	u8 temp;

	/* Unprotect the extended registers (3C5.11 = 0x92). */
	OUT3C5B(info->mmio_map, 0x11, 0x92);

	/* -------> copy from OT2D
	 * PCI Retry Control Register.
	 * disable PCI read retry & enable write retry in mem. (10xx xxxx)b
	 */
	temp = IN3X5B(info->mmio_map, 0x55);
	OUT3X5B(info->mmio_map, 0x55, (temp & 0xbf) | 0x80);

	xgi_enable_ge(info);

	/* Enable linear addressing of the card. */
	temp = IN3X5B(info->mmio_map, 0x21);
	OUT3X5B(info->mmio_map, 0x21, temp | 0x20);

	/* Enable 32-bit internal data path */
	temp = IN3X5B(info->mmio_map, 0x2A);
	OUT3X5B(info->mmio_map, 0x2A, temp | 0x40);

	/* Enable PCI burst write ,disable burst read and enable MMIO. */
	/*
	 * 0x3D4.39 Enable PCI burst write, disable burst read and enable MMIO.
	 * 7 ---- Pixel Data Format 1: big endian 0: little endian
	 * 6 5 4 3---- Memory Data with Big Endian Format, BE[3:0]# with Big Endian Format
	 * 2 ---- PCI Burst Write Enable
	 * 1 ---- PCI Burst Read Enable
	 * 0 ---- MMIO Control
	 */
	temp = IN3X5B(info->mmio_map, 0x39);
	OUT3X5B(info->mmio_map, 0x39, (temp | 0x05) & 0xfd);

	/* enable GEIO decode */
	/* temp = IN3X5B(info->mmio_map, 0x29);
	 * OUT3X5B(info->mmio_map, 0x29, temp | 0x08);
	 */

	/* Enable graphic engine I/O PCI retry function*/
	/* temp = IN3X5B(info->mmio_map, 0x62);
	 * OUT3X5B(info->mmio_map, 0x62, temp | 0x50);
	 */

	/* protect all register except which protected by 3c5.0e.7 */
	/* OUT3C5B(info->mmio_map, 0x11, 0x87); */
}
/**
 * Ioctl handler for DRM_XGI_BOOTSTRAP.  Performs all one-time device
 * initialization: maps the MMIO registers, probes the framebuffer size,
 * initializes the FB and GART heaps, creates the command list, and adds a
 * scatter/gather map for the GART backing store.  Each step is guarded by
 * an "already done" check, so the ioctl is safe to reissue after
 * lastclose tears the maps down.
 *
 * On success the GART map description (including the user_token the
 * client should mmap) is copied back through @data.  Returns 0 or a
 * negative errno.
 */
int xgi_bootstrap(struct drm_device * dev, void * data,
		  struct drm_file * filp)
{
	struct xgi_info *info = dev->dev_private;
	struct xgi_bootstrap * bs = (struct xgi_bootstrap *) data;
	struct drm_map_list *maplist;
	int err;

	DRM_SPININIT(&info->fence_lock, "fence lock");
	info->next_sequence = 0;
	info->complete_sequence = 0;

	if (info->mmio_map == NULL) {
		err = drm_addmap(dev, info->mmio.base, info->mmio.size,
				 _DRM_REGISTERS, _DRM_KERNEL,
				 &info->mmio_map);
		if (err) {
			DRM_ERROR("Unable to map MMIO region: %d\n", err);
			return err;
		}

		xgi_enable_mmio(info);
		xgi_engine_init(info);
	}

	/* 3CF.54 holds the installed memory size in 8 MiB units. */
	info->fb.size = IN3CFB(info->mmio_map, 0x54) * 8 * 1024 * 1024;

	DRM_INFO("fb base: 0x%lx, size: 0x%x (probed)\n",
		 (unsigned long) info->fb.base, info->fb.size);

	if ((info->fb.base == 0) || (info->fb.size == 0)) {
		DRM_ERROR("framebuffer appears to be wrong: 0x%lx 0x%x\n",
			  (unsigned long) info->fb.base, info->fb.size);
		return -EINVAL;
	}

	/* Init the resource manager */
	if (!info->fb_heap_initialized) {
		err = xgi_fb_heap_init(info);
		if (err) {
			DRM_ERROR("Unable to initialize FB heap.\n");
			return err;
		}
	}

	/* GART aperture size is chosen by the caller. */
	info->pcie.size = bs->gart.size;

	/* Init the resource manager */
	if (!info->pcie_heap_initialized) {
		err = xgi_pcie_heap_init(info);
		if (err) {
			DRM_ERROR("Unable to initialize GART heap.\n");
			return err;
		}

		/* Alloc 1M bytes for cmdbuffer which is flush2D batch array */
		err = xgi_cmdlist_initialize(info, 0x100000, filp);
		if (err) {
			DRM_ERROR("xgi_cmdlist_initialize() failed\n");
			return err;
		}
	}

	if (info->pcie_map == NULL) {
		err = drm_addmap(info->dev, 0, info->pcie.size,
				 _DRM_SCATTER_GATHER, _DRM_LOCKED,
				 & info->pcie_map);
		if (err) {
			DRM_ERROR("Could not add map for GART backing "
				  "store.\n");
			return err;
		}
	}

	/* Look the map back up to learn the user_token user space must
	 * pass to mmap(). */
	maplist = drm_find_matching_map(dev, info->pcie_map);
	if (maplist == NULL) {
		DRM_ERROR("Could not find GART backing store map.\n");
		return -EINVAL;
	}

	bs->gart = *info->pcie_map;
	bs->gart.handle = (void *)(unsigned long) maplist->user_token;
	return 0;
}
/**
 * Called by the DRM core when the last file handle on the device closes.
 * Quiesces the hardware and resets the driver state so that a subsequent
 * xgi_bootstrap starts from scratch.
 */
void xgi_driver_lastclose(struct drm_device * dev)
{
	struct xgi_info * info = dev->dev_private;

	if (info != NULL) {
		/* Only touch the hardware if the registers are mapped. */
		if (info->mmio_map != NULL) {
			xgi_cmdlist_cleanup(info);
			xgi_disable_ge(info);
			xgi_disable_mmio(info);
		}

		/* The core DRM lastclose routine will destroy all of our
		 * mappings for us.  NULL out the pointers here so that
		 * xgi_bootstrap can do the right thing.
		 */
		info->pcie_map = NULL;
		info->mmio_map = NULL;
		info->fb_map = NULL;

		if (info->pcie_heap_initialized) {
			drm_ati_pcigart_cleanup(dev, &info->gart_info);
		}

		if (info->fb_heap_initialized
		    || info->pcie_heap_initialized) {
			drm_sman_cleanup(&info->sman);

			info->fb_heap_initialized = FALSE;
			info->pcie_heap_initialized = FALSE;
		}
	}
}
/**
 * Release every sman allocation owned by a closing file handle.  Invoked
 * by the DRM core with the HW lock held (reclaim_buffers_idlelocked), so
 * it may quiesce DMA before freeing.
 */
void xgi_reclaim_buffers_locked(struct drm_device * dev,
				struct drm_file * filp)
{
	struct xgi_info * info = dev->dev_private;
	const unsigned long owner = (unsigned long) filp;

	mutex_lock(&info->dev->struct_mutex);

	/* Nothing owned by this client?  Then there is nothing to do. */
	if (drm_sman_owner_clean(&info->sman, owner)) {
		mutex_unlock(&info->dev->struct_mutex);
		return;
	}

	/* Let any in-flight DMA drain before the allocations vanish. */
	if (dev->driver->dma_quiescent)
		dev->driver->dma_quiescent(dev);

	drm_sman_owner_cleanup(&info->sman, owner);
	mutex_unlock(&info->dev->struct_mutex);
}
/*
 * Interrupt handler.  Reads the active-interrupt status bits, acknowledges
 * any that are set by writing them back through the setting register, and
 * kicks the fence handler so waiters are signalled.
 */
irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	struct xgi_info *info = dev->dev_private;
	/* Mask the status down to the interrupt sources we service. */
	const u32 irq_bits = DRM_READ32(info->mmio_map,
					(0x2800
					 + M2REG_AUTO_LINK_STATUS_ADDRESS))
		& (M2REG_ACTIVE_TIMER_INTERRUPT_MASK
		   | M2REG_ACTIVE_INTERRUPT_0_MASK
		   | M2REG_ACTIVE_INTERRUPT_2_MASK
		   | M2REG_ACTIVE_INTERRUPT_3_MASK);

	if (irq_bits != 0) {
		/* Ack exactly the bits we observed. */
		DRM_WRITE32(info->mmio_map,
			    0x2800 + M2REG_AUTO_LINK_SETTING_ADDRESS,
			    M2REG_AUTO_LINK_SETTING_COMMAND | irq_bits);
		xgi_fence_handler(dev);
		return IRQ_HANDLED;
	} else {
		/* Shared IRQ line: not ours this time. */
		return IRQ_NONE;
	}
}
int xgi_driver_load(struct drm_device *dev, unsigned long flags)
{
struct xgi_info *info = drm_alloc(sizeof(*info), DRM_MEM_DRIVER);
int err;
if (!info)
return -ENOMEM;
(void) memset(info, 0, sizeof(*info));
dev->dev_private = info;
info->dev = dev;
info->mmio.base = drm_get_resource_start(dev, 1);
info->mmio.size = drm_get_resource_len(dev, 1);
DRM_INFO("mmio base: 0x%lx, size: 0x%x\n",
(unsigned long) info->mmio.base, info->mmio.size);
if ((info->mmio.base == 0) || (info->mmio.size == 0)) {
DRM_ERROR("mmio appears to be wrong: 0x%lx 0x%x\n",
(unsigned long) info->mmio.base, info->mmio.size);
err = -EINVAL;
goto fail;
}
info->fb.base = drm_get_resource_start(dev, 0);
info->fb.size = drm_get_resource_len(dev, 0);
DRM_INFO("fb base: 0x%lx, size: 0x%x\n",
(unsigned long) info->fb.base, info->fb.size);
err = drm_sman_init(&info->sman, 2, 12, 8);
if (err) {
goto fail;
}
return 0;
fail:
drm_free(info, sizeof(*info), DRM_MEM_DRIVER);
return err;
}
/* Per-device teardown at DRM unload time: release the sman bookkeeping and
 * the private structure allocated in xgi_driver_load. */
int xgi_driver_unload(struct drm_device *dev)
{
	struct xgi_info * info = dev->dev_private;

	drm_sman_takedown(&info->sman);
	drm_free(info, sizeof(*info), DRM_MEM_DRIVER);
	dev->dev_private = NULL;

	return 0;
}

117
linux-core/xgi_drv.h Normal file
View File

@ -0,0 +1,117 @@
/****************************************************************************
* Copyright (C) 2003-2006 by XGI Technology, Taiwan.
*
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation on the rights to use, copy, modify, merge,
* publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
***************************************************************************/
#ifndef _XGI_DRV_H_
#define _XGI_DRV_H_
#include "drmP.h"
#include "drm.h"
#include "drm_sman.h"
#define DRIVER_AUTHOR "Andrea Zhang <andrea_zhang@macrosynergy.com>"
#define DRIVER_NAME "xgi"
#define DRIVER_DESC "XGI XP5 / XP10 / XG47"
#define DRIVER_DATE "20070918"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 1
#define DRIVER_PATCHLEVEL 0
#include "xgi_cmdlist.h"
#include "xgi_drm.h"
/* A physical address range on the card (bus base + length in bytes). */
struct xgi_aperture {
	dma_addr_t base;
	unsigned int size;
};
/* Per-device private state, hung off drm_device::dev_private. */
struct xgi_info {
	struct drm_device *dev;

	bool bootstrap_done;

	/* physical characteristics */
	struct xgi_aperture mmio;	/* register aperture (BAR 1) */
	struct xgi_aperture fb;		/* framebuffer aperture (BAR 0) */
	struct xgi_aperture pcie;	/* GART aperture (size set at bootstrap) */

	struct drm_map *mmio_map;
	struct drm_map *pcie_map;
	struct drm_map *fb_map;

	/* look up table parameters */
	struct ati_pcigart_info gart_info;
	unsigned int lutPageSize;

	struct drm_sman sman;		/* manages both FB and GART pools */
	bool fb_heap_initialized;
	bool pcie_heap_initialized;

	struct xgi_cmdring_info cmdring;

	/* Protects the fence sequence counters below. */
	DRM_SPINTYPE fence_lock;
	unsigned complete_sequence;
	unsigned next_sequence;
};
extern long xgi_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
extern int xgi_fb_heap_init(struct xgi_info * info);
extern int xgi_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc,
struct drm_file * filp);
extern int xgi_free(struct xgi_info * info, unsigned long index,
struct drm_file * filp);
extern int xgi_pcie_heap_init(struct xgi_info * info);
extern void *xgi_find_pcie_virt(struct xgi_info * info, u32 address);
extern void xgi_enable_mmio(struct xgi_info * info);
extern void xgi_disable_mmio(struct xgi_info * info);
extern void xgi_enable_ge(struct xgi_info * info);
extern void xgi_disable_ge(struct xgi_info * info);
extern void xgi_poke_flush(struct drm_device * dev, uint32_t class);
extern int xgi_fence_emit_sequence(struct drm_device * dev, uint32_t class,
uint32_t flags, uint32_t * sequence, uint32_t * native_type);
extern void xgi_fence_handler(struct drm_device * dev);
extern int xgi_fence_has_irq(struct drm_device *dev, uint32_t class,
uint32_t flags);
extern int xgi_alloc_ioctl(struct drm_device * dev, void * data,
struct drm_file * filp);
extern int xgi_free_ioctl(struct drm_device * dev, void * data,
struct drm_file * filp);
extern int xgi_submit_cmdlist(struct drm_device * dev, void * data,
struct drm_file * filp);
extern int xgi_state_change_ioctl(struct drm_device * dev, void * data,
struct drm_file * filp);
#endif

130
linux-core/xgi_fb.c Normal file
View File

@ -0,0 +1,130 @@
/****************************************************************************
* Copyright (C) 2003-2006 by XGI Technology, Taiwan.
*
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation on the rights to use, copy, modify, merge,
* publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
***************************************************************************/
#include "xgi_drv.h"
#define XGI_FB_HEAP_START 0x1000000
/**
 * Allocate a block of on-card or GART memory for a client.
 *
 * @info    device private data
 * @alloc   in/out allocation request; on success offset, hw_addr and
 *          index (the handle for xgi_free) are filled in
 * @filp    owning file handle, recorded so reclaim can free on close
 *
 * Returns 0, -EINVAL for a bad/uninitialized pool, or -ENOMEM.
 */
int xgi_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc,
	      struct drm_file * filp)
{
	struct drm_memblock_item *block;
	const char *const mem_name = (alloc->location == XGI_MEMLOC_LOCAL)
		? "on-card" : "GART";

	if ((alloc->location != XGI_MEMLOC_LOCAL)
	    && (alloc->location != XGI_MEMLOC_NON_LOCAL)) {
		DRM_ERROR("Invalid memory pool (0x%08x) specified.\n",
			  alloc->location);
		return -EINVAL;
	}

	if ((alloc->location == XGI_MEMLOC_LOCAL)
	    ? !info->fb_heap_initialized : !info->pcie_heap_initialized) {
		DRM_ERROR("Attempt to allocate from uninitialized memory "
			  "pool (0x%08x).\n", alloc->location);
		return -EINVAL;
	}

	mutex_lock(&info->dev->struct_mutex);
	block = drm_sman_alloc(&info->sman, alloc->location, alloc->size,
			       0, (unsigned long) filp);
	mutex_unlock(&info->dev->struct_mutex);

	if (block == NULL) {
		alloc->size = 0;
		DRM_ERROR("%s memory allocation failed\n", mem_name);
		return -ENOMEM;
	} else {
		alloc->offset = (*block->mm->offset)(block->mm,
						     block->mm_info);
		alloc->hw_addr = alloc->offset;
		alloc->index = block->user_hash.key;

		/* The handle field is narrower than the hash key; warn if
		 * the assignment above lost bits. */
		if (block->user_hash.key != (unsigned long) alloc->index) {
			DRM_ERROR("%s truncated handle %lx for pool %d "
				  "offset %x\n",
				  __func__, block->user_hash.key,
				  alloc->location, alloc->offset);
		}

		/* GART offsets are relative; make hw_addr bus-visible. */
		if (alloc->location == XGI_MEMLOC_NON_LOCAL) {
			alloc->hw_addr += info->pcie.base;
		}

		DRM_DEBUG("%s memory allocation succeeded: 0x%x\n",
			  mem_name, alloc->offset);
	}

	return 0;
}
/* Thin ioctl wrapper around xgi_alloc. */
int xgi_alloc_ioctl(struct drm_device * dev, void * data,
		    struct drm_file * filp)
{
	struct xgi_mem_alloc *const req = (struct xgi_mem_alloc *) data;

	return xgi_alloc(dev->dev_private, req, filp);
}
int xgi_free(struct xgi_info * info, unsigned long index,
struct drm_file * filp)
{
int err;
mutex_lock(&info->dev->struct_mutex);
err = drm_sman_free_key(&info->sman, index);
mutex_unlock(&info->dev->struct_mutex);
return err;
}
/* Thin ioctl wrapper around xgi_free; data is the allocation handle. */
int xgi_free_ioctl(struct drm_device * dev, void * data,
		   struct drm_file * filp)
{
	const unsigned long handle = *(unsigned long *) data;

	return xgi_free(dev->dev_private, handle, filp);
}
int xgi_fb_heap_init(struct xgi_info * info)
{
int err;
mutex_lock(&info->dev->struct_mutex);
err = drm_sman_set_range(&info->sman, XGI_MEMLOC_LOCAL,
XGI_FB_HEAP_START,
info->fb.size - XGI_FB_HEAP_START);
mutex_unlock(&info->dev->struct_mutex);
info->fb_heap_initialized = (err == 0);
return err;
}

127
linux-core/xgi_fence.c Normal file
View File

@ -0,0 +1,127 @@
/*
* (C) Copyright IBM Corporation 2007
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* on the rights to use, copy, modify, merge, publish, distribute, sub
* license, and/or sell copies of the Software, and to permit persons to whom
* the Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Ian Romanick <idr@us.ibm.com>
*/
#include "xgi_drv.h"
#include "xgi_regs.h"
#include "xgi_misc.h"
#include "xgi_cmdlist.h"
/**
 * Poll the hardware for fence completion and report progress to the DRM
 * fence manager.  Caller must hold the fence manager's lock (see
 * xgi_poke_flush / xgi_fence_handler).
 *
 * Returns the flush types still pending after this poll, or 0 when there
 * is nothing to do (also when info is not yet set up or class != 0).
 */
static uint32_t xgi_do_flush(struct drm_device * dev, uint32_t class)
{
	struct xgi_info * info = dev->dev_private;
	struct drm_fence_class_manager * fc = &dev->fm.class[class];
	uint32_t pending_flush_types = 0;
	uint32_t signaled_flush_types = 0;

	if ((info == NULL) || (class != 0))
		return 0;

	DRM_SPINLOCK(&info->fence_lock);

	pending_flush_types = fc->pending_flush |
		((fc->pending_exe_flush) ? DRM_FENCE_TYPE_EXE : 0);

	if (pending_flush_types) {
		if (pending_flush_types & DRM_FENCE_TYPE_EXE) {
			/* Register 0x2820 reports the sequence number of
			 * the most recently retired batch. */
			const u32 begin_id = DRM_READ32(info->mmio_map,
							0x2820)
				& BEGIN_BEGIN_IDENTIFICATION_MASK;

			if (begin_id != info->complete_sequence) {
				info->complete_sequence = begin_id;
				signaled_flush_types |= DRM_FENCE_TYPE_EXE;
			}
		}

		if (signaled_flush_types) {
			drm_fence_handler(dev, 0, info->complete_sequence,
					  signaled_flush_types);
		}
	}

	DRM_SPINUNLOCK(&info->fence_lock);

	/* Re-read the pending state: drm_fence_handler may have retired
	 * some of it. */
	return fc->pending_flush |
		((fc->pending_exe_flush) ? DRM_FENCE_TYPE_EXE : 0);
}
/**
 * Fence-driver emit hook: advance the sequence counter, emit the IRQ
 * command, and report the new sequence number to the caller.
 *
 * Fix: the sequence value is captured while fence_lock is still held.
 * The original read info->next_sequence after DRM_SPINUNLOCK, so a
 * concurrent emitter could bump the counter first and two fences would
 * report the same (later) sequence number.
 */
int xgi_fence_emit_sequence(struct drm_device * dev, uint32_t class,
			    uint32_t flags, uint32_t * sequence,
			    uint32_t * native_type)
{
	struct xgi_info * info = dev->dev_private;
	uint32_t seq;

	if ((info == NULL) || (class != 0))
		return -EINVAL;

	DRM_SPINLOCK(&info->fence_lock);
	info->next_sequence++;
	/* Sequence numbers wrap within the hardware ID field; 0 is
	 * reserved, so restart at 1. */
	if (info->next_sequence > BEGIN_BEGIN_IDENTIFICATION_MASK) {
		info->next_sequence = 1;
	}
	seq = (uint32_t) info->next_sequence;
	DRM_SPINUNLOCK(&info->fence_lock);

	xgi_emit_irq(info);

	*sequence = seq;
	*native_type = DRM_FENCE_TYPE_EXE;

	return 0;
}
/* Fence-driver poke_flush hook: poll completion state with the fence
 * manager lock held and interrupts disabled. */
void xgi_poke_flush(struct drm_device * dev, uint32_t class)
{
	struct drm_fence_manager * fence_mgr = &dev->fm;
	unsigned long irq_flags;

	write_lock_irqsave(&fence_mgr->lock, irq_flags);
	xgi_do_flush(dev, class);
	write_unlock_irqrestore(&fence_mgr->lock, irq_flags);
}
/* Called from the ISR (interrupts already off) to retire completed
 * fences for class 0. */
void xgi_fence_handler(struct drm_device * dev)
{
	struct drm_fence_manager * fence_mgr = &dev->fm;

	write_lock(&fence_mgr->lock);
	xgi_do_flush(dev, 0);
	write_unlock(&fence_mgr->lock);
}
/* Fence-driver has_irq hook: only class 0 EXE fences are IRQ-driven. */
int xgi_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t flags)
{
	if (class != 0)
		return 0;

	return (flags == DRM_FENCE_TYPE_EXE) ? 1 : 0;
}

140
linux-core/xgi_ioc32.c Normal file
View File

@ -0,0 +1,140 @@
/*
* (C) Copyright IBM Corporation 2007
* Copyright (C) Paul Mackerras 2005.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* on the rights to use, copy, modify, merge, publish, distribute, sub
* license, and/or sell copies of the Software, and to permit persons to whom
* the Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Ian Romanick <idr@us.ibm.com>
*/
#include <linux/compat.h>
#include "drmP.h"
#include "drm.h"
#include "xgi_drm.h"
/* This is copied from drm_ioc32.c.
 */
/* 32-bit layout of struct drm_map: pointers and longs shrink to u32. */
struct drm_map32 {
	u32 offset;		/**< Requested physical address (0 for SAREA)*/
	u32 size;		/**< Requested physical size (bytes) */
	enum drm_map_type type;	/**< Type of memory to map */
	enum drm_map_flags flags;	/**< Flags */
	u32 handle;		/**< User-space: "Handle" to pass to mmap() */
	int mtrr;		/**< MTRR slot used */
};

/* 32-bit layout of struct xgi_bootstrap. */
struct drm32_xgi_bootstrap {
	struct drm_map32 gart;
};
extern int xgi_bootstrap(struct drm_device *, void *, struct drm_file *);

/**
 * 32-bit compat shim for the BOOTSTRAP ioctl: widen the 32-bit argument
 * into a native struct xgi_bootstrap in compat user space, call the real
 * ioctl, then narrow the results back.
 */
static int compat_xgi_bootstrap(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	struct drm32_xgi_bootstrap __user *const argp = (void __user *)arg;
	struct drm32_xgi_bootstrap bs32;
	struct xgi_bootstrap __user *bs;
	int err;
	void *handle;

	if (copy_from_user(&bs32, argp, sizeof(bs32))) {
		return -EFAULT;
	}

	/* Build the 64-bit argument on the compat user stack. */
	bs = compat_alloc_user_space(sizeof(*bs));
	if (!access_ok(VERIFY_WRITE, bs, sizeof(*bs))) {
		return -EFAULT;
	}

	if (__put_user(bs32.gart.offset, &bs->gart.offset)
	    || __put_user(bs32.gart.size, &bs->gart.size)
	    || __put_user(bs32.gart.type, &bs->gart.type)
	    || __put_user(bs32.gart.flags, &bs->gart.flags)) {
		return -EFAULT;
	}

	err = drm_ioctl(filp->f_dentry->d_inode, filp, XGI_IOCTL_BOOTSTRAP,
			(unsigned long)bs);
	if (err) {
		return err;
	}

	/* Copy the outputs back, narrowing the map handle to 32 bits. */
	if (__get_user(bs32.gart.offset, &bs->gart.offset)
	    || __get_user(bs32.gart.mtrr, &bs->gart.mtrr)
	    || __get_user(handle, &bs->gart.handle)) {
		return -EFAULT;
	}

	bs32.gart.handle = (unsigned long)handle;
	if (bs32.gart.handle != (unsigned long)handle && printk_ratelimit()) {
		printk(KERN_ERR "%s truncated handle %p for type %d "
		       "offset %x\n",
		       __func__, handle, bs32.gart.type, bs32.gart.offset);
	}

	if (copy_to_user(argp, &bs32, sizeof(bs32))) {
		return -EFAULT;
	}

	return 0;
}
/* Per-ioctl compat handlers, indexed by device ioctl number.  Entries not
 * listed fall through to the generic drm_ioctl path. */
drm_ioctl_compat_t *xgi_compat_ioctls[] = {
	[DRM_XGI_BOOTSTRAP] = compat_xgi_bootstrap,
};
/**
 * Called whenever a 32-bit process running under a 64-bit kernel
 * performs an ioctl on /dev/dri/card<n>.
 *
 * \param filp file pointer.
 * \param cmd command.
 * \param arg user argument.
 * \return zero on success or negative number on failure.
 */
long xgi_compat_ioctl(struct file *filp, unsigned int cmd,
		      unsigned long arg)
{
	const unsigned int nr = DRM_IOCTL_NR(cmd);
	drm_ioctl_compat_t *fn = NULL;
	int ret;

	/* Core DRM ioctls have their own compat layer. */
	if (nr < DRM_COMMAND_BASE)
		return drm_compat_ioctl(filp, cmd, arg);

	if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(xgi_compat_ioctls))
		fn = xgi_compat_ioctls[nr - DRM_COMMAND_BASE];

	/* drm_ioctl was written for the BKL-era ioctl path. */
	lock_kernel();
	ret = (fn != NULL)
		? (*fn)(filp, cmd, arg)
		: drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
	unlock_kernel();

	return ret;
}

477
linux-core/xgi_misc.c Normal file
View File

@ -0,0 +1,477 @@
/****************************************************************************
* Copyright (C) 2003-2006 by XGI Technology, Taiwan.
*
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation on the rights to use, copy, modify, merge,
* publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
***************************************************************************/
#include "xgi_drv.h"
#include "xgi_regs.h"
#include <linux/delay.h>
/*
* irq functions
*/
#define STALL_INTERRUPT_RESET_THRESHOLD 0xffff
static unsigned int s_invalid_begin = 0;
/**
 * Sanity-check the graphics engine's internal signals through the
 * diagnostic register pair 0x235c (selector) / 0x2360 (readback).
 * Returns FALSE if any channel check fails (engine state is bad),
 * TRUE if everything matches or the engine isn't in the checked state.
 */
static bool xgi_validate_signal(struct drm_map * map)
{
	if (DRM_READ32(map, 0x2800) & 0x001c0000) {
		u16 check;

		/* Check Read back status */
		DRM_WRITE8(map, 0x235c, 0x80);
		check = DRM_READ16(map, 0x2360);

		if ((check & 0x3f) != ((check & 0x3f00) >> 8)) {
			return FALSE;
		}

		/* Check RO channel */
		DRM_WRITE8(map, 0x235c, 0x83);
		check = DRM_READ16(map, 0x2360);
		if ((check & 0x0f) != ((check & 0xf0) >> 4)) {
			return FALSE;
		}

		/* Check RW channel */
		DRM_WRITE8(map, 0x235c, 0x88);
		check = DRM_READ16(map, 0x2360);
		if ((check & 0x0f) != ((check & 0xf0) >> 4)) {
			return FALSE;
		}

		/* Check RO channel outstanding */
		DRM_WRITE8(map, 0x235c, 0x8f);
		check = DRM_READ16(map, 0x2360);
		if (0 != (check & 0x3ff)) {
			return FALSE;
		}

		/* Check RW channel outstanding */
		DRM_WRITE8(map, 0x235c, 0x90);
		check = DRM_READ16(map, 0x2360);
		if (0 != (check & 0x3ff)) {
			return FALSE;
		}

		/* No pending PCIE request. GE stall. */
	}

	return TRUE;
}
/**
 * Attempt to reset a hung graphics engine.  First tries the soft reset at
 * 0xb057; if the engine-busy bits in 0x2800 never clear, falls back to a
 * hard reset through legacy VGA register 3x5.36, temporarily closing
 * dynamic gating (3CF.2A bit 0) around it.
 */
static void xgi_ge_hang_reset(struct drm_map * map)
{
	int time_out = 0xffff;

	/* Soft reset request. */
	DRM_WRITE8(map, 0xb057, 8);
	while (0 != (DRM_READ32(map, 0x2800) & 0xf0000000)) {
		while (0 != ((--time_out) & 0xfff))
			/* empty */ ;

		if (0 == time_out) {
			u8 old_3ce;
			u8 old_3cf;
			u8 old_index;
			u8 old_36;

			DRM_INFO("Can not reset back 0x%x!\n",
				 DRM_READ32(map, 0x2800));

			DRM_WRITE8(map, 0xb057, 0);

			/* Have to use 3x5.36 to reset. */
			/* Save and close dynamic gating */

			old_3ce = DRM_READ8(map, 0x3ce);
			DRM_WRITE8(map, 0x3ce, 0x2a);
			old_3cf = DRM_READ8(map, 0x3cf);

			DRM_WRITE8(map, 0x3cf, old_3cf & 0xfe);

			/* Reset GE */
			old_index = DRM_READ8(map, 0x3d4);
			DRM_WRITE8(map, 0x3d4, 0x36);
			old_36 = DRM_READ8(map, 0x3d5);
			DRM_WRITE8(map, 0x3d5, old_36 | 0x10);

			while (0 != ((--time_out) & 0xfff))
				/* empty */ ;

			DRM_WRITE8(map, 0x3d5, old_36);
			DRM_WRITE8(map, 0x3d4, old_index);

			/* Restore dynamic gating */
			DRM_WRITE8(map, 0x3cf, old_3cf);
			DRM_WRITE8(map, 0x3ce, old_3ce);
			break;
		}
	}

	/* Release the soft-reset request. */
	DRM_WRITE8(map, 0xb057, 0);
}
/**
 * Service graphics-engine interrupts (status register 0x2810).  Handles
 * GE stall (bit 12) and invalid-begin (bit 0) interrupts, acknowledging
 * each by writing bit 26 back.  Returns TRUE if the interrupt was ours.
 *
 * NOTE: is_support_auto_reset is hard-wired FALSE, so the hang-detection/
 * auto-reset path below is currently dead code kept for future use.
 */
bool xgi_ge_irq_handler(struct xgi_info * info)
{
	const u32 int_status = DRM_READ32(info->mmio_map, 0x2810);
	bool is_support_auto_reset = FALSE;

	/* Check GE on/off */
	if (0 == (0xffffc0f0 & int_status)) {
		if (0 != (0x1000 & int_status)) {
			/* We got GE stall interrupt.
			 */
			DRM_WRITE32(info->mmio_map, 0x2810,
				    int_status | 0x04000000);

			if (is_support_auto_reset) {
				static cycles_t last_tick;
				static unsigned continue_int_count = 0;

				/* OE II is busy. */

				if (!xgi_validate_signal(info->mmio_map)) {
					/* Nothing but skip. */
				} else if (0 == continue_int_count++) {
					last_tick = get_cycles();
				} else {
					const cycles_t new_tick = get_cycles();

					/* Only treat closely-spaced stall
					 * interrupts as evidence of a hang. */
					if ((new_tick - last_tick) >
					    STALL_INTERRUPT_RESET_THRESHOLD) {
						continue_int_count = 0;
					} else if (continue_int_count >= 3) {
						continue_int_count = 0;

						/* GE Hung up, need reset. */
						DRM_INFO("Reset GE!\n");

						xgi_ge_hang_reset(info->mmio_map);
					}
				}
			}
		} else if (0 != (0x1 & int_status)) {
			s_invalid_begin++;
			DRM_WRITE32(info->mmio_map, 0x2810,
				    (int_status & ~0x01) | 0x04000000);
		}

		return TRUE;
	}

	return FALSE;
}
/**
 * Service a CRT interrupt: acknowledge it by pulsing bit 2 of 3CF.3D.
 * The 3CE index register is saved/restored around the indexed accesses.
 * Returns TRUE if a CRT interrupt was pending.
 */
bool xgi_crt_irq_handler(struct xgi_info * info)
{
	bool ret = FALSE;
	u8 save_3ce = DRM_READ8(info->mmio_map, 0x3ce);

	/* CRT1 interrupt just happened
	 */
	if (IN3CFB(info->mmio_map, 0x37) & 0x01) {
		u8 op3cf_3d;
		u8 op3cf_37;

		/* What happened?
		 */
		op3cf_37 = IN3CFB(info->mmio_map, 0x37);

		/* Clear CRT interrupt
		 */
		op3cf_3d = IN3CFB(info->mmio_map, 0x3d);
		OUT3CFB(info->mmio_map, 0x3d, (op3cf_3d | 0x04));
		OUT3CFB(info->mmio_map, 0x3d, (op3cf_3d & ~0x04));
		ret = TRUE;
	}
	DRM_WRITE8(info->mmio_map, 0x3ce, save_3ce);

	return (ret);
}
/**
 * Service a DVI plug/unplug interrupt: notify the BIOS via 3X5.5A and
 * acknowledge the interrupt by pulsing bit 0 of register 0x39.
 * Returns TRUE if a DVI interrupt was pending.
 *
 * NOTE(review): the ack reads 0x39 via IN3CFB (graphics controller) but
 * writes it back via OUT3C5B (sequencer) — possibly a 3C5/3CF mix-up
 * inherited from the vendor driver; confirm against XGI register docs
 * before changing.
 */
bool xgi_dvi_irq_handler(struct xgi_info * info)
{
	bool ret = FALSE;
	const u8 save_3ce = DRM_READ8(info->mmio_map, 0x3ce);

	/* DVI interrupt just happened
	 */
	if (IN3CFB(info->mmio_map, 0x38) & 0x20) {
		const u8 save_3x4 = DRM_READ8(info->mmio_map, 0x3d4);
		u8 op3cf_39;
		u8 op3cf_37;
		u8 op3x5_5a;

		/* What happened?
		 */
		op3cf_37 = IN3CFB(info->mmio_map, 0x37);

		/* Notify BIOS that DVI plug/unplug happened
		 */
		op3x5_5a = IN3X5B(info->mmio_map, 0x5a);
		OUT3X5B(info->mmio_map, 0x5a, op3x5_5a & 0xf7);

		DRM_WRITE8(info->mmio_map, 0x3d4, save_3x4);

		/* Clear DVI interrupt
		 */
		op3cf_39 = IN3CFB(info->mmio_map, 0x39);
		OUT3C5B(info->mmio_map, 0x39, (op3cf_39 & ~0x01));
		OUT3C5B(info->mmio_map, 0x39, (op3cf_39 | 0x01));

		ret = TRUE;
	}
	DRM_WRITE8(info->mmio_map, 0x3ce, save_3ce);

	return (ret);
}
/* Print the banner and column ruler for a register dump block. */
static void dump_reg_header(unsigned regbase)
{
	printk("\n=====xgi_dump_register========0x%x===============\n",
	       regbase);
	printk(" 0 1 2 3 4 5 6 7 8 9 a b c d e f\n");
}
/* Dump all 256 values of an indexed VGA register pair: the index is
 * written at regbase-1 and the data read back at regbase. */
static void dump_indexed_reg(struct xgi_info * info, unsigned regbase)
{
	unsigned i, j;
	u8 temp;

	dump_reg_header(regbase);
	for (i = 0; i < 0x10; i++) {
		printk("%1x ", i);

		for (j = 0; j < 0x10; j++) {
			DRM_WRITE8(info->mmio_map, regbase - 1,
				   (i * 0x10) + j);
			temp = DRM_READ8(info->mmio_map, regbase);
			printk("%3x", temp);
		}
		printk("\n");
	}
}
/* Dump `range` rows of 16 directly-addressed bytes starting at regbase. */
static void dump_reg(struct xgi_info * info, unsigned regbase, unsigned range)
{
	unsigned i, j;

	dump_reg_header(regbase);
	for (i = 0; i < range; i++) {
		printk("%1x ", i);

		for (j = 0; j < 0x10; j++) {
			u8 temp = DRM_READ8(info->mmio_map,
					    regbase + (i * 0x10) + j);
			printk("%3x", temp);
		}
		printk("\n");
	}
}
/* Debug helper: dump the interesting indexed (SR/CR/GR) and engine
 * register banks to the kernel log. */
void xgi_dump_register(struct xgi_info * info)
{
	dump_indexed_reg(info, 0x3c5);
	dump_indexed_reg(info, 0x3d5);
	dump_indexed_reg(info, 0x3cf);

	dump_reg(info, 0xB000, 0x05);
	dump_reg(info, 0x2200, 0x0B);
	dump_reg(info, 0x2300, 0x07);
	dump_reg(info, 0x2400, 0x10);
	dump_reg(info, 0x2800, 0x10);
}
#define WHOLD_GE_STATUS             0x2800

/* Test everything except the "whole GE busy" bit, the "master engine busy"
 * bit, and the reserved bits [26:21].
 */
#define IDLE_MASK                   ~((1U<<31) | (1U<<28) | (0x3f<<21))

/**
 * Busy-wait until the graphics engine reports idle for five consecutive
 * polls.  Logs an error every 100 polls that the (masked) status register
 * holds the same non-idle value, as a stuck-engine diagnostic.
 */
void xgi_waitfor_pci_idle(struct xgi_info * info)
{
	unsigned int idleCount = 0;
	u32 old_status = 0;
	unsigned int same_count = 0;

	while (idleCount < 5) {
		const u32 status = DRM_READ32(info->mmio_map, WHOLD_GE_STATUS)
			& IDLE_MASK;

		if (status == old_status) {
			same_count++;

			if ((same_count % 100) == 0) {
				DRM_ERROR("GE status stuck at 0x%08x for %u iterations!\n",
					  old_status, same_count);
			}
		} else {
			old_status = status;
			same_count = 0;
		}

		if (status != 0) {
			msleep(1);
			idleCount = 0;
		} else {
			idleCount++;
		}
	}
}
/**
 * Enable MMIO access to the card's registers.  Must use direct port-style
 * writes here (not the OUT3*B helpers) because MMIO is not on yet; the
 * previous 3C5.11 protection value is saved and restored afterwards.
 */
void xgi_enable_mmio(struct xgi_info * info)
{
	u8 protect = 0;
	u8 temp;

	/* Unprotect registers */
	DRM_WRITE8(info->mmio_map, 0x3C4, 0x11);
	protect = DRM_READ8(info->mmio_map, 0x3C5);
	DRM_WRITE8(info->mmio_map, 0x3C5, 0x92);

	/* Enable link-list addressing via 3D4.3A bit 5. */
	DRM_WRITE8(info->mmio_map, 0x3D4, 0x3A);
	temp = DRM_READ8(info->mmio_map, 0x3D5);
	DRM_WRITE8(info->mmio_map, 0x3D5, temp | 0x20);

	/* Enable MMIO */
	DRM_WRITE8(info->mmio_map, 0x3D4, 0x39);
	temp = DRM_READ8(info->mmio_map, 0x3D5);
	DRM_WRITE8(info->mmio_map, 0x3D5, temp | 0x01);

	/* Protect registers */
	OUT3C5B(info->mmio_map, 0x11, protect);
}
/**
 * Disable MMIO access (3D4.39 bit 0), restoring the register protection
 * state saved from 3C5.11 on the way out.
 */
void xgi_disable_mmio(struct xgi_info * info)
{
	u8 protect = 0;
	u8 temp;

	/* Unprotect registers */
	DRM_WRITE8(info->mmio_map, 0x3C4, 0x11);
	protect = DRM_READ8(info->mmio_map, 0x3C5);
	DRM_WRITE8(info->mmio_map, 0x3C5, 0x92);

	/* Disable MMIO access */
	DRM_WRITE8(info->mmio_map, 0x3D4, 0x39);
	temp = DRM_READ8(info->mmio_map, 0x3D5);
	DRM_WRITE8(info->mmio_map, 0x3D5, temp & 0xFE);

	/* Protect registers */
	OUT3C5B(info->mmio_map, 0x11, protect);
}
/**
 * Bring the 2D/3D graphics engine up: close dynamic gating, pulse the
 * reset bit in XGI_GE_CNTL with settle delays (dummy reads of 0x36)
 * between steps, then re-open gating.  The enable/reset/enable ordering
 * follows the vendor initialization sequence.
 */
void xgi_enable_ge(struct xgi_info * info)
{
	u8 bOld3cf2a;
	int wait = 0;

	OUT3C5B(info->mmio_map, 0x11, 0x92);

	/* Save and close dynamic gating
	 */
	bOld3cf2a = IN3CFB(info->mmio_map, XGI_MISC_CTRL);
	OUT3CFB(info->mmio_map, XGI_MISC_CTRL, bOld3cf2a & ~EN_GEPWM);

	/* Enable 2D and 3D GE
	 */
	OUT3X5B(info->mmio_map, XGI_GE_CNTL, (GE_ENABLE | GE_ENABLE_3D));
	wait = 10;
	while (wait--) {
		DRM_READ8(info->mmio_map, 0x36);
	}

	/* Reset both 3D and 2D engine
	 */
	OUT3X5B(info->mmio_map, XGI_GE_CNTL,
		(GE_ENABLE | GE_RESET | GE_ENABLE_3D));
	wait = 10;
	while (wait--) {
		DRM_READ8(info->mmio_map, 0x36);
	}

	OUT3X5B(info->mmio_map, XGI_GE_CNTL, (GE_ENABLE | GE_ENABLE_3D));
	wait = 10;
	while (wait--) {
		DRM_READ8(info->mmio_map, 0x36);
	}

	/* Enable 2D engine only
	 */
	OUT3X5B(info->mmio_map, XGI_GE_CNTL, GE_ENABLE);

	/* Enable 2D+3D engine
	 */
	OUT3X5B(info->mmio_map, XGI_GE_CNTL, (GE_ENABLE | GE_ENABLE_3D));

	/* Restore dynamic gating
	 */
	OUT3CFB(info->mmio_map, XGI_MISC_CTRL, bOld3cf2a);
}
/* Shut the 2D/3D graphics engine down.
 *
 * Pulse-resets both engines (with dummy-read settle delays between steps)
 * and then clears the GE control register entirely.
 */
void xgi_disable_ge(struct xgi_info * info)
{
	int i;

	/* Make sure both engines are enabled before resetting them. */
	OUT3X5B(info->mmio_map, XGI_GE_CNTL, (GE_ENABLE | GE_ENABLE_3D));
	for (i = 0; i < 10; i++)
		DRM_READ8(info->mmio_map, 0x36);

	/* Reset both 3D and 2D engine. */
	OUT3X5B(info->mmio_map, XGI_GE_CNTL,
		(GE_ENABLE | GE_RESET | GE_ENABLE_3D));
	for (i = 0; i < 10; i++)
		DRM_READ8(info->mmio_map, 0x36);

	/* De-assert reset. */
	OUT3X5B(info->mmio_map, XGI_GE_CNTL, (GE_ENABLE | GE_ENABLE_3D));
	for (i = 0; i < 10; i++)
		DRM_READ8(info->mmio_map, 0x36);

	/* Disable 2D engine and 3D engine. */
	OUT3X5B(info->mmio_map, XGI_GE_CNTL, 0);
}

37
linux-core/xgi_misc.h Normal file
View File

@ -0,0 +1,37 @@
/****************************************************************************
* Copyright (C) 2003-2006 by XGI Technology, Taiwan.
*
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation on the rights to use, copy, modify, merge,
* publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
***************************************************************************/
#ifndef _XGI_MISC_H_
#define _XGI_MISC_H_

/* Forward declaration so the prototypes below do not implicitly declare
 * `struct xgi_info` inside each parameter list (which would scope the tag
 * to that prototype and trigger compiler warnings when this header is
 * included before the struct definition). */
struct xgi_info;

/* Dump a selection of hardware registers for debugging. */
extern void xgi_dump_register(struct xgi_info * info);

/* Interrupt handlers; each returns true if the interrupt was handled. */
extern bool xgi_ge_irq_handler(struct xgi_info * info);
extern bool xgi_crt_irq_handler(struct xgi_info * info);
extern bool xgi_dvi_irq_handler(struct xgi_info * info);

/* Poll until the graphics engine reports idle. */
extern void xgi_waitfor_pci_idle(struct xgi_info * info);

#endif

126
linux-core/xgi_pcie.c Normal file
View File

@ -0,0 +1,126 @@
/****************************************************************************
* Copyright (C) 2003-2006 by XGI Technology, Taiwan.
*
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation on the rights to use, copy, modify, merge,
* publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
***************************************************************************/
#include "xgi_drv.h"
#include "xgi_regs.h"
#include "xgi_misc.h"
/* Flush the card's view of the PCIe GART lookup table.
 *
 * Issues a memory barrier so CPU writes to the table are globally visible,
 * re-points the hardware at the table's bus address, then pulses the flush
 * bit so stale translations are dropped.
 */
void xgi_gart_flush(struct drm_device *dev)
{
	struct xgi_info *const info = dev->dev_private;
	u8 temp;

	/* Order CPU table writes before the card re-reads the table. */
	DRM_MEMORYBARRIER();

	/* Set GART in SFB: clear bit 1 of register 0xB00C.  NOTE(review):
	 * exact bit semantics are not documented in this file. */
	temp = DRM_READ8(info->mmio_map, 0xB00C);
	DRM_WRITE8(info->mmio_map, 0xB00C, temp & ~0x02);

	/* Set GART base address to HW. */
	DRM_WRITE32(info->mmio_map, 0xB034, info->gart_info.bus_addr);

	/* Flush GART table: pulse bit 6 of 0xB03F high, then low. */
	DRM_WRITE8(info->mmio_map, 0xB03F, 0x40);
	DRM_WRITE8(info->mmio_map, 0xB03F, 0x00);
}
/* Set up the PCIe (non-local) memory heap.
 *
 * Reads the framebuffer aperture size and lookup-table page size from
 * hardware, allocates a scatter/gather backing store, initializes the
 * ATI-style PCI GART over it, and registers the range with the SMAN
 * allocator.
 *
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): on the drm_ati_pcigart_init() and drm_sman_set_range()
 * failure paths the scatter/gather allocation from drm_sg_alloc() is not
 * released — possible leak; confirm against the drm_sg_* cleanup API.
 */
int xgi_pcie_heap_init(struct xgi_info * info)
{
	u8 temp = 0;
	int err;
	struct drm_scatter_gather request;

	/* Get current FB aperture size; CR 0x27 bit 0 selects 256MB vs 128MB. */
	temp = IN3X5B(info->mmio_map, 0x27);
	DRM_INFO("In3x5(0x27): 0x%x \n", temp);

	if (temp & 0x01) {	/* 256MB; Jong 06/05/2006; 0x10000000 */
		info->pcie.base = 256 * 1024 * 1024;
	} else {	/* 128MB; Jong 06/05/2006; 0x08000000 */
		info->pcie.base = 128 * 1024 * 1024;
	}

	DRM_INFO("info->pcie.base: 0x%lx\n", (unsigned long) info->pcie.base);

	/* Get current lookup table page size; 0xB00C bit 2 selects 8KB vs 4KB. */
	temp = DRM_READ8(info->mmio_map, 0xB00C);
	if (temp & 0x04) {	/* 8KB */
		info->lutPageSize = 8 * 1024;
	} else {	/* 4KB */
		info->lutPageSize = 4 * 1024;
	}

	DRM_INFO("info->lutPageSize: 0x%x \n", info->lutPageSize);

	/* Allocate the system-memory backing store for the aperture. */
	request.size = info->pcie.size;
	err = drm_sg_alloc(info->dev, & request);
	if (err) {
		DRM_ERROR("cannot allocate PCIE GART backing store! "
			  "size = %d\n", info->pcie.size);
		return err;
	}

	/* One 32-bit LUT entry per SG page. */
	info->gart_info.gart_table_location = DRM_ATI_GART_MAIN;
	info->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
	info->gart_info.table_size = info->dev->sg->pages * sizeof(u32);

	/* drm_ati_pcigart_init() returns nonzero on success — TODO confirm. */
	if (!drm_ati_pcigart_init(info->dev, &info->gart_info)) {
		DRM_ERROR("failed to init PCI GART!\n");
		return -ENOMEM;
	}

	/* Make the freshly built table visible to the card. */
	xgi_gart_flush(info->dev);

	/* Hand the whole aperture to the SMAN allocator. */
	mutex_lock(&info->dev->struct_mutex);
	err = drm_sman_set_range(&info->sman, XGI_MEMLOC_NON_LOCAL,
				 0, info->pcie.size);
	mutex_unlock(&info->dev->struct_mutex);
	if (err) {
		drm_ati_pcigart_cleanup(info->dev, &info->gart_info);
	}

	info->pcie_heap_initialized = (err == 0);
	return err;
}
/**
 * xgi_find_pcie_virt
 * @address: GE hardware address within the PCIe aperture
 *
 * Translate a GE hardware address into the corresponding CPU virtual
 * address of the scatter/gather backing store.  Assumes the backing store
 * is contiguous in the CPU's virtual address space.
 */
void *xgi_find_pcie_virt(struct xgi_info * info, u32 address)
{
	u8 *const base = (u8 *) info->dev->sg->virtual;

	return base + (address - info->pcie.base);
}

169
linux-core/xgi_regs.h Normal file
View File

@ -0,0 +1,169 @@
/****************************************************************************
* Copyright (C) 2003-2006 by XGI Technology, Taiwan.
*
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation on the rights to use, copy, modify, merge,
* publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
***************************************************************************/
#ifndef _XGI_REGS_H_
#define _XGI_REGS_H_
#include "drmP.h"
#include "drm.h"
#define MAKE_MASK(bits) ((1U << (bits)) - 1)
#define ONE_BIT_MASK MAKE_MASK(1)
#define TWENTY_BIT_MASK MAKE_MASK(20)
#define TWENTYONE_BIT_MASK MAKE_MASK(21)
#define TWENTYTWO_BIT_MASK MAKE_MASK(22)
/* Port 0x3d4/0x3d5, index 0x2a */
#define XGI_INTERFACE_SEL 0x2a
#define DUAL_64BIT (1U<<7)
#define INTERNAL_32BIT (1U<<6)
#define EN_SEP_WR (1U<<5)
#define POWER_DOWN_SEL (1U<<4)
/*#define RESERVED_3 (1U<<3) */
#define SUBS_MCLK_PCICLK (1U<<2)
#define MEM_SIZE_MASK (3<<0)
#define MEM_SIZE_32MB (0<<0)
#define MEM_SIZE_64MB (1<<0)
#define MEM_SIZE_128MB (2<<0)
#define MEM_SIZE_256MB (3<<0)
/* Port 0x3d4/0x3d5, index 0x36 */
#define XGI_GE_CNTL 0x36
#define GE_ENABLE (1U<<7)
/*#define RESERVED_6 (1U<<6) */
/*#define RESERVED_5 (1U<<5) */
#define GE_RESET (1U<<4)
/*#define RESERVED_3 (1U<<3) */
#define GE_ENABLE_3D (1U<<2)
/*#define RESERVED_1 (1U<<1) */
/*#define RESERVED_0 (1U<<0) */
/* Port 0x3ce/0x3cf, index 0x2a */
#define XGI_MISC_CTRL 0x2a
#define MOTION_VID_SUSPEND (1U<<7)
#define DVI_CRTC_TIMING_SEL (1U<<6)
#define LCD_SEL_CTL_NEW (1U<<5)
#define LCD_SEL_EXT_DELYCTRL (1U<<4)
#define REG_LCDDPARST (1U<<3)
#define LCD2DPAOFF (1U<<2)
/*#define RESERVED_1 (1U<<1) */
#define EN_GEPWM (1U<<0) /* Enable GE power management */
#define BASE_3D_ENG 0x2800
#define M2REG_FLUSH_ENGINE_ADDRESS 0x000
#define M2REG_FLUSH_ENGINE_COMMAND 0x00
#define M2REG_FLUSH_FLIP_ENGINE_MASK (ONE_BIT_MASK<<21)
#define M2REG_FLUSH_2D_ENGINE_MASK (ONE_BIT_MASK<<20)
#define M2REG_FLUSH_3D_ENGINE_MASK TWENTY_BIT_MASK
#define M2REG_RESET_ADDRESS 0x004
#define M2REG_RESET_COMMAND 0x01
#define M2REG_RESET_STATUS2_MASK (ONE_BIT_MASK<<10)
#define M2REG_RESET_STATUS1_MASK (ONE_BIT_MASK<<9)
#define M2REG_RESET_STATUS0_MASK (ONE_BIT_MASK<<8)
#define M2REG_RESET_3DENG_MASK (ONE_BIT_MASK<<4)
#define M2REG_RESET_2DENG_MASK (ONE_BIT_MASK<<2)
/* Write register */
#define M2REG_AUTO_LINK_SETTING_ADDRESS 0x010
#define M2REG_AUTO_LINK_SETTING_COMMAND 0x04
#define M2REG_CLEAR_TIMER_INTERRUPT_MASK (ONE_BIT_MASK<<11)
#define M2REG_CLEAR_INTERRUPT_3_MASK (ONE_BIT_MASK<<10)
#define M2REG_CLEAR_INTERRUPT_2_MASK (ONE_BIT_MASK<<9)
#define M2REG_CLEAR_INTERRUPT_0_MASK (ONE_BIT_MASK<<8)
#define M2REG_CLEAR_COUNTERS_MASK (ONE_BIT_MASK<<4)
#define M2REG_PCI_TRIGGER_MODE_MASK (ONE_BIT_MASK<<1)
#define M2REG_INVALID_LIST_AUTO_INTERRUPT_MASK (ONE_BIT_MASK<<0)
/* Read register */
#define M2REG_AUTO_LINK_STATUS_ADDRESS 0x010
#define M2REG_AUTO_LINK_STATUS_COMMAND 0x04
#define M2REG_ACTIVE_TIMER_INTERRUPT_MASK (ONE_BIT_MASK<<11)
#define M2REG_ACTIVE_INTERRUPT_3_MASK (ONE_BIT_MASK<<10)
#define M2REG_ACTIVE_INTERRUPT_2_MASK (ONE_BIT_MASK<<9)
#define M2REG_ACTIVE_INTERRUPT_0_MASK (ONE_BIT_MASK<<8)
#define M2REG_INVALID_LIST_AUTO_INTERRUPTED_MODE_MASK (ONE_BIT_MASK<<0)
#define M2REG_PCI_TRIGGER_REGISTER_ADDRESS 0x014
#define M2REG_PCI_TRIGGER_REGISTER_COMMAND 0x05
/**
* Begin instruction, double-word 0
*/
#define BEGIN_STOP_STORE_CURRENT_POINTER_MASK (ONE_BIT_MASK<<22)
#define BEGIN_VALID_MASK (ONE_BIT_MASK<<20)
#define BEGIN_BEGIN_IDENTIFICATION_MASK TWENTY_BIT_MASK
/**
* Begin instruction, double-word 1
*/
#define BEGIN_LINK_ENABLE_MASK (ONE_BIT_MASK<<31)
#define BEGIN_COMMAND_LIST_LENGTH_MASK TWENTYTWO_BIT_MASK
/* Hardware access functions */
/* Write @data to sequencer (SR) register @index via ports 0x3C4/0x3C5. */
static inline void OUT3C5B(struct drm_map * map, u8 index, u8 data)
{
	DRM_WRITE8(map, 0x3C4, index);
	DRM_WRITE8(map, 0x3C5, data);
}
/* Write @data to CRTC (CR) register @index via ports 0x3D4/0x3D5. */
static inline void OUT3X5B(struct drm_map * map, u8 index, u8 data)
{
	DRM_WRITE8(map, 0x3D4, index);
	DRM_WRITE8(map, 0x3D5, data);
}
/* Write @data to graphics-controller (GR) register @index via 0x3CE/0x3CF. */
static inline void OUT3CFB(struct drm_map * map, u8 index, u8 data)
{
	DRM_WRITE8(map, 0x3CE, index);
	DRM_WRITE8(map, 0x3CF, data);
}
/* Read sequencer (SR) register @index via ports 0x3C4/0x3C5. */
static inline u8 IN3C5B(struct drm_map * map, u8 index)
{
	DRM_WRITE8(map, 0x3C4, index);
	return DRM_READ8(map, 0x3C5);
}
/* Read CRTC (CR) register @index via ports 0x3D4/0x3D5. */
static inline u8 IN3X5B(struct drm_map * map, u8 index)
{
	DRM_WRITE8(map, 0x3D4, index);
	return DRM_READ8(map, 0x3D5);
}
/* Read graphics-controller (GR) register @index via ports 0x3CE/0x3CF. */
static inline u8 IN3CFB(struct drm_map * map, u8 index)
{
	DRM_WRITE8(map, 0x3CE, index);
	return DRM_READ8(map, 0x3CF);
}
#endif

View File

@ -36,4 +36,5 @@ klibdrminclude_HEADERS = \
sis_drm.h \
via_drm.h \
r300_reg.h \
via_3d_reg.h
via_3d_reg.h \
xgi_drm.h

View File

@ -89,24 +89,6 @@
#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
#endif
#define XFREE86_VERSION(major,minor,patch,snap) \
((major << 16) | (minor << 8) | patch)
#ifndef CONFIG_XFREE86_VERSION
#define CONFIG_XFREE86_VERSION XFREE86_VERSION(4,1,0,0)
#endif
#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0)
#define DRM_PROC_DEVICES "/proc/devices"
#define DRM_PROC_MISC "/proc/misc"
#define DRM_PROC_DRM "/proc/drm"
#define DRM_DEV_DRM "/dev/drm"
#define DRM_DEV_MODE (S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP)
#define DRM_DEV_UID 0
#define DRM_DEV_GID 0
#endif
#if CONFIG_XFREE86_VERSION >= XFREE86_VERSION(4,1,0,0)
#ifdef __OpenBSD__
#define DRM_MAJOR 81
#endif
@ -114,7 +96,7 @@
#define DRM_MAJOR 226
#endif
#define DRM_MAX_MINOR 15
#endif
#define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */
#define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */
#define DRM_MAX_ORDER 22 /**< Up to 2^22 bytes = 4MB */

View File

@ -482,9 +482,6 @@
0x10DE 0x009E NV40 "NVidia 0x009E"
[nouveau]
0x10de 0x0008 NV_03 "EDGE 3D"
0x10de 0x0009 NV_03 "EDGE 3D"
0x10de 0x0010 NV_03 "Mutara V08"
0x10de 0x0020 NV_04 "RIVA TNT"
0x10de 0x0028 NV_04 "RIVA TNT2/TNT2 Pro"
0x10de 0x0029 NV_04 "RIVA TNT2 Ultra"
@ -732,13 +729,13 @@
0x10de 0x0421 NV_50 "GeForce 8500 GT"
0x10de 0x0422 NV_50 "GeForce 8400 GS"
0x10de 0x0423 NV_50 "GeForce 8300 GS"
0x12d2 0x0008 NV_03 "NV1"
0x12d2 0x0009 NV_03 "DAC64"
0x12d2 0x0018 NV_03 "Riva128"
0x12d2 0x0019 NV_03 "Riva128ZX"
0x10de 0x0429 NV_50 "Quadro NVS 140"
0x12d2 0x0020 NV_04 "TNT"
0x12d2 0x0028 NV_04 "TNT2"
0x12d2 0x0029 NV_04 "UTNT2"
0x12d2 0x002c NV_04 "VTNT2"
0x12d2 0x00a0 NV_04 "ITNT2"
[xgi]
0x18ca 0x2200 0 "XP5"
0x18ca 0x0047 0 "XP10 / XG47"

View File

@ -572,11 +572,11 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
return 0;
}
static void i915_do_dispatch_flip(struct drm_device * dev, int pipe, int sync)
static void i915_do_dispatch_flip(struct drm_device * dev, int plane, int sync)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 num_pages, current_page, next_page, dspbase;
int shift = 2 * pipe, x, y;
int shift = 2 * plane, x, y;
RING_LOCALS;
/* Calculate display base offset */
@ -597,25 +597,25 @@ static void i915_do_dispatch_flip(struct drm_device * dev, int pipe, int sync)
break;
}
if (pipe == 0) {
x = dev_priv->sarea_priv->pipeA_x;
y = dev_priv->sarea_priv->pipeA_y;
if (plane == 0) {
x = dev_priv->sarea_priv->planeA_x;
y = dev_priv->sarea_priv->planeA_y;
} else {
x = dev_priv->sarea_priv->pipeB_x;
y = dev_priv->sarea_priv->pipeB_y;
x = dev_priv->sarea_priv->planeB_x;
y = dev_priv->sarea_priv->planeB_y;
}
dspbase += (y * dev_priv->sarea_priv->pitch + x) * dev_priv->cpp;
DRM_DEBUG("pipe=%d current_page=%d dspbase=0x%x\n", pipe, current_page,
DRM_DEBUG("plane=%d current_page=%d dspbase=0x%x\n", plane, current_page,
dspbase);
BEGIN_LP_RING(4);
OUT_RING(sync ? 0 :
(MI_WAIT_FOR_EVENT | (pipe ? MI_WAIT_FOR_PLANE_B_FLIP :
(MI_WAIT_FOR_EVENT | (plane ? MI_WAIT_FOR_PLANE_B_FLIP :
MI_WAIT_FOR_PLANE_A_FLIP)));
OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | (sync ? 0 : ASYNC_FLIP) |
(pipe ? DISPLAY_PLANE_B : DISPLAY_PLANE_A));
(plane ? DISPLAY_PLANE_B : DISPLAY_PLANE_A));
OUT_RING(dev_priv->sarea_priv->pitch * dev_priv->cpp);
OUT_RING(dspbase);
ADVANCE_LP_RING();
@ -624,19 +624,19 @@ static void i915_do_dispatch_flip(struct drm_device * dev, int pipe, int sync)
dev_priv->sarea_priv->pf_current_page |= next_page << shift;
}
void i915_dispatch_flip(struct drm_device * dev, int pipes, int sync)
void i915_dispatch_flip(struct drm_device * dev, int planes, int sync)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int i;
DRM_DEBUG("%s: pipes=0x%x pfCurrentPage=%d\n",
DRM_DEBUG("%s: planes=0x%x pfCurrentPage=%d\n",
__FUNCTION__,
pipes, dev_priv->sarea_priv->pf_current_page);
planes, dev_priv->sarea_priv->pf_current_page);
i915_emit_mi_flush(dev, MI_READ_FLUSH | MI_EXE_FLUSH);
for (i = 0; i < 2; i++)
if (pipes & (1 << i))
if (planes & (1 << i))
i915_do_dispatch_flip(dev, i, sync);
i915_emit_breadcrumb(dev);
@ -728,21 +728,21 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
static int i915_do_cleanup_pageflip(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int i, pipes, num_pages = dev_priv->sarea_priv->third_handle ? 3 : 2;
int i, planes, num_pages = dev_priv->sarea_priv->third_handle ? 3 : 2;
DRM_DEBUG("%s\n", __FUNCTION__);
for (i = 0, pipes = 0; i < 2; i++)
for (i = 0, planes = 0; i < 2; i++)
if (dev_priv->sarea_priv->pf_current_page & (0x3 << (2 * i))) {
dev_priv->sarea_priv->pf_current_page =
(dev_priv->sarea_priv->pf_current_page &
~(0x3 << (2 * i))) | (num_pages - 1) << (2 * i);
pipes |= 1 << i;
planes |= 1 << i;
}
if (pipes)
i915_dispatch_flip(dev, pipes, 0);
if (planes)
i915_dispatch_flip(dev, planes, 0);
return 0;
}
@ -755,13 +755,13 @@ static int i915_flip_bufs(struct drm_device *dev, void *data, struct drm_file *f
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (param->pipes & ~0x3) {
DRM_ERROR("Invalid pipes 0x%x, only <= 0x3 is valid\n",
param->pipes);
if (param->planes & ~0x3) {
DRM_ERROR("Invalid planes 0x%x, only <= 0x3 is valid\n",
param->planes);
return -EINVAL;
}
i915_dispatch_flip(dev, param->pipes, 0);
i915_dispatch_flip(dev, param->planes, 0);
return 0;
}

View File

@ -105,14 +105,14 @@ typedef struct _drm_i915_sarea {
unsigned int rotated_tiled;
unsigned int rotated2_tiled;
int pipeA_x;
int pipeA_y;
int pipeA_w;
int pipeA_h;
int pipeB_x;
int pipeB_y;
int pipeB_w;
int pipeB_h;
int planeA_x;
int planeA_y;
int planeA_w;
int planeA_h;
int planeB_x;
int planeB_y;
int planeB_w;
int planeB_h;
/* Triple buffering */
drm_handle_t third_handle;
@ -182,7 +182,7 @@ typedef struct _drm_i915_sarea {
/* Asynchronous page flipping:
*/
typedef struct drm_i915_flip {
int pipes;
int planes;
} drm_i915_flip_t;
/* Allow drivers to submit batchbuffers directly to hardware, relying

View File

@ -55,10 +55,11 @@
* - Support vertical blank on secondary display pipe
* 1.8: New ioctl for ARB_Occlusion_Query
* 1.9: Usable page flipping and triple buffering
* 1.10: Plane/pipe disentangling
*/
#define DRIVER_MAJOR 1
#if defined(I915_HAVE_FENCE) && defined(I915_HAVE_BUFFER)
#define DRIVER_MINOR 9
#define DRIVER_MINOR 10
#else
#define DRIVER_MINOR 6
#endif
@ -87,7 +88,7 @@ struct mem_block {
typedef struct _drm_i915_vbl_swap {
struct list_head head;
drm_drawable_t drw_id;
unsigned int pipe;
unsigned int plane;
unsigned int sequence;
int flip;
} drm_i915_vbl_swap_t;
@ -272,12 +273,25 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define MI_NO_WRITE_FLUSH (1 << 2)
#define MI_READ_FLUSH (1 << 0)
#define MI_EXE_FLUSH (1 << 1)
#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */
#define MI_SCENE_COUNT (1 << 3) /* just increment scene count */
/* Packet to load a register value from the ring/batch command stream:
*/
#define CMD_MI_LOAD_REGISTER_IMM ((0x22 << 23)|0x1)
#define BB1_START_ADDR_MASK (~0x7)
#define BB1_PROTECTED (1<<0)
#define BB1_UNPROTECTED (0<<0)
#define BB2_END_ADDR_MASK (~0x7)
/* Interrupt bits:
*/
#define USER_INT_FLAG (1<<1)
#define VSYNC_PIPEB_FLAG (1<<5)
#define VSYNC_PIPEA_FLAG (1<<7)
#define HWB_OOM_FLAG (1<<13) /* binner out of memory */
#define I915REG_HWSTAM 0x02098
#define I915REG_INT_IDENTITY_R 0x020a4
#define I915REG_INT_MASK_R 0x020a8
@ -315,6 +329,10 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define NOPID 0x2094
#define LP_RING 0x2030
#define HP_RING 0x2040
/* The binner has its own ring buffer:
*/
#define HWB_RING 0x2400
#define RING_TAIL 0x00
#define TAIL_ADDR 0x001FFFF8
#define RING_HEAD 0x04
@ -333,11 +351,105 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define RING_VALID 0x00000001
#define RING_INVALID 0x00000000
/* Instruction parser error reg:
*/
#define IPEIR 0x2088
/* Scratch pad debug 0 reg:
*/
#define SCPD0 0x209c
/* Error status reg:
*/
#define ESR 0x20b8
/* Secondary DMA fetch address debug reg:
*/
#define DMA_FADD_S 0x20d4
/* Cache mode 0 reg.
* - Manipulating render cache behaviour is central
* to the concept of zone rendering, tuning this reg can help avoid
* unnecessary render cache reads and even writes (for z/stencil)
* at beginning and end of scene.
*
* - To change a bit, write to this reg with a mask bit set and the
* bit of interest either set or cleared. EG: (BIT<<16) | BIT to set.
*/
#define Cache_Mode_0 0x2120
#define CM0_MASK_SHIFT 16
#define CM0_IZ_OPT_DISABLE (1<<6)
#define CM0_ZR_OPT_DISABLE (1<<5)
#define CM0_DEPTH_EVICT_DISABLE (1<<4)
#define CM0_COLOR_EVICT_DISABLE (1<<3)
#define CM0_DEPTH_WRITE_DISABLE (1<<1)
#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
/* Graphics flush control. A CPU write flushes the GWB of all writes.
* The data is discarded.
*/
#define GFX_FLSH_CNTL 0x2170
/* Binner control. Defines the location of the bin pointer list:
*/
#define BINCTL 0x2420
#define BC_MASK (1 << 9)
/* Binned scene info.
*/
#define BINSCENE 0x2428
#define BS_OP_LOAD (1 << 8)
#define BS_MASK (1 << 22)
/* Bin command parser debug reg:
*/
#define BCPD 0x2480
/* Bin memory control debug reg:
*/
#define BMCD 0x2484
/* Bin data cache debug reg:
*/
#define BDCD 0x2488
/* Binner pointer cache debug reg:
*/
#define BPCD 0x248c
/* Binner scratch pad debug reg:
*/
#define BINSKPD 0x24f0
/* HWB scratch pad debug reg:
*/
#define HWBSKPD 0x24f4
/* Binner memory pool reg:
*/
#define BMP_BUFFER 0x2430
#define BMP_PAGE_SIZE_4K (0 << 10)
#define BMP_BUFFER_SIZE_SHIFT 1
#define BMP_ENABLE (1 << 0)
/* Get/put memory from the binner memory pool:
*/
#define BMP_GET 0x2438
#define BMP_PUT 0x2440
#define BMP_OFFSET_SHIFT 5
/* 3D state packets:
*/
#define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24))
#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
#define SC_UPDATE_SCISSOR (0x1<<1)
#define SC_ENABLE_MASK (0x1<<0)
#define SC_ENABLE (0x1<<0)
#define GFX_OP_LOAD_INDIRECT ((0x3<<29)|(0x1d<<24)|(0x7<<16))
#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
#define SCI_YMIN_MASK (0xffff<<16)
#define SCI_XMIN_MASK (0xffff<<0)
@ -378,6 +490,15 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define DISPLAY_PLANE_A (0<<20)
#define DISPLAY_PLANE_B (1<<20)
/* Display regs */
#define DSPACNTR 0x70180
#define DSPBCNTR 0x71180
#define DISPPLANE_SEL_PIPE_MASK (1<<24)
/* Define the region of interest for the binner:
*/
#define CMD_OP_BIN_CONTROL ((0x3<<29)|(0x1d<<24)|(0x84<<16)|4)
#define CMD_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
#define BREADCRUMB_BITS 31

View File

@ -37,6 +37,26 @@
#define MAX_NOPID ((u32)~0)
/**
* i915_get_pipe - return the the pipe associated with a given plane
* @dev: DRM device
* @plane: plane to look for
*
* We need to get the pipe associated with a given plane to correctly perform
* vblank driven swapping, and they may not always be equal. So look up the
* pipe associated with @plane here.
*/
static int
i915_get_pipe(struct drm_device *dev, int plane)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 dspcntr;
dspcntr = plane ? I915_READ(DSPBCNTR) : I915_READ(DSPACNTR);
return dspcntr & DISPPLANE_SEL_PIPE_MASK ? 1 : 0;
}
/**
* Emit a synchronous flip.
*
@ -44,28 +64,28 @@
*/
static void
i915_dispatch_vsync_flip(struct drm_device *dev, struct drm_drawable_info *drw,
int pipe)
int plane)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
u16 x1, y1, x2, y2;
int pf_pipes = 1 << pipe;
int pf_planes = 1 << plane;
DRM_SPINLOCK_ASSERT(&dev->drw_lock);
/* If the window is visible on the other pipe, we have to flip on that
* pipe as well.
/* If the window is visible on the other plane, we have to flip on that
* plane as well.
*/
if (pipe == 1) {
x1 = sarea_priv->pipeA_x;
y1 = sarea_priv->pipeA_y;
x2 = x1 + sarea_priv->pipeA_w;
y2 = y1 + sarea_priv->pipeA_h;
if (plane == 1) {
x1 = sarea_priv->planeA_x;
y1 = sarea_priv->planeA_y;
x2 = x1 + sarea_priv->planeA_w;
y2 = y1 + sarea_priv->planeA_h;
} else {
x1 = sarea_priv->pipeB_x;
y1 = sarea_priv->pipeB_y;
x2 = x1 + sarea_priv->pipeB_w;
y2 = y1 + sarea_priv->pipeB_h;
x1 = sarea_priv->planeB_x;
y1 = sarea_priv->planeB_y;
x2 = x1 + sarea_priv->planeB_w;
y2 = y1 + sarea_priv->planeB_h;
}
if (x2 > 0 && y2 > 0) {
@ -75,13 +95,13 @@ i915_dispatch_vsync_flip(struct drm_device *dev, struct drm_drawable_info *drw,
for (i = 0; i < num_rects; i++)
if (!(rect[i].x1 >= x2 || rect[i].y1 >= y2 ||
rect[i].x2 <= x1 || rect[i].y2 <= y1)) {
pf_pipes = 0x3;
pf_planes = 0x3;
break;
}
}
i915_dispatch_flip(dev, pf_pipes, 1);
i915_dispatch_flip(dev, pf_planes, 1);
}
/**
@ -124,8 +144,9 @@ static void i915_vblank_tasklet(struct drm_device *dev)
list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
drm_i915_vbl_swap_t *vbl_swap =
list_entry(list, drm_i915_vbl_swap_t, head);
int pipe = i915_get_pipe(dev, vbl_swap->plane);
if ((counter[vbl_swap->pipe] - vbl_swap->sequence) > (1<<23))
if ((counter[pipe] - vbl_swap->sequence) > (1<<23))
continue;
list_del(list);
@ -167,7 +188,7 @@ static void i915_vblank_tasklet(struct drm_device *dev)
DRM_SPINLOCK(&dev_priv->swaps_lock);
}
DRM_SPINUNLOCK(&dev->drw_lock);
DRM_SPINUNLOCK(&dev_priv->swaps_lock);
if (nhits == 0) {
return;
@ -176,10 +197,10 @@ static void i915_vblank_tasklet(struct drm_device *dev)
i915_kernel_lost_context(dev);
upper[0] = upper[1] = 0;
slice[0] = max(sarea_priv->pipeA_h / nhits, 1);
slice[1] = max(sarea_priv->pipeB_h / nhits, 1);
lower[0] = sarea_priv->pipeA_y + slice[0];
lower[1] = sarea_priv->pipeB_y + slice[0];
slice[0] = max(sarea_priv->planeA_h / nhits, 1);
slice[1] = max(sarea_priv->planeB_h / nhits, 1);
lower[0] = sarea_priv->planeA_y + slice[0];
lower[1] = sarea_priv->planeB_y + slice[0];
offsets[0] = sarea_priv->front_offset;
offsets[1] = sarea_priv->back_offset;
@ -205,7 +226,7 @@ static void i915_vblank_tasklet(struct drm_device *dev)
drm_i915_vbl_swap_t *swap_hit =
list_entry(hit, drm_i915_vbl_swap_t, head);
struct drm_clip_rect *rect;
int num_rects, pipe, front, back;
int num_rects, plane, front, back;
unsigned short top, bottom;
drw = drm_get_drawable_info(dev, swap_hit->drw_id);
@ -213,10 +234,10 @@ static void i915_vblank_tasklet(struct drm_device *dev)
if (!drw)
continue;
pipe = swap_hit->pipe;
plane = swap_hit->plane;
if (swap_hit->flip) {
i915_dispatch_vsync_flip(dev, drw, pipe);
i915_dispatch_vsync_flip(dev, drw, plane);
continue;
}
@ -238,11 +259,11 @@ static void i915_vblank_tasklet(struct drm_device *dev)
}
rect = drw->rects;
top = upper[pipe];
bottom = lower[pipe];
top = upper[plane];
bottom = lower[plane];
front = (dev_priv->sarea_priv->pf_current_page >>
(2 * pipe)) & 0x3;
(2 * plane)) & 0x3;
back = (front + 1) % num_pages;
for (num_rects = drw->num_rects; num_rects--; rect++) {
@ -560,7 +581,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_vblank_swap_t *swap = data;
drm_i915_vbl_swap_t *vbl_swap;
unsigned int pipe, seqtype, curseq;
unsigned int pipe, seqtype, curseq, plane;
unsigned long irqflags;
struct list_head *list;
@ -581,7 +602,8 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
return -EINVAL;
}
pipe = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
plane = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
pipe = i915_get_pipe(dev, plane);
seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE);
@ -590,6 +612,21 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
return -EINVAL;
}
DRM_SPINLOCK_IRQSAVE(&dev->drw_lock, irqflags);
/* It makes no sense to schedule a swap for a drawable that doesn't have
* valid information at this point. E.g. this could mean that the X
* server is too old to push drawable information to the DRM, in which
* case all such swaps would become ineffective.
*/
if (!drm_get_drawable_info(dev, swap->drawable)) {
DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock, irqflags);
DRM_DEBUG("Invalid drawable ID %d\n", swap->drawable);
return -EINVAL;
}
DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock, irqflags);
curseq = atomic_read(pipe ? &dev->vbl_received2 : &dev->vbl_received);
if (seqtype == _DRM_VBLANK_RELATIVE)
@ -624,7 +661,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
return -EINVAL;
}
i915_dispatch_vsync_flip(dev, drw, pipe);
i915_dispatch_vsync_flip(dev, drw, plane);
DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock, irqflags);
@ -638,7 +675,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head);
if (vbl_swap->drw_id == swap->drawable &&
vbl_swap->pipe == pipe &&
vbl_swap->plane == plane &&
vbl_swap->sequence == swap->sequence) {
vbl_swap->flip = (swap->seqtype & _DRM_VBLANK_FLIP);
DRM_SPINUNLOCK_IRQRESTORE(&dev_priv->swaps_lock, irqflags);
@ -664,7 +701,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
DRM_DEBUG("\n");
vbl_swap->drw_id = swap->drawable;
vbl_swap->pipe = pipe;
vbl_swap->plane = plane;
vbl_swap->sequence = swap->sequence;
vbl_swap->flip = (swap->seqtype & _DRM_VBLANK_FLIP);

View File

@ -119,8 +119,6 @@ struct drm_nouveau_setparam {
enum nouveau_card_type {
NV_UNKNOWN =0,
NV_01 =1,
NV_03 =3,
NV_04 =4,
NV_05 =5,
NV_10 =10,

View File

@ -421,6 +421,8 @@ extern void nouveau_irq_uninstall(struct drm_device *);
/* nouveau_sgdma.c */
extern int nouveau_sgdma_init(struct drm_device *);
extern void nouveau_sgdma_takedown(struct drm_device *);
extern int nouveau_sgdma_get_page(struct drm_device *, uint32_t offset,
uint32_t *page);
extern struct drm_ttm_backend *nouveau_sgdma_init_ttm(struct drm_device *);
extern int nouveau_sgdma_nottm_hack_init(struct drm_device *);
extern void nouveau_sgdma_nottm_hack_takedown(struct drm_device *);

View File

@ -34,8 +34,6 @@ int nouveau_fifo_number(struct drm_device *dev)
struct drm_nouveau_private *dev_priv=dev->dev_private;
switch(dev_priv->card_type)
{
case NV_03:
return 8;
case NV_04:
case NV_05:
return 16;
@ -84,9 +82,16 @@ static int nouveau_fifo_instmem_configure(struct drm_device *dev)
{
case NV_50:
case NV_40:
switch (dev_priv->chipset) {
case 0x47:
case 0x49:
case 0x4b:
NV_WRITE(0x2230, 1);
break;
default:
break;
}
NV_WRITE(NV40_PFIFO_RAMFC, 0x30002);
if((dev_priv->chipset == 0x49) || (dev_priv->chipset == 0x4b))
NV_WRITE(0x2230,0x00000001);
break;
case NV_44:
NV_WRITE(NV40_PFIFO_RAMFC, ((nouveau_mem_fb_amount(dev)-512*1024+dev_priv->ramfc_offset)>>16) |
@ -102,7 +107,6 @@ static int nouveau_fifo_instmem_configure(struct drm_device *dev)
case NV_11:
case NV_10:
case NV_04:
case NV_03:
NV_WRITE(NV03_PFIFO_RAMFC, dev_priv->ramfc_offset>>8);
break;
}

View File

@ -227,8 +227,10 @@ nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret)
struct drm_nouveau_private *dev_priv = dev->dev_private;
int channel;
if (dev_priv->card_type < NV_40) {
channel = (NV_READ(0x400704) >> 20) & 0x1f;
if (dev_priv->card_type < NV_10) {
channel = (NV_READ(NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0xf;
} else if (dev_priv->card_type < NV_40) {
channel = (NV_READ(NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
} else
if (dev_priv->card_type < NV_50) {
uint32_t cur_grctx = (NV_READ(0x40032C) & 0xfffff) << 4;
@ -283,16 +285,22 @@ nouveau_graph_dump_trap_info(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t address;
uint32_t channel, class;
uint32_t method, subc, data;
uint32_t method, subc, data, data2;
uint32_t nsource, nstatus;
if (nouveau_graph_trapped_channel(dev, &channel))
channel = -1;
address = NV_READ(0x400704);
subc = (address >> 16) & 0x7;
data = NV_READ(NV04_PGRAPH_TRAPPED_DATA);
address = NV_READ(NV04_PGRAPH_TRAPPED_ADDR);
method = address & 0x1FFC;
data = NV_READ(0x400708);
if (dev_priv->card_type < NV_10) {
subc = (address >> 13) & 0x7;
data2= 0;
} else {
subc = (address >> 16) & 0x7;
data2= NV_READ(NV10_PGRAPH_TRAPPED_DATA_HIGH);
}
nsource = NV_READ(NV03_PGRAPH_NSOURCE);
nstatus = NV_READ(NV03_PGRAPH_NSTATUS);
if (dev_priv->card_type < NV_50) {
@ -309,8 +317,8 @@ nouveau_graph_dump_trap_info(struct drm_device *dev)
ARRAY_SIZE(nouveau_nstatus_names));
printk("\n");
DRM_ERROR("Channel %d/%d (class 0x%04x) - Method 0x%04x, Data 0x%08x\n",
channel, subc, class, method, data);
DRM_ERROR("Channel %d/%d (class 0x%04x) - Method 0x%04x, Data 0x%08x:0x%08x\n",
channel, subc, class, method, data2, data);
}
static void nouveau_pgraph_irq_handler(struct drm_device *dev)

View File

@ -219,24 +219,44 @@ void nouveau_mem_close(struct drm_device *dev)
nouveau_mem_takedown(&dev_priv->pci_heap);
}
/*XXX won't work on BSD because of pci_read_config_dword */
static uint32_t
nouveau_mem_fb_amount_igp(struct drm_device *dev)
{
#if defined(LINUX) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct pci_dev *bridge;
uint32_t mem;
bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0,1));
if (!bridge) {
DRM_ERROR("no bridge device\n");
return 0;
}
if (dev_priv->flags&NV_NFORCE) {
pci_read_config_dword(bridge, 0x7C, &mem);
return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024;
} else
if(dev_priv->flags&NV_NFORCE2) {
pci_read_config_dword(bridge, 0x84, &mem);
return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024;
}
DRM_ERROR("impossible!\n");
#else
DRM_ERROR("Linux kernel >= 2.6.19 required to check for igp memory amount\n");
#endif
return 0;
}
/* returns the amount of FB ram in bytes */
uint64_t nouveau_mem_fb_amount(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv=dev->dev_private;
switch(dev_priv->card_type)
{
case NV_03:
switch(NV_READ(NV03_BOOT_0)&NV03_BOOT_0_RAM_AMOUNT)
{
case NV03_BOOT_0_RAM_AMOUNT_8MB:
case NV03_BOOT_0_RAM_AMOUNT_8MB_SDRAM:
return 8*1024*1024;
case NV03_BOOT_0_RAM_AMOUNT_4MB:
return 4*1024*1024;
case NV03_BOOT_0_RAM_AMOUNT_2MB:
return 2*1024*1024;
}
break;
case NV_04:
case NV_05:
if (NV_READ(NV03_BOOT_0) & 0x00000100) {
@ -263,18 +283,14 @@ uint64_t nouveau_mem_fb_amount(struct drm_device *dev)
case NV_44:
case NV_50:
default:
// XXX won't work on BSD because of pci_read_config_dword
if (dev_priv->flags&NV_NFORCE) {
uint32_t mem;
pci_read_config_dword(dev->pdev, 0x7C, &mem);
return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024;
} else if(dev_priv->flags&NV_NFORCE2) {
uint32_t mem;
pci_read_config_dword(dev->pdev, 0x84, &mem);
return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024;
if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) {
return nouveau_mem_fb_amount_igp(dev);
} else {
uint64_t mem;
mem=(NV_READ(NV04_FIFO_DATA)&NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK) >> NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT;
mem = (NV_READ(NV04_FIFO_DATA) &
NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK) >>
NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT;
return mem*1024*1024;
}
break;
@ -411,11 +427,11 @@ int nouveau_mem_init(struct drm_device *dev)
struct drm_scatter_gather sgreq;
DRM_DEBUG("Allocating sg memory for PCI DMA\n");
sgreq.size = 16 << 20; //4MB of PCI scatter-gather zone
sgreq.size = 16 << 20; //16MB of PCI scatter-gather zone
if (drm_sg_alloc(dev, &sgreq)) {
DRM_ERROR("Unable to allocate 4MB of scatter-gather"
" pages for PCI DMA!");
DRM_ERROR("Unable to allocate %dMB of scatter-gather"
" pages for PCI DMA!",sgreq.size>>20);
} else {
if (nouveau_mem_init_heap(&dev_priv->pci_heap, 0,
dev->sg->pages * PAGE_SIZE)) {
@ -531,13 +547,13 @@ alloc_ok:
block->map_handle = entry->user_token;
}
DRM_INFO("allocated 0x%llx\n", block->start);
DRM_DEBUG("allocated 0x%llx type=0x%08x\n", block->start, block->flags);
return block;
}
void nouveau_mem_free(struct drm_device* dev, struct mem_block* block)
{
DRM_INFO("freeing 0x%llx\n", block->start);
DRM_DEBUG("freeing 0x%llx type=0x%08x\n", block->start, block->flags);
if (block->flags&NOUVEAU_MEM_MAPPED)
drm_rmmap(dev, block->map);
nouveau_mem_free_block(block);

View File

@ -37,20 +37,20 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan)
int flags, ret;
/*TODO: PCI notifier blocks */
if (dev_priv->agp_heap &&
dev_priv->gart_info.type != NOUVEAU_GART_SGDMA)
flags = NOUVEAU_MEM_AGP | NOUVEAU_MEM_FB_ACCEPTABLE;
else if ( dev_priv->pci_heap )
if (dev_priv->agp_heap)
flags = NOUVEAU_MEM_AGP;
else if (dev_priv->pci_heap)
flags = NOUVEAU_MEM_PCI;
else
flags = NOUVEAU_MEM_FB;
flags |= NOUVEAU_MEM_MAPPED;
flags |= (NOUVEAU_MEM_MAPPED | NOUVEAU_MEM_FB_ACCEPTABLE);
DRM_DEBUG("Allocating notifier block in %d\n", flags);
chan->notifier_block = nouveau_mem_alloc(dev, 0, PAGE_SIZE, flags,
(struct drm_file *)-2);
if (!chan->notifier_block)
return -ENOMEM;
DRM_DEBUG("Allocated notifier block in 0x%08x\n",
chan->notifier_block->flags);
ret = nouveau_mem_init_heap(&chan->notifier_heap,
0, chan->notifier_block->size);
@ -88,6 +88,7 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
int count, uint32_t *b_offset)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *nobj = NULL;
struct mem_block *mem;
uint32_t offset;
@ -99,7 +100,7 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
return -EINVAL;
}
mem = nouveau_mem_alloc_block(chan->notifier_heap, 32, 0,
mem = nouveau_mem_alloc_block(chan->notifier_heap, count*32, 0,
(struct drm_file *)-2);
if (!mem) {
DRM_ERROR("Channel %d notifier block full\n", chan->id);
@ -107,18 +108,29 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
}
mem->flags = NOUVEAU_MEM_NOTIFIER;
offset = chan->notifier_block->start + mem->start;
offset = chan->notifier_block->start;
if (chan->notifier_block->flags & NOUVEAU_MEM_FB) {
target = NV_DMA_TARGET_VIDMEM;
} else if (chan->notifier_block->flags & NOUVEAU_MEM_AGP) {
target = NV_DMA_TARGET_AGP;
} else if (chan->notifier_block->flags & NOUVEAU_MEM_PCI) {
} else
if (chan->notifier_block->flags & NOUVEAU_MEM_AGP) {
if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA &&
dev_priv->card_type < NV_50) {
ret = nouveau_sgdma_get_page(dev, offset, &offset);
if (ret)
return ret;
target = NV_DMA_TARGET_PCI;
} else {
target = NV_DMA_TARGET_AGP;
}
} else
if (chan->notifier_block->flags & NOUVEAU_MEM_PCI) {
target = NV_DMA_TARGET_PCI_NONLINEAR;
} else {
DRM_ERROR("Bad DMA target, flags 0x%08x!\n",
chan->notifier_block->flags);
return -EINVAL;
}
offset += mem->start;
if ((ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
offset, mem->size,

View File

@ -141,8 +141,13 @@ nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
ref->channel, co, INSTANCE_RD(ramht, co/4));
co += 8;
if (co >= dev_priv->ramht_size)
if (co >= dev_priv->ramht_size) {
DRM_INFO("no space left after collision\n");
co = 0;
/* exit as it seems to cause crash with nouveau_demo and
* 0xdead0001 object */
break;
}
} while (co != ho);
DRM_ERROR("RAMHT space exhausted. ch=%d\n", ref->channel);

View File

@ -15,9 +15,6 @@
# define NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK 0xfff00000
# define NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT 20
#define NV03_PGRAPH_STATUS 0x004006b0
#define NV04_PGRAPH_STATUS 0x00400700
#define NV_RAMIN 0x00700000
#define NV_RAMHT_HANDLE_OFFSET 0
@ -178,6 +175,10 @@
#define NV10_PGRAPH_CTX_CACHE5 0x004001E0
#define NV40_PGRAPH_CTXCTL_0304 0x00400304
#define NV40_PGRAPH_CTXCTL_0304_XFER_CTX 0x00000001
#define NV40_PGRAPH_CTXCTL_UCODE_STAT 0x00400308
#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_MASK 0xff000000
#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT 24
#define NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK 0x00ffffff
#define NV40_PGRAPH_CTXCTL_0310 0x00400310
#define NV40_PGRAPH_CTXCTL_0310_XFER_SAVE 0x00000020
#define NV40_PGRAPH_CTXCTL_0310_XFER_LOAD 0x00000040
@ -260,7 +261,12 @@
#define NV04_PGRAPH_BLIMIT5 0x00400698
#define NV04_PGRAPH_BSWIZZLE2 0x0040069C
#define NV04_PGRAPH_BSWIZZLE5 0x004006A0
#define NV03_PGRAPH_STATUS 0x004006B0
#define NV04_PGRAPH_STATUS 0x00400700
#define NV04_PGRAPH_TRAPPED_ADDR 0x00400704
#define NV04_PGRAPH_TRAPPED_DATA 0x00400708
#define NV04_PGRAPH_SURFACE 0x0040070C
#define NV10_PGRAPH_TRAPPED_DATA_HIGH 0x0040070C
#define NV04_PGRAPH_STATE 0x00400710
#define NV10_PGRAPH_SURFACE 0x00400710
#define NV04_PGRAPH_NOTIFY 0x00400714
@ -313,6 +319,18 @@
#define NV47_PGRAPH_TSTATUS0(i) 0x00400D0C
#define NV04_PGRAPH_V_RAM 0x00400D40
#define NV04_PGRAPH_W_RAM 0x00400D80
#define NV10_PGRAPH_COMBINER0_IN_ALPHA 0x00400E40
#define NV10_PGRAPH_COMBINER1_IN_ALPHA 0x00400E44
#define NV10_PGRAPH_COMBINER0_IN_RGB 0x00400E48
#define NV10_PGRAPH_COMBINER1_IN_RGB 0x00400E4C
#define NV10_PGRAPH_COMBINER_COLOR0 0x00400E50
#define NV10_PGRAPH_COMBINER_COLOR1 0x00400E54
#define NV10_PGRAPH_COMBINER0_OUT_ALPHA 0x00400E58
#define NV10_PGRAPH_COMBINER1_OUT_ALPHA 0x00400E5C
#define NV10_PGRAPH_COMBINER0_OUT_RGB 0x00400E60
#define NV10_PGRAPH_COMBINER1_OUT_RGB 0x00400E64
#define NV10_PGRAPH_COMBINER_FINAL0 0x00400E68
#define NV10_PGRAPH_COMBINER_FINAL1 0x00400E6C
#define NV10_PGRAPH_WINDOWCLIP_HORIZONTAL 0x00400F00
#define NV10_PGRAPH_WINDOWCLIP_VERTICAL 0x00400F20
#define NV10_PGRAPH_XFMODE0 0x00400F40

View File

@ -538,9 +538,6 @@ void nouveau_wait_for_idle(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv=dev->dev_private;
switch(dev_priv->card_type) {
case NV_03:
while (NV_READ(NV03_PGRAPH_STATUS));
break;
case NV_50:
break;
default: {

View File

@ -27,262 +27,321 @@
#include "nouveau_drm.h"
#include "nouveau_drv.h"
struct reg_interval
{
uint32_t reg;
int number;
} nv04_graph_ctx_regs [] = {
{NV04_PGRAPH_CTX_SWITCH1,1},
{NV04_PGRAPH_CTX_SWITCH2,1},
{NV04_PGRAPH_CTX_SWITCH3,1},
{NV04_PGRAPH_CTX_SWITCH4,1},
{NV04_PGRAPH_CTX_CACHE1,1},
{NV04_PGRAPH_CTX_CACHE2,1},
{NV04_PGRAPH_CTX_CACHE3,1},
{NV04_PGRAPH_CTX_CACHE4,1},
{0x00400184,1},
{0x004001a4,1},
{0x004001c4,1},
{0x004001e4,1},
{0x00400188,1},
{0x004001a8,1},
{0x004001c8,1},
{0x004001e8,1},
{0x0040018c,1},
{0x004001ac,1},
{0x004001cc,1},
{0x004001ec,1},
{0x00400190,1},
{0x004001b0,1},
{0x004001d0,1},
{0x004001f0,1},
{0x00400194,1},
{0x004001b4,1},
{0x004001d4,1},
{0x004001f4,1},
{0x00400198,1},
{0x004001b8,1},
{0x004001d8,1},
{0x004001f8,1},
{0x0040019c,1},
{0x004001bc,1},
{0x004001dc,1},
{0x004001fc,1},
{0x00400174,1},
{NV04_PGRAPH_DMA_START_0,1},
{NV04_PGRAPH_DMA_START_1,1},
{NV04_PGRAPH_DMA_LENGTH,1},
{NV04_PGRAPH_DMA_MISC,1},
{NV04_PGRAPH_DMA_PITCH,1},
{NV04_PGRAPH_BOFFSET0,1},
{NV04_PGRAPH_BBASE0,1},
{NV04_PGRAPH_BLIMIT0,1},
{NV04_PGRAPH_BOFFSET1,1},
{NV04_PGRAPH_BBASE1,1},
{NV04_PGRAPH_BLIMIT1,1},
{NV04_PGRAPH_BOFFSET2,1},
{NV04_PGRAPH_BBASE2,1},
{NV04_PGRAPH_BLIMIT2,1},
{NV04_PGRAPH_BOFFSET3,1},
{NV04_PGRAPH_BBASE3,1},
{NV04_PGRAPH_BLIMIT3,1},
{NV04_PGRAPH_BOFFSET4,1},
{NV04_PGRAPH_BBASE4,1},
{NV04_PGRAPH_BLIMIT4,1},
{NV04_PGRAPH_BOFFSET5,1},
{NV04_PGRAPH_BBASE5,1},
{NV04_PGRAPH_BLIMIT5,1},
{NV04_PGRAPH_BPITCH0,1},
{NV04_PGRAPH_BPITCH1,1},
{NV04_PGRAPH_BPITCH2,1},
{NV04_PGRAPH_BPITCH3,1},
{NV04_PGRAPH_BPITCH4,1},
{NV04_PGRAPH_SURFACE,1},
{NV04_PGRAPH_STATE,1},
{NV04_PGRAPH_BSWIZZLE2,1},
{NV04_PGRAPH_BSWIZZLE5,1},
{NV04_PGRAPH_BPIXEL,1},
{NV04_PGRAPH_NOTIFY,1},
{NV04_PGRAPH_PATT_COLOR0,1},
{NV04_PGRAPH_PATT_COLOR1,1},
{NV04_PGRAPH_PATT_COLORRAM,64},
{NV04_PGRAPH_PATTERN,1},
{0x0040080c,1},
{NV04_PGRAPH_PATTERN_SHAPE,1},
{0x00400600,1},
{NV04_PGRAPH_ROP3,1},
{NV04_PGRAPH_CHROMA,1},
{NV04_PGRAPH_BETA_AND,1},
{NV04_PGRAPH_BETA_PREMULT,1},
{NV04_PGRAPH_CONTROL0,1},
{NV04_PGRAPH_CONTROL1,1},
{NV04_PGRAPH_CONTROL2,1},
{NV04_PGRAPH_BLEND,1},
{NV04_PGRAPH_STORED_FMT,1},
{NV04_PGRAPH_SOURCE_COLOR,1},
{0x00400560,1},
{0x00400568,1},
{0x00400564,1},
{0x0040056c,1},
{0x00400400,1},
{0x00400480,1},
{0x00400404,1},
{0x00400484,1},
{0x00400408,1},
{0x00400488,1},
{0x0040040c,1},
{0x0040048c,1},
{0x00400410,1},
{0x00400490,1},
{0x00400414,1},
{0x00400494,1},
{0x00400418,1},
{0x00400498,1},
{0x0040041c,1},
{0x0040049c,1},
{0x00400420,1},
{0x004004a0,1},
{0x00400424,1},
{0x004004a4,1},
{0x00400428,1},
{0x004004a8,1},
{0x0040042c,1},
{0x004004ac,1},
{0x00400430,1},
{0x004004b0,1},
{0x00400434,1},
{0x004004b4,1},
{0x00400438,1},
{0x004004b8,1},
{0x0040043c,1},
{0x004004bc,1},
{0x00400440,1},
{0x004004c0,1},
{0x00400444,1},
{0x004004c4,1},
{0x00400448,1},
{0x004004c8,1},
{0x0040044c,1},
{0x004004cc,1},
{0x00400450,1},
{0x004004d0,1},
{0x00400454,1},
{0x004004d4,1},
{0x00400458,1},
{0x004004d8,1},
{0x0040045c,1},
{0x004004dc,1},
{0x00400460,1},
{0x004004e0,1},
{0x00400464,1},
{0x004004e4,1},
{0x00400468,1},
{0x004004e8,1},
{0x0040046c,1},
{0x004004ec,1},
{0x00400470,1},
{0x004004f0,1},
{0x00400474,1},
{0x004004f4,1},
{0x00400478,1},
{0x004004f8,1},
{0x0040047c,1},
{0x004004fc,1},
{0x0040053c,1},
{0x00400544,1},
{0x00400540,1},
{0x00400548,1},
{0x00400560,1},
{0x00400568,1},
{0x00400564,1},
{0x0040056c,1},
{0x00400534,1},
{0x00400538,1},
{0x00400514,1},
{0x00400518,1},
{0x0040051c,1},
{0x00400520,1},
{0x00400524,1},
{0x00400528,1},
{0x0040052c,1},
{0x00400530,1},
{0x00400d00,1},
{0x00400d40,1},
{0x00400d80,1},
{0x00400d04,1},
{0x00400d44,1},
{0x00400d84,1},
{0x00400d08,1},
{0x00400d48,1},
{0x00400d88,1},
{0x00400d0c,1},
{0x00400d4c,1},
{0x00400d8c,1},
{0x00400d10,1},
{0x00400d50,1},
{0x00400d90,1},
{0x00400d14,1},
{0x00400d54,1},
{0x00400d94,1},
{0x00400d18,1},
{0x00400d58,1},
{0x00400d98,1},
{0x00400d1c,1},
{0x00400d5c,1},
{0x00400d9c,1},
{0x00400d20,1},
{0x00400d60,1},
{0x00400da0,1},
{0x00400d24,1},
{0x00400d64,1},
{0x00400da4,1},
{0x00400d28,1},
{0x00400d68,1},
{0x00400da8,1},
{0x00400d2c,1},
{0x00400d6c,1},
{0x00400dac,1},
{0x00400d30,1},
{0x00400d70,1},
{0x00400db0,1},
{0x00400d34,1},
{0x00400d74,1},
{0x00400db4,1},
{0x00400d38,1},
{0x00400d78,1},
{0x00400db8,1},
{0x00400d3c,1},
{0x00400d7c,1},
{0x00400dbc,1},
{0x00400590,1},
{0x00400594,1},
{0x00400598,1},
{0x0040059c,1},
{0x004005a8,1},
{0x004005ac,1},
{0x004005b0,1},
{0x004005b4,1},
{0x004005c0,1},
{0x004005c4,1},
{0x004005c8,1},
{0x004005cc,1},
{0x004005d0,1},
{0x004005d4,1},
{0x004005d8,1},
{0x004005dc,1},
{0x004005e0,1},
{NV04_PGRAPH_PASSTHRU_0,1},
{NV04_PGRAPH_PASSTHRU_1,1},
{NV04_PGRAPH_PASSTHRU_2,1},
{NV04_PGRAPH_DVD_COLORFMT,1},
{NV04_PGRAPH_SCALED_FORMAT,1},
{NV04_PGRAPH_MISC24_0,1},
{NV04_PGRAPH_MISC24_1,1},
{NV04_PGRAPH_MISC24_2,1},
{0x00400500,1},
{0x00400504,1},
{NV04_PGRAPH_VALID1,1},
{NV04_PGRAPH_VALID2,1}
static uint32_t nv04_graph_ctx_regs [] = {
NV04_PGRAPH_CTX_SWITCH1,
NV04_PGRAPH_CTX_SWITCH2,
NV04_PGRAPH_CTX_SWITCH3,
NV04_PGRAPH_CTX_SWITCH4,
NV04_PGRAPH_CTX_CACHE1,
NV04_PGRAPH_CTX_CACHE2,
NV04_PGRAPH_CTX_CACHE3,
NV04_PGRAPH_CTX_CACHE4,
0x00400184,
0x004001a4,
0x004001c4,
0x004001e4,
0x00400188,
0x004001a8,
0x004001c8,
0x004001e8,
0x0040018c,
0x004001ac,
0x004001cc,
0x004001ec,
0x00400190,
0x004001b0,
0x004001d0,
0x004001f0,
0x00400194,
0x004001b4,
0x004001d4,
0x004001f4,
0x00400198,
0x004001b8,
0x004001d8,
0x004001f8,
0x0040019c,
0x004001bc,
0x004001dc,
0x004001fc,
0x00400174,
NV04_PGRAPH_DMA_START_0,
NV04_PGRAPH_DMA_START_1,
NV04_PGRAPH_DMA_LENGTH,
NV04_PGRAPH_DMA_MISC,
NV04_PGRAPH_DMA_PITCH,
NV04_PGRAPH_BOFFSET0,
NV04_PGRAPH_BBASE0,
NV04_PGRAPH_BLIMIT0,
NV04_PGRAPH_BOFFSET1,
NV04_PGRAPH_BBASE1,
NV04_PGRAPH_BLIMIT1,
NV04_PGRAPH_BOFFSET2,
NV04_PGRAPH_BBASE2,
NV04_PGRAPH_BLIMIT2,
NV04_PGRAPH_BOFFSET3,
NV04_PGRAPH_BBASE3,
NV04_PGRAPH_BLIMIT3,
NV04_PGRAPH_BOFFSET4,
NV04_PGRAPH_BBASE4,
NV04_PGRAPH_BLIMIT4,
NV04_PGRAPH_BOFFSET5,
NV04_PGRAPH_BBASE5,
NV04_PGRAPH_BLIMIT5,
NV04_PGRAPH_BPITCH0,
NV04_PGRAPH_BPITCH1,
NV04_PGRAPH_BPITCH2,
NV04_PGRAPH_BPITCH3,
NV04_PGRAPH_BPITCH4,
NV04_PGRAPH_SURFACE,
NV04_PGRAPH_STATE,
NV04_PGRAPH_BSWIZZLE2,
NV04_PGRAPH_BSWIZZLE5,
NV04_PGRAPH_BPIXEL,
NV04_PGRAPH_NOTIFY,
NV04_PGRAPH_PATT_COLOR0,
NV04_PGRAPH_PATT_COLOR1,
NV04_PGRAPH_PATT_COLORRAM+0x00,
NV04_PGRAPH_PATT_COLORRAM+0x01,
NV04_PGRAPH_PATT_COLORRAM+0x02,
NV04_PGRAPH_PATT_COLORRAM+0x03,
NV04_PGRAPH_PATT_COLORRAM+0x04,
NV04_PGRAPH_PATT_COLORRAM+0x05,
NV04_PGRAPH_PATT_COLORRAM+0x06,
NV04_PGRAPH_PATT_COLORRAM+0x07,
NV04_PGRAPH_PATT_COLORRAM+0x08,
NV04_PGRAPH_PATT_COLORRAM+0x09,
NV04_PGRAPH_PATT_COLORRAM+0x0A,
NV04_PGRAPH_PATT_COLORRAM+0x0B,
NV04_PGRAPH_PATT_COLORRAM+0x0C,
NV04_PGRAPH_PATT_COLORRAM+0x0D,
NV04_PGRAPH_PATT_COLORRAM+0x0E,
NV04_PGRAPH_PATT_COLORRAM+0x0F,
NV04_PGRAPH_PATT_COLORRAM+0x10,
NV04_PGRAPH_PATT_COLORRAM+0x11,
NV04_PGRAPH_PATT_COLORRAM+0x12,
NV04_PGRAPH_PATT_COLORRAM+0x13,
NV04_PGRAPH_PATT_COLORRAM+0x14,
NV04_PGRAPH_PATT_COLORRAM+0x15,
NV04_PGRAPH_PATT_COLORRAM+0x16,
NV04_PGRAPH_PATT_COLORRAM+0x17,
NV04_PGRAPH_PATT_COLORRAM+0x18,
NV04_PGRAPH_PATT_COLORRAM+0x19,
NV04_PGRAPH_PATT_COLORRAM+0x1A,
NV04_PGRAPH_PATT_COLORRAM+0x1B,
NV04_PGRAPH_PATT_COLORRAM+0x1C,
NV04_PGRAPH_PATT_COLORRAM+0x1D,
NV04_PGRAPH_PATT_COLORRAM+0x1E,
NV04_PGRAPH_PATT_COLORRAM+0x1F,
NV04_PGRAPH_PATT_COLORRAM+0x20,
NV04_PGRAPH_PATT_COLORRAM+0x21,
NV04_PGRAPH_PATT_COLORRAM+0x22,
NV04_PGRAPH_PATT_COLORRAM+0x23,
NV04_PGRAPH_PATT_COLORRAM+0x24,
NV04_PGRAPH_PATT_COLORRAM+0x25,
NV04_PGRAPH_PATT_COLORRAM+0x26,
NV04_PGRAPH_PATT_COLORRAM+0x27,
NV04_PGRAPH_PATT_COLORRAM+0x28,
NV04_PGRAPH_PATT_COLORRAM+0x29,
NV04_PGRAPH_PATT_COLORRAM+0x2A,
NV04_PGRAPH_PATT_COLORRAM+0x2B,
NV04_PGRAPH_PATT_COLORRAM+0x2C,
NV04_PGRAPH_PATT_COLORRAM+0x2D,
NV04_PGRAPH_PATT_COLORRAM+0x2E,
NV04_PGRAPH_PATT_COLORRAM+0x2F,
NV04_PGRAPH_PATT_COLORRAM+0x30,
NV04_PGRAPH_PATT_COLORRAM+0x31,
NV04_PGRAPH_PATT_COLORRAM+0x32,
NV04_PGRAPH_PATT_COLORRAM+0x33,
NV04_PGRAPH_PATT_COLORRAM+0x34,
NV04_PGRAPH_PATT_COLORRAM+0x35,
NV04_PGRAPH_PATT_COLORRAM+0x36,
NV04_PGRAPH_PATT_COLORRAM+0x37,
NV04_PGRAPH_PATT_COLORRAM+0x38,
NV04_PGRAPH_PATT_COLORRAM+0x39,
NV04_PGRAPH_PATT_COLORRAM+0x3A,
NV04_PGRAPH_PATT_COLORRAM+0x3B,
NV04_PGRAPH_PATT_COLORRAM+0x3C,
NV04_PGRAPH_PATT_COLORRAM+0x3D,
NV04_PGRAPH_PATT_COLORRAM+0x3E,
NV04_PGRAPH_PATT_COLORRAM+0x3F,
NV04_PGRAPH_PATTERN,
0x0040080c,
NV04_PGRAPH_PATTERN_SHAPE,
0x00400600,
NV04_PGRAPH_ROP3,
NV04_PGRAPH_CHROMA,
NV04_PGRAPH_BETA_AND,
NV04_PGRAPH_BETA_PREMULT,
NV04_PGRAPH_CONTROL0,
NV04_PGRAPH_CONTROL1,
NV04_PGRAPH_CONTROL2,
NV04_PGRAPH_BLEND,
NV04_PGRAPH_STORED_FMT,
NV04_PGRAPH_SOURCE_COLOR,
0x00400560,
0x00400568,
0x00400564,
0x0040056c,
0x00400400,
0x00400480,
0x00400404,
0x00400484,
0x00400408,
0x00400488,
0x0040040c,
0x0040048c,
0x00400410,
0x00400490,
0x00400414,
0x00400494,
0x00400418,
0x00400498,
0x0040041c,
0x0040049c,
0x00400420,
0x004004a0,
0x00400424,
0x004004a4,
0x00400428,
0x004004a8,
0x0040042c,
0x004004ac,
0x00400430,
0x004004b0,
0x00400434,
0x004004b4,
0x00400438,
0x004004b8,
0x0040043c,
0x004004bc,
0x00400440,
0x004004c0,
0x00400444,
0x004004c4,
0x00400448,
0x004004c8,
0x0040044c,
0x004004cc,
0x00400450,
0x004004d0,
0x00400454,
0x004004d4,
0x00400458,
0x004004d8,
0x0040045c,
0x004004dc,
0x00400460,
0x004004e0,
0x00400464,
0x004004e4,
0x00400468,
0x004004e8,
0x0040046c,
0x004004ec,
0x00400470,
0x004004f0,
0x00400474,
0x004004f4,
0x00400478,
0x004004f8,
0x0040047c,
0x004004fc,
0x0040053c,
0x00400544,
0x00400540,
0x00400548,
0x00400560,
0x00400568,
0x00400564,
0x0040056c,
0x00400534,
0x00400538,
0x00400514,
0x00400518,
0x0040051c,
0x00400520,
0x00400524,
0x00400528,
0x0040052c,
0x00400530,
0x00400d00,
0x00400d40,
0x00400d80,
0x00400d04,
0x00400d44,
0x00400d84,
0x00400d08,
0x00400d48,
0x00400d88,
0x00400d0c,
0x00400d4c,
0x00400d8c,
0x00400d10,
0x00400d50,
0x00400d90,
0x00400d14,
0x00400d54,
0x00400d94,
0x00400d18,
0x00400d58,
0x00400d98,
0x00400d1c,
0x00400d5c,
0x00400d9c,
0x00400d20,
0x00400d60,
0x00400da0,
0x00400d24,
0x00400d64,
0x00400da4,
0x00400d28,
0x00400d68,
0x00400da8,
0x00400d2c,
0x00400d6c,
0x00400dac,
0x00400d30,
0x00400d70,
0x00400db0,
0x00400d34,
0x00400d74,
0x00400db4,
0x00400d38,
0x00400d78,
0x00400db8,
0x00400d3c,
0x00400d7c,
0x00400dbc,
0x00400590,
0x00400594,
0x00400598,
0x0040059c,
0x004005a8,
0x004005ac,
0x004005b0,
0x004005b4,
0x004005c0,
0x004005c4,
0x004005c8,
0x004005cc,
0x004005d0,
0x004005d4,
0x004005d8,
0x004005dc,
0x004005e0,
NV04_PGRAPH_PASSTHRU_0,
NV04_PGRAPH_PASSTHRU_1,
NV04_PGRAPH_PASSTHRU_2,
NV04_PGRAPH_DVD_COLORFMT,
NV04_PGRAPH_SCALED_FORMAT,
NV04_PGRAPH_MISC24_0,
NV04_PGRAPH_MISC24_1,
NV04_PGRAPH_MISC24_2,
0x00400500,
0x00400504,
NV04_PGRAPH_VALID1,
NV04_PGRAPH_VALID2
};
@ -290,49 +349,42 @@ struct reg_interval
void nouveau_nv04_context_switch(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
int channel, channel_old, i, j, index;
struct nouveau_channel *next, *last;
int chid;
channel=NV_READ(NV03_PFIFO_CACHE1_PUSH1)&(nouveau_fifo_number(dev)-1);
channel_old = (NV_READ(NV04_PGRAPH_CTX_USER) >> 24) & (nouveau_fifo_number(dev)-1);
chid = NV_READ(NV03_PFIFO_CACHE1_PUSH1)&(nouveau_fifo_number(dev)-1);
next = dev_priv->fifos[chid];
DRM_DEBUG("NV: PGRAPH context switch interrupt channel %x -> %x\n",channel_old, channel);
chid = (NV_READ(NV04_PGRAPH_CTX_USER) >> 24) & (nouveau_fifo_number(dev)-1);
last = dev_priv->fifos[chid];
NV_WRITE(NV03_PFIFO_CACHES, 0x0);
DRM_INFO("NV: PGRAPH context switch interrupt channel %x -> %x\n",last->id, next->id);
/* NV_WRITE(NV03_PFIFO_CACHES, 0x0);
NV_WRITE(NV04_PFIFO_CACHE0_PULL0, 0x0);
NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x0);
NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x0);*/
NV_WRITE(NV04_PGRAPH_FIFO,0x0);
nouveau_wait_for_idle(dev);
if (last)
nv04_graph_save_context(last);
// save PGRAPH context
index=0;
for (i = 0; i<sizeof(nv04_graph_ctx_regs)/sizeof(nv04_graph_ctx_regs[0]); i++)
for (j = 0; j<nv04_graph_ctx_regs[i].number; j++)
{
dev_priv->fifos[channel_old]->pgraph_ctx[index] = NV_READ(nv04_graph_ctx_regs[i].reg+j*4);
index++;
}
nouveau_wait_for_idle(dev);
NV_WRITE(NV04_PGRAPH_CTX_CONTROL, 0x10000000);
NV_WRITE(NV04_PGRAPH_CTX_USER, (NV_READ(NV04_PGRAPH_CTX_USER) & 0xffffff) | (0x0f << 24));
// restore PGRAPH context
index=0;
for (i = 0; i<sizeof(nv04_graph_ctx_regs)/sizeof(nv04_graph_ctx_regs[0]); i++)
for (j = 0; j<nv04_graph_ctx_regs[i].number; j++)
{
NV_WRITE(nv04_graph_ctx_regs[i].reg+j*4, dev_priv->fifos[channel]->pgraph_ctx[index]);
index++;
}
nouveau_wait_for_idle(dev);
nv04_graph_load_context(next);
NV_WRITE(NV04_PGRAPH_CTX_CONTROL, 0x10010100);
NV_WRITE(NV04_PGRAPH_CTX_USER, channel << 24);
NV_WRITE(NV04_PGRAPH_CTX_USER, next->id << 24);
NV_WRITE(NV04_PGRAPH_FFINTFC_ST2, NV_READ(NV04_PGRAPH_FFINTFC_ST2)&0x000FFFFF);
NV_WRITE(NV04_PGRAPH_FIFO,0x0);
/* NV_WRITE(NV04_PGRAPH_FIFO,0x0);
NV_WRITE(NV04_PFIFO_CACHE0_PULL0, 0x0);
NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x1);
NV_WRITE(NV03_PFIFO_CACHES, 0x1);
NV_WRITE(NV03_PFIFO_CACHES, 0x1);*/
NV_WRITE(NV04_PGRAPH_FIFO,0x1);
}
@ -356,19 +408,30 @@ void nv04_graph_destroy_context(struct nouveau_channel *chan)
int nv04_graph_load_context(struct nouveau_channel *chan)
{
DRM_ERROR("stub!\n");
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
int i;
for (i = 0; i < sizeof(nv04_graph_ctx_regs)/sizeof(nv04_graph_ctx_regs[0]); i++)
NV_WRITE(nv04_graph_ctx_regs[i], chan->pgraph_ctx[i]);
return 0;
}
int nv04_graph_save_context(struct nouveau_channel *chan)
{
DRM_ERROR("stub!\n");
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
int i;
for (i = 0; i < sizeof(nv04_graph_ctx_regs)/sizeof(nv04_graph_ctx_regs[0]); i++)
chan->pgraph_ctx[i] = NV_READ(nv04_graph_ctx_regs[i]);
return 0;
}
int nv04_graph_init(struct drm_device *dev) {
struct drm_nouveau_private *dev_priv = dev->dev_private;
int i,sum=0;
NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) &
~NV_PMC_ENABLE_PGRAPH);
@ -380,23 +443,19 @@ int nv04_graph_init(struct drm_device *dev) {
NV_WRITE(NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
// check the context is big enough
for ( i = 0 ; i<sizeof(nv04_graph_ctx_regs)/sizeof(nv04_graph_ctx_regs[0]); i++)
sum+=nv04_graph_ctx_regs[i].number;
if ( sum*4>sizeof(dev_priv->fifos[0]->pgraph_ctx) )
if ( sizeof(nv04_graph_ctx_regs)>sizeof(dev_priv->fifos[0]->pgraph_ctx) )
DRM_ERROR("pgraph_ctx too small\n");
NV_WRITE(NV03_PGRAPH_INTR_EN, 0x00000000);
NV_WRITE(NV03_PGRAPH_INTR , 0xFFFFFFFF);
NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x000001FF);
NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x1230C000);
NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x72111101);
NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x11D5F071);
NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x1231c000);
NV_WRITE(NV04_PGRAPH_DEBUG_1, 0xf2d91100);
NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x11d5f870);
NV_WRITE(NV04_PGRAPH_DEBUG_3, 0x0004FF31);
NV_WRITE(NV04_PGRAPH_DEBUG_3, 0x4004FF31 |
(0x00D00000) |
(1<<29) |
(1<<31));
NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xfad4ff31);
NV_WRITE(NV04_PGRAPH_STATE , 0xFFFFFFFF);
NV_WRITE(NV04_PGRAPH_CTX_CONTROL , 0x10010100);

View File

@ -9,21 +9,18 @@ nv04_instmem_determine_amount(struct drm_device *dev)
int i;
/* Figure out how much instance memory we need */
switch (dev_priv->card_type) {
case NV_40:
if (dev_priv->card_type >= NV_40) {
/* We'll want more instance memory than this on some NV4x cards.
* There's a 16MB aperture to play with that maps onto the end
* of vram. For now, only reserve a small piece until we know
* more about what each chipset requires.
*/
dev_priv->ramin_rsvd_vram = (1*1024* 1024);
break;
default:
} else {
/*XXX: what *are* the limits on <NV40 cards?, and does RAMIN
* exist in vram on those cards as well?
*/
dev_priv->ramin_rsvd_vram = (512*1024);
break;
}
DRM_DEBUG("RAMIN size: %dKiB\n", dev_priv->ramin_rsvd_vram>>10);
@ -73,7 +70,6 @@ nv04_instmem_configure_fixed_tables(struct drm_device *dev)
case NV_11:
case NV_10:
case NV_04:
case NV_03:
default:
dev_priv->ramfc_offset = 0x11400;
dev_priv->ramfc_size = nouveau_fifo_number(dev) *
@ -97,6 +93,14 @@ int nv04_instmem_init(struct drm_device *dev)
* the space that was reserved for RAMHT/FC/RO.
*/
offset = dev_priv->ramfc_offset + dev_priv->ramfc_size;
/* On my NV4E, there's *something* clobbering the 16KiB just after
* where we setup these fixed tables. No idea what it is just yet,
* so reserve this space on all NV4X cards for now.
*/
if (dev_priv->card_type >= NV_40)
offset += 16*1024;
ret = nouveau_mem_init_heap(&dev_priv->ramin_heap,
offset, dev_priv->ramin_rsvd_vram - offset);
if (ret) {

View File

@ -27,13 +27,68 @@
#include "nouveau_drm.h"
#include "nouveau_drv.h"
#define NV10_FIFO_NUMBER 32
static void nv10_praph_pipe(struct drm_device *dev) {
struct pipe_state {
uint32_t pipe_0x0000[0x040/4];
uint32_t pipe_0x0040[0x010/4];
uint32_t pipe_0x0200[0x0c0/4];
uint32_t pipe_0x4400[0x080/4];
uint32_t pipe_0x6400[0x3b0/4];
uint32_t pipe_0x6800[0x2f0/4];
uint32_t pipe_0x6c00[0x030/4];
uint32_t pipe_0x7000[0x130/4];
uint32_t pipe_0x7400[0x0c0/4];
uint32_t pipe_0x7800[0x0c0/4];
};
/* TODO dynamic allocation ??? */
static struct pipe_state pipe_state[NV10_FIFO_NUMBER];
static void nv10_graph_save_pipe(struct nouveau_channel *chan) {
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct pipe_state *fifo_pipe_state = pipe_state + chan->id;
int i;
#define PIPE_SAVE(addr) \
do { \
NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, addr); \
for (i=0; i < sizeof(fifo_pipe_state->pipe_##addr)/sizeof(fifo_pipe_state->pipe_##addr[0]); i++) \
fifo_pipe_state->pipe_##addr[i] = NV_READ(NV10_PGRAPH_PIPE_DATA); \
} while (0)
PIPE_SAVE(0x4400);
PIPE_SAVE(0x0200);
PIPE_SAVE(0x6400);
PIPE_SAVE(0x6800);
PIPE_SAVE(0x6c00);
PIPE_SAVE(0x7000);
PIPE_SAVE(0x7400);
PIPE_SAVE(0x7800);
PIPE_SAVE(0x0040);
PIPE_SAVE(0x0000);
#undef PIPE_SAVE
}
static void nv10_graph_load_pipe(struct nouveau_channel *chan) {
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct pipe_state *fifo_pipe_state = pipe_state + chan->id;
int i;
uint32_t xfmode0, xfmode1;
#define PIPE_RESTORE(addr) \
do { \
NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, addr); \
for (i=0; i < sizeof(fifo_pipe_state->pipe_##addr)/sizeof(fifo_pipe_state->pipe_##addr[0]); i++) \
NV_WRITE(NV10_PGRAPH_PIPE_DATA, fifo_pipe_state->pipe_##addr[i]); \
} while (0)
nouveau_wait_for_idle(dev);
/* XXX check haiku comments */
xfmode0 = NV_READ(NV10_PGRAPH_XFMODE0);
xfmode1 = NV_READ(NV10_PGRAPH_XFMODE1);
NV_WRITE(NV10_PGRAPH_XFMODE0, 0x10000000);
NV_WRITE(NV10_PGRAPH_XFMODE1, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
@ -43,7 +98,6 @@ static void nv10_praph_pipe(struct drm_device *dev) {
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
for (i = 0; i < 3; i++)
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x3f800000);
@ -54,138 +108,179 @@ static void nv10_praph_pipe(struct drm_device *dev) {
NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000008);
NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00000200);
for (i = 0; i < 48; i++)
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
PIPE_RESTORE(0x0200);
nouveau_wait_for_idle(dev);
NV_WRITE(NV10_PGRAPH_XFMODE0, 0x00000000);
NV_WRITE(NV10_PGRAPH_XFMODE1, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00006400);
for (i = 0; i < 211; i++)
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x3f800000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x40000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x40000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x40000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x40000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x3f800000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x3f000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x3f000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x3f800000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x3f800000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x3f800000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x3f800000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x3f800000);
NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00006800);
for (i = 0; i < 162; i++)
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x3f800000);
for (i = 0; i < 25; i++)
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00006c00);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0xbf800000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00007000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x7149f2ca);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x7149f2ca);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x7149f2ca);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x7149f2ca);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x7149f2ca);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x7149f2ca);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x7149f2ca);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x7149f2ca);
for (i = 0; i < 35; i++)
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00007400);
for (i = 0; i < 48; i++)
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00007800);
for (i = 0; i < 48; i++)
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00004400);
for (i = 0; i < 32; i++)
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00000000);
for (i = 0; i < 16; i++)
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
for (i = 0; i < 4; i++)
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
/* restore XFMODE */
NV_WRITE(NV10_PGRAPH_XFMODE0, xfmode0);
NV_WRITE(NV10_PGRAPH_XFMODE1, xfmode1);
PIPE_RESTORE(0x6400);
PIPE_RESTORE(0x6800);
PIPE_RESTORE(0x6c00);
PIPE_RESTORE(0x7000);
PIPE_RESTORE(0x7400);
PIPE_RESTORE(0x7800);
PIPE_RESTORE(0x4400);
PIPE_RESTORE(0x0000);
PIPE_RESTORE(0x0040);
nouveau_wait_for_idle(dev);
#undef PIPE_RESTORE
}
/* TODO replace address with name
use loops */
static int nv10_graph_ctx_regs [] = {
NV03_PGRAPH_XY_LOGIC_MISC0,
/** Build the per-channel software copy of the PGRAPH PIPE state.
 *
 * Fills pipe_state[chan->id] with initial values for each PIPE address
 * region; nv10_graph_load_pipe() later uploads this buffer to the
 * hardware.  The constants come from mmio traces of the proprietary
 * driver, so their exact order and count must be preserved.  As IEEE-754
 * bit patterns: 0x3f800000 is 1.0f, 0x3f000000 is 0.5f, 0xbf800000 is
 * -1.0f, 0x7149f2ca is ~1e30f -- presumably transform/lighting defaults
 * (identity-like matrices); TODO confirm against hardware docs.
 */
static void nv10_graph_create_pipe(struct nouveau_channel *chan) {
	struct pipe_state *fifo_pipe_state = pipe_state + chan->id;
	uint32_t *fifo_pipe_state_addr;	/* write cursor into the current region's buffer */
	int i;

/* Point the write cursor at the software buffer for PIPE region 'addr'. */
#define PIPE_INIT(addr) \
	do { \
		fifo_pipe_state_addr = fifo_pipe_state->pipe_##addr; \
	} while (0)
/* Sanity check: complain if the region was not filled to exactly its
 * declared size (cursor must sit one past the end of the array). */
#define PIPE_INIT_END(addr) \
	do { \
		if (fifo_pipe_state_addr != \
				sizeof(fifo_pipe_state->pipe_##addr)/sizeof(fifo_pipe_state->pipe_##addr[0]) + fifo_pipe_state->pipe_##addr) \
			DRM_ERROR("incomplete pipe init for 0x%x : %p/%p\n", addr, fifo_pipe_state_addr, \
				sizeof(fifo_pipe_state->pipe_##addr)/sizeof(fifo_pipe_state->pipe_##addr[0]) + fifo_pipe_state->pipe_##addr); \
	} while (0)
/* Append one word to the current region and advance the cursor. */
#define NV_WRITE_PIPE_INIT(value) *(fifo_pipe_state_addr++) = value

	/* region 0x0200: 48 words, all zero */
	PIPE_INIT(0x0200);
	for (i = 0; i < 48; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x0200);

	/* region 0x6400: 211 zeros followed by 25 float-pattern words */
	PIPE_INIT(0x6400);
	for (i = 0; i < 211; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	NV_WRITE_PIPE_INIT(0x40000000);
	NV_WRITE_PIPE_INIT(0x40000000);
	NV_WRITE_PIPE_INIT(0x40000000);
	NV_WRITE_PIPE_INIT(0x40000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x3f000000);
	NV_WRITE_PIPE_INIT(0x3f000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	PIPE_INIT_END(0x6400);

	/* region 0x6800: a single 1.0f at word 162, zeros elsewhere */
	PIPE_INIT(0x6800);
	for (i = 0; i < 162; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	for (i = 0; i < 25; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x6800);

	/* region 0x6c00: 12 words, a single -1.0f at word 4 */
	PIPE_INIT(0x6c00);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0xbf800000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x6c00);

	/* region 0x7000: 12 zeros, then 0x7149f2ca (~1e30f) at every
	 * fourth word (8 occurrences), then 35 trailing zeros */
	PIPE_INIT(0x7000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	for (i = 0; i < 35; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x7000);

	/* remaining regions are all-zero of various sizes */
	PIPE_INIT(0x7400);
	for (i = 0; i < 48; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x7400);

	PIPE_INIT(0x7800);
	for (i = 0; i < 48; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x7800);

	PIPE_INIT(0x4400);
	for (i = 0; i < 32; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x4400);

	PIPE_INIT(0x0000);
	for (i = 0; i < 16; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x0000);

	PIPE_INIT(0x0040);
	for (i = 0; i < 4; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x0040);

#undef PIPE_INIT
#undef PIPE_INIT_END
#undef NV_WRITE_PIPE_INIT
}
static int nv10_graph_ctx_regs [] = {
NV10_PGRAPH_CTX_SWITCH1,
NV10_PGRAPH_CTX_SWITCH2,
NV10_PGRAPH_CTX_SWITCH3,
@ -455,6 +550,7 @@ NV03_PGRAPH_ABS_UCLIPA_YMIN,
NV03_PGRAPH_ABS_UCLIPA_YMAX,
NV03_PGRAPH_ABS_ICLIP_XMAX,
NV03_PGRAPH_ABS_ICLIP_YMAX,
NV03_PGRAPH_XY_LOGIC_MISC0,
NV03_PGRAPH_XY_LOGIC_MISC1,
NV03_PGRAPH_XY_LOGIC_MISC2,
NV03_PGRAPH_XY_LOGIC_MISC3,
@ -462,18 +558,18 @@ NV03_PGRAPH_CLIPX_0,
NV03_PGRAPH_CLIPX_1,
NV03_PGRAPH_CLIPY_0,
NV03_PGRAPH_CLIPY_1,
0x00400e40,
0x00400e44,
0x00400e48,
0x00400e4c,
0x00400e50,
0x00400e54,
0x00400e58,
0x00400e5c,
0x00400e60,
0x00400e64,
0x00400e68,
0x00400e6c,
NV10_PGRAPH_COMBINER0_IN_ALPHA,
NV10_PGRAPH_COMBINER1_IN_ALPHA,
NV10_PGRAPH_COMBINER0_IN_RGB,
NV10_PGRAPH_COMBINER1_IN_RGB,
NV10_PGRAPH_COMBINER_COLOR0,
NV10_PGRAPH_COMBINER_COLOR1,
NV10_PGRAPH_COMBINER0_OUT_ALPHA,
NV10_PGRAPH_COMBINER1_OUT_ALPHA,
NV10_PGRAPH_COMBINER0_OUT_RGB,
NV10_PGRAPH_COMBINER1_OUT_RGB,
NV10_PGRAPH_COMBINER_FINAL0,
NV10_PGRAPH_COMBINER_FINAL1,
0x00400e00,
0x00400e04,
0x00400e08,
@ -557,6 +653,8 @@ int nv10_graph_load_context(struct nouveau_channel *chan)
NV_WRITE(nv17_graph_ctx_regs[j], chan->pgraph_ctx[i]);
}
nv10_graph_load_pipe(chan);
return 0;
}
@ -573,49 +671,66 @@ int nv10_graph_save_context(struct nouveau_channel *chan)
chan->pgraph_ctx[i] = NV_READ(nv17_graph_ctx_regs[j]);
}
nv10_graph_save_pipe(chan);
return 0;
}
void nouveau_nv10_context_switch(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct drm_nouveau_private *dev_priv;
struct nouveau_channel *next, *last;
int chid;
chid = NV_READ(NV03_PFIFO_CACHE1_PUSH1)&(nouveau_fifo_number(dev)-1);
if (!dev) {
DRM_DEBUG("Invalid drm_device\n");
return;
}
dev_priv = dev->dev_private;
if (!dev_priv) {
DRM_DEBUG("Invalid drm_nouveau_private\n");
return;
}
if (!dev_priv->fifos) {
DRM_DEBUG("Invalid drm_nouveau_private->fifos\n");
return;
}
chid = (NV_READ(NV04_PGRAPH_TRAPPED_ADDR) >> 20)&(nouveau_fifo_number(dev)-1);
next = dev_priv->fifos[chid];
if (!next) {
DRM_DEBUG("Invalid next channel\n");
return;
}
chid = (NV_READ(NV10_PGRAPH_CTX_USER) >> 24) & (nouveau_fifo_number(dev)-1);
last = dev_priv->fifos[chid];
DRM_INFO("NV: PGRAPH context switch interrupt channel %x -> %x\n",
last->id, next->id);
if (!last) {
DRM_DEBUG("WARNING: Invalid last channel, switch to %x\n",
next->id);
} else {
DRM_DEBUG("NV: PGRAPH context switch interrupt channel %x -> %x\n",
last->id, next->id);
}
NV_WRITE(NV04_PGRAPH_FIFO,0x0);
#if 0
NV_WRITE(NV_PFIFO_CACH1_PUL0, 0x00000000);
NV_WRITE(NV_PFIFO_CACH1_PUL1, 0x00000000);
NV_WRITE(NV_PFIFO_CACHES, 0x00000000);
#endif
nv10_graph_save_context(last);
if (last) {
nouveau_wait_for_idle(dev);
nv10_graph_save_context(last);
}
nouveau_wait_for_idle(dev);
NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10000000);
NV_WRITE(NV10_PGRAPH_CTX_USER, (NV_READ(NV10_PGRAPH_CTX_USER) & 0xffffff) | (0x1f << 24));
nouveau_wait_for_idle(dev);
nv10_graph_load_context(next);
NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10010100);
NV_WRITE(NV10_PGRAPH_CTX_USER, next->id << 24);
NV_WRITE(NV10_PGRAPH_FFINTFC_ST2, NV_READ(NV10_PGRAPH_FFINTFC_ST2)&0xCFFFFFFF);
#if 0
NV_WRITE(NV_PFIFO_CACH1_PUL0, 0x00000001);
NV_WRITE(NV_PFIFO_CACH1_PUL1, 0x00000001);
NV_WRITE(NV_PFIFO_CACHES, 0x00000001);
#endif
NV_WRITE(NV04_PGRAPH_FIFO,0x1);
}
@ -628,12 +743,14 @@ void nouveau_nv10_context_switch(struct drm_device *dev)
int nv10_graph_create_context(struct nouveau_channel *chan) {
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t tmp, vramsz;
DRM_DEBUG("nv10_graph_context_create %d\n", chan->id);
memset(chan->pgraph_ctx, 0, sizeof(chan->pgraph_ctx));
/* mmio trace suggest that should be done in ddx with methods/objects */
#if 0
uint32_t tmp, vramsz;
/* per channel init from ddx */
tmp = NV_READ(NV10_PGRAPH_SURFACE) & 0x0007ff00;
/*XXX the original ddx code, does this in 2 steps :
@ -658,27 +775,49 @@ int nv10_graph_create_context(struct nouveau_channel *chan) {
NV_WRITE_CTX(NV03_PGRAPH_ABS_UCLIP_YMIN, 0);
NV_WRITE_CTX(NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff);
NV_WRITE_CTX(NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff);
#endif
NV_WRITE_CTX(0x00400e88, 0x08000000);
NV_WRITE_CTX(0x00400e9c, 0x4b7fffff);
NV_WRITE_CTX(NV03_PGRAPH_XY_LOGIC_MISC0, 0x0001ffff);
/* is it really needed ??? */
NV_WRITE_CTX(0x00400e10, 0x00001000);
NV_WRITE_CTX(0x00400e14, 0x00001000);
NV_WRITE_CTX(0x00400e30, 0x00080008);
NV_WRITE_CTX(0x00400e34, 0x00080008);
if (dev_priv->chipset>=0x17) {
/* is it really needed ??? */
NV_WRITE_CTX(NV10_PGRAPH_DEBUG_4, NV_READ(NV10_PGRAPH_DEBUG_4));
NV_WRITE_CTX(0x004006b0, NV_READ(0x004006b0));
NV_WRITE_CTX(0x00400eac, 0x0fff0000);
NV_WRITE_CTX(0x00400eb0, 0x0fff0000);
NV_WRITE_CTX(0x00400ec0, 0x00000080);
NV_WRITE_CTX(0x00400ed0, 0x00000080);
}
NV_WRITE_CTX(NV10_PGRAPH_CTX_USER, chan->id << 24);
/* for the first channel init the regs */
if (dev_priv->fifo_alloc_count == 0)
nv10_graph_load_context(chan);
//XXX should be saved/restored for each fifo
//we supposed here we have X fifo and only one 3D fifo.
nv10_praph_pipe(dev);
nv10_graph_create_pipe(chan);
return 0;
}
/** Tear down a channel's PGRAPH state on channel destruction.
 *
 * If the channel being destroyed currently owns the graphics context
 * (its id matches the channel field of NV10_PGRAPH_CTX_USER), a fresh
 * context is created and loaded for it before it goes away, so PGRAPH
 * is not left pointing at stale per-channel state.  PGRAPH fifo access
 * is disabled for the duration of the sequence.
 *
 * Fix vs. previous revision: corrected the misspelled log messages
 * ("cleanning" -> "cleaning", "reseting" -> "resetting").
 */
void nv10_graph_destroy_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int chid;

	/* Channel id currently resident in PGRAPH (bits 31:24 of CTX_USER). */
	chid = (NV_READ(NV10_PGRAPH_CTX_USER) >> 24) & (nouveau_fifo_number(dev)-1);

	/* NOTE(review): disabling PGRAPH fifo access here presumably avoids a
	 * context switch while we touch graph registers -- confirm whether the
	 * PGRAPH interrupt should be masked instead.
	 */
	NV_WRITE(NV04_PGRAPH_FIFO,0x0);
	if (chid == chan->id) {
		DRM_INFO("cleaning a channel with graph in current context\n");
		nouveau_wait_for_idle(dev);
		DRM_INFO("resetting current graph context\n");
		nv10_graph_create_context(chan);
		nv10_graph_load_context(chan);
	}
	NV_WRITE(NV04_PGRAPH_FIFO,0x1);
}
int nv10_graph_init(struct drm_device *dev) {
@ -696,10 +835,17 @@ int nv10_graph_init(struct drm_device *dev) {
NV_WRITE(NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x00000000);
NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x00118700);
NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x24E00810);
NV_WRITE(NV04_PGRAPH_DEBUG_3, 0x55DE0030 |
//NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x24E00810); /* 0x25f92ad9 */
NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x25f92ad9);
NV_WRITE(NV04_PGRAPH_DEBUG_3, 0x55DE0830 |
(1<<29) |
(1<<31));
if (dev_priv->chipset>=0x17) {
NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x1f000000);
NV_WRITE(0x004006b0, 0x40000020);
}
else
NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x00000000);
/* copy tile info from PFB */
for (i=0; i<NV10_PFB_TILE__SIZE; i++) {
@ -709,6 +855,10 @@ int nv10_graph_init(struct drm_device *dev) {
NV_WRITE(NV10_PGRAPH_TSTATUS(i), NV_READ(NV10_PFB_TSTATUS(i)));
}
NV_WRITE(NV10_PGRAPH_CTX_SWITCH1, 0x00000000);
NV_WRITE(NV10_PGRAPH_CTX_SWITCH2, 0x00000000);
NV_WRITE(NV10_PGRAPH_CTX_SWITCH3, 0x00000000);
NV_WRITE(NV10_PGRAPH_CTX_SWITCH4, 0x00000000);
NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10010100);
NV_WRITE(NV10_PGRAPH_STATE , 0xFFFFFFFF);
NV_WRITE(NV04_PGRAPH_FIFO , 0x00000001);

File diff suppressed because it is too large Load Diff

View File

@ -34,6 +34,7 @@
* between the contexts
*/
#define NV40_GRCTX_SIZE (175*1024)
#define NV41_GRCTX_SIZE (92*1024)
#define NV43_GRCTX_SIZE (70*1024)
#define NV46_GRCTX_SIZE (70*1024) /* probably ~64KiB */
#define NV49_GRCTX_SIZE (164640)
@ -187,6 +188,116 @@ nv40_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
INSTANCE_WR(ctx, i/4, 0x3f800000);
}
/** Fill a freshly-allocated NV41/NV42 PGRAPH context object with its
 * initial register image.
 *
 * @param dev  DRM device (used implicitly by INSTANCE_WR via dev_priv).
 * @param ctx  instance-memory object holding the per-channel context.
 *
 * The offsets and values below are taken verbatim from mmio traces of
 * the binary driver; their meaning is undocumented, so do not "clean
 * them up".  Word 0 points the context back at its own instance
 * address.
 *
 * Fix vs. previous revision: dropped the stray semicolon after the
 * closing brace of the function body.
 */
static void
nv41_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int i;

	/* self-pointer: context references its own pramin offset */
	INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start);
	INSTANCE_WR(ctx, 0x00000024/4, 0x0000ffff);
	INSTANCE_WR(ctx, 0x00000028/4, 0x0000ffff);
	INSTANCE_WR(ctx, 0x00000030/4, 0x00000001);
	INSTANCE_WR(ctx, 0x0000011c/4, 0x20010001);
	INSTANCE_WR(ctx, 0x00000120/4, 0x0f73ef00);
	INSTANCE_WR(ctx, 0x00000128/4, 0x02008821);
	for (i = 0x00000178; i <= 0x00000180; i += 4)
		INSTANCE_WR(ctx, i/4, 0x00000040);
	INSTANCE_WR(ctx, 0x00000188/4, 0x00000040);
	for (i = 0x00000194; i <= 0x000001b0; i += 4)
		INSTANCE_WR(ctx, i/4, 0x80000000);
	INSTANCE_WR(ctx, 0x000001d0/4, 0x0b0b0b0c);
	INSTANCE_WR(ctx, 0x00000340/4, 0x00040000);
	for (i = 0x00000350; i <= 0x0000035c; i += 4)
		INSTANCE_WR(ctx, i/4, 0x55555555);
	INSTANCE_WR(ctx, 0x00000388/4, 0x00000008);
	INSTANCE_WR(ctx, 0x0000039c/4, 0x00001010);
	INSTANCE_WR(ctx, 0x000003cc/4, 0x00000111);
	INSTANCE_WR(ctx, 0x000003d0/4, 0x00080060);
	INSTANCE_WR(ctx, 0x000003ec/4, 0x00000080);
	INSTANCE_WR(ctx, 0x000003f0/4, 0xffff0000);
	INSTANCE_WR(ctx, 0x000003f4/4, 0x00000001);
	INSTANCE_WR(ctx, 0x00000408/4, 0x46400000);
	INSTANCE_WR(ctx, 0x00000418/4, 0xffff0000);
	INSTANCE_WR(ctx, 0x00000424/4, 0x0fff0000);
	INSTANCE_WR(ctx, 0x00000428/4, 0x0fff0000);
	INSTANCE_WR(ctx, 0x00000430/4, 0x00011100);
	for (i = 0x0000044c; i <= 0x00000488; i += 4)
		INSTANCE_WR(ctx, i/4, 0x07ff0000);
	INSTANCE_WR(ctx, 0x00000494/4, 0x4b7fffff);
	INSTANCE_WR(ctx, 0x000004bc/4, 0x30201000);
	INSTANCE_WR(ctx, 0x000004c0/4, 0x70605040);
	INSTANCE_WR(ctx, 0x000004c4/4, 0xb8a89888);
	INSTANCE_WR(ctx, 0x000004c8/4, 0xf8e8d8c8);
	INSTANCE_WR(ctx, 0x000004dc/4, 0x40100000);
	INSTANCE_WR(ctx, 0x000004f8/4, 0x0000ffff);
	INSTANCE_WR(ctx, 0x0000052c/4, 0x435185d6);
	INSTANCE_WR(ctx, 0x00000530/4, 0x2155b699);
	INSTANCE_WR(ctx, 0x00000534/4, 0xfedcba98);
	INSTANCE_WR(ctx, 0x00000538/4, 0x00000098);
	INSTANCE_WR(ctx, 0x00000548/4, 0xffffffff);
	INSTANCE_WR(ctx, 0x0000054c/4, 0x00ff7000);
	INSTANCE_WR(ctx, 0x00000550/4, 0x0000ffff);
	INSTANCE_WR(ctx, 0x00000560/4, 0x00ff0000);
	INSTANCE_WR(ctx, 0x00000598/4, 0x00ffff00);
	/* blocks of repeated per-unit values (presumably per texture unit) */
	for (i = 0x000005dc; i <= 0x00000618; i += 4)
		INSTANCE_WR(ctx, i/4, 0x00018488);
	for (i = 0x0000061c; i <= 0x00000658; i += 4)
		INSTANCE_WR(ctx, i/4, 0x00028202);
	for (i = 0x0000069c; i <= 0x000006d8; i += 4)
		INSTANCE_WR(ctx, i/4, 0x0000aae4);
	for (i = 0x000006dc; i <= 0x00000718; i += 4)
		INSTANCE_WR(ctx, i/4, 0x01012000);
	for (i = 0x0000071c; i <= 0x00000758; i += 4)
		INSTANCE_WR(ctx, i/4, 0x00080008);
	for (i = 0x0000079c; i <= 0x000007d8; i += 4)
		INSTANCE_WR(ctx, i/4, 0x00100008);
	for (i = 0x0000082c; i <= 0x00000838; i += 4)
		INSTANCE_WR(ctx, i/4, 0x0001bc80);
	for (i = 0x0000083c; i <= 0x00000848; i += 4)
		INSTANCE_WR(ctx, i/4, 0x00000202);
	for (i = 0x0000085c; i <= 0x00000868; i += 4)
		INSTANCE_WR(ctx, i/4, 0x00000008);
	for (i = 0x0000087c; i <= 0x00000888; i += 4)
		INSTANCE_WR(ctx, i/4, 0x00080008);
	INSTANCE_WR(ctx, 0x0000089c/4, 0x00000002);
	INSTANCE_WR(ctx, 0x000008d0/4, 0x00000021);
	INSTANCE_WR(ctx, 0x000008d4/4, 0x030c30c3);
	INSTANCE_WR(ctx, 0x000008e0/4, 0x3e020200);
	INSTANCE_WR(ctx, 0x000008e4/4, 0x00ffffff);
	INSTANCE_WR(ctx, 0x000008e8/4, 0x20103f00);
	INSTANCE_WR(ctx, 0x000008f4/4, 0x00020000);
	INSTANCE_WR(ctx, 0x0000092c/4, 0x00008100);
	INSTANCE_WR(ctx, 0x000009b8/4, 0x00000001);
	INSTANCE_WR(ctx, 0x000009fc/4, 0x00001001);
	INSTANCE_WR(ctx, 0x00000a04/4, 0x00000003);
	INSTANCE_WR(ctx, 0x00000a08/4, 0x00888001);
	INSTANCE_WR(ctx, 0x00000aac/4, 0x00000005);
	INSTANCE_WR(ctx, 0x00000ab8/4, 0x0000ffff);
	for (i = 0x00000ad4; i <= 0x00000ae4; i += 4)
		INSTANCE_WR(ctx, i/4, 0x00005555);
	INSTANCE_WR(ctx, 0x00000ae8/4, 0x00000001);
	INSTANCE_WR(ctx, 0x00000b20/4, 0x00000001);
	/* large strided regions of 1 and 1.0f (0x3f800000) -- presumably
	 * per-vertex-unit default state; strides from mmio trace */
	for (i = 0x00002ee8; i <= 0x00002f60; i += 8)
		INSTANCE_WR(ctx, i/4, 0x3f800000);
	for (i = 0x00005168; i <= 0x00007358; i += 24)
		INSTANCE_WR(ctx, i/4, 0x00000001);
	for (i = 0x00007368; i <= 0x00007758; i += 16)
		INSTANCE_WR(ctx, i/4, 0x3f800000);
	for (i = 0x0000a068; i <= 0x0000c258; i += 24)
		INSTANCE_WR(ctx, i/4, 0x00000001);
	for (i = 0x0000c268; i <= 0x0000c658; i += 16)
		INSTANCE_WR(ctx, i/4, 0x3f800000);
	for (i = 0x0000ef68; i <= 0x00011158; i += 24)
		INSTANCE_WR(ctx, i/4, 0x00000001);
	for (i = 0x00011168; i <= 0x00011558; i += 16)
		INSTANCE_WR(ctx, i/4, 0x3f800000);
	for (i = 0x00013e68; i <= 0x00016058; i += 24)
		INSTANCE_WR(ctx, i/4, 0x00000001);
	for (i = 0x00016068; i <= 0x00016458; i += 16)
		INSTANCE_WR(ctx, i/4, 0x3f800000);
}
static void
nv43_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
{
@ -1237,6 +1348,11 @@ nv40_graph_create_context(struct nouveau_channel *chan)
ctx_size = NV40_GRCTX_SIZE;
ctx_init = nv40_graph_context_init;
break;
case 0x41:
case 0x42:
ctx_size = NV41_GRCTX_SIZE;
ctx_init = nv41_graph_context_init;
break;
case 0x43:
ctx_size = NV43_GRCTX_SIZE;
ctx_init = nv43_graph_context_init;
@ -1249,6 +1365,7 @@ nv40_graph_create_context(struct nouveau_channel *chan)
ctx_size = NV49_GRCTX_SIZE;
ctx_init = nv49_graph_context_init;
break;
case 0x44:
case 0x4a:
ctx_size = NV4A_GRCTX_SIZE;
ctx_init = nv4a_graph_context_init;
@ -1292,24 +1409,34 @@ static int
nv40_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t old_cp, tv = 1000;
uint32_t old_cp, tv = 1000, tmp;
int i;
old_cp = NV_READ(NV20_PGRAPH_CHANNEL_CTX_POINTER);
NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
NV_WRITE(NV40_PGRAPH_CTXCTL_0310,
save ? NV40_PGRAPH_CTXCTL_0310_XFER_SAVE :
NV40_PGRAPH_CTXCTL_0310_XFER_LOAD);
NV_WRITE(NV40_PGRAPH_CTXCTL_0304, NV40_PGRAPH_CTXCTL_0304_XFER_CTX);
tmp = NV_READ(NV40_PGRAPH_CTXCTL_0310);
tmp |= save ? NV40_PGRAPH_CTXCTL_0310_XFER_SAVE :
NV40_PGRAPH_CTXCTL_0310_XFER_LOAD;
NV_WRITE(NV40_PGRAPH_CTXCTL_0310, tmp);
tmp = NV_READ(NV40_PGRAPH_CTXCTL_0304);
tmp |= NV40_PGRAPH_CTXCTL_0304_XFER_CTX;
NV_WRITE(NV40_PGRAPH_CTXCTL_0304, tmp);
for (i = 0; i < tv; i++) {
if (NV_READ(NV40_PGRAPH_CTXCTL_030C) == 0)
break;
}
NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, old_cp);
if (i == tv) {
DRM_ERROR("failed: inst=0x%08x save=%d\n", inst, save);
uint32_t ucstat = NV_READ(NV40_PGRAPH_CTXCTL_UCODE_STAT);
DRM_ERROR("Failed: Instance=0x%08x Save=%d\n", inst, save);
DRM_ERROR("IP: 0x%02x, Opcode: 0x%08x\n",
ucstat >> NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT,
ucstat & NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK);
DRM_ERROR("0x40030C = 0x%08x\n",
NV_READ(NV40_PGRAPH_CTXCTL_030C));
return -EBUSY;
@ -1420,6 +1547,37 @@ static uint32_t nv40_ctx_voodoo[] = {
~0
};
/* Context-switch microcode ("voodoo") for NV41/NV42 PGRAPH, uploaded by
 * nv40_graph_init().  Opaque opcodes dumped from the binary driver --
 * do not edit by hand.  The ~0 word terminates the program. */
static uint32_t nv41_ctx_voodoo[] = {
	0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
	0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00408f65, 0x00409306,
	0x0040a068, 0x0040198f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042,
	0x00200001, 0x0060000a, 0x00700000, 0x001040c5, 0x00401826, 0x00401968,
	0x0060000d, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080,
	0x004020e6, 0x007000a0, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d,
	0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4,
	0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e,
	0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143,
	0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10,
	0x001046ec, 0x00500060, 0x00404087, 0x0060000d, 0x004079e6, 0x002000f1,
	0x0060000a, 0x00148653, 0x00104668, 0x0010c66d, 0x00120682, 0x0011068b,
	0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6,
	0x00200020, 0x001006cc, 0x001046ed, 0x001246f0, 0x002000c0, 0x00100700,
	0x0010c3d7, 0x001043e1, 0x00500060, 0x00200233, 0x0060000a, 0x00104800,
	0x00108901, 0x00124920, 0x0020001f, 0x00100940, 0x00140965, 0x00148a00,
	0x00108a14, 0x00200020, 0x00100b00, 0x00134b2c, 0x0010cd00, 0x0010cd04,
	0x00114d08, 0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00, 0x00104f06,
	0x002002d2, 0x0060000a, 0x00300000, 0x00200680, 0x00407200, 0x00200684,
	0x00800001, 0x00200b1a, 0x0060000a, 0x00206380, 0x0040788a, 0x00201480,
	0x00800041, 0x00408900, 0x00600006, 0x004085e6, 0x00700080, 0x0020007a,
	0x0060000a, 0x00104280, 0x002002d2, 0x0060000a, 0x00200004, 0x00800001,
	0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x0040a068, 0x00700000,
	0x00200000, 0x0060000a, 0x00106002, 0x00700080, 0x00400a68, 0x00500060,
	0x00600007, 0x00409388, 0x0060000f, 0x00500060, 0x00200000, 0x0060000a,
	0x00700000, 0x00106001, 0x00910880, 0x00901ffe, 0x00940400, 0x00200020,
	0x0060000b, 0x00500069, 0x0060000c, 0x00402168, 0x0040a206, 0x0040a305,
	0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0
};
static uint32_t nv43_ctx_voodoo[] = {
0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409565, 0x00409a06,
@ -1453,6 +1611,39 @@ static uint32_t nv43_ctx_voodoo[] = {
~0
};
/* Context-switch microcode ("voodoo") for NV44 PGRAPH, uploaded by
 * nv40_graph_init().  Opaque opcodes dumped from the binary driver --
 * do not edit by hand.  The ~0 word terminates the program. */
static uint32_t nv44_ctx_voodoo[] = {
	0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
	0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409a65, 0x00409f06,
	0x0040ac68, 0x0040248f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042,
	0x001041c6, 0x00104040, 0x00200001, 0x0060000a, 0x00700000, 0x001040c5,
	0x00402320, 0x00402321, 0x00402322, 0x00402324, 0x00402326, 0x0040232b,
	0x001040c5, 0x00402328, 0x001040c5, 0x00402320, 0x00402468, 0x0060000d,
	0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080, 0x00402be6,
	0x007000a0, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d, 0x00110158,
	0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4, 0x001041c9,
	0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e, 0x001242c0,
	0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143, 0x0011415f,
	0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10, 0x001046ec,
	0x00500060, 0x00404b87, 0x0060000d, 0x004084e6, 0x002000f1, 0x0060000a,
	0x00148653, 0x00104668, 0x0010c66d, 0x00120682, 0x0011068b, 0x00168691,
	0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6, 0x001646cc,
	0x001186e6, 0x001046ed, 0x001246f0, 0x002000c0, 0x00100700, 0x0010c3d7,
	0x001043e1, 0x00500060, 0x00200232, 0x0060000a, 0x00104800, 0x00108901,
	0x00104910, 0x00124920, 0x0020001f, 0x00100940, 0x00140965, 0x00148a00,
	0x00108a14, 0x00160b00, 0x00134b2c, 0x0010cd00, 0x0010cd04, 0x0010cd08,
	0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00, 0x00104f06, 0x002002c8,
	0x0060000a, 0x00300000, 0x00200080, 0x00407d00, 0x00200084, 0x00800001,
	0x00200510, 0x0060000a, 0x002037e0, 0x0040838a, 0x00201320, 0x00800029,
	0x00409400, 0x00600006, 0x004090e6, 0x00700080, 0x0020007a, 0x0060000a,
	0x00104280, 0x002002c8, 0x0060000a, 0x00200004, 0x00800001, 0x00700000,
	0x00200000, 0x0060000a, 0x00106002, 0x0040ac68, 0x00700000, 0x00200000,
	0x0060000a, 0x00106002, 0x00700080, 0x00400a68, 0x00500060, 0x00600007,
	0x00409e88, 0x0060000f, 0x00000000, 0x00500060, 0x00200000, 0x0060000a,
	0x00700000, 0x00106001, 0x00910880, 0x00901ffe, 0x01940000, 0x00200020,
	0x0060000b, 0x00500069, 0x0060000c, 0x00402c68, 0x0040ae06, 0x0040af05,
	0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0
};
static uint32_t nv46_ctx_voodoo[] = {
0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00408f65, 0x00409306,
@ -1550,6 +1741,37 @@ static uint32_t nv4a_ctx_voodoo[] = {
0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0
};
/* Context-switch microcode ("voodoo") for NV4C PGRAPH, uploaded by
 * nv40_graph_init().  Opaque opcodes dumped from the binary driver --
 * do not edit by hand.  The ~0 word terminates the program. */
static uint32_t nv4c_ctx_voodoo[] = {
	0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
	0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409065, 0x00409406,
	0x0040a168, 0x0040198f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042,
	0x00200001, 0x0060000a, 0x00700000, 0x001040c5, 0x00401826, 0x00401968,
	0x0060000d, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080,
	0x004020e6, 0x007000a0, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d,
	0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4,
	0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e,
	0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143,
	0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10,
	0x0010427e, 0x001046ec, 0x00500060, 0x00404187, 0x0060000d, 0x00407ae6,
	0x002000f2, 0x0060000a, 0x00148653, 0x00104668, 0x0010c66d, 0x00120682,
	0x0011068b, 0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4,
	0x001146c6, 0x00200020, 0x001006cc, 0x001046ed, 0x001246f0, 0x002000c0,
	0x00100700, 0x0010c3d7, 0x001043e1, 0x00500060, 0x00200234, 0x0060000a,
	0x00104800, 0x00108901, 0x00104910, 0x00124920, 0x0020001f, 0x00100940,
	0x00140965, 0x00148a00, 0x00108a14, 0x00140b00, 0x00134b2c, 0x0010cd00,
	0x0010cd04, 0x00104d08, 0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00,
	0x00104f06, 0x002002c0, 0x0060000a, 0x00300000, 0x00200080, 0x00407300,
	0x00200084, 0x00800001, 0x00200508, 0x0060000a, 0x00201320, 0x0040798a,
	0xfffffaf8, 0x00800029, 0x00408a00, 0x00600006, 0x004086e6, 0x00700080,
	0x0020007a, 0x0060000a, 0x00104280, 0x002002c0, 0x0060000a, 0x00200004,
	0x00800001, 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x0040a168,
	0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x00700080, 0x00400a68,
	0x00500060, 0x00600007, 0x00409488, 0x0060000f, 0x00500060, 0x00200000,
	0x0060000a, 0x00700000, 0x00106001, 0x00910880, 0x00901ffe, 0x01940000,
	0x00200020, 0x0060000b, 0x00500069, 0x0060000c, 0x00402168, 0x0040a306,
	0x0040a405, 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0
};
static uint32_t nv4e_ctx_voodoo[] = {
0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409565, 0x00409a06,
@ -1608,11 +1830,15 @@ nv40_graph_init(struct drm_device *dev)
switch (dev_priv->chipset) {
case 0x40: ctx_voodoo = nv40_ctx_voodoo; break;
case 0x41:
case 0x42: ctx_voodoo = nv41_ctx_voodoo; break;
case 0x43: ctx_voodoo = nv43_ctx_voodoo; break;
case 0x44: ctx_voodoo = nv44_ctx_voodoo; break;
case 0x46: ctx_voodoo = nv46_ctx_voodoo; break;
case 0x49: ctx_voodoo = nv49_4b_ctx_voodoo; break;
case 0x4a: ctx_voodoo = nv4a_ctx_voodoo; break;
case 0x4b: ctx_voodoo = nv49_4b_ctx_voodoo; break;
case 0x4c: ctx_voodoo = nv4c_ctx_voodoo; break;
case 0x4e: ctx_voodoo = nv4e_ctx_voodoo; break;
default:
DRM_ERROR("Unknown ctx_voodoo for chipset 0x%02x\n",

View File

@ -63,24 +63,17 @@ nv50_fifo_channel_enable(struct drm_device *dev, int channel, int nt)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->fifos[channel];
uint32_t inst;
DRM_DEBUG("ch%d\n", channel);
if (IS_G80) {
if (!chan->ramin)
return -EINVAL;
if (!chan->ramfc)
return -EINVAL;
NV_WRITE(NV50_PFIFO_CTX_TABLE(channel),
(chan->ramin->instance >> 12) |
NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED);
} else {
if (!chan->ramfc)
return -EINVAL;
NV_WRITE(NV50_PFIFO_CTX_TABLE(channel),
(chan->ramfc->instance >> 8) |
NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED);
}
if (IS_G80) inst = chan->ramfc->instance >> 12;
else inst = chan->ramfc->instance >> 8;
NV_WRITE(NV50_PFIFO_CTX_TABLE(channel),
inst | NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED);
if (!nt) nv50_fifo_init_thingo(dev);
return 0;
@ -90,16 +83,13 @@ static void
nv50_fifo_channel_disable(struct drm_device *dev, int channel, int nt)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t inst;
DRM_DEBUG("ch%d, nt=%d\n", channel, nt);
if (IS_G80) {
NV_WRITE(NV50_PFIFO_CTX_TABLE(channel),
NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80);
} else {
NV_WRITE(NV50_PFIFO_CTX_TABLE(channel),
NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84);
}
if (IS_G80) inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80;
else inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84;
NV_WRITE(NV50_PFIFO_CTX_TABLE(channel), inst);
if (!nt) nv50_fifo_init_thingo(dev);
}
@ -234,7 +224,9 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
if (IS_G80) {
uint32_t ramfc_offset = chan->ramin->gpuobj->im_pramin->start;
if ((ret = nouveau_gpuobj_new_fake(dev, ramfc_offset, ~0, 0x100,
uint32_t vram_offset = chan->ramin->gpuobj->im_backing->start;
if ((ret = nouveau_gpuobj_new_fake(dev, ramfc_offset,
vram_offset, 0x100,
NVOBJ_FLAG_ZERO_ALLOC |
NVOBJ_FLAG_ZERO_FREE,
&ramfc, &chan->ramfc)))

View File

@ -222,11 +222,7 @@ typedef struct drm_r128_init {
R128_INIT_CCE = 0x01,
R128_CLEANUP_CCE = 0x02
} func;
#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0)
int sarea_priv_offset;
#else
unsigned long sarea_priv_offset;
#endif
int is_pci;
int cce_mode;
int cce_secure;
@ -240,21 +236,12 @@ typedef struct drm_r128_init {
unsigned int depth_offset, depth_pitch;
unsigned int span_offset;
#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0)
unsigned int fb_offset;
unsigned int mmio_offset;
unsigned int ring_offset;
unsigned int ring_rptr_offset;
unsigned int buffers_offset;
unsigned int agp_textures_offset;
#else
unsigned long fb_offset;
unsigned long mmio_offset;
unsigned long ring_offset;
unsigned long ring_rptr_offset;
unsigned long buffers_offset;
unsigned long agp_textures_offset;
#endif
} drm_r128_init_t;
typedef struct drm_r128_cce_stop {
@ -264,15 +251,10 @@ typedef struct drm_r128_cce_stop {
typedef struct drm_r128_clear {
unsigned int flags;
#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0)
int x, y, w, h;
#endif
unsigned int clear_color;
unsigned int clear_depth;
#if CONFIG_XFREE86_VERSION >= XFREE86_VERSION(4,1,0,0)
unsigned int color_mask;
unsigned int depth_mask;
#endif
} drm_r128_clear_t;
typedef struct drm_r128_vertex {

133
shared-core/xgi_drm.h Normal file
View File

@ -0,0 +1,133 @@
/****************************************************************************
* Copyright (C) 2003-2006 by XGI Technology, Taiwan.
*
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation on the rights to use, copy, modify, merge,
* publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR
* ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
***************************************************************************/
#ifndef _XGI_DRM_H_
#define _XGI_DRM_H_
#include <linux/types.h>
#include <asm/ioctl.h>
/** Shared-area (SAREA) state exported by the XGI DRM driver.
 *
 * Identifies the probed adapter and describes the current screen
 * configuration for userspace clients.
 */
struct drm_xgi_sarea {
	__u16 device_id;		/* PCI device id of the adapter */
	__u16 vendor_id;		/* PCI vendor id of the adapter */

	char device_name[32];		/* human-readable adapter name */

	unsigned int scrn_start;	/* screen start offset -- presumably into the framebuffer; confirm */
	unsigned int scrn_xres;		/* horizontal resolution in pixels */
	unsigned int scrn_yres;		/* vertical resolution in pixels */
	unsigned int scrn_bpp;		/* bits per pixel */
	unsigned int scrn_pitch;	/* scanline pitch -- units (bytes vs pixels) not shown here; confirm */
};
/** Argument for the DRM_XGI_BOOTSTRAP ioctl. */
struct xgi_bootstrap {
	/**
	 * Mapping describing the PCI-e GART range.
	 *
	 * NOTE(review): the original comment read "Size of PCI-e GART range
	 * in megabytes", but the field is a struct drm_map, not an integer --
	 * confirm the intended in/out semantics against the driver.
	 */
	struct drm_map gart;
};
/** Memory pools an allocation can come from (see xgi_mem_alloc::location). */
enum xgi_mem_location {
	XGI_MEMLOC_NON_LOCAL = 0,	/* non-local memory -- presumably system RAM via the GART; confirm */
	XGI_MEMLOC_LOCAL = 1,		/* local memory -- presumably on-card VRAM; confirm */
	XGI_MEMLOC_INVALID = 0x7fffffff	/* sentinel; also forces a 32-bit enum */
};
/** Argument for the DRM_XGI_ALLOC ioctl (in/out). */
struct xgi_mem_alloc {
	/**
	 * Memory region to be used for allocation.
	 *
	 * Must be one of XGI_MEMLOC_NON_LOCAL or XGI_MEMLOC_LOCAL.
	 */
	unsigned int location;

	/**
	 * Number of bytes request.
	 *
	 * On successful allocation, set to the actual number of bytes
	 * allocated.
	 */
	unsigned int size;

	/**
	 * Address of the memory from the graphics hardware's point of view.
	 */
	__u32 hw_addr;

	/**
	 * Offset of the allocation in the mapping.
	 */
	__u32 offset;

	/**
	 * Magic handle used to release memory.
	 *
	 * See also DRM_XGI_FREE ioctl.
	 */
	__u32 index;
};
/** Command-batch types; used in xgi_cmd_info::type for
 * DRM_XGI_SUBMIT_CMDLIST (correspondence presumed from naming; confirm).
 */
enum xgi_batch_type {
	BTYPE_2D = 0,			/* 2D engine commands */
	BTYPE_3D = 1,			/* 3D engine commands */
	BTYPE_FLIP = 2,			/* page-flip commands */
	BTYPE_CTRL = 3,			/* control commands */
	BTYPE_NONE = 0x7fffffff		/* sentinel; also forces a 32-bit enum */
};
/** Argument for the DRM_XGI_SUBMIT_CMDLIST ioctl: one command batch. */
struct xgi_cmd_info {
	__u32 type;	/* batch type -- presumably an enum xgi_batch_type value; confirm */
	__u32 hw_addr;	/* batch address as seen by the graphics hardware */
	__u32 size;	/* batch size -- units (bytes vs dwords) not shown here; confirm */
	__u32 id;	/* caller-chosen batch identifier */
};
/** Argument for the DRM_XGI_STATE_CHANGE ioctl: requested state transition. */
struct xgi_state_info {
	unsigned int _fromState;	/* current state -- value semantics defined by the driver */
	unsigned int _toState;		/* requested state */
};
/*
 * Ioctl definitions
 */

/* Driver-private command indices, relative to DRM_COMMAND_BASE. */
#define DRM_XGI_BOOTSTRAP 0
#define DRM_XGI_ALLOC 1
#define DRM_XGI_FREE 2
#define DRM_XGI_SUBMIT_CMDLIST 3
#define DRM_XGI_STATE_CHANGE 4

/* One-time driver setup; struct xgi_bootstrap is both input and output (IOWR). */
#define XGI_IOCTL_BOOTSTRAP DRM_IOWR(DRM_COMMAND_BASE + DRM_XGI_BOOTSTRAP, struct xgi_bootstrap)
/* Allocate memory; struct xgi_mem_alloc is both input and output (IOWR). */
#define XGI_IOCTL_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_XGI_ALLOC, struct xgi_mem_alloc)
/* Release an allocation, identified by its xgi_mem_alloc::index handle. */
#define XGI_IOCTL_FREE DRM_IOW(DRM_COMMAND_BASE + DRM_XGI_FREE, __u32)
/* Submit a command batch described by struct xgi_cmd_info. */
#define XGI_IOCTL_SUBMIT_CMDLIST DRM_IOW(DRM_COMMAND_BASE + DRM_XGI_SUBMIT_CMDLIST, struct xgi_cmd_info)
/* Notify the kernel of a state transition (struct xgi_state_info). */
#define XGI_IOCTL_STATE_CHANGE DRM_IOW(DRM_COMMAND_BASE + DRM_XGI_STATE_CHANGE, struct xgi_state_info)

#endif /* _XGI_DRM_H_ */

View File

@ -19,6 +19,9 @@ TESTS = auth \
openclose \
getversion \
getclient \
getstats \
lock \
setversion \
updatedraw
EXTRA_PROGRAMS = $(TESTS)

View File

@ -108,7 +108,7 @@ static void server()
ret = ioctl(drmfd, DRM_IOCTL_AUTH_MAGIC, &auth);
if (ret == -1)
err(1, "Authenticating bad magic succeeded\n");
err(1, "Failure to authenticate client magic\n");
wait_event(1, CLIENT_DONE);
}

51
tests/getstats.c Normal file
View File

@ -0,0 +1,51 @@
/*
* Copyright © 2007 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*
*/
#include <limits.h>
#include "drmtest.h"
/**
* Checks DRM_IOCTL_GET_STATS.
*
* I don't care too much about the actual contents, just that the kernel
* doesn't crash.
*/
int main(int argc, char **argv)
{
	int fd = drm_open_any();
	drm_stats_t stats;
	int ret;

	/* We only care that the ioctl succeeds and reports a sane
	 * number of counters, not what the counters contain.
	 */
	ret = ioctl(fd, DRM_IOCTL_GET_STATS, &stats);
	assert(ret == 0);
	assert(stats.count >= 0);

	close(fd);
	return 0;
}

262
tests/lock.c Normal file
View File

@ -0,0 +1,262 @@
/*
* Copyright © 2007 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*
*/
/** @file lock.c
* Tests various potential failures of the DRM locking mechanisms
*/
#include <limits.h>
#include <sys/socket.h>

#include "drmtest.h"
/* One-byte event codes exchanged over commfd to sequence the client and
 * server processes through authentication and the lock-contention test.
 */
enum auth_event {
	SERVER_READY,
	CLIENT_MAGIC,
	SERVER_LOCKED,
	CLIENT_LOCKED,
};

/* commfd[0] is used by the client, commfd[1] by the server; each process
 * both reads and writes its end (see wait_event()/send_event()).
 */
int commfd[2];

/* Distinct heavyweight-lock context IDs: lock1 for the server, lock2 for
 * the client.
 */
unsigned int lock1 = 0x00001111;
unsigned int lock2 = 0x00002222;
/* Return the current wall-clock time in milliseconds.
 *
 * Used to order lock release (server) against lock acquisition (client).
 * The value wraps at UINT_MAX, but differences between nearby samples
 * stay meaningful under unsigned arithmetic.
 *
 * Fixed: declared with an old-style empty parameter list `()`, which is
 * a non-prototype declaration; use `(void)`.
 */
static unsigned int
get_millis(void)
{
	struct timeval tv;

	gettimeofday(&tv, NULL);
	return tv.tv_sec * 1000 + tv.tv_usec / 1000;
}
/* Block until one event byte arrives on the given end of commfd and
 * abort the test if it is not the event we were expecting.
 */
static void
wait_event(int pipe, enum auth_event expected_event)
{
	unsigned char in;
	enum auth_event event;

	if (read(commfd[pipe], &in, 1) == -1)
		err(1, "read error");

	event = in;
	if (event != expected_event)
		errx(1, "unexpected event: %d\n", event);
}
static void
send_event(int pipe, enum auth_event send_event)
{
int ret;
unsigned char event;
event = send_event;
ret = write(commfd[pipe], &event, 1);
if (ret == -1)
err(1, "failed to send event %d", event);
}
/* Client half of the DRM auth handshake: wait for the server to come up,
 * obtain a magic number from the kernel, and pass it to the server.
 */
static void
client_auth(int drmfd)
{
	struct drm_auth auth;

	wait_event(0, SERVER_READY);

	/* Get a client magic number and pass it to the master for auth. */
	if (ioctl(drmfd, DRM_IOCTL_GET_MAGIC, &auth) == -1)
		err(1, "Couldn't get client magic");

	send_event(0, CLIENT_MAGIC);

	if (write(commfd[0], &auth.magic, sizeof(auth.magic)) == -1)
		err(1, "Couldn't write auth data");
}
/* Server half of the DRM auth handshake: announce readiness, receive the
 * client's magic number over the pipe, and authenticate it with the kernel.
 */
static void
server_auth(int drmfd)
{
	struct drm_auth auth;

	send_event(1, SERVER_READY);
	wait_event(1, CLIENT_MAGIC);

	if (read(commfd[1], &auth.magic, sizeof(auth.magic)) == -1)
		err(1, "Failure to read client magic");

	if (ioctl(drmfd, DRM_IOCTL_AUTH_MAGIC, &auth) == -1)
		err(1, "Failure to authenticate client magic\n");
}
/** Tests that locking is successful in normal conditions */
static void
test_lock_unlock(int drmfd)
{
	if (drmGetLock(drmfd, lock1, 0) != 0)
		err(1, "Locking failed");
	if (drmUnlock(drmfd, lock1) != 0)
		err(1, "Unlocking failed");
}
/** Tests that unlocking the lock while it's not held fails as expected.
 *
 * Fixed: the failure path used err(), which appends strerror(errno) even
 * though no call failed here (errno is stale); use errx(), matching the
 * sibling test_unlock_unowned().
 */
static void
test_unlock_unlocked(int drmfd)
{
	int ret;

	ret = drmUnlock(drmfd, lock1);
	if (ret == 0)
		errx(1, "Unlocking unlocked lock succeeded");
}
/** Tests that unlocking a lock held by another context fails appropriately */
static void
test_unlock_unowned(int drmfd)
{
	int rv;

	/* Take the lock as context lock1... */
	rv = drmGetLock(drmfd, lock1, 0);
	assert(rv == 0);

	/* ...then try to release it while claiming to be context lock2. */
	rv = drmUnlock(drmfd, lock2);
	if (rv == 0)
		errx(1, "Unlocking other context's lock succeeded");

	rv = drmUnlock(drmfd, lock1);
	assert(rv == 0);
}
/**
 * Tests that an open/close by the same process doesn't result in the lock
 * being dropped.
 *
 * Fixed two defects:
 * - the parameter relied on implicit int (`(drmfd)`), invalid since C99;
 * - drmfd was close()d on exit even though the caller, server(), keeps
 *   using the same fd for the remaining tests (use-after-close).  The fd
 *   is owned by the caller and is left open on return.
 */
static void test_open_close_locked(int drmfd)
{
	int ret, tempfd;

	ret = drmGetLock(drmfd, lock1, 0);
	assert(ret == 0);

	/* XXX: Need to make sure that this is the same device as drmfd */
	tempfd = drm_open_any();
	close(tempfd);

	ret = drmUnlock(drmfd, lock1);
	if (ret != 0)
		errx(1, "lock lost during open/close by same pid");
}
/* Child process: authenticate with the master, then contest the lock
 * while the server holds it, and report back when it was finally granted.
 *
 * Fixed: the write() of the timestamp was unchecked (a silent failure
 * would leave the server blocked or reading garbage), and the local
 * `time` shadowed time(3).
 */
static void client(void)
{
	int drmfd, ret;
	unsigned int lock_time;

	/* XXX: Should make sure we open the same DRM as the master */
	drmfd = drm_open_any();

	client_auth(drmfd);

	/* Wait for the server to grab the lock, then grab it ourselves (to
	 * contest it).  Hopefully we hit it within the window of when the
	 * server locks.
	 */
	wait_event(0, SERVER_LOCKED);
	ret = drmGetLock(drmfd, lock2, 0);
	/* Sample the clock immediately so it reflects when the lock was
	 * granted, before the error check.
	 */
	lock_time = get_millis();
	if (ret != 0)
		err(1, "Failed to get lock on client\n");
	drmUnlock(drmfd, lock2);

	/* Tell the server that our locking completed, and when it did */
	send_event(0, CLIENT_LOCKED);
	ret = write(commfd[0], &lock_time, sizeof(lock_time));
	if (ret == -1)
		err(1, "Couldn't write lock time");

	exit(0);
}
/* Parent process: runs the single-process lock tests, then coordinates
 * with the client to verify that a contested lock is not granted until
 * the holder releases it.
 *
 * Fixed: the error message for reading the client's lock timestamp was a
 * copy/paste of the auth message ("client magic"); removed the unused
 * local tempfd.
 */
static void server(void)
{
	int drmfd, ret;
	unsigned int client_time, unlock_time;

	drmfd = drm_open_any_master();

	test_lock_unlock(drmfd);
	test_unlock_unlocked(drmfd);
	test_unlock_unowned(drmfd);
	test_open_close_locked(drmfd);

	/* Perform the authentication sequence with the client. */
	server_auth(drmfd);

	/* Now, test that the client attempting to lock while the server
	 * holds the lock works correctly.
	 */
	ret = drmGetLock(drmfd, lock1, 0);
	assert(ret == 0);
	send_event(1, SERVER_LOCKED);

	/* Wait a while for the client to do its thing */
	sleep(1);

	ret = drmUnlock(drmfd, lock1);
	assert(ret == 0);
	unlock_time = get_millis();

	wait_event(1, CLIENT_LOCKED);
	ret = read(commfd[1], &client_time, sizeof(client_time));
	if (ret == -1)
		err(1, "Failure to read client lock time");

	if (client_time < unlock_time)
		errx(1, "Client took lock before server released it");
}
/**
 * Forks a DRM master (server) and an authenticated client and runs the
 * locking tests in both.
 *
 * Fixed: the control channel was created with pipe(), which is
 * unidirectional — yet both processes read AND write their end of
 * commfd (client_auth/send_event write commfd[0] while wait_event reads
 * it, and vice versa for the server), so every send from a read end
 * failed.  A connected AF_UNIX stream socketpair supports bidirectional
 * traffic on each fd.  Requires <sys/socket.h>.
 */
int main(int argc, char **argv)
{
	int ret;

	ret = socketpair(AF_UNIX, SOCK_STREAM, 0, commfd);
	if (ret == -1)
		err(1, "Couldn't create socketpair");

	ret = fork();
	if (ret == -1)
		err(1, "failure to fork client");

	if (ret == 0)
		client();
	else
		server();

	return 0;
}

84
tests/setversion.c Normal file
View File

@ -0,0 +1,84 @@
/*
* Copyright © 2007 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*
*/
#include <limits.h>
#include "drmtest.h"
/**
* Checks DRM_IOCTL_SET_VERSION.
*
* This tests that we can get the actual version out, and that setting invalid
* major/minor numbers fails appropriately. It does not check the actual
* behavior differenses resulting from an increased DI version.
*/
/* Exercises DRM_IOCTL_SET_VERSION: reads back the current DD/DI versions,
 * then verifies that requesting any version component one past the
 * supported value is rejected with EINVAL.
 */
int main(int argc, char **argv)
{
	int fd, ret;
	drm_set_version_t sv, version;

	fd = drm_open_any_master();

	/* First, check that we can get the DD/DI versions. */
	memset(&version, 0, sizeof(version));
	version.drm_di_major = -1;
	version.drm_di_minor = -1;
	version.drm_dd_major = -1;
	version.drm_dd_minor = -1;
	ret = ioctl(fd, DRM_IOCTL_SET_VERSION, &version);
	assert(ret == 0);
	assert(version.drm_di_major != -1);
	assert(version.drm_di_minor != -1);
	assert(version.drm_dd_major != -1);
	assert(version.drm_dd_minor != -1);

	/* Check that an invalid DI major fails */
	sv = version;
	sv.drm_di_major++;
	ret = ioctl(fd, DRM_IOCTL_SET_VERSION, &sv);
	assert(ret == -1 && errno == EINVAL);

	/* Check that an invalid DI minor fails.
	 * (Fixed copy/paste bug: this case previously bumped drm_di_major
	 * again, leaving the DI minor path untested.)
	 */
	sv = version;
	sv.drm_di_minor++;
	ret = ioctl(fd, DRM_IOCTL_SET_VERSION, &sv);
	assert(ret == -1 && errno == EINVAL);

	/* Check that an invalid DD major fails */
	sv = version;
	sv.drm_dd_major++;
	ret = ioctl(fd, DRM_IOCTL_SET_VERSION, &sv);
	assert(ret == -1 && errno == EINVAL);

	/* Check that an invalid DD minor fails */
	sv = version;
	sv.drm_dd_minor++;
	ret = ioctl(fd, DRM_IOCTL_SET_VERSION, &sv);
	assert(ret == -1 && errno == EINVAL);

	close(fd);
	return 0;
}