Merge commit 'origin/master' into modesetting-gem

Conflicts:
	linux-core/Makefile.kernel
	linux-core/ati_pcigart.c
	linux-core/drm_compat.h
	linux-core/drm_irq.c
	linux-core/drm_lock.c
	linux-core/i915_drv.c
	shared-core/i915_dma.c
	shared-core/i915_drv.h
	shared-core/i915_irq.c
	shared-core/nouveau_mem.c
	shared-core/radeon_cp.c
	shared-core/radeon_drv.h
Kristian Høgsberg 2008-07-31 15:18:32 -04:00
commit 5052e966ec
44 changed files with 2184 additions and 985 deletions

@@ -34,76 +34,125 @@
 #include "drmP.h"
 
 #define ATI_PCIGART_PAGE_SIZE 4096 /* PCI GART page size */
+#define ATI_PCIGART_PAGE_MASK (~(ATI_PCIGART_PAGE_SIZE-1))
+
+#define ATI_PCIE_WRITE 0x4
+#define ATI_PCIE_READ 0x8
+
+static int drm_ati_alloc_pcigart_table(struct drm_device *dev,
+                                       struct drm_ati_pcigart_info *gart_info)
+{
+    dev->sg->dmah = drm_pci_alloc(dev, gart_info->table_size,
+                                  PAGE_SIZE,
+                                  gart_info->table_mask);
+    if (dev->sg->dmah == NULL)
+        return ENOMEM;
+
+    return 0;
+}
+
+static void drm_ati_free_pcigart_table(struct drm_device *dev,
+                                       struct drm_ati_pcigart_info *gart_info)
+{
+    drm_pci_free(dev, dev->sg->dmah);
+    dev->sg->dmah = NULL;
+}
+
+int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
+{
+    /* we need to support large memory configurations */
+    if (dev->sg == NULL) {
+        DRM_ERROR("no scatter/gather memory!\n");
+        return 0;
+    }
+
+    if (gart_info->bus_addr) {
+        if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
+            gart_info->bus_addr = 0;
+            if (dev->sg->dmah)
+                drm_ati_free_pcigart_table(dev, gart_info);
+        }
+    }
+
+    return 1;
+}
+
 int drm_ati_pcigart_init(struct drm_device *dev,
                          struct drm_ati_pcigart_info *gart_info)
 {
-    unsigned long pages;
-    u32 *pci_gart = NULL, page_base;
-    int i, j;
+    void *address = NULL;
+    unsigned long pages;
+    u32 *pci_gart, page_base;
+    dma_addr_t bus_address = 0;
+    int i, j, ret = 0;
+    int max_pages;
+    dma_addr_t entry_addr;
 
-    /* we need to support large memory configurations */
     if (dev->sg == NULL) {
-        DRM_ERROR( "no scatter/gather memory!\n" );
-        return 0;
+        DRM_ERROR("no scatter/gather memory!\n");
+        goto done;
     }
 
     if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
-        /* GART table in system memory */
-        dev->sg->dmah = drm_pci_alloc(dev, gart_info->table_size, 0,
-            0xfffffffful);
-        if (dev->sg->dmah == NULL) {
-            DRM_ERROR("cannot allocate PCI GART table!\n");
-            return 0;
-        }
-        gart_info->addr = (void *)dev->sg->dmah->vaddr;
-        gart_info->bus_addr = dev->sg->dmah->busaddr;
-        pci_gart = (u32 *)dev->sg->dmah->vaddr;
-    } else {
-        /* GART table in framebuffer memory */
-        pci_gart = gart_info->addr;
-    }
+        DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n");
 
-    pages = DRM_MIN(dev->sg->pages, gart_info->table_size / sizeof(u32));
-
-    bzero(pci_gart, gart_info->table_size);
+        ret = drm_ati_alloc_pcigart_table(dev, gart_info);
+        if (ret) {
+            DRM_ERROR("cannot allocate PCI GART page!\n");
+            goto done;
+        }
+
+        address = (void *)dev->sg->dmah->vaddr;
+        bus_address = dev->sg->dmah->busaddr;
+    } else {
+        address = gart_info->addr;
+        bus_address = gart_info->bus_addr;
+        DRM_DEBUG("PCI: Gart Table: VRAM %08X mapped at %08lX\n",
+                  (unsigned int)bus_address, (unsigned long)address);
+    }
+
+    pci_gart = (u32 *) address;
+
+    max_pages = (gart_info->table_size / sizeof(u32));
+    pages = (dev->sg->pages <= max_pages)
+        ? dev->sg->pages : max_pages;
+
+    memset(pci_gart, 0, max_pages * sizeof(u32));
 
     KASSERT(PAGE_SIZE >= ATI_PCIGART_PAGE_SIZE, ("page size too small"));
 
-    for ( i = 0 ; i < pages ; i++ ) {
-        page_base = (u32) dev->sg->busaddr[i];
+    for (i = 0; i < pages; i++) {
+        entry_addr = dev->sg->busaddr[i];
         for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) {
+            page_base = (u32) entry_addr & ATI_PCIGART_PAGE_MASK;
             switch(gart_info->gart_reg_if) {
             case DRM_ATI_GART_IGP:
-                *pci_gart = cpu_to_le32(page_base | 0xc);
+                page_base |= (upper_32_bits(entry_addr) & 0xff) << 4;
+                page_base |= 0xc;
                 break;
             case DRM_ATI_GART_PCIE:
-                *pci_gart = cpu_to_le32((page_base >> 8) | 0xc);
+                page_base >>= 8;
+                page_base |= (upper_32_bits(entry_addr) & 0xff) << 24;
+                page_base |= ATI_PCIE_READ | ATI_PCIE_WRITE;
                 break;
             default:
-                *pci_gart = cpu_to_le32(page_base);
+            case DRM_ATI_GART_PCI:
                 break;
             }
+            *pci_gart = cpu_to_le32(page_base);
             pci_gart++;
-            page_base += ATI_PCIGART_PAGE_SIZE;
+            entry_addr += ATI_PCIGART_PAGE_SIZE;
         }
     }
 
     DRM_MEMORYBARRIER();
 
-    return 1;
-}
-
-int drm_ati_pcigart_cleanup(struct drm_device *dev,
-                            struct drm_ati_pcigart_info *gart_info)
-{
-    if (dev->sg == NULL) {
-        DRM_ERROR( "no scatter/gather memory!\n" );
-        return 0;
-    }
-
-    drm_pci_free(dev, dev->sg->dmah);
-
-    return 1;
+    ret = 1;
+
+ done:
+    gart_info->addr = address;
+    gart_info->bus_addr = bus_address;
+    return ret;
 }

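Aside: the reworked drm_ati_pcigart_init() above packs each scatter/gather bus address into a 32-bit GART entry, using upper_32_bits() to fold in address bits above 4GB. Below is a minimal standalone sketch of that packing for the PCIE and IGP register interfaces; the helper names and the sample address are invented for illustration and are not part of the commit.

#include <stdint.h>
#include <stdio.h>

#define ATI_PCIGART_PAGE_SIZE 4096
#define ATI_PCIGART_PAGE_MASK (~(ATI_PCIGART_PAGE_SIZE - 1))
#define ATI_PCIE_WRITE 0x4
#define ATI_PCIE_READ  0x8

/* Same trick as the bsd-core drmP.h change: two 16-bit shifts avoid a
 * 64-bit-shift warning when the argument is only 32 bits wide. */
#define upper_32_bits(n) ((uint32_t)(((n) >> 16) >> 16))

/* Hypothetical standalone helpers mirroring the switch in the diff above. */
static uint32_t gart_entry_pcie(uint64_t entry_addr)
{
    uint32_t page_base = (uint32_t)entry_addr & ATI_PCIGART_PAGE_MASK;

    page_base >>= 8;                                        /* low address bits into entry bits 23:4 */
    page_base |= (upper_32_bits(entry_addr) & 0xff) << 24;  /* address bits 39:32 into entry bits 31:24 */
    page_base |= ATI_PCIE_READ | ATI_PCIE_WRITE;            /* access flags */
    return page_base;
}

static uint32_t gart_entry_igp(uint64_t entry_addr)
{
    uint32_t page_base = (uint32_t)entry_addr & ATI_PCIGART_PAGE_MASK;

    page_base |= (upper_32_bits(entry_addr) & 0xff) << 4;   /* high address bits into entry bits 11:4 */
    page_base |= 0xc;                                       /* valid/access bits */
    return page_base;
}

int main(void)
{
    uint64_t addr = 0x1234567000ULL;    /* example bus address above 4GB */

    printf("PCIE entry: 0x%08x\n", (unsigned)gart_entry_pcie(addr)); /* 0x1234567c */
    printf("IGP  entry: 0x%08x\n", (unsigned)gart_entry_igp(addr));  /* 0x3456712c */
    return 0;
}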
@@ -632,7 +632,7 @@ struct drm_ati_pcigart_info {
 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : (1ULL<<(n)) - 1)
 #endif
 
-#define upper_32_bits(_val) (((u64)(_val)) >> 32)
+#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
 
 struct drm_driver_info {
     int (*load)(struct drm_device *, unsigned long flags);
@@ -733,11 +733,13 @@ struct drm_device {
     /* Locks */
 #if defined(__FreeBSD__) && __FreeBSD_version > 500000
+    struct mtx vbl_lock; /* protects vblank operations */
     struct mtx dma_lock; /* protects dev->dma */
     struct mtx irq_lock; /* protects irq condition checks */
     struct mtx dev_lock; /* protects everything else */
 #endif
     DRM_SPINTYPE drw_lock;
+    DRM_SPINTYPE tsk_lock;
 
     /* Usage Counters */
     int open_count; /* Outstanding files open */
@@ -785,25 +787,21 @@ struct drm_device {
     atomic_t context_flag; /* Context swapping flag */
     int last_context; /* Last current context */
 
+    int vblank_disable_allowed;
     wait_queue_head_t *vbl_queue; /* vblank wait queue */
     atomic_t *_vblank_count; /* number of VBLANK interrupts */
                              /* (driver must alloc the right number of counters) */
-    struct mtx vbl_lock;
     struct drm_vbl_sig_list *vbl_sigs; /* signal list to send on VBLANK */
     atomic_t vbl_signal_pending; /* number of signals pending on all crtcs*/
     atomic_t *vblank_refcount; /* number of users of vblank interrupts per crtc */
     u32 *last_vblank; /* protected by dev->vbl_lock, used */
                       /* for wraparound handling */
-    u32 *vblank_offset; /* used to track how many vblanks */
     int *vblank_enabled; /* so we don't call enable more than */
                          /* once per disable */
-    u32 *vblank_premodeset; /* were lost during modeset */
+    int *vblank_inmodeset; /* Display driver is setting mode */
     struct callout vblank_disable_timer;
-    unsigned long max_vblank_count; /* size of vblank counter register */
+    u32 max_vblank_count; /* size of vblank counter register */
     int num_crtcs;
-    atomic_t vbl_received;
-    atomic_t vbl_received2;
 
 #ifdef __FreeBSD__
     struct sigio *buf_sigio; /* Processes waiting for SIGIO */
@@ -933,7 +931,6 @@ void drm_handle_vblank(struct drm_device *dev, int crtc);
 u32 drm_vblank_count(struct drm_device *dev, int crtc);
 int drm_vblank_get(struct drm_device *dev, int crtc);
 void drm_vblank_put(struct drm_device *dev, int crtc);
-void drm_update_vblank_count(struct drm_device *dev, int crtc);
 int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
 int drm_vblank_init(struct drm_device *dev, int num_crtcs);
 void drm_vbl_send_signals(struct drm_device *dev, int crtc);
@@ -1090,6 +1087,8 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size,
                                 size_t align, dma_addr_t maxaddr);
 void drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah);
 
+#define drm_core_ioremap_wc drm_core_ioremap
+
 /* Inline replacements for DRM_IOREMAP macros */
 static __inline__ void
 drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev)

@@ -832,12 +832,12 @@ int drm_addbufs_sg(struct drm_device *dev, drm_buf_desc_t *request)
     if (request->count < 0 || request->count > 4096)
         return EINVAL;
 
-    DRM_SPINLOCK(&dev->dma_lock);
-
     order = drm_order(request->size);
     if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
         return EINVAL;
 
+    DRM_SPINLOCK(&dev->dma_lock);
+
     /* No more allocations after first buffer-using ioctl. */
     if (dev->buf_use != 0) {
         DRM_SPINUNLOCK(&dev->dma_lock);

@@ -125,6 +125,7 @@ static drm_ioctl_desc_t drm_ioctls[256] = {
     DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
     DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0),
+    DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
     DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_draw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 };
@@ -202,8 +203,11 @@ int drm_attach(device_t nbdev, drm_pci_id_list_t *idlist)
         DRM_DEV_MODE,
         "dri/card%d", unit);
 #if __FreeBSD_version >= 500000
-    mtx_init(&dev->dev_lock, "drm device", NULL, MTX_DEF);
+    mtx_init(&dev->dev_lock, "drmdev", NULL, MTX_DEF);
+    mtx_init(&dev->irq_lock, "drmirq", NULL, MTX_DEF);
+    mtx_init(&dev->vbl_lock, "drmvbl", NULL, MTX_DEF);
     mtx_init(&dev->drw_lock, "drmdrw", NULL, MTX_DEF);
+    mtx_init(&dev->tsk_lock, "drmtsk", NULL, MTX_DEF);
 #endif
 
     id_entry = drm_find_description(pci_get_vendor(dev->device),
@@ -542,6 +546,8 @@ static int drm_load(struct drm_device *dev)
         /* Shared code returns -errno. */
         retcode = -dev->driver.load(dev,
             dev->id_entry->driver_private);
+        if (pci_enable_busmaster(dev->device))
+            DRM_ERROR("Request to enable bus-master failed.\n");
         DRM_UNLOCK();
         if (retcode != 0)
             goto error;
@@ -594,6 +600,9 @@ error:
 #ifdef __FreeBSD__
     destroy_dev(dev->devnode);
 #if __FreeBSD_version >= 500000
+    mtx_destroy(&dev->drw_lock);
+    mtx_destroy(&dev->irq_lock);
+    mtx_destroy(&dev->vbl_lock);
     mtx_destroy(&dev->dev_lock);
 #endif
 #endif
@@ -649,7 +658,14 @@ static void drm_unload(struct drm_device *dev)
     delete_unrhdr(dev->drw_unrhdr);
 
     drm_mem_uninit();
+
+    if (pci_disable_busmaster(dev->device))
+        DRM_ERROR("Request to disable bus-master failed.\n");
+
 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000
+    mtx_destroy(&dev->drw_lock);
+    mtx_destroy(&dev->irq_lock);
+    mtx_destroy(&dev->vbl_lock);
     mtx_destroy(&dev->dev_lock);
 #endif
 }

@@ -66,6 +66,129 @@ drm_irq_handler_wrap(DRM_IRQ_ARGS)
 }
 #endif
 
+static void vblank_disable_fn(void *arg)
+{
+    struct drm_device *dev = (struct drm_device *)arg;
+    int i;
+
+    if (callout_pending(&dev->vblank_disable_timer)) {
+        /* callout was reset */
+        return;
+    }
+    if (!callout_active(&dev->vblank_disable_timer)) {
+        /* callout was stopped */
+        return;
+    }
+    callout_deactivate(&dev->vblank_disable_timer);
+
+    if (!dev->vblank_disable_allowed)
+        return;
+
+    for (i = 0; i < dev->num_crtcs; i++) {
+        if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
+            dev->vblank_enabled[i]) {
+            DRM_DEBUG("disabling vblank on crtc %d\n", i);
+            dev->last_vblank[i] =
+                dev->driver.get_vblank_counter(dev, i);
+            dev->driver.disable_vblank(dev, i);
+            dev->vblank_enabled[i] = 0;
+        }
+    }
+}
+
+static void drm_vblank_cleanup(struct drm_device *dev)
+{
+    unsigned long irqflags;
+
+    /* Bail if the driver didn't call drm_vblank_init() */
+    if (dev->num_crtcs == 0)
+        return;
+
+    DRM_SPINLOCK_IRQSAVE(&dev->vbl_lock, irqflags);
+    callout_stop(&dev->vblank_disable_timer);
+    DRM_SPINUNLOCK_IRQRESTORE(&dev->vbl_lock, irqflags);
+
+    callout_drain(&dev->vblank_disable_timer);
+
+    vblank_disable_fn((void *)dev);
+
+    drm_free(dev->vbl_queue, sizeof(*dev->vbl_queue) * dev->num_crtcs,
+        DRM_MEM_DRIVER);
+    drm_free(dev->vbl_sigs, sizeof(*dev->vbl_sigs) * dev->num_crtcs,
+        DRM_MEM_DRIVER);
+    drm_free(dev->_vblank_count, sizeof(*dev->_vblank_count) *
+        dev->num_crtcs, DRM_MEM_DRIVER);
+    drm_free(dev->vblank_refcount, sizeof(*dev->vblank_refcount) *
+        dev->num_crtcs, DRM_MEM_DRIVER);
+    drm_free(dev->vblank_enabled, sizeof(*dev->vblank_enabled) *
+        dev->num_crtcs, DRM_MEM_DRIVER);
+    drm_free(dev->last_vblank, sizeof(*dev->last_vblank) * dev->num_crtcs,
+        DRM_MEM_DRIVER);
+    drm_free(dev->vblank_inmodeset, sizeof(*dev->vblank_inmodeset) *
+        dev->num_crtcs, DRM_MEM_DRIVER);
+
+    dev->num_crtcs = 0;
+}
+
+int drm_vblank_init(struct drm_device *dev, int num_crtcs)
+{
+    int i, ret = ENOMEM;
+
+    callout_init_mtx(&dev->vblank_disable_timer, &dev->vbl_lock, 0);
+    atomic_set(&dev->vbl_signal_pending, 0);
+    dev->num_crtcs = num_crtcs;
+
+    dev->vbl_queue = drm_alloc(sizeof(wait_queue_head_t) * num_crtcs,
+        DRM_MEM_DRIVER);
+    if (!dev->vbl_queue)
+        goto err;
+
+    dev->vbl_sigs = drm_alloc(sizeof(struct drm_vbl_sig) * num_crtcs,
+        DRM_MEM_DRIVER);
+    if (!dev->vbl_sigs)
+        goto err;
+
+    dev->_vblank_count = drm_alloc(sizeof(atomic_t) * num_crtcs,
+        DRM_MEM_DRIVER);
+    if (!dev->_vblank_count)
+        goto err;
+
+    dev->vblank_refcount = drm_alloc(sizeof(atomic_t) * num_crtcs,
+        DRM_MEM_DRIVER);
+    if (!dev->vblank_refcount)
+        goto err;
+
+    dev->vblank_enabled = drm_calloc(num_crtcs, sizeof(int),
+        DRM_MEM_DRIVER);
+    if (!dev->vblank_enabled)
+        goto err;
+
+    dev->last_vblank = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER);
+    if (!dev->last_vblank)
+        goto err;
+
+    dev->vblank_inmodeset = drm_calloc(num_crtcs, sizeof(int),
+        DRM_MEM_DRIVER);
+    if (!dev->vblank_inmodeset)
+        goto err;
+
+    /* Zero per-crtc vblank stuff */
+    for (i = 0; i < num_crtcs; i++) {
+        DRM_INIT_WAITQUEUE(&dev->vbl_queue[i]);
+        TAILQ_INIT(&dev->vbl_sigs[i]);
+        atomic_set(&dev->_vblank_count[i], 0);
+        atomic_set(&dev->vblank_refcount[i], 0);
+    }
+
+    dev->vblank_disable_allowed = 0;
+
+    return 0;
+
+err:
+    drm_vblank_cleanup(dev);
+    return ret;
+}
+
 int drm_irq_install(struct drm_device *dev)
 {
     int retcode;
@@ -87,8 +210,6 @@ int drm_irq_install(struct drm_device *dev)
     dev->context_flag = 0;
 
-    DRM_SPININIT(&dev->irq_lock, "DRM IRQ lock");
-
     /* Before installing handler */
     dev->driver.irq_preinstall(dev);
     DRM_UNLOCK();
@@ -143,7 +264,6 @@ err:
         dev->irqrid = 0;
     }
 #endif
-    DRM_SPINUNINIT(&dev->irq_lock);
     DRM_UNLOCK();
     return retcode;
 }
@@ -175,7 +295,7 @@ int drm_irq_uninstall(struct drm_device *dev)
 #elif defined(__NetBSD__) || defined(__OpenBSD__)
     pci_intr_disestablish(&dev->pa.pa_pc, dev->irqh);
 #endif
-    DRM_SPINUNINIT(&dev->irq_lock);
+    drm_vblank_cleanup(dev);
 
     return 0;
 }
@@ -208,27 +328,35 @@ int drm_control(struct drm_device *dev, void *data, struct drm_file *file_priv)
     }
 }
 
-static void vblank_disable_fn(void *arg)
-{
-    struct drm_device *dev = (struct drm_device *)arg;
-    unsigned long irqflags;
-    int i;
-
-    for (i = 0; i < dev->num_crtcs; i++) {
-        DRM_SPINLOCK_IRQSAVE(&dev->vbl_lock, irqflags);
-        if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
-            dev->vblank_enabled[i]) {
-            dev->driver.disable_vblank(dev, i);
-            dev->vblank_enabled[i] = 0;
-        }
-        DRM_SPINUNLOCK_IRQRESTORE(&dev->vbl_lock, irqflags);
-    }
-}
-
 u32 drm_vblank_count(struct drm_device *dev, int crtc)
 {
-    return atomic_read(&dev->_vblank_count[crtc]) +
-        dev->vblank_offset[crtc];
+    return atomic_read(&dev->_vblank_count[crtc]);
+}
+
+static void drm_update_vblank_count(struct drm_device *dev, int crtc)
+{
+    u32 cur_vblank, diff;
+
+    /*
+     * Interrupts were disabled prior to this call, so deal with counter
+     * wrap if needed.
+     * NOTE! It's possible we lost a full dev->max_vblank_count events
+     * here if the register is small or we had vblank interrupts off for
+     * a long time.
+     */
+    cur_vblank = dev->driver.get_vblank_counter(dev, crtc);
+    diff = cur_vblank - dev->last_vblank[crtc];
+    if (cur_vblank < dev->last_vblank[crtc]) {
+        diff += dev->max_vblank_count;
+
+        DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
+            crtc, dev->last_vblank[crtc], cur_vblank, diff);
+    }
+
+    DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
+        crtc, diff);
+
+    atomic_add(diff, &dev->_vblank_count[crtc]);
 }
 
 int drm_vblank_get(struct drm_device *dev, int crtc)
@@ -244,8 +372,10 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
         ret = dev->driver.enable_vblank(dev, crtc);
         if (ret)
             atomic_dec(&dev->vblank_refcount[crtc]);
-        else
+        else {
             dev->vblank_enabled[crtc] = 1;
+            drm_update_vblank_count(dev, crtc);
+        }
     }
 
     DRM_SPINUNLOCK_IRQRESTORE(&dev->vbl_lock, irqflags);
@@ -254,71 +384,59 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
 void drm_vblank_put(struct drm_device *dev, int crtc)
 {
+    unsigned long irqflags;
+
+    DRM_SPINLOCK_IRQSAVE(&dev->vbl_lock, irqflags);
     /* Last user schedules interrupt disable */
     atomic_subtract_acq_int(&dev->vblank_refcount[crtc], 1);
     if (dev->vblank_refcount[crtc] == 0)
-        callout_reset(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ,
+        callout_reset(&dev->vblank_disable_timer, 5 * DRM_HZ,
             (timeout_t *)vblank_disable_fn, (void *)dev);
-}
-
-void drm_handle_vblank(struct drm_device *dev, int crtc)
-{
-    drm_update_vblank_count(dev, crtc);
-    DRM_WAKEUP(&dev->vbl_queue[crtc]);
-    drm_vbl_send_signals(dev, crtc);
-}
-
-void drm_update_vblank_count(struct drm_device *dev, int crtc)
-{
-    unsigned long irqflags;
-    u32 cur_vblank, diff;
-
-    /*
-     * Interrupts were disabled prior to this call, so deal with counter
-     * wrap if needed.
-     * NOTE! It's possible we lost a full dev->max_vblank_count events
-     * here if the register is small or we had vblank interrupts off for
-     * a long time.
-     */
-    cur_vblank = dev->driver.get_vblank_counter(dev, crtc);
-    DRM_SPINLOCK_IRQSAVE(&dev->vbl_lock, irqflags);
-    if (cur_vblank < dev->last_vblank[crtc]) {
-        diff = dev->max_vblank_count -
-            dev->last_vblank[crtc];
-        diff += cur_vblank;
-    } else {
-        diff = cur_vblank - dev->last_vblank[crtc];
-    }
-    dev->last_vblank[crtc] = cur_vblank;
     DRM_SPINUNLOCK_IRQRESTORE(&dev->vbl_lock, irqflags);
-
-    atomic_add(diff, &dev->_vblank_count[crtc]);
 }
 
 int drm_modeset_ctl(struct drm_device *dev, void *data,
                     struct drm_file *file_priv)
 {
     struct drm_modeset_ctl *modeset = data;
+    unsigned long irqflags;
     int crtc, ret = 0;
-    u32 new;
+
+    /* If drm_vblank_init() hasn't been called yet, just no-op */
+    if (!dev->num_crtcs)
+        goto out;
 
     crtc = modeset->crtc;
     if (crtc >= dev->num_crtcs) {
-        ret = -EINVAL;
+        ret = EINVAL;
         goto out;
     }
 
+    /*
+     * To avoid all the problems that might happen if interrupts
+     * were enabled/disabled around or between these calls, we just
+     * have the kernel take a reference on the CRTC (just once though
+     * to avoid corrupting the count if multiple, mismatch calls occur),
+     * so that interrupts remain enabled in the interim.
+     */
     switch (modeset->cmd) {
     case _DRM_PRE_MODESET:
-        dev->vblank_premodeset[crtc] =
-            dev->driver.get_vblank_counter(dev, crtc);
+        if (!dev->vblank_inmodeset[crtc]) {
+            dev->vblank_inmodeset[crtc] = 1;
+            drm_vblank_get(dev, crtc);
+        }
         break;
     case _DRM_POST_MODESET:
-        new = dev->driver.get_vblank_counter(dev, crtc);
-        dev->vblank_offset[crtc] = dev->vblank_premodeset[crtc] - new;
+        if (dev->vblank_inmodeset[crtc]) {
+            DRM_SPINLOCK_IRQSAVE(&dev->vbl_lock, irqflags);
+            dev->vblank_disable_allowed = 1;
+            dev->vblank_inmodeset[crtc] = 0;
+            DRM_SPINUNLOCK_IRQRESTORE(&dev->vbl_lock, irqflags);
+            drm_vblank_put(dev, crtc);
+        }
         break;
     default:
-        ret = -EINVAL;
+        ret = EINVAL;
         break;
     }
@@ -329,7 +447,6 @@ out:
 int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
     drm_wait_vblank_t *vblwait = data;
-    struct timeval now;
     int ret = 0;
     int flags, seq, crtc;
@@ -350,7 +467,9 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv)
     if (crtc >= dev->num_crtcs)
         return EINVAL;
 
-    drm_update_vblank_count(dev, crtc);
+    ret = drm_vblank_get(dev, crtc);
+    if (ret)
+        return ret;
     seq = drm_vblank_count(dev, crtc);
 
     switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
@@ -360,7 +479,8 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv)
     case _DRM_VBLANK_ABSOLUTE:
         break;
     default:
-        return EINVAL;
+        ret = EINVAL;
+        goto done;
     }
 
     if ((flags & _DRM_VBLANK_NEXTONMISS) &&
@@ -381,124 +501,33 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv)
         vblwait->reply.sequence = atomic_read(&dev->vbl_received);
 
-        DRM_SPINLOCK(&dev->irq_lock);
+        DRM_SPINLOCK(&dev->vbl_lock);
         TAILQ_INSERT_HEAD(&dev->vbl_sig_list, vbl_sig, link);
-        DRM_SPINUNLOCK(&dev->irq_lock);
+        DRM_SPINUNLOCK(&dev->vbl_lock);
         ret = 0;
 #endif
         ret = EINVAL;
     } else {
-        unsigned long cur_vblank;
-
         DRM_LOCK();
         /* shared code returns -errno */
-        ret = drm_vblank_get(dev, crtc);
-        if (ret)
-            return ret;
         DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
-            (((cur_vblank = drm_vblank_count(dev, crtc))
+            ((drm_vblank_count(dev, crtc)
                 - vblwait->request.sequence) <= (1 << 23)));
-        drm_vblank_put(dev, crtc);
         DRM_UNLOCK();
 
-        microtime(&now);
-        vblwait->reply.tval_sec = now.tv_sec;
-        vblwait->reply.tval_usec = now.tv_usec;
+        if (ret != EINTR) {
+            struct timeval now;
+
+            microtime(&now);
+            vblwait->reply.tval_sec = now.tv_sec;
+            vblwait->reply.tval_usec = now.tv_usec;
+            vblwait->reply.sequence = drm_vblank_count(dev, crtc);
+        }
     }
 
-    return ret;
-}
-
-static void drm_vblank_cleanup(struct drm_device *dev)
-{
-    /* Bail if the driver didn't call drm_vblank_init() */
-    if (dev->num_crtcs == 0)
-        return;
-
-    callout_stop(&dev->vblank_disable_timer);
-
-    vblank_disable_fn((void *)dev);
-
-    drm_free(dev->vbl_queue, sizeof(*dev->vbl_queue) * dev->num_crtcs,
-        DRM_MEM_DRIVER);
-    drm_free(dev->vbl_sigs, sizeof(*dev->vbl_sigs) * dev->num_crtcs,
-        DRM_MEM_DRIVER);
-    drm_free(dev->_vblank_count, sizeof(*dev->_vblank_count) *
-        dev->num_crtcs, DRM_MEM_DRIVER);
-    drm_free(dev->vblank_refcount, sizeof(*dev->vblank_refcount) *
-        dev->num_crtcs, DRM_MEM_DRIVER);
-    drm_free(dev->vblank_enabled, sizeof(*dev->vblank_enabled) *
-        dev->num_crtcs, DRM_MEM_DRIVER);
-    drm_free(dev->last_vblank, sizeof(*dev->last_vblank) * dev->num_crtcs,
-        DRM_MEM_DRIVER);
-    drm_free(dev->vblank_premodeset, sizeof(*dev->vblank_premodeset) *
-        dev->num_crtcs, DRM_MEM_DRIVER);
-    drm_free(dev->vblank_offset, sizeof(*dev->vblank_offset) * dev->num_crtcs,
-        DRM_MEM_DRIVER);
-
-    dev->num_crtcs = 0;
-}
-
-int drm_vblank_init(struct drm_device *dev, int num_crtcs)
-{
-    int i, ret = -ENOMEM;
-
-    callout_init(&dev->vblank_disable_timer, 0);
-    DRM_SPININIT(&dev->vbl_lock, "drm_vblk");
-    atomic_set(&dev->vbl_signal_pending, 0);
-    dev->num_crtcs = num_crtcs;
-
-    dev->vbl_queue = drm_alloc(sizeof(wait_queue_head_t) * num_crtcs,
-        DRM_MEM_DRIVER);
-    if (!dev->vbl_queue)
-        goto err;
-
-    dev->vbl_sigs = drm_alloc(sizeof(struct drm_vbl_sig) * num_crtcs,
-        DRM_MEM_DRIVER);
-    if (!dev->vbl_sigs)
-        goto err;
-
-    dev->_vblank_count = drm_alloc(sizeof(atomic_t) * num_crtcs,
-        DRM_MEM_DRIVER);
-    if (!dev->_vblank_count)
-        goto err;
-
-    dev->vblank_refcount = drm_alloc(sizeof(atomic_t) * num_crtcs,
-        DRM_MEM_DRIVER);
-    if (!dev->vblank_refcount)
-        goto err;
-
-    dev->vblank_enabled = drm_calloc(num_crtcs, sizeof(int),
-        DRM_MEM_DRIVER);
-    if (!dev->vblank_enabled)
-        goto err;
-
-    dev->last_vblank = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER);
-    if (!dev->last_vblank)
-        goto err;
-
-    dev->vblank_premodeset = drm_calloc(num_crtcs, sizeof(u32),
-        DRM_MEM_DRIVER);
-    if (!dev->vblank_premodeset)
-        goto err;
-
-    dev->vblank_offset = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER);
-    if (!dev->vblank_offset)
-        goto err;
-
-    /* Zero per-crtc vblank stuff */
-    for (i = 0; i < num_crtcs; i++) {
-        DRM_INIT_WAITQUEUE(&dev->vbl_queue[i]);
-        TAILQ_INIT(&dev->vbl_sigs[i]);
-        atomic_set(&dev->_vblank_count[i], 0);
-        atomic_set(&dev->vblank_refcount[i], 0);
-    }
-
-    return 0;
-
-err:
-    drm_vblank_cleanup(dev);
+done:
+    drm_vblank_put(dev, crtc);
     return ret;
 }
@@ -530,45 +559,53 @@ void drm_vbl_send_signals(struct drm_device *dev, int crtc )
 }
 #endif
 
+void drm_handle_vblank(struct drm_device *dev, int crtc)
+{
+    atomic_inc(&dev->_vblank_count[crtc]);
+    DRM_WAKEUP(&dev->vbl_queue[crtc]);
+    drm_vbl_send_signals(dev, crtc);
+}
+
 static void drm_locked_task(void *context, int pending __unused)
 {
     struct drm_device *dev = context;
 
-    DRM_LOCK();
-    for (;;) {
-        int ret;
-
-        if (drm_lock_take(&dev->lock.hw_lock->lock,
-            DRM_KERNEL_CONTEXT))
-        {
-            dev->lock.file_priv = NULL; /* kernel owned */
-            dev->lock.lock_time = jiffies;
-            atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
-            break;  /* Got lock */
-        }
-
-        /* Contention */
-#if defined(__FreeBSD__) && __FreeBSD_version > 500000
-        ret = mtx_sleep((void *)&dev->lock.lock_queue, &dev->dev_lock,
-            PZERO | PCATCH, "drmlk2", 0);
-#else
-        ret = tsleep((void *)&dev->lock.lock_queue, PZERO | PCATCH,
-            "drmlk2", 0);
-#endif
-        if (ret != 0)
-            return;
-    }
+    DRM_SPINLOCK(&dev->tsk_lock);
+
+    DRM_LOCK(); /* XXX drm_lock_take() should do it's own locking */
+    if (dev->locked_task_call == NULL ||
+        drm_lock_take(&dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT) == 0) {
+        DRM_UNLOCK();
+        DRM_SPINUNLOCK(&dev->tsk_lock);
+        return;
+    }
+
+    dev->lock.file_priv = NULL; /* kernel owned */
+    dev->lock.lock_time = jiffies;
+    atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
+
     DRM_UNLOCK();
 
     dev->locked_task_call(dev);
 
     drm_lock_free(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT);
+
+    dev->locked_task_call = NULL;
+
+    DRM_SPINUNLOCK(&dev->tsk_lock);
 }
 
 void
 drm_locked_tasklet(struct drm_device *dev,
                    void (*tasklet)(struct drm_device *dev))
 {
+    DRM_SPINLOCK(&dev->tsk_lock);
+    if (dev->locked_task_call != NULL) {
+        DRM_SPINUNLOCK(&dev->tsk_lock);
+        return;
+    }
+
     dev->locked_task_call = tasklet;
+    DRM_SPINUNLOCK(&dev->tsk_lock);
     taskqueue_enqueue(taskqueue_swi, &dev->locked_task);
 }

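Aside: the _DRM_PRE_MODESET/_DRM_POST_MODESET handling added to drm_modeset_ctl() above keeps vblank interrupts enabled across a mode set by taking a single drm_vblank_get() reference at PRE and dropping it at POST, with the per-CRTC vblank_inmodeset flag guarding against mismatched or repeated calls. The following toy standalone model shows just that balance logic; the struct and function names are invented, and the real code additionally holds dev->vbl_lock and sets vblank_disable_allowed.

#include <stdio.h>

/* Stand-ins for dev->vblank_refcount[crtc] and dev->vblank_inmodeset[crtc]. */
struct crtc_state {
    int refcount;   /* vblank interrupt users */
    int inmodeset;  /* modeset in progress? */
};

static void pre_modeset(struct crtc_state *c)
{
    if (!c->inmodeset) {    /* take the reference only once */
        c->inmodeset = 1;
        c->refcount++;      /* models drm_vblank_get() */
    }
}

static void post_modeset(struct crtc_state *c)
{
    if (c->inmodeset) {     /* ignore a POST without a matching PRE */
        c->inmodeset = 0;
        c->refcount--;      /* models drm_vblank_put() */
    }
}

int main(void)
{
    struct crtc_state c = { 0, 0 };

    pre_modeset(&c);
    pre_modeset(&c);        /* duplicate PRE is harmless */
    post_modeset(&c);
    post_modeset(&c);       /* duplicate POST is harmless too */
    printf("refcount after mismatched calls: %d\n", c.refcount); /* prints 0 */
    return 0;
}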
@@ -180,6 +180,13 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
         _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) != lock->context)
         return EINVAL;
 
+    DRM_SPINLOCK(&dev->tsk_lock);
+    if (dev->locked_task_call != NULL) {
+        dev->locked_task_call(dev);
+        dev->locked_task_call = NULL;
+    }
+    DRM_SPINUNLOCK(&dev->tsk_lock);
+
     atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
 
     DRM_LOCK();

@@ -71,6 +71,7 @@ drm_pci_alloc(struct drm_device *dev, size_t size,
         return NULL;
 
 #ifdef __FreeBSD__
+    DRM_UNLOCK();
     ret = bus_dma_tag_create(NULL, align, 0, /* tag, align, boundary */
         maxaddr, BUS_SPACE_MAXADDR, /* lowaddr, highaddr */
         NULL, NULL, /* filtfunc, filtfuncargs */
@@ -79,6 +80,7 @@ drm_pci_alloc(struct drm_device *dev, size_t size,
         &dmah->tag);
     if (ret != 0) {
         free(dmah, M_DRM);
+        DRM_LOCK();
         return NULL;
     }
@@ -87,9 +89,10 @@ drm_pci_alloc(struct drm_device *dev, size_t size,
     if (ret != 0) {
         bus_dma_tag_destroy(dmah->tag);
         free(dmah, M_DRM);
+        DRM_LOCK();
         return NULL;
     }
+    DRM_LOCK();
 
     ret = bus_dmamap_load(dmah->tag, dmah->map, dmah->vaddr, size,
         drm_pci_busdma_callback, dmah, 0);
     if (ret != 0) {

@@ -3,7 +3,7 @@
 .PATH: ${.CURDIR}/..
 KMOD = i915
 NO_MAN = YES
-SRCS = i915_dma.c i915_drv.c i915_irq.c i915_mem.c
+SRCS = i915_dma.c i915_drv.c i915_irq.c i915_mem.c i915_suspend.c
 SRCS += device_if.h bus_if.h pci_if.h opt_drm.h
 CFLAGS += ${DEBUG_FLAGS} -I. -I..

@@ -40,10 +40,38 @@ static drm_pci_id_list_t i915_pciidlist[] = {
     i915_PCI_IDS
 };
 
+static int i915_suspend(device_t nbdev)
+{
+    struct drm_device *dev = device_get_softc(nbdev);
+    struct drm_i915_private *dev_priv = dev->dev_private;
+
+    if (!dev || !dev_priv) {
+        DRM_ERROR("dev: 0x%lx, dev_priv: 0x%lx\n",
+            (unsigned long) dev, (unsigned long) dev_priv);
+        DRM_ERROR("DRM not initialized, aborting suspend.\n");
+        return -ENODEV;
+    }
+
+    i915_save_state(dev);
+
+    return (bus_generic_suspend(nbdev));
+}
+
+static int i915_resume(device_t nbdev)
+{
+    struct drm_device *dev = device_get_softc(nbdev);
+
+    i915_restore_state(dev);
+
+    return (bus_generic_resume(nbdev));
+}
+
 static void i915_configure(struct drm_device *dev)
 {
-    dev->driver.buf_priv_size = 1; /* No dev_priv */
+    dev->driver.buf_priv_size = sizeof(drm_i915_private_t);
     dev->driver.load = i915_driver_load;
+    dev->driver.unload = i915_driver_unload;
+    dev->driver.firstopen = i915_driver_firstopen;
     dev->driver.preclose = i915_driver_preclose;
     dev->driver.lastclose = i915_driver_lastclose;
     dev->driver.device_is_agp = i915_driver_device_is_agp;
@@ -94,6 +122,8 @@ static device_method_t i915_methods[] = {
     /* Device interface */
     DEVMETHOD(device_probe, i915_probe),
     DEVMETHOD(device_attach, i915_attach),
+    DEVMETHOD(device_suspend, i915_suspend),
+    DEVMETHOD(device_resume, i915_resume),
     DEVMETHOD(device_detach, drm_detach),
 
     { 0, 0 }

bsd-core/i915_suspend.c (new symbolic link)
@@ -0,0 +1 @@
+../shared-core/i915_suspend.c

bsd-core/radeon_microcode.h (new symbolic link)
@@ -0,0 +1 @@
+../shared-core/radeon_microcode.h

@@ -21,10 +21,11 @@ r128-objs := r128_drv.o r128_cce.o r128_state.o r128_irq.o
 mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
 i810-objs := i810_drv.o i810_dma.o
 i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \
-		i915_buffer.o i915_execbuf.o i915_gem.o \
+		i915_buffer.o i915_compat.o i915_execbuf.o i915_suspend.o \
+		i915_gem.o i915_opregion.o \
 		intel_display.o intel_crt.o intel_lvds.o intel_bios.o \
 		intel_sdvo.o intel_modes.o intel_i2c.o i915_init.o intel_fb.o \
-		intel_tv.o i915_compat.o intel_dvo.o dvo_ch7xxx.o \
+		intel_tv.o intel_dvo.o dvo_ch7xxx.o \
 		dvo_ch7017.o dvo_ivch.o dvo_tfp410.o dvo_sil164.o
 nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \
 		nouveau_object.o nouveau_irq.o nouveau_notifier.o nouveau_swmthd.o \

@@ -962,6 +962,14 @@ struct drm_device {
     /** \name VBLANK IRQ support */
     /*@{ */
 
+    /*
+     * At load time, disabling the vblank interrupt won't be allowed since
+     * old clients may not call the modeset ioctl and therefore misbehave.
+     * Once the modeset ioctl *has* been called though, we can safely
+     * disable them when unused.
+     */
+    int vblank_disable_allowed;
+
     wait_queue_head_t *vbl_queue; /**< VBLANK wait queue */
     atomic_t *_vblank_count; /**< number of VBLANK interrupts (driver must alloc the right number of counters) */
     spinlock_t vbl_lock;
@@ -970,13 +978,12 @@ struct drm_device {
     atomic_t *vblank_refcount; /* number of users of vblank interrupts per crtc */
     u32 *last_vblank; /* protected by dev->vbl_lock, used */
                       /* for wraparound handling */
-    u32 *vblank_offset; /* used to track how many vblanks */
     int *vblank_enabled; /* so we don't call enable more than
                             once per disable */
-    u32 *vblank_premodeset; /* were lost during modeset */
+    int *vblank_inmodeset; /* Display driver is setting mode */
     struct timer_list vblank_disable_timer;
 
-    unsigned long max_vblank_count; /**< size of vblank counter register */
+    u32 max_vblank_count; /**< size of vblank counter register */
     spinlock_t tasklet_lock; /**< For drm_locked_tasklet */
     void (*locked_tasklet_func)(struct drm_device *dev);
@@ -1314,7 +1321,6 @@ extern int drm_wait_hotplug(struct drm_device *dev, void *data, struct drm_file
 extern int drm_vblank_wait(struct drm_device * dev, unsigned int *vbl_seq);
 extern void drm_locked_tasklet(struct drm_device *dev, void(*func)(struct drm_device*));
 extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
-extern void drm_update_vblank_count(struct drm_device *dev, int crtc);
 extern void drm_handle_vblank(struct drm_device *dev, int crtc);
 extern void drm_handle_hotplug(struct drm_device *dev);
 extern int drm_vblank_get(struct drm_device *dev, int crtc);
@@ -1505,6 +1511,7 @@ void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
 void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
 
 extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev);
+extern void drm_core_ioremap_wc(struct drm_map *map, struct drm_device *dev);
 extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev);
 
 static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev,

@@ -394,4 +394,8 @@ extern struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
 	on_each_cpu(handler, data, wait, 1)
 #endif
 
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26))
+#define drm_core_ioremap_wc drm_core_ioremap
+#endif
+
 #endif

@@ -635,9 +635,10 @@ long drm_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
     if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
         && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls))
         ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
-    else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE))
+    else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
         ioctl = &drm_ioctls[nr];
-    else {
+        cmd = ioctl->cmd;
+    } else {
         retcode = -EINVAL;
         goto err_i1;
     }
@@ -654,6 +655,7 @@ long drm_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
         goto err_i1;
     }
 #endif
+
     func = ioctl->func;
     /* is there a local override? */
     if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl)
@@ -679,7 +681,7 @@ long drm_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
         retcode = func(dev, kdata, file_priv);
     }
 
-    if ((retcode == 0) && (cmd & IOC_OUT)) {
+    if (cmd & IOC_OUT) {
         if (copy_to_user((void __user *)arg, kdata,
                          _IOC_SIZE(cmd)) != 0)
             retcode = -EFAULT;

@@ -77,10 +77,16 @@ static void vblank_disable_fn(unsigned long arg)
     unsigned long irqflags;
     int i;
 
+    if (!dev->vblank_disable_allowed)
+        return;
+
     for (i = 0; i < dev->num_crtcs; i++) {
         spin_lock_irqsave(&dev->vbl_lock, irqflags);
         if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
             dev->vblank_enabled[i]) {
+            DRM_DEBUG("disabling vblank on crtc %d\n", i);
+            dev->last_vblank[i] =
+                dev->driver->get_vblank_counter(dev, i);
             dev->driver->disable_vblank(dev, i);
             dev->vblank_enabled[i] = 0;
         }
@@ -118,13 +124,9 @@ static void drm_vblank_cleanup(struct drm_device *dev)
     drm_free(dev->last_vblank, sizeof(*dev->last_vblank) * dev->num_crtcs,
              DRM_MEM_DRIVER);
 
-    if (dev->vblank_premodeset)
-        drm_free(dev->vblank_premodeset, sizeof(*dev->vblank_premodeset) *
+    if (dev->vblank_inmodeset)
+        drm_free(dev->vblank_inmodeset, sizeof(*dev->vblank_inmodeset) *
                  dev->num_crtcs, DRM_MEM_DRIVER);
-
-    if (dev->vblank_offset)
-        drm_free(dev->vblank_offset, sizeof(*dev->vblank_offset) * dev->num_crtcs,
-                 DRM_MEM_DRIVER);
 }
 
 int drm_vblank_init(struct drm_device *dev, int num_crtcs)
@@ -167,13 +169,9 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
     if (!dev->last_vblank)
         goto err;
 
-    dev->vblank_premodeset = drm_calloc(num_crtcs, sizeof(u32),
+    dev->vblank_inmodeset = drm_calloc(num_crtcs, sizeof(int),
                                         DRM_MEM_DRIVER);
-    if (!dev->vblank_premodeset)
-        goto err;
-
-    dev->vblank_offset = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER);
-    if (!dev->vblank_offset)
+    if (!dev->vblank_inmodeset)
         goto err;
 
     /* Zero per-crtc vblank stuff */
@@ -184,6 +182,8 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
         atomic_set(&dev->vblank_refcount[i], 0);
     }
 
+    dev->vblank_disable_allowed = 0;
+
     return 0;
 
 err:
@@ -426,8 +426,7 @@ int drm_control(struct drm_device *dev, void *data,
  */
 u32 drm_vblank_count(struct drm_device *dev, int crtc)
 {
-    return atomic_read(&dev->_vblank_count[crtc]) +
-        dev->vblank_offset[crtc];
+    return atomic_read(&dev->_vblank_count[crtc]);
 }
 EXPORT_SYMBOL(drm_vblank_count);
 
@@ -440,10 +439,15 @@ EXPORT_SYMBOL(drm_vblank_count);
  * (specified by @crtc).  Deal with wraparound, if it occurred, and
  * update the last read value so we can deal with wraparound on the next
  * call if necessary.
+ *
+ * Only necessary when going from off->on, to account for frames we
+ * didn't get an interrupt for.
+ *
+ * Note: caller must hold dev->vbl_lock since this reads & writes
+ * device vblank fields.
  */
-void drm_update_vblank_count(struct drm_device *dev, int crtc)
+static void drm_update_vblank_count(struct drm_device *dev, int crtc)
 {
-    unsigned long irqflags;
     u32 cur_vblank, diff;
 
     /*
@@ -454,20 +458,19 @@ void drm_update_vblank_count(struct drm_device *dev, int crtc)
      * a long time.
      */
     cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
-    spin_lock_irqsave(&dev->vbl_lock, irqflags);
+    diff = cur_vblank - dev->last_vblank[crtc];
     if (cur_vblank < dev->last_vblank[crtc]) {
-        diff = dev->max_vblank_count -
-            dev->last_vblank[crtc];
-        diff += cur_vblank;
-    } else {
-        diff = cur_vblank - dev->last_vblank[crtc];
-    }
-    dev->last_vblank[crtc] = cur_vblank;
-    spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+        diff += dev->max_vblank_count;
+
+        DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
+                  crtc, dev->last_vblank[crtc], cur_vblank, diff);
+    }
+
+    DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
+              crtc, diff);
 
     atomic_add(diff, &dev->_vblank_count[crtc]);
 }
-EXPORT_SYMBOL(drm_update_vblank_count);
 
 /**
  * drm_vblank_get - get a reference count on vblank events
@@ -475,9 +478,7 @@ EXPORT_SYMBOL(drm_update_vblank_count);
  * @crtc: which CRTC to own
  *
  * Acquire a reference count on vblank events to avoid having them disabled
- * while in use.  Note callers will probably want to update the master counter
- * using drm_update_vblank_count() above before calling this routine so that
- * wakeups occur on the right vblank event.
+ * while in use.
  *
  * RETURNS
  * Zero on success, nonzero on failure.
@@ -487,15 +488,17 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
     unsigned long irqflags;
     int ret = 0;
 
     spin_lock_irqsave(&dev->vbl_lock, irqflags);
     /* Going from 0->1 means we have to enable interrupts again */
     if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1 &&
         !dev->vblank_enabled[crtc]) {
         ret = dev->driver->enable_vblank(dev, crtc);
         if (ret)
             atomic_dec(&dev->vblank_refcount[crtc]);
-        else
+        else {
             dev->vblank_enabled[crtc] = 1;
+            drm_update_vblank_count(dev, crtc);
+        }
     }
     spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
@@ -525,13 +528,21 @@ EXPORT_SYMBOL(drm_vblank_put);
  *
  * Applications should call the %_DRM_PRE_MODESET and %_DRM_POST_MODESET
  * ioctls around modesetting so that any lost vblank events are accounted for.
+ *
+ * Generally the counter will reset across mode sets.  If interrupts are
+ * enabled around this call, we don't have to do anything since the counter
+ * will have already been incremented.
  */
 int drm_modeset_ctl(struct drm_device *dev, void *data,
                     struct drm_file *file_priv)
 {
     struct drm_modeset_ctl *modeset = data;
+    unsigned long irqflags;
     int crtc, ret = 0;
-    u32 new;
+
+    /* If drm_vblank_init() hasn't been called yet, just no-op */
+    if (!dev->num_crtcs)
+        goto out;
 
     crtc = modeset->crtc;
     if (crtc >= dev->num_crtcs) {
@@ -539,14 +550,28 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
         goto out;
     }
 
+    /*
+     * To avoid all the problems that might happen if interrupts
+     * were enabled/disabled around or between these calls, we just
+     * have the kernel take a reference on the CRTC (just once though
+     * to avoid corrupting the count if multiple, mismatch calls occur),
+     * so that interrupts remain enabled in the interim.
+     */
     switch (modeset->cmd) {
     case _DRM_PRE_MODESET:
-        dev->vblank_premodeset[crtc] =
-            dev->driver->get_vblank_counter(dev, crtc);
+        if (!dev->vblank_inmodeset[crtc]) {
+            dev->vblank_inmodeset[crtc] = 1;
+            drm_vblank_get(dev, crtc);
+        }
         break;
     case _DRM_POST_MODESET:
-        new = dev->driver->get_vblank_counter(dev, crtc);
-        dev->vblank_offset[crtc] = dev->vblank_premodeset[crtc] - new;
+        if (dev->vblank_inmodeset[crtc]) {
+            spin_lock_irqsave(&dev->vbl_lock, irqflags);
+            dev->vblank_disable_allowed = 1;
+            dev->vblank_inmodeset[crtc] = 0;
+            spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+            drm_vblank_put(dev, crtc);
+        }
         break;
     default:
         ret = -EINVAL;
@@ -580,7 +605,6 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
                     struct drm_file *file_priv)
 {
     union drm_wait_vblank *vblwait = data;
-    struct timeval now;
     int ret = 0;
     unsigned int flags, seq, crtc;
@@ -601,7 +625,9 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
     if (crtc >= dev->num_crtcs)
         return -EINVAL;
 
-    drm_update_vblank_count(dev, crtc);
+    ret = drm_vblank_get(dev, crtc);
+    if (ret)
+        return ret;
     seq = drm_vblank_count(dev, crtc);
 
     switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
@@ -611,7 +637,8 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
     case _DRM_VBLANK_ABSOLUTE:
         break;
     default:
-        return -EINVAL;
+        ret = -EINVAL;
+        goto done;
     }
 
     if ((flags & _DRM_VBLANK_NEXTONMISS) &&
@@ -644,15 +671,18 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
         if (atomic_read(&dev->vbl_signal_pending) >= 100) {
             spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
-            return -EBUSY;
+            ret = -EBUSY;
+            goto done;
         }
 
         spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 
         vbl_sig = drm_calloc(1, sizeof(struct drm_vbl_sig),
                              DRM_MEM_DRIVER);
-        if (!vbl_sig)
-            return -ENOMEM;
+        if (!vbl_sig) {
+            ret = -ENOMEM;
+            goto done;
+        }
 
         ret = drm_vblank_get(dev, crtc);
         if (ret) {
@@ -675,23 +705,23 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
         vblwait->reply.sequence = seq;
     } else {
-        unsigned long cur_vblank;
-
-        ret = drm_vblank_get(dev, crtc);
-        if (ret)
-            return ret;
         DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
-                    (((cur_vblank = drm_vblank_count(dev, crtc))
+                    ((drm_vblank_count(dev, crtc)
                       - vblwait->request.sequence) <= (1 << 23)));
-        drm_vblank_put(dev, crtc);
-        do_gettimeofday(&now);
 
-        vblwait->reply.tval_sec = now.tv_sec;
-        vblwait->reply.tval_usec = now.tv_usec;
-        vblwait->reply.sequence = cur_vblank;
+        if (ret != -EINTR) {
+            struct timeval now;
+
+            do_gettimeofday(&now);
+            vblwait->reply.tval_sec = now.tv_sec;
+            vblwait->reply.tval_usec = now.tv_usec;
+            vblwait->reply.sequence = drm_vblank_count(dev, crtc);
+        }
     }
 
 done:
+    drm_vblank_put(dev, crtc);
     return ret;
 }
@@ -745,7 +775,7 @@ static void drm_vbl_send_signals(struct drm_device * dev, int crtc)
  */
 void drm_handle_vblank(struct drm_device *dev, int crtc)
 {
-    drm_update_vblank_count(dev, crtc);
+    atomic_inc(&dev->_vblank_count[crtc]);
     DRM_WAKEUP(&dev->vbl_queue[crtc]);
     drm_vbl_send_signals(dev, crtc);
 }

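Aside: the drm_update_vblank_count() rewrite above computes how many vblanks were missed while interrupts were off. With unsigned arithmetic, cur - last is already correct modulo 2^32, and the extra diff += max_vblank_count only matters for hardware counters narrower than 32 bits. A standalone sketch of just that arithmetic (function name and sample values invented for illustration):

#include <stdint.h>
#include <stdio.h>

/* max_vblank_count models a hardware counter that wraps below 2^32. */
static uint32_t missed_vblanks(uint32_t cur, uint32_t last, uint32_t max_vblank_count)
{
    uint32_t diff = cur - last;   /* unsigned math, wraps mod 2^32 */

    if (cur < last)               /* hardware counter wrapped */
        diff += max_vblank_count;
    return diff;
}

int main(void)
{
    /* A 24-bit hardware counter wrapped from 0xfffff0 to 0x000010. */
    uint32_t max = 1u << 24;
    uint32_t d = missed_vblanks(0x000010, 0xfffff0, max);

    printf("missed = %u (expected 32)\n", d);
    return 0;
}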
@@ -350,6 +350,15 @@ void drm_core_ioremap(struct drm_map *map, struct drm_device *dev)
 }
 EXPORT_SYMBOL_GPL(drm_core_ioremap);
 
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+void drm_core_ioremap_wc(struct drm_map *map, struct drm_device *dev)
+{
+	map->handle = ioremap_wc(map->offset, map->size);
+}
+EXPORT_SYMBOL_GPL(drm_core_ioremap_wc);
+#endif
+
 void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev)
 {
 	if (!map->handle || !map->size)

@@ -272,7 +272,6 @@ static void i915_restore_vga(struct drm_device *dev)
 static int i915_suspend(struct drm_device *dev, pm_message_t state)
 {
     struct drm_i915_private *dev_priv = dev->dev_private;
-    int i;
 
     if (!dev || !dev_priv) {
         printk(KERN_ERR "dev: %p, dev_priv: %p\n", dev, dev_priv);
@@ -284,122 +283,12 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
         return 0;
 
     pci_save_state(dev->pdev);
-    pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
 
-    /* Display arbitration control */
-    dev_priv->saveDSPARB = I915_READ(DSPARB);
+    i915_save_state(dev);
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)
+    intel_opregion_free(dev);
+#endif
 
-    /* Pipe & plane A info */
-    dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
-    dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
-    dev_priv->saveFPA0 = I915_READ(FPA0);
-    dev_priv->saveFPA1 = I915_READ(FPA1);
-    dev_priv->saveDPLL_A = I915_READ(DPLL_A);
-    if (IS_I965G(dev))
-        dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
-    dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
-    dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
-    dev_priv->saveHSYNC_A = I915_READ(HSYNC_A);
-    dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
-    dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
-    dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
-    dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
-
-    dev_priv->saveDSPACNTR = I915_READ(DSPACNTR);
-    dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE);
-    dev_priv->saveDSPASIZE = I915_READ(DSPASIZE);
-    dev_priv->saveDSPAPOS = I915_READ(DSPAPOS);
-    dev_priv->saveDSPAADDR = I915_READ(DSPAADDR);
-    if (IS_I965G(dev)) {
-        dev_priv->saveDSPASURF = I915_READ(DSPASURF);
-        dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
-    }
-    i915_save_palette(dev, PIPE_A);
-    dev_priv->savePIPEASTAT = I915_READ(PIPEASTAT);
-
-    /* Pipe & plane B info */
-    dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
-    dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
-    dev_priv->saveFPB0 = I915_READ(FPB0);
-    dev_priv->saveFPB1 = I915_READ(FPB1);
-    dev_priv->saveDPLL_B = I915_READ(DPLL_B);
-    if (IS_I965G(dev))
-        dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
-    dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
-    dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
-    dev_priv->saveHSYNC_B = I915_READ(HSYNC_B);
-    dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
-    dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
-    dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
-    dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
-
-    dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR);
-    dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE);
-    dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
-    dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
-    dev_priv->saveDSPBADDR = I915_READ(DSPBADDR);
-    if (IS_I965GM(dev) || IS_IGD_GM(dev)) {
-        dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
-        dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
-    }
-    i915_save_palette(dev, PIPE_B);
-    dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT);
-
-    /* CRT state */
-    dev_priv->saveADPA = I915_READ(ADPA);
-
-    /* LVDS state */
-    dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
-    dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
-    dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
-    if (IS_I965G(dev))
-        dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
-    if (IS_MOBILE(dev) && !IS_I830(dev))
-        dev_priv->saveLVDS = I915_READ(LVDS);
-    if (!IS_I830(dev) && !IS_845G(dev))
-        dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
-    dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
-    dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
-    dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR);
-
-    /* FIXME: save TV & SDVO state */
-
-    /* FBC state */
-    dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
-    dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
-    dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
-    dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
-
-    /* Interrupt state */
-    dev_priv->saveIIR = I915_READ(IIR);
-    dev_priv->saveIER = I915_READ(IER);
-    dev_priv->saveIMR = I915_READ(IMR);
-
-    /* VGA state */
-    dev_priv->saveVGA0 = I915_READ(VGA0);
-    dev_priv->saveVGA1 = I915_READ(VGA1);
-    dev_priv->saveVGA_PD = I915_READ(VGA_PD);
-    dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
-
-    /* Clock gating state */
-    dev_priv->saveD_STATE = I915_READ(D_STATE);
-    dev_priv->saveCG_2D_DIS = I915_READ(CG_2D_DIS);
-
-    /* Cache mode state */
-    dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
-
-    /* Memory Arbitration state */
-    dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
-
-    /* Scratch space */
-    for (i = 0; i < 16; i++) {
-        dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2));
-        dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
-    }
-    for (i = 0; i < 3; i++)
-        dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
-
-    i915_save_vga(dev);
 
     if (state.event == PM_EVENT_SUSPEND) {
         /* Shut down the device */
@@ -412,158 +301,17 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
 static int i915_resume(struct drm_device *dev)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
-    int i;
-
     pci_set_power_state(dev->pdev, PCI_D0);
     pci_restore_state(dev->pdev);
     if (pci_enable_device(dev->pdev))
         return -1;
+    pci_set_master(dev->pdev);
 
-    DRM_INFO("resuming i915\n");
+    i915_restore_state(dev);
 
-    pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)
+    intel_opregion_init(dev);
+#endif
 
-    I915_WRITE(DSPARB, dev_priv->saveDSPARB);
-
-    /* Pipe & plane A info */
-    /* Prime the clock */
-    if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
-        I915_WRITE(DPLL_A, dev_priv->saveDPLL_A &
-                   ~DPLL_VCO_ENABLE);
-        udelay(150);
-    }
-    I915_WRITE(FPA0, dev_priv->saveFPA0);
-    I915_WRITE(FPA1, dev_priv->saveFPA1);
-    /* Actually enable it */
-    I915_WRITE(DPLL_A, dev_priv->saveDPLL_A);
-    udelay(150);
-    if (IS_I965G(dev))
-        I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
-    udelay(150);
-
-    /* Restore mode */
-    I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A);
-    I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A);
-    I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A);
-    I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
-    I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
-    I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
-    I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
-
-    /* Restore plane info */
-    I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);
-    I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS);
-    I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
-    I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR);
-    I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
-    if (IS_I965G(dev)) {
-        I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
-        I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
-    }
-
-    I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF);
-
-    i915_restore_palette(dev, PIPE_A);
-    /* Enable the plane */
-    I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR);
-    I915_WRITE(DSPAADDR, I915_READ(DSPAADDR));
-
-    /* Pipe & plane B info */
-    if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
-        DRM_INFO("restoring DPLL_B: 0x%08x\n", dev_priv->saveDPLL_B);
-        I915_WRITE(DPLL_B, dev_priv->saveDPLL_B &
-                   ~DPLL_VCO_ENABLE);
-        udelay(150);
-    }
-    I915_WRITE(FPB0, dev_priv->saveFPB0);
-    I915_WRITE(FPB1, dev_priv->saveFPB1);
-    /* Actually enable it */
-    I915_WRITE(DPLL_B, dev_priv->saveDPLL_B);
-    DRM_INFO("restoring DPLL_B: 0x%08x\n", dev_priv->saveDPLL_B);
-    udelay(150);
-    if (IS_I965G(dev))
-        I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
-    udelay(150);
-
-    /* Restore mode */
-    I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);
-    I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B);
-    I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B);
-    I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
-    I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
-    I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
-    I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
-
-    /* Restore plane info */
-    I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE);
-    I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS);
-    I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
-    I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR);
I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
if (IS_I965G(dev)) {
I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
}
I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF);
i915_restore_palette(dev, PIPE_B);
/* Enable the plane */
I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
I915_WRITE(DSPBADDR, I915_READ(DSPBADDR));
/* CRT state */
I915_WRITE(ADPA, dev_priv->saveADPA);
/* LVDS state */
if (IS_I965G(dev))
I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
if (IS_MOBILE(dev) && !IS_I830(dev))
I915_WRITE(LVDS, dev_priv->saveLVDS);
if (!IS_I830(dev) && !IS_845G(dev))
I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
/* FIXME: restore TV & SDVO state */
/* FBC info */
I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
/* VGA state */
I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
I915_WRITE(VGA0, dev_priv->saveVGA0);
I915_WRITE(VGA1, dev_priv->saveVGA1);
I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
udelay(150);
/* Clock gating state */
I915_WRITE (D_STATE, dev_priv->saveD_STATE);
I915_WRITE (CG_2D_DIS, dev_priv->saveCG_2D_DIS);
/* Cache mode state */
I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
/* Memory arbitration state */
I915_WRITE (MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);
for (i = 0; i < 16; i++) {
I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]);
I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i]);
}
for (i = 0; i < 3; i++)
I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
i915_restore_vga(dev);
return 0; return 0;
} }

linux-core/i915_opregion.c (new file)
@ -0,0 +1,390 @@
/*
*
* Copyright 2008 Intel Corporation <hong.liu@intel.com>
* Copyright 2008 Red Hat <mjg@redhat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. IN NO EVENT SHALL INTEL AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/acpi.h>
#include "drmP.h"
#include "i915_drm.h"
#include "i915_drv.h"
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)
#define PCI_ASLE 0xe4
#define PCI_ASLS 0xfc
#define OPREGION_SZ (8*1024)
#define OPREGION_HEADER_OFFSET 0
#define OPREGION_ACPI_OFFSET 0x100
#define OPREGION_SWSCI_OFFSET 0x200
#define OPREGION_ASLE_OFFSET 0x300
#define OPREGION_VBT_OFFSET 0x1000
#define OPREGION_SIGNATURE "IntelGraphicsMem"
#define MBOX_ACPI (1<<0)
#define MBOX_SWSCI (1<<1)
#define MBOX_ASLE (1<<2)
/* _DOD id definitions */
#define OUTPUT_CONNECTOR_MSK 0xf000
#define OUTPUT_CONNECTOR_OFFSET 12
#define OUTPUT_PORT_MSK 0x00f0
#define OUTPUT_PORT_OFFSET 4
#define OUTPUT_PORT_ANALOG 0
#define OUTPUT_PORT_LVDS 1
#define OUTPUT_PORT_SDVOB 2
#define OUTPUT_PORT_SDVOC 3
#define OUTPUT_PORT_TV 4
#define OUTPUT_DISPLAY_MSK 0x0f00
#define OUTPUT_DISPLAY_OFFSET 8
#define OUTPUT_DISPLAY_OTHER 0
#define OUTPUT_DISPLAY_VGA 1
#define OUTPUT_DISPLAY_TV 2
#define OUTPUT_DISPLAY_DIGI 3
#define OUTPUT_DISPLAY_FLAT_PANEL 4
/* predefined id for integrated LVDS and VGA connector */
#define OUTPUT_INT_LVDS 0x00000110
#define OUTPUT_INT_VGA 0x80000100
struct opregion_header {
u8 signature[16];
u32 size;
u32 opregion_ver;
u8 bios_ver[32];
u8 vbios_ver[16];
u8 driver_ver[16];
u32 mboxes;
u8 reserved[164];
} __attribute__((packed));
/* OpRegion mailbox #1: public ACPI methods */
struct opregion_acpi {
u32 drdy; /* driver readiness */
u32 csts; /* notification status */
u32 cevt; /* current event */
u8 rsvd1[20];
u32 didl[8]; /* supported display devices ID list */
u32 cpdl[8]; /* currently presented display list */
u32 cadl[8]; /* currently active display list */
u32 nadl[8]; /* next active devices list */
u32 aslp; /* ASL sleep time-out */
u32 tidx; /* toggle table index */
u32 chpd; /* current hotplug enable indicator */
u32 clid; /* current lid state*/
u32 cdck; /* current docking state */
u32 sxsw; /* Sx state resume */
u32 evts; /* ASL supported events */
u32 cnot; /* current OS notification */
u32 nrdy; /* driver status */
u8 rsvd2[60];
} __attribute__((packed));
/* OpRegion mailbox #2: SWSCI */
struct opregion_swsci {
u32 scic; /* SWSCI command|status|data */
u32 parm; /* command parameters */
u32 dslp; /* driver sleep time-out */
u8 rsvd[244];
} __attribute__((packed));
/* OpRegion mailbox #3: ASLE */
struct opregion_asle {
u32 ardy; /* driver readiness */
u32 aslc; /* ASLE interrupt command */
u32 tche; /* technology enabled indicator */
u32 alsi; /* current ALS illuminance reading */
u32 bclp; /* backlight brightness to set */
u32 pfit; /* panel fitting state */
u32 cblv; /* current brightness level */
u16 bclm[20]; /* backlight level duty cycle mapping table */
u32 cpfm; /* current panel fitting mode */
u32 epfm; /* enabled panel fitting modes */
u8 plut[74]; /* panel LUT and identifier */
u32 pfmb; /* PWM freq and min brightness */
u8 rsvd[102];
} __attribute__((packed));
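The three mailboxes above live at fixed offsets inside the 8 KB OpRegion; a minimal sketch (hypothetical helper, mirroring what intel_opregion_init() below does with the ioremap'ed base):
static void opregion_map_mailboxes(void *base,
                                   struct opregion_header **header,
                                   struct opregion_acpi **acpi,
                                   struct opregion_swsci **swsci,
                                   struct opregion_asle **asle)
{
        *header = base + OPREGION_HEADER_OFFSET; /* 0x0000, "IntelGraphicsMem" signature */
        *acpi   = base + OPREGION_ACPI_OFFSET;   /* 0x0100, mailbox #1, valid if MBOX_ACPI set */
        *swsci  = base + OPREGION_SWSCI_OFFSET;  /* 0x0200, mailbox #2, valid if MBOX_SWSCI set */
        *asle   = base + OPREGION_ASLE_OFFSET;   /* 0x0300, mailbox #3, valid if MBOX_ASLE set */
}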
/* ASLE irq request bits */
#define ASLE_SET_ALS_ILLUM (1 << 0)
#define ASLE_SET_BACKLIGHT (1 << 1)
#define ASLE_SET_PFIT (1 << 2)
#define ASLE_SET_PWM_FREQ (1 << 3)
#define ASLE_REQ_MSK 0xf
/* response bits of ASLE irq request */
#define ASLE_ALS_ILLUM_FAIL (2<<10)
#define ASLE_BACKLIGHT_FAIL (2<<12)
#define ASLE_PFIT_FAIL (2<<14)
#define ASLE_PWM_FREQ_FAIL (2<<16)
/* ASLE backlight brightness to set */
#define ASLE_BCLP_VALID (1<<31)
#define ASLE_BCLP_MSK (~(1<<31))
/* ASLE panel fitting request */
#define ASLE_PFIT_VALID (1<<31)
#define ASLE_PFIT_CENTER (1<<0)
#define ASLE_PFIT_STRETCH_TEXT (1<<1)
#define ASLE_PFIT_STRETCH_GFX (1<<2)
/* PWM frequency and minimum brightness */
#define ASLE_PFMB_BRIGHTNESS_MASK (0xff)
#define ASLE_PFMB_BRIGHTNESS_VALID (1<<8)
#define ASLE_PFMB_PWM_MASK (0x7ffffe00)
#define ASLE_PFMB_PWM_VALID (1<<31)
#define ASLE_CBLV_VALID (1<<31)
static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct opregion_asle *asle = dev_priv->opregion.asle;
u32 blc_pwm_ctl;
if (!(bclp & ASLE_BCLP_VALID))
return ASLE_BACKLIGHT_FAIL;
bclp &= ASLE_BCLP_MSK;
if (bclp < 0 || bclp > 255)
return ASLE_BACKLIGHT_FAIL;
blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK;
I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | ((bclp * 0x101) -1));
asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
return 0;
}
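The brightness arithmetic above is easier to read in isolation; a sketch of the two conversions as hypothetical helpers (not part of the patch), assuming the 8-bit request range enforced by the check above:
/* 0x101 replicates the 8-bit request into a 16-bit duty cycle (0xff -> 0xfffe
 * after the -1); cblv echoes the request back to the firmware as 0..100%. */
static u32 bclp_to_duty_cycle(u32 bclp) /* bclp in 0..255 */
{
        return (bclp * 0x101) - 1;
}
static u32 bclp_to_cblv(u32 bclp)
{
        return ((bclp * 0x64) / 0xff) | ASLE_CBLV_VALID;
}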
static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
{
return 0;
}
static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (pfmb & ASLE_PFMB_PWM_VALID) {
u32 blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
u32 pwm = pfmb & ASLE_PFMB_PWM_MASK;
blc_pwm_ctl &= BACKLIGHT_DUTY_CYCLE_MASK;
pwm = pwm >> 9;
// FIXME - what do we do with the PWM?
}
return 0;
}
static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
{
if (!(pfit & ASLE_PFIT_VALID))
return ASLE_PFIT_FAIL;
return 0;
}
void opregion_asle_intr(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct opregion_asle *asle = dev_priv->opregion.asle;
u32 asle_stat = 0;
u32 asle_req;
if (!asle)
return;
asle_req = asle->aslc & ASLE_REQ_MSK;
if (!asle_req) {
DRM_DEBUG("non asle set request??\n");
return;
}
if (asle_req & ASLE_SET_ALS_ILLUM)
asle_stat |= asle_set_als_illum(dev, asle->alsi);
if (asle_req & ASLE_SET_BACKLIGHT)
asle_stat |= asle_set_backlight(dev, asle->bclp);
if (asle_req & ASLE_SET_PFIT)
asle_stat |= asle_set_pfit(dev, asle->pfit);
if (asle_req & ASLE_SET_PWM_FREQ)
asle_stat |= asle_set_pwm_freq(dev, asle->pfmb);
asle->aslc = asle_stat;
}
#define ASLE_ALS_EN (1<<0)
#define ASLE_BLC_EN (1<<1)
#define ASLE_PFIT_EN (1<<2)
#define ASLE_PFMB_EN (1<<3)
void opregion_enable_asle(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct opregion_asle *asle = dev_priv->opregion.asle;
u32 mask = 0;
if (asle) {
u32 pipeb_stats = I915_READ(PIPEBSTAT);
if (IS_MOBILE(dev)) {
/* Some hardware uses the legacy backlight controller
to signal interrupts, so we need to set up pipe B
to generate an IRQ on writes */
I915_WRITE(PIPEBSTAT, pipeb_stats |=
I915_LEGACY_BLC_EVENT_ENABLE);
mask = I915_ASLE_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
} else
mask = I915_ASLE_INTERRUPT;
dev_priv->irq_mask_reg &= ~mask;
asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
ASLE_PFMB_EN;
asle->ardy = 1;
}
}
#define ACPI_EV_DISPLAY_SWITCH (1<<0)
#define ACPI_EV_LID (1<<1)
#define ACPI_EV_DOCK (1<<2)
static struct intel_opregion *system_opregion;
int intel_opregion_video_event(struct notifier_block *nb, unsigned long val,
void *data)
{
/* The only video event relevant to the OpRegion is 0x80, which indicates
   either a docking event, a lid switch or a display switch request. In
   Linux these are handled by the dock, button and video drivers. We
   might want to make the video driver OpRegion-aware in the future, but
   for now we just tell the firmware that the request has been handled. */
struct opregion_acpi *acpi;
if (!system_opregion)
return NOTIFY_DONE;
acpi = system_opregion->acpi;
acpi->csts = 0;
return NOTIFY_OK;
}
static struct notifier_block intel_opregion_notifier = {
.notifier_call = intel_opregion_video_event,
};
int intel_opregion_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
void *base;
u32 asls, mboxes;
int err = 0;
pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
DRM_DEBUG("graphic opregion physical addr: 0x%x\n", asls);
if (asls == 0) {
DRM_DEBUG("ACPI OpRegion not supported!\n");
return -ENOTSUPP;
}
base = ioremap(asls, OPREGION_SZ);
if (!base)
return -ENOMEM;
opregion->header = base;
if (memcmp(opregion->header->signature, OPREGION_SIGNATURE, 16)) {
DRM_DEBUG("opregion signature mismatch\n");
err = -EINVAL;
goto err_out;
}
mboxes = opregion->header->mboxes;
if (mboxes & MBOX_ACPI) {
DRM_DEBUG("Public ACPI methods supported\n");
opregion->acpi = base + OPREGION_ACPI_OFFSET;
} else {
DRM_DEBUG("Public ACPI methods not supported\n");
err = -ENOTSUPP;
goto err_out;
}
opregion->enabled = 1;
if (mboxes & MBOX_SWSCI) {
DRM_DEBUG("SWSCI supported\n");
opregion->swsci = base + OPREGION_SWSCI_OFFSET;
}
if (mboxes & MBOX_ASLE) {
DRM_DEBUG("ASLE supported\n");
opregion->asle = base + OPREGION_ASLE_OFFSET;
}
/* Notify the BIOS that we are ready to handle ACPI video extension
 * notifications. Right now all of these events are handled by the ACPI
 * video module; we don't need to do anything with them ourselves. */
opregion->acpi->csts = 0;
opregion->acpi->drdy = 1;
system_opregion = opregion;
register_acpi_notifier(&intel_opregion_notifier);
return 0;
err_out:
iounmap(opregion->header);
opregion->header = NULL;
return err;
}
void intel_opregion_free(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
if (!opregion->enabled)
return;
opregion->acpi->drdy = 0;
system_opregion = NULL;
unregister_acpi_notifier(&intel_opregion_notifier);
/* just clear all opregion memory pointers now */
iounmap(opregion->header);
opregion->header = NULL;
opregion->acpi = NULL;
opregion->swsci = NULL;
opregion->asle = NULL;
opregion->enabled = 0;
}
#endif

linux-core/i915_suspend.c (new symbolic link)
@ -0,0 +1 @@
../shared-core/i915_suspend.c


@ -59,6 +59,28 @@ static int dri_library_name(struct drm_device * dev, char * buf)
"r300")); "r300"));
} }
static int radeon_suspend(struct drm_device *dev, pm_message_t state)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
/* Disable *all* interrupts */
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690)
RADEON_WRITE(R500_DxMODE_INT_MASK, 0);
RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
return 0;
}
static int radeon_resume(struct drm_device *dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
/* Restore interrupt registers */
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690)
RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg);
RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
return 0;
}
static struct pci_device_id pciidlist[] = { static struct pci_device_id pciidlist[] = {
radeon_PCI_IDS radeon_PCI_IDS
}; };
@ -94,6 +116,8 @@ static struct drm_driver driver = {
.postclose = radeon_driver_postclose, .postclose = radeon_driver_postclose,
.lastclose = radeon_driver_lastclose, .lastclose = radeon_driver_lastclose,
.unload = radeon_driver_unload, .unload = radeon_driver_unload,
.suspend = radeon_suspend,
.resume = radeon_resume,
.get_vblank_counter = radeon_get_vblank_counter, .get_vblank_counter = radeon_get_vblank_counter,
.enable_vblank = radeon_enable_vblank, .enable_vblank = radeon_enable_vblank,
.disable_vblank = radeon_disable_vblank, .disable_vblank = radeon_disable_vblank,


@ -3953,7 +3953,6 @@
# define R300_TILE_SIZE_32 (2 << 4) # define R300_TILE_SIZE_32 (2 << 4)
# define R300_SUBPIXEL_1_12 (0 << 16) # define R300_SUBPIXEL_1_12 (0 << 16)
# define R300_SUBPIXEL_1_16 (1 << 16) # define R300_SUBPIXEL_1_16 (1 << 16)
#define R300_GB_SELECT 0x401c
#define R300_GB_ENABLE 0x4008 #define R300_GB_ENABLE 0x4008
#define R300_GB_AA_CONFIG 0x4020 #define R300_GB_AA_CONFIG 0x4020
#define R400_GB_PIPE_SELECT 0x402c #define R400_GB_PIPE_SELECT 0x402c
@ -4031,17 +4030,6 @@
# define R300_GL_CLIP_SPACE_DEF (0 << 22) # define R300_GL_CLIP_SPACE_DEF (0 << 22)
# define R300_DX_CLIP_SPACE_DEF (1 << 22) # define R300_DX_CLIP_SPACE_DEF (1 << 22)
# define R500_TCL_STATE_OPTIMIZATION (1 << 23) # define R500_TCL_STATE_OPTIMIZATION (1 << 23)
#define R300_VAP_VTE_CNTL 0x20B0
# define R300_VPORT_X_SCALE_ENA (1 << 0)
# define R300_VPORT_X_OFFSET_ENA (1 << 1)
# define R300_VPORT_Y_SCALE_ENA (1 << 2)
# define R300_VPORT_Y_OFFSET_ENA (1 << 3)
# define R300_VPORT_Z_SCALE_ENA (1 << 4)
# define R300_VPORT_Z_OFFSET_ENA (1 << 5)
# define R300_VTX_XY_FMT (1 << 8)
# define R300_VTX_Z_FMT (1 << 9)
# define R300_VTX_W0_FMT (1 << 10)
#define R300_VAP_VTX_STATE_CNTL 0x2180
#define R300_VAP_PSC_SGN_NORM_CNTL 0x21DC #define R300_VAP_PSC_SGN_NORM_CNTL 0x21DC
#define R300_VAP_PROG_STREAM_CNTL_0 0x2150 #define R300_VAP_PROG_STREAM_CNTL_0 0x2150
# define R300_DATA_TYPE_0_SHIFT 0 # define R300_DATA_TYPE_0_SHIFT 0
@ -4431,8 +4419,6 @@
# define R300_ENDIAN_SWAP_HALF_DWORD (3 << 0) # define R300_ENDIAN_SWAP_HALF_DWORD (3 << 0)
# define R300_MACRO_TILE (1 << 2) # define R300_MACRO_TILE (1 << 2)
#define R300_TX_BORDER_COLOR_0 0x45c0
#define R300_TX_ENABLE 0x4104 #define R300_TX_ENABLE 0x4104
# define R300_TEX_0_ENABLE (1 << 0) # define R300_TEX_0_ENABLE (1 << 0)
# define R300_TEX_1_ENABLE (1 << 1) # define R300_TEX_1_ENABLE (1 << 1)
@ -4719,24 +4705,7 @@
# define R300_READ_ENABLE (1 << 2) # define R300_READ_ENABLE (1 << 2)
#define R300_RB3D_ABLENDCNTL 0x4e08 #define R300_RB3D_ABLENDCNTL 0x4e08
#define R300_RB3D_DSTCACHE_CTLSTAT 0x4e4c #define R300_RB3D_DSTCACHE_CTLSTAT 0x4e4c
#define R300_RB3D_COLOROFFSET0 0x4e28
#define R300_RB3D_COLORPITCH0 0x4e38
# define R300_COLORTILE (1 << 16)
# define R300_COLORENDIAN_WORD (1 << 19)
# define R300_COLORENDIAN_DWORD (2 << 19)
# define R300_COLORENDIAN_HALF_DWORD (3 << 19)
# define R300_COLORFORMAT_ARGB1555 (3 << 21)
# define R300_COLORFORMAT_RGB565 (4 << 21)
# define R300_COLORFORMAT_ARGB8888 (6 << 21)
# define R300_COLORFORMAT_ARGB32323232 (7 << 21)
# define R300_COLORFORMAT_I8 (9 << 21)
# define R300_COLORFORMAT_ARGB16161616 (10 << 21)
# define R300_COLORFORMAT_VYUY (11 << 21)
# define R300_COLORFORMAT_YVYU (12 << 21)
# define R300_COLORFORMAT_UV88 (13 << 21)
# define R300_COLORFORMAT_ARGB4444 (15 << 21)
#define R300_RB3D_AARESOLVE_CTL 0x4e88
#define R300_RB3D_COLOR_CHANNEL_MASK 0x4e0c #define R300_RB3D_COLOR_CHANNEL_MASK 0x4e0c
# define R300_BLUE_MASK_EN (1 << 0) # define R300_BLUE_MASK_EN (1 << 0)
# define R300_GREEN_MASK_EN (1 << 1) # define R300_GREEN_MASK_EN (1 << 1)


@ -148,7 +148,9 @@ int xgi_submit_cmdlist(struct drm_device * dev, void * data,
} }
info->cmdring.last_ptr = xgi_find_pcie_virt(info, pCmdInfo->hw_addr); info->cmdring.last_ptr = xgi_find_pcie_virt(info, pCmdInfo->hw_addr);
#ifdef XGI_HAVE_FENCE
drm_fence_flush_old(info->dev, 0, info->next_sequence); drm_fence_flush_old(info->dev, 0, info->next_sequence);
#endif /* XGI_HAVE_FENCE */
return 0; return 0;
} }


@ -37,7 +37,9 @@ static struct pci_device_id pciidlist[] = {
xgi_PCI_IDS xgi_PCI_IDS
}; };
#ifdef XGI_HAVE_FENCE
extern struct drm_fence_driver xgi_fence_driver; extern struct drm_fence_driver xgi_fence_driver;
#endif /* XGI_HAVE_FENCE */
int xgi_bootstrap(struct drm_device *, void *, struct drm_file *); int xgi_bootstrap(struct drm_device *, void *, struct drm_file *);
@ -47,6 +49,8 @@ static struct drm_ioctl_desc xgi_ioctls[] = {
DRM_IOCTL_DEF(DRM_XGI_FREE, xgi_free_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_XGI_FREE, xgi_free_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_XGI_SUBMIT_CMDLIST, xgi_submit_cmdlist, DRM_AUTH), DRM_IOCTL_DEF(DRM_XGI_SUBMIT_CMDLIST, xgi_submit_cmdlist, DRM_AUTH),
DRM_IOCTL_DEF(DRM_XGI_STATE_CHANGE, xgi_state_change_ioctl, DRM_AUTH|DRM_MASTER), DRM_IOCTL_DEF(DRM_XGI_STATE_CHANGE, xgi_state_change_ioctl, DRM_AUTH|DRM_MASTER),
DRM_IOCTL_DEF(DRM_XGI_SET_FENCE, xgi_set_fence_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_XGI_WAIT_FENCE, xgi_wait_fence_ioctl, DRM_AUTH),
}; };
static const int xgi_max_ioctl = DRM_ARRAY_SIZE(xgi_ioctls); static const int xgi_max_ioctl = DRM_ARRAY_SIZE(xgi_ioctls);
@ -58,6 +62,7 @@ static void xgi_driver_lastclose(struct drm_device * dev);
static void xgi_reclaim_buffers_locked(struct drm_device * dev, static void xgi_reclaim_buffers_locked(struct drm_device * dev,
struct drm_file * filp); struct drm_file * filp);
static irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS); static irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS);
static int xgi_kern_isr_postinstall(struct drm_device * dev);
static struct drm_driver driver = { static struct drm_driver driver = {
@ -70,7 +75,7 @@ static struct drm_driver driver = {
.lastclose = xgi_driver_lastclose, .lastclose = xgi_driver_lastclose,
.dma_quiescent = NULL, .dma_quiescent = NULL,
.irq_preinstall = NULL, .irq_preinstall = NULL,
.irq_postinstall = NULL, .irq_postinstall = xgi_kern_isr_postinstall,
.irq_uninstall = NULL, .irq_uninstall = NULL,
.irq_handler = xgi_kern_isr, .irq_handler = xgi_kern_isr,
.reclaim_buffers = drm_core_reclaim_buffers, .reclaim_buffers = drm_core_reclaim_buffers,
@ -100,7 +105,9 @@ static struct drm_driver driver = {
.remove = __devexit_p(drm_cleanup_pci), .remove = __devexit_p(drm_cleanup_pci),
}, },
#ifdef XGI_HAVE_FENCE
.fence_driver = &xgi_fence_driver, .fence_driver = &xgi_fence_driver,
#endif /* XGI_HAVE_FENCE */
.name = DRIVER_NAME, .name = DRIVER_NAME,
.desc = DRIVER_DESC, .desc = DRIVER_DESC,
@ -355,7 +362,10 @@ irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS)
DRM_WRITE32(info->mmio_map, DRM_WRITE32(info->mmio_map,
0x2800 + M2REG_AUTO_LINK_SETTING_ADDRESS, 0x2800 + M2REG_AUTO_LINK_SETTING_ADDRESS,
cpu_to_le32(M2REG_AUTO_LINK_SETTING_COMMAND | irq_bits)); cpu_to_le32(M2REG_AUTO_LINK_SETTING_COMMAND | irq_bits));
#ifdef XGI_HAVE_FENCE
xgi_fence_handler(dev); xgi_fence_handler(dev);
#endif /* XGI_HAVE_FENCE */
DRM_WAKEUP(&info->fence_queue);
return IRQ_HANDLED; return IRQ_HANDLED;
} else { } else {
return IRQ_NONE; return IRQ_NONE;
@ -363,6 +373,15 @@ irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS)
} }
int xgi_kern_isr_postinstall(struct drm_device * dev)
{
struct xgi_info *info = dev->dev_private;
DRM_INIT_WAITQUEUE(&info->fence_queue);
return 0;
}
int xgi_driver_load(struct drm_device *dev, unsigned long flags) int xgi_driver_load(struct drm_device *dev, unsigned long flags)
{ {
struct xgi_info *info = drm_alloc(sizeof(*info), DRM_MEM_DRIVER); struct xgi_info *info = drm_alloc(sizeof(*info), DRM_MEM_DRIVER);


@ -35,11 +35,11 @@
#define DRIVER_NAME "xgi" #define DRIVER_NAME "xgi"
#define DRIVER_DESC "XGI XP5 / XP10 / XG47" #define DRIVER_DESC "XGI XP5 / XP10 / XG47"
#define DRIVER_DATE "20071003" #define DRIVER_DATE "20080612"
#define DRIVER_MAJOR 1 #define DRIVER_MAJOR 1
#define DRIVER_MINOR 1 #define DRIVER_MINOR 2
#define DRIVER_PATCHLEVEL 3 #define DRIVER_PATCHLEVEL 0
#include "xgi_cmdlist.h" #include "xgi_cmdlist.h"
#include "xgi_drm.h" #include "xgi_drm.h"
@ -74,6 +74,7 @@ struct xgi_info {
struct xgi_cmdring_info cmdring; struct xgi_cmdring_info cmdring;
DRM_SPINTYPE fence_lock; DRM_SPINTYPE fence_lock;
wait_queue_head_t fence_queue;
unsigned complete_sequence; unsigned complete_sequence;
unsigned next_sequence; unsigned next_sequence;
}; };
@ -86,7 +87,7 @@ extern int xgi_fb_heap_init(struct xgi_info * info);
extern int xgi_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, extern int xgi_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc,
struct drm_file * filp); struct drm_file * filp);
extern int xgi_free(struct xgi_info * info, unsigned long index, extern int xgi_free(struct xgi_info * info, unsigned int index,
struct drm_file * filp); struct drm_file * filp);
extern int xgi_pcie_heap_init(struct xgi_info * info); extern int xgi_pcie_heap_init(struct xgi_info * info);
@ -98,12 +99,24 @@ extern void xgi_disable_mmio(struct xgi_info * info);
extern void xgi_enable_ge(struct xgi_info * info); extern void xgi_enable_ge(struct xgi_info * info);
extern void xgi_disable_ge(struct xgi_info * info); extern void xgi_disable_ge(struct xgi_info * info);
/* TTM-style fences.
*/
#ifdef XGI_HAVE_FENCE
extern void xgi_poke_flush(struct drm_device * dev, uint32_t class); extern void xgi_poke_flush(struct drm_device * dev, uint32_t class);
extern int xgi_fence_emit_sequence(struct drm_device * dev, uint32_t class, extern int xgi_fence_emit_sequence(struct drm_device * dev, uint32_t class,
uint32_t flags, uint32_t * sequence, uint32_t * native_type); uint32_t flags, uint32_t * sequence, uint32_t * native_type);
extern void xgi_fence_handler(struct drm_device * dev); extern void xgi_fence_handler(struct drm_device * dev);
extern int xgi_fence_has_irq(struct drm_device *dev, uint32_t class, extern int xgi_fence_has_irq(struct drm_device *dev, uint32_t class,
uint32_t flags); uint32_t flags);
#endif /* XGI_HAVE_FENCE */
/* Non-TTM-style fences.
*/
extern int xgi_set_fence_ioctl(struct drm_device * dev, void * data,
struct drm_file * filp);
extern int xgi_wait_fence_ioctl(struct drm_device * dev, void * data,
struct drm_file * filp);
extern int xgi_alloc_ioctl(struct drm_device * dev, void * data, extern int xgi_alloc_ioctl(struct drm_device * dev, void * data,
struct drm_file * filp); struct drm_file * filp);


@ -93,7 +93,7 @@ int xgi_alloc_ioctl(struct drm_device * dev, void * data,
} }
int xgi_free(struct xgi_info * info, unsigned long index, int xgi_free(struct xgi_info * info, unsigned int index,
struct drm_file * filp) struct drm_file * filp)
{ {
int err; int err;
@ -111,7 +111,7 @@ int xgi_free_ioctl(struct drm_device * dev, void * data,
{ {
struct xgi_info *info = dev->dev_private; struct xgi_info *info = dev->dev_private;
return xgi_free(info, *(unsigned long *) data, filp); return xgi_free(info, *(unsigned int *) data, filp);
} }


@ -30,6 +30,76 @@
#include "xgi_misc.h" #include "xgi_misc.h"
#include "xgi_cmdlist.h" #include "xgi_cmdlist.h"
static int xgi_low_level_fence_emit(struct drm_device *dev, u32 *sequence)
{
struct xgi_info *const info = dev->dev_private;
if (info == NULL) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
DRM_SPINLOCK(&info->fence_lock);
info->next_sequence++;
if (info->next_sequence > BEGIN_BEGIN_IDENTIFICATION_MASK) {
info->next_sequence = 1;
}
*sequence = (u32) info->next_sequence;
DRM_SPINUNLOCK(&info->fence_lock);
xgi_emit_irq(info);
return 0;
}
#define GET_BEGIN_ID(i) (le32_to_cpu(DRM_READ32((i)->mmio_map, 0x2820)) \
& BEGIN_BEGIN_IDENTIFICATION_MASK)
static int xgi_low_level_fence_wait(struct drm_device *dev, unsigned *sequence)
{
struct xgi_info *const info = dev->dev_private;
unsigned int cur_fence;
int ret = 0;
if (info == NULL) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
/* The comparison below is modular: wake up as soon as the hardware
 * counter is at, or up to 2^18 values past, the requested sequence.
 * A caller that arrives after its fence has already been retired is
 * treated as having just missed it, rather than being made to wait
 * for the counter to wrap all the way around.
 */
DRM_WAIT_ON(ret, info->fence_queue, 3 * DRM_HZ,
((((cur_fence = GET_BEGIN_ID(info))
- *sequence) & BEGIN_BEGIN_IDENTIFICATION_MASK)
<= (1 << 18)));
info->complete_sequence = cur_fence;
*sequence = cur_fence;
return ret;
}
int xgi_set_fence_ioctl(struct drm_device * dev, void * data,
struct drm_file * filp)
{
(void) filp;
return xgi_low_level_fence_emit(dev, (u32 *) data);
}
int xgi_wait_fence_ioctl(struct drm_device * dev, void * data,
struct drm_file * filp)
{
(void) filp;
return xgi_low_level_fence_wait(dev, (u32 *) data);
}
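A sketch of how a kernel-side caller could pair the two low-level helpers above (hypothetical function, shown only to illustrate the emit/wait protocol that the two ioctls expose to userspace one step at a time):
static int xgi_emit_and_wait_fence(struct drm_device *dev)
{
        u32 seq;
        int err;

        /* emit a new sequence number plus the IRQ that advances BEGIN_ID */
        err = xgi_low_level_fence_emit(dev, &seq);
        if (err)
                return err;

        /* sleeps for up to 3 seconds; on return seq holds the last retired id */
        return xgi_low_level_fence_wait(dev, &seq);
}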
#ifdef XGI_HAVE_FENCE
static void xgi_fence_poll(struct drm_device * dev, uint32_t class, static void xgi_fence_poll(struct drm_device * dev, uint32_t class,
uint32_t waiting_types) uint32_t waiting_types)
{ {
@ -68,25 +138,18 @@ int xgi_fence_emit_sequence(struct drm_device * dev, uint32_t class,
uint32_t flags, uint32_t * sequence, uint32_t flags, uint32_t * sequence,
uint32_t * native_type) uint32_t * native_type)
{ {
struct xgi_info * info = dev->dev_private; int err;
if ((info == NULL) || (class != 0)) (void) flags;
if (class != 0)
return -EINVAL; return -EINVAL;
err = xgi_low_level_fence_emit(dev, sequence);
if (err)
return err;
DRM_SPINLOCK(&info->fence_lock);
info->next_sequence++;
if (info->next_sequence > BEGIN_BEGIN_IDENTIFICATION_MASK) {
info->next_sequence = 1;
}
DRM_SPINUNLOCK(&info->fence_lock);
xgi_emit_irq(info);
*sequence = (uint32_t) info->next_sequence;
*native_type = DRM_FENCE_TYPE_EXE; *native_type = DRM_FENCE_TYPE_EXE;
return 0; return 0;
} }
@ -120,3 +183,4 @@ struct drm_fence_driver xgi_fence_driver = {
.wait = NULL .wait = NULL
}; };
#endif /* XGI_HAVE_FENCE */


@ -1330,7 +1330,7 @@ struct drm_mode_crtc_lut {
#define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, struct drm_agp_binding) #define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, struct drm_agp_binding)
#define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, struct drm_agp_binding) #define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, struct drm_agp_binding)
#define DRM_IOCTL_SG_ALLOC DRM_IOW( 0x38, struct drm_scatter_gather) #define DRM_IOCTL_SG_ALLOC DRM_IOWR(0x38, struct drm_scatter_gather)
#define DRM_IOCTL_SG_FREE DRM_IOW( 0x39, struct drm_scatter_gather) #define DRM_IOCTL_SG_FREE DRM_IOW( 0x39, struct drm_scatter_gather)
#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, union drm_wait_vblank) #define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, union drm_wait_vblank)


@ -83,7 +83,6 @@
0x1002 0x5460 CHIP_RV380|RADEON_IS_MOBILITY "ATI Radeon Mobility X300 M22" 0x1002 0x5460 CHIP_RV380|RADEON_IS_MOBILITY "ATI Radeon Mobility X300 M22"
0x1002 0x5462 CHIP_RV380|RADEON_IS_MOBILITY "ATI Radeon Mobility X600 SE M24C" 0x1002 0x5462 CHIP_RV380|RADEON_IS_MOBILITY "ATI Radeon Mobility X600 SE M24C"
0x1002 0x5464 CHIP_RV380|RADEON_IS_MOBILITY "ATI FireGL M22 GL 5464" 0x1002 0x5464 CHIP_RV380|RADEON_IS_MOBILITY "ATI FireGL M22 GL 5464"
0x1002 0x5657 CHIP_RV380|RADEON_NEW_MEMMAP "ATI Radeon RV370 X550XTX"
0x1002 0x5548 CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R423 X800" 0x1002 0x5548 CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R423 X800"
0x1002 0x5549 CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R423 X800 Pro" 0x1002 0x5549 CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R423 X800 Pro"
0x1002 0x554A CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R423 X800 XT PE" 0x1002 0x554A CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R423 X800 XT PE"
@ -101,9 +100,10 @@
0x1002 0x564F CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Radeon Mobility X700 XL M26" 0x1002 0x564F CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Radeon Mobility X700 XL M26"
0x1002 0x5652 CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Radeon Mobility X700 M26" 0x1002 0x5652 CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Radeon Mobility X700 M26"
0x1002 0x5653 CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Radeon Mobility X700 M26" 0x1002 0x5653 CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Radeon Mobility X700 M26"
0x1002 0x5657 CHIP_RV410|RADEON_NEW_MEMMAP "ATI Radeon X550XTX"
0x1002 0x5834 CHIP_RS300|RADEON_IS_IGP "ATI Radeon RS300 9100 IGP" 0x1002 0x5834 CHIP_RS300|RADEON_IS_IGP "ATI Radeon RS300 9100 IGP"
0x1002 0x5835 CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY "ATI Radeon RS300 Mobility IGP" 0x1002 0x5835 CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY "ATI Radeon RS300 Mobility IGP"
0x1002 0x5954 CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART "ATI RS480 XPRESS 200G" 0x1002 0x5954 CHIP_RS480|RADEON_IS_IGP|RADEON_IS_IGPGART "ATI RS480 XPRESS 200G"
0x1002 0x5955 CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART "ATI Radeon XPRESS 200M 5955" 0x1002 0x5955 CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART "ATI Radeon XPRESS 200M 5955"
0x1002 0x5974 CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART "ATI Radeon RS482 XPRESS 200" 0x1002 0x5974 CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART "ATI Radeon RS482 XPRESS 200"
0x1002 0x5975 CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART "ATI Radeon RS485 XPRESS 1100 IGP" 0x1002 0x5975 CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART "ATI Radeon RS485 XPRESS 1100 IGP"
@ -113,8 +113,10 @@
0x1002 0x5964 CHIP_RV280 "ATI Radeon RV280 9200 SE" 0x1002 0x5964 CHIP_RV280 "ATI Radeon RV280 9200 SE"
0x1002 0x5965 CHIP_RV280 "ATI FireMV 2200 PCI" 0x1002 0x5965 CHIP_RV280 "ATI FireMV 2200 PCI"
0x1002 0x5969 CHIP_RV100 "ATI ES1000 RN50" 0x1002 0x5969 CHIP_RV100 "ATI ES1000 RN50"
0x1002 0x5a61 CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART "ATI Radeon RC410 XPRESS 200" 0x1002 0x5a41 CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART "ATI Radeon XPRESS 200 5A41 (PCIE)"
0x1002 0x5a62 CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART "ATI Radeon RC410 XPRESS 200M" 0x1002 0x5a42 CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART "ATI Radeon XPRESS 200M 5A42 (PCIE)"
0x1002 0x5a61 CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART "ATI Radeon RC410 XPRESS 200"
0x1002 0x5a62 CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART "ATI Radeon RC410 XPRESS 200M"
0x1002 0x5b60 CHIP_RV380|RADEON_NEW_MEMMAP "ATI Radeon RV370 X300 SE" 0x1002 0x5b60 CHIP_RV380|RADEON_NEW_MEMMAP "ATI Radeon RV370 X300 SE"
0x1002 0x5b62 CHIP_RV380|RADEON_NEW_MEMMAP "ATI Radeon RV370 X600 Pro" 0x1002 0x5b62 CHIP_RV380|RADEON_NEW_MEMMAP "ATI Radeon RV370 X600 Pro"
0x1002 0x5b63 CHIP_RV380|RADEON_NEW_MEMMAP "ATI Radeon RV370 X550" 0x1002 0x5b63 CHIP_RV380|RADEON_NEW_MEMMAP "ATI Radeon RV370 X550"
@ -396,7 +398,10 @@
0x8086 0x29C2 CHIP_I9XX|CHIP_I915 "Intel G33" 0x8086 0x29C2 CHIP_I9XX|CHIP_I915 "Intel G33"
0x8086 0x29B2 CHIP_I9XX|CHIP_I915 "Intel Q35" 0x8086 0x29B2 CHIP_I9XX|CHIP_I915 "Intel Q35"
0x8086 0x29D2 CHIP_I9XX|CHIP_I915 "Intel Q33" 0x8086 0x29D2 CHIP_I9XX|CHIP_I915 "Intel Q33"
0x8086 0x2A42 CHIP_I9XX|CHIP_I965 "Intel Integrated Graphics Device" 0x8086 0x2A42 CHIP_I9XX|CHIP_I965 "Mobile Intel® GM45 Express Chipset"
0x8086 0x2E02 CHIP_I9XX|CHIP_I965 "Intel Integrated Graphics Device"
0x8086 0x2E12 CHIP_I9XX|CHIP_I965 "Intel Q45/Q43"
0x8086 0x2E22 CHIP_I9XX|CHIP_I965 "Intel G45/G43"
[imagine] [imagine]
0x105d 0x2309 IMAGINE_128 "Imagine 128" 0x105d 0x2309 IMAGINE_128 "Imagine 128"


@ -1049,6 +1049,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
" G33 hw status page\n"); " G33 hw status page\n");
return -ENOMEM; return -ENOMEM;
} }
dev_priv->hws_vaddr = dev_priv->hws_map.handle; dev_priv->hws_vaddr = dev_priv->hws_map.handle;
memset(dev_priv->hws_vaddr, 0, PAGE_SIZE); memset(dev_priv->hws_vaddr, 0, PAGE_SIZE);


@ -107,6 +107,23 @@ struct drm_i915_vbl_swap {
struct drm_minor *minor; struct drm_minor *minor;
}; };
#ifdef __linux__
struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
struct opregion_asle;
struct intel_opregion {
struct opregion_header *header;
struct opregion_acpi *acpi;
struct opregion_swsci *swsci;
struct opregion_asle *asle;
int enabled;
};
#endif
struct drm_i915_master_private { struct drm_i915_master_private {
drm_local_map_t *sarea; drm_local_map_t *sarea;
struct drm_i915_sarea *sarea_priv; struct drm_i915_sarea *sarea_priv;
@ -266,6 +283,10 @@ struct drm_i915_private {
struct work_struct user_interrupt_task; struct work_struct user_interrupt_task;
#ifdef __linux__
struct intel_opregion opregion;
#endif
/* Register state */ /* Register state */
u8 saveLBB; u8 saveLBB;
u32 saveDSPACNTR; u32 saveDSPACNTR;
@ -480,6 +501,7 @@ extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
extern void i915_driver_irq_preinstall(struct drm_device * dev); extern void i915_driver_irq_preinstall(struct drm_device * dev);
extern int i915_driver_irq_postinstall(struct drm_device * dev); extern int i915_driver_irq_postinstall(struct drm_device * dev);
extern void i915_driver_irq_uninstall(struct drm_device * dev); extern void i915_driver_irq_uninstall(struct drm_device * dev);
extern void i915_enable_interrupt(struct drm_device *dev);
extern int i915_vblank_pipe_set(struct drm_device *dev, void *data, extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
struct drm_file *file_priv); struct drm_file *file_priv);
extern int i915_vblank_pipe_get(struct drm_device *dev, void *data, extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
@ -508,6 +530,11 @@ extern void i915_mem_takedown(struct mem_block **heap);
extern void i915_mem_release(struct drm_device * dev, extern void i915_mem_release(struct drm_device * dev,
struct drm_file *file_priv, struct drm_file *file_priv,
struct mem_block *heap); struct mem_block *heap);
/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);
#ifdef I915_HAVE_FENCE #ifdef I915_HAVE_FENCE
/* i915_fence.c */ /* i915_fence.c */
extern void i915_fence_handler(struct drm_device *dev); extern void i915_fence_handler(struct drm_device *dev);
@ -579,6 +606,14 @@ void i915_gem_retire_work_handler(struct work_struct *work);
extern unsigned int i915_fbpercrtc; extern unsigned int i915_fbpercrtc;
#ifdef __linux__
/* i915_opregion.c */
extern int intel_opregion_init(struct drm_device *dev);
extern void intel_opregion_free(struct drm_device *dev);
extern void opregion_asle_intr(struct drm_device *dev);
extern void opregion_enable_asle(struct drm_device *dev);
#endif
#ifdef __linux__ #ifdef __linux__
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
extern void intel_init_chipset_flush_compat(struct drm_device *dev); extern void intel_init_chipset_flush_compat(struct drm_device *dev);
@ -596,12 +631,20 @@ extern void intel_modeset_cleanup(struct drm_device *dev);
#define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val)) #define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
#define I915_READ16(reg) DRM_READ16(dev_priv->mmio_map, (reg)) #define I915_READ16(reg) DRM_READ16(dev_priv->mmio_map, (reg))
#define I915_WRITE16(reg,val) DRM_WRITE16(dev_priv->mmio_map, (reg), (val)) #define I915_WRITE16(reg,val) DRM_WRITE16(dev_priv->mmio_map, (reg), (val))
#define I915_READ8(reg) DRM_READ8(dev_priv->mmio_map, (reg))
#define I915_WRITE8(reg,val) DRM_WRITE8(dev_priv->mmio_map, (reg), (val))
#if defined(__FreeBSD__)
typedef boolean_t bool;
#endif
#define I915_VERBOSE 0 #define I915_VERBOSE 0
#define I915_RING_VALIDATE 0 #define I915_RING_VALIDATE 0
#define PRIMARY_RINGBUFFER_SIZE (128*1024) #define PRIMARY_RINGBUFFER_SIZE (128*1024)
#define PRIMARY_RINGBUFFER_SIZE (128*1024)
#define RING_LOCALS unsigned int outring, ringmask, outcount; \ #define RING_LOCALS unsigned int outring, ringmask, outcount; \
volatile char *virt; volatile char *virt;
@ -740,7 +783,8 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0) #define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0)
#define MI_REPORT_HEAD MI_INSTR(0x07, 0) #define MI_REPORT_HEAD MI_INSTR(0x07, 0)
#define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0) #define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0)
#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) /* used to have 1<<22? */ #define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1) #define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
#define MI_STORE_DWORD_INDEX_SHIFT 2 #define MI_STORE_DWORD_INDEX_SHIFT 2
#define MI_LOAD_REGISTER_IMM MI_INSTR(0x22, 1) #define MI_LOAD_REGISTER_IMM MI_INSTR(0x22, 1)
@ -805,8 +849,8 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define BLT_DEPTH_16_1555 (2<<24) #define BLT_DEPTH_16_1555 (2<<24)
#define BLT_DEPTH_32 (3<<24) #define BLT_DEPTH_32 (3<<24)
#define BLT_ROP_GXCOPY (0xcc<<16) #define BLT_ROP_GXCOPY (0xcc<<16)
#define XY_SRC_COPY_BLT_SRC_TILED (1<<15) #define XY_SRC_COPY_BLT_SRC_TILED (1<<15) /* 965+ only */
#define XY_SRC_COPY_BLT_DST_TILED (1<<11) #define XY_SRC_COPY_BLT_DST_TILED (1<<11) /* 965+ only */
#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2) #define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
#define ASYNC_FLIP (1<<22) #define ASYNC_FLIP (1<<22)
#define DISPLAY_PLANE_A (0<<20) #define DISPLAY_PLANE_A (0<<20)
@ -862,6 +906,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1<<4) #define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1<<4)
#define I915_DEBUG_INTERRUPT (1<<2) #define I915_DEBUG_INTERRUPT (1<<2)
#define I915_USER_INTERRUPT (1<<1) #define I915_USER_INTERRUPT (1<<1)
#define I915_ASLE_INTERRUPT (1<<0)
#define EIR 0x020b0 #define EIR 0x020b0
#define EMR 0x020b4 #define EMR 0x020b4
#define ESR 0x020b8 #define ESR 0x020b8
@ -2050,11 +2095,18 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
(dev)->pci_device == 0x29A2 || \ (dev)->pci_device == 0x29A2 || \
(dev)->pci_device == 0x2A02 || \ (dev)->pci_device == 0x2A02 || \
(dev)->pci_device == 0x2A12 || \ (dev)->pci_device == 0x2A12 || \
(dev)->pci_device == 0x2A42) (dev)->pci_device == 0x2A42 || \
(dev)->pci_device == 0x2E02 || \
(dev)->pci_device == 0x2E12 || \
(dev)->pci_device == 0x2E22)
#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02) #define IS_I965GM(dev) ((dev)->pci_device == 0x2A02)
#define IS_IGD_GM(dev) ((dev)->pci_device == 0x2A42) #define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
#define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \
(dev)->pci_device == 0x2E12 || \
(dev)->pci_device == 0x2E22)
#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \ #define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \
(dev)->pci_device == 0x29B2 || \ (dev)->pci_device == 0x29B2 || \
@ -2064,8 +2116,8 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev)) IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev))
#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \ #define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
IS_I945GM(dev) || IS_I965GM(dev) || IS_IGD_GM(dev)) IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev))
#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_IGD_GM(dev)) #define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev))
#endif #endif


@ -269,7 +269,8 @@ out:
int i915_driver_load(struct drm_device *dev, unsigned long flags) int i915_driver_load(struct drm_device *dev, unsigned long flags)
{ {
struct drm_i915_private *dev_priv; struct drm_i915_private *dev_priv;
int ret = 0; int ret = 0, num_pipes = 2;
u32 tmp;
dev_priv = drm_alloc(sizeof(struct drm_i915_private), DRM_MEM_DRIVER); dev_priv = drm_alloc(sizeof(struct drm_i915_private), DRM_MEM_DRIVER);
if (dev_priv == NULL) if (dev_priv == NULL)
@ -333,10 +334,51 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
#ifdef __linux__ #ifdef __linux__
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
intel_init_chipset_flush_compat(dev); intel_init_chipset_flush_compat(dev);
#endif
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)
intel_opregion_init(dev);
#endif #endif
#endif #endif
tmp = I915_READ(PIPEASTAT);
I915_WRITE(PIPEASTAT, tmp);
tmp = I915_READ(PIPEBSTAT);
I915_WRITE(PIPEBSTAT, tmp);
atomic_set(&dev_priv->irq_received, 0);
I915_WRITE(HWSTAM, 0xeffe);
I915_WRITE(IMR, 0x0);
I915_WRITE(IER, 0x0);
DRM_SPININIT(&dev_priv->swaps_lock, "swap");
INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
dev_priv->swaps_pending = 0;
DRM_SPININIT(&dev_priv->user_irq_lock, "userirq");
dev_priv->user_irq_refcount = 0;
dev_priv->irq_mask_reg = ~0;
ret = drm_vblank_init(dev, num_pipes);
if (ret)
goto out_rmmap;
ret = drm_hotplug_init(dev);
if (ret)
goto out_rmmap;
dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
i915_enable_interrupt(dev);
DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
/*
* Initialize the hardware status page IRQ location.
*/
I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
if (drm_core_check_feature(dev, DRIVER_MODESET)) { if (drm_core_check_feature(dev, DRIVER_MODESET)) {
ret = i915_load_modeset_init(dev); ret = i915_load_modeset_init(dev);
if (ret < 0) { if (ret < 0) {
@ -344,6 +386,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto out_rmmap; goto out_rmmap;
} }
} }
return 0; return 0;
out_rmmap: out_rmmap:
@ -357,6 +400,23 @@ int i915_driver_unload(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
u32 temp;
dev_priv->vblank_pipe = 0;
dev_priv->irq_enabled = 0;
I915_WRITE(HWSTAM, 0xffffffff);
I915_WRITE(IMR, 0xffffffff);
I915_WRITE(IER, 0x0);
temp = I915_READ(PIPEASTAT);
I915_WRITE(PIPEASTAT, temp);
temp = I915_READ(PIPEBSTAT);
I915_WRITE(PIPEBSTAT, temp);
temp = I915_READ(IIR);
I915_WRITE(IIR, temp);
I915_WRITE(PRB0_CTL, 0); I915_WRITE(PRB0_CTL, 0);
if (drm_core_check_feature(dev, DRIVER_MODESET)) { if (drm_core_check_feature(dev, DRIVER_MODESET)) {
@ -395,15 +455,17 @@ int i915_driver_unload(struct drm_device *dev)
i915_gem_lastclose(dev); i915_gem_lastclose(dev);
} }
#ifdef __linux__
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
intel_init_chipset_flush_compat(dev);
#endif
#endif
DRM_DEBUG("%p\n", dev_priv->mmio_map);
drm_rmmap(dev, dev_priv->mmio_map); drm_rmmap(dev, dev_priv->mmio_map);
#ifdef __linux__
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)
intel_opregion_free(dev);
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
intel_fini_chipset_flush_compat(dev);
#endif
#endif
drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER); drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
dev->dev_private = NULL; dev->dev_private = NULL;


@ -192,11 +192,11 @@ static void i915_vblank_tasklet(struct drm_device *dev)
u32 ropcpp = (0xcc << 16) | ((cpp - 1) << 24); u32 ropcpp = (0xcc << 16) | ((cpp - 1) << 24);
RING_LOCALS; RING_LOCALS;
if (sarea_priv->front_tiled) { if (IS_I965G(dev) && sarea_priv->front_tiled) {
cmd |= XY_SRC_COPY_BLT_DST_TILED; cmd |= XY_SRC_COPY_BLT_DST_TILED;
dst_pitch >>= 2; dst_pitch >>= 2;
} }
if (sarea_priv->back_tiled) { if (IS_I965G(dev) && sarea_priv->back_tiled) {
cmd |= XY_SRC_COPY_BLT_SRC_TILED; cmd |= XY_SRC_COPY_BLT_SRC_TILED;
src_pitch >>= 2; src_pitch >>= 2;
} }
@ -394,28 +394,7 @@ static void i915_vblank_tasklet(struct drm_device *dev)
drm_free(swap_hit, sizeof(*swap_hit), DRM_MEM_DRIVER); drm_free(swap_hit, sizeof(*swap_hit), DRM_MEM_DRIVER);
} }
} }
#if 0
static int i915_in_vblank(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
unsigned long pipedsl, vblank, vtotal;
unsigned long vbl_start, vbl_end, cur_line;
pipedsl = pipe ? PIPEBDSL : PIPEADSL;
vblank = pipe ? VBLANK_B : VBLANK_A;
vtotal = pipe ? VTOTAL_B : VTOTAL_A;
vbl_start = I915_READ(vblank) & VBLANK_START_MASK;
vbl_end = (I915_READ(vblank) >> VBLANK_END_SHIFT) & VBLANK_END_MASK;
cur_line = I915_READ(pipedsl);
if (cur_line >= vbl_start)
return 1;
return 0;
}
#endif
u32 i915_get_vblank_counter(struct drm_device *dev, int plane) u32 i915_get_vblank_counter(struct drm_device *dev, int plane)
{ {
struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private; struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
@ -449,22 +428,6 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int plane)
count = (high1 << 8) | low; count = (high1 << 8) | low;
/*
* If we're in the middle of the vblank period, the
* above regs won't have been updated yet, so return
* an incremented count to stay accurate
*/
#if 0
if (i915_in_vblank(dev, pipe))
count++;
#endif
/* count may be reset by other driver(e.g. 2D driver),
we have no way to know if it is wrapped or resetted
when count is zero. do a rough guess.
*/
if (count == 0 && dev->last_vblank[pipe] < dev->max_vblank_count/2)
dev->last_vblank[pipe] = 0;
return count; return count;
} }
@ -533,23 +496,12 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
if (dev->pdev->msi_enabled) if (dev->pdev->msi_enabled)
I915_WRITE(IMR, ~0); I915_WRITE(IMR, ~0);
iir = I915_READ(IIR); iir = I915_READ(IIR);
#if 0
DRM_DEBUG("flag=%08x\n", iir);
#endif
atomic_inc(&dev_priv->irq_received); atomic_inc(&dev_priv->irq_received);
if (iir == 0) { if (iir == 0) {
DRM_DEBUG ("iir 0x%08x im 0x%08x ie 0x%08x pipea 0x%08x pipeb 0x%08x\n",
iir,
I915_READ(IMR),
I915_READ(IER),
I915_READ(PIPEASTAT),
I915_READ(PIPEBSTAT));
if (dev->pdev->msi_enabled) { if (dev->pdev->msi_enabled) {
I915_WRITE(IMR, I915_WRITE(IMR, dev_priv->irq_mask_reg);
dev_priv->irq_mask_reg);
(void) I915_READ(IMR); (void) I915_READ(IMR);
} }
return IRQ_NONE;
} }
/* /*
@ -588,6 +540,16 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
} }
#ifdef __linux__
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)
if ((iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) &&
(pipeb_stats & I915_LEGACY_BLC_EVENT_ENABLE))
opregion_asle_intr(dev);
if (iir & I915_ASLE_INTERRUPT)
opregion_asle_intr(dev);
#endif
#endif
if (iir & I915_USER_INTERRUPT) { if (iir & I915_USER_INTERRUPT) {
dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev); dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
DRM_WAKEUP(&dev_priv->irq_queue); DRM_WAKEUP(&dev_priv->irq_queue);
@ -602,6 +564,11 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
drm_handle_vblank(dev, i915_get_plane(dev, 0)); drm_handle_vblank(dev, i915_get_plane(dev, 0));
} }
/* The vblank interrupt gets enabled even if we didn't ask for
it, so make sure it's shut down again */
if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B))
pipeb_stats &= ~(I915_VBLANK_INTERRUPT_ENABLE);
if (pipeb_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS| if (pipeb_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
PIPE_VBLANK_INTERRUPT_STATUS)) { PIPE_VBLANK_INTERRUPT_STATUS)) {
vblank++; vblank++;
@ -917,9 +884,16 @@ void i915_enable_interrupt (struct drm_device *dev)
} }
} }
#ifdef __linux__
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)
opregion_enable_asle(dev);
#endif
#endif
I915_WRITE(IMR, dev_priv->irq_mask_reg); I915_WRITE(IMR, dev_priv->irq_mask_reg);
I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK); I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK);
(void) I915_READ (IER); (void) I915_READ (IER);
dev_priv->irq_enabled = 1; dev_priv->irq_enabled = 1;
} }
@ -929,20 +903,12 @@ int i915_vblank_pipe_set(struct drm_device *dev, void *data,
struct drm_file *file_priv) struct drm_file *file_priv)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_vblank_pipe *pipe = data;
if (!dev_priv) { if (!dev_priv) {
DRM_ERROR("called with no initialization\n"); DRM_ERROR("called with no initialization\n");
return -EINVAL; return -EINVAL;
} }
if (pipe->pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) {
DRM_ERROR("called with invalid pipe 0x%x\n", pipe->pipe);
return -EINVAL;
}
dev_priv->vblank_pipe = pipe->pipe;
return 0; return 0;
} }
@ -1032,7 +998,13 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock, irqflags); DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock, irqflags);
drm_update_vblank_count(dev, pipe); /*
* We take the ref here and put it when the swap actually completes
* in the tasklet.
*/
ret = drm_vblank_get(dev, pipe);
if (ret)
return ret;
curseq = drm_vblank_count(dev, pipe); curseq = drm_vblank_count(dev, pipe);
if (seqtype == _DRM_VBLANK_RELATIVE) if (seqtype == _DRM_VBLANK_RELATIVE)
@ -1043,6 +1015,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
swap->sequence = curseq + 1; swap->sequence = curseq + 1;
} else { } else {
DRM_DEBUG("Missed target sequence\n"); DRM_DEBUG("Missed target sequence\n");
drm_vblank_put(dev, pipe);
return -EINVAL; return -EINVAL;
} }
} }
@ -1064,6 +1037,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
irqflags); irqflags);
DRM_DEBUG("Invalid drawable ID %d\n", DRM_DEBUG("Invalid drawable ID %d\n",
swap->drawable); swap->drawable);
drm_vblank_put(dev, pipe);
return -EINVAL; return -EINVAL;
} }
@ -1071,6 +1045,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock, irqflags); DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock, irqflags);
drm_vblank_put(dev, pipe);
return 0; return 0;
} }
} }
@ -1094,6 +1069,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
if (dev_priv->swaps_pending >= 100) { if (dev_priv->swaps_pending >= 100) {
DRM_DEBUG("Too many swaps queued\n"); DRM_DEBUG("Too many swaps queued\n");
drm_vblank_put(dev, pipe);
return -EBUSY; return -EBUSY;
} }
@ -1101,17 +1077,12 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
if (!vbl_swap) { if (!vbl_swap) {
DRM_ERROR("Failed to allocate memory to queue swap\n"); DRM_ERROR("Failed to allocate memory to queue swap\n");
drm_vblank_put(dev, pipe);
return -ENOMEM; return -ENOMEM;
} }
DRM_DEBUG("\n"); DRM_DEBUG("\n");
ret = drm_vblank_get(dev, pipe);
if (ret) {
drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
return ret;
}
vbl_swap->drw_id = swap->drawable; vbl_swap->drw_id = swap->drawable;
vbl_swap->plane = plane; vbl_swap->plane = plane;
vbl_swap->sequence = swap->sequence; vbl_swap->sequence = swap->sequence;
@ -1135,83 +1106,15 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
*/ */
void i915_driver_irq_preinstall(struct drm_device * dev) void i915_driver_irq_preinstall(struct drm_device * dev)
{ {
struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private; return;
u32 tmp;
tmp = I915_READ(PIPEASTAT);
I915_WRITE(PIPEASTAT, tmp);
tmp = I915_READ(PIPEBSTAT);
I915_WRITE(PIPEBSTAT, tmp);
atomic_set(&dev_priv->irq_received, 0);
I915_WRITE(HWSTAM, 0xffff);
I915_WRITE(IER, 0x0);
I915_WRITE(IMR, 0xffffffff);
I915_WRITE(IIR, 0xffffffff);
(void) I915_READ(IIR);
} }
int i915_driver_irq_postinstall(struct drm_device * dev) int i915_driver_irq_postinstall(struct drm_device * dev)
{ {
struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
int ret, num_pipes = 2;
DRM_SPININIT(&dev_priv->swaps_lock, "swap");
INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
dev_priv->swaps_pending = 0;
DRM_SPININIT(&dev_priv->user_irq_lock, "userirq");
dev_priv->user_irq_refcount = 0;
dev_priv->irq_mask_reg = ~0;
ret = drm_vblank_init(dev, num_pipes);
if (ret)
return ret;
ret = drm_hotplug_init(dev);
if (ret)
return ret;
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
i915_enable_interrupt(dev);
DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
/*
* Initialize the hardware status page IRQ location.
*/
I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
return 0; return 0;
} }
void i915_driver_irq_uninstall(struct drm_device * dev) void i915_driver_irq_uninstall(struct drm_device * dev)
{ {
struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private; return;
u32 temp;
if (!dev_priv)
return;
dev_priv->irq_enabled = 1;
temp = I915_READ(PIPEASTAT);
I915_WRITE(PIPEASTAT, temp);
temp = I915_READ(PIPEBSTAT);
I915_WRITE(PIPEBSTAT, temp);
if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
I915_WRITE(HWSTAM, 0xffffffff);
I915_WRITE(IMR, 0xffffffff);
I915_WRITE(IER, 0x0);
temp = I915_READ(IIR);
I915_WRITE(IIR, temp);
} else {
I915_WRITE16(HWSTAM, 0xffff);
I915_WRITE16(IMR, 0xffff);
I915_WRITE16(IER, 0x0);
temp = I915_READ16(IIR);
I915_WRITE16(IIR, temp);
}
} }
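
The i915_vblank_swap() hunks above pair an earlier drm_vblank_get() with a drm_vblank_put() on every early-return path, so the crtc's vblank reference count stays balanced. A minimal sketch of that shape, assuming the get happens before the checks shown here (the function name and the precondition_ok stand-in are illustrative, not from the patch):

static int vblank_swap_refcount_sketch(struct drm_device *dev, int pipe,
				       int precondition_ok)
{
	int ret = drm_vblank_get(dev, pipe);	/* take the reference early */
	if (ret)
		return ret;

	if (!precondition_ok) {
		drm_vblank_put(dev, pipe);	/* drop it on every error path */
		return -EINVAL;
	}

	/* On success the reference is held until the queued swap is
	 * completed from the vblank interrupt handler. */
	return 0;
}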

shared-core/i915_suspend.c (new file, 520 lines)

@ -0,0 +1,520 @@
/* i915_suspend.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
*/
/*
*
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (pipe == PIPE_A)
return (I915_READ(DPLL_A) & DPLL_VCO_ENABLE);
else
return (I915_READ(DPLL_B) & DPLL_VCO_ENABLE);
}
static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
u32 *array;
int i;
if (!i915_pipe_enabled(dev, pipe))
return;
if (pipe == PIPE_A)
array = dev_priv->save_palette_a;
else
array = dev_priv->save_palette_b;
for(i = 0; i < 256; i++)
array[i] = I915_READ(reg + (i << 2));
}
static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
u32 *array;
int i;
if (!i915_pipe_enabled(dev, pipe))
return;
if (pipe == PIPE_A)
array = dev_priv->save_palette_a;
else
array = dev_priv->save_palette_b;
for(i = 0; i < 256; i++)
I915_WRITE(reg + (i << 2), array[i]);
}
static u8 i915_read_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
I915_WRITE8(index_port, reg);
return I915_READ8(data_port);
}
static u8 i915_read_ar(struct drm_device *dev, u16 st01, u8 reg, u16 palette_enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
I915_READ8(st01);
I915_WRITE8(VGA_AR_INDEX, palette_enable | reg);
return I915_READ8(VGA_AR_DATA_READ);
}
static void i915_write_ar(struct drm_device *dev, u16 st01, u8 reg, u8 val, u16 palette_enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
I915_READ8(st01);
I915_WRITE8(VGA_AR_INDEX, palette_enable | reg);
I915_WRITE8(VGA_AR_DATA_WRITE, val);
}
static void i915_write_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg, u8 val)
{
struct drm_i915_private *dev_priv = dev->dev_private;
I915_WRITE8(index_port, reg);
I915_WRITE8(data_port, val);
}
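/*
 * Illustrative helper, not part of the patch: the index/data accessors
 * above are used in pairs, e.g. clearing the CRT controller write-protect
 * bit (CR11 bit 7) before touching CR0-CR7, as the save path below does.
 * The function name is hypothetical.
 */
static void i915_unprotect_cr_regs(struct drm_device *dev, u16 cr_index,
				   u16 cr_data)
{
	u8 cr11 = i915_read_indexed(dev, cr_index, cr_data, 0x11);

	i915_write_indexed(dev, cr_index, cr_data, 0x11, cr11 & ~0x80);
}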
static void i915_save_vga(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
u16 cr_index, cr_data, st01;
/* VGA color palette registers */
dev_priv->saveDACMASK = I915_READ8(VGA_DACMASK);
/* DACCRX automatically increments during read */
I915_WRITE8(VGA_DACRX, 0);
/* Read 3 bytes of color data from each index */
for (i = 0; i < 256 * 3; i++)
dev_priv->saveDACDATA[i] = I915_READ8(VGA_DACDATA);
/* MSR bits */
dev_priv->saveMSR = I915_READ8(VGA_MSR_READ);
if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
cr_index = VGA_CR_INDEX_CGA;
cr_data = VGA_CR_DATA_CGA;
st01 = VGA_ST01_CGA;
} else {
cr_index = VGA_CR_INDEX_MDA;
cr_data = VGA_CR_DATA_MDA;
st01 = VGA_ST01_MDA;
}
/* CRT controller regs */
i915_write_indexed(dev, cr_index, cr_data, 0x11,
i915_read_indexed(dev, cr_index, cr_data, 0x11) &
(~0x80));
for (i = 0; i <= 0x24; i++)
dev_priv->saveCR[i] =
i915_read_indexed(dev, cr_index, cr_data, i);
/* Make sure we don't turn off CR group 0 writes */
dev_priv->saveCR[0x11] &= ~0x80;
/* Attribute controller registers */
I915_READ8(st01);
dev_priv->saveAR_INDEX = I915_READ8(VGA_AR_INDEX);
for (i = 0; i <= 0x14; i++)
dev_priv->saveAR[i] = i915_read_ar(dev, st01, i, 0);
I915_READ8(st01);
I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX);
I915_READ8(st01);
/* Graphics controller registers */
for (i = 0; i < 9; i++)
dev_priv->saveGR[i] =
i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i);
dev_priv->saveGR[0x10] =
i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10);
dev_priv->saveGR[0x11] =
i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11);
dev_priv->saveGR[0x18] =
i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18);
/* Sequencer registers */
for (i = 0; i < 8; i++)
dev_priv->saveSR[i] =
i915_read_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i);
}
static void i915_restore_vga(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
u16 cr_index, cr_data, st01;
/* MSR bits */
I915_WRITE8(VGA_MSR_WRITE, dev_priv->saveMSR);
if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
cr_index = VGA_CR_INDEX_CGA;
cr_data = VGA_CR_DATA_CGA;
st01 = VGA_ST01_CGA;
} else {
cr_index = VGA_CR_INDEX_MDA;
cr_data = VGA_CR_DATA_MDA;
st01 = VGA_ST01_MDA;
}
/* Sequencer registers, don't write SR07 */
for (i = 0; i < 7; i++)
i915_write_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i,
dev_priv->saveSR[i]);
/* CRT controller regs */
/* Enable CR group 0 writes */
i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]);
for (i = 0; i <= 0x24; i++)
i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->saveCR[i]);
/* Graphics controller regs */
for (i = 0; i < 9; i++)
i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i,
dev_priv->saveGR[i]);
i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10,
dev_priv->saveGR[0x10]);
i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11,
dev_priv->saveGR[0x11]);
i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18,
dev_priv->saveGR[0x18]);
/* Attribute controller registers */
I915_READ8(st01); /* switch back to index mode */
for (i = 0; i <= 0x14; i++)
i915_write_ar(dev, st01, i, dev_priv->saveAR[i], 0);
I915_READ8(st01); /* switch back to index mode */
I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX | 0x20);
I915_READ8(st01);
/* VGA color palette registers */
I915_WRITE8(VGA_DACMASK, dev_priv->saveDACMASK);
/* DACCRX automatically increments during read */
I915_WRITE8(VGA_DACWX, 0);
/* Read 3 bytes of color data from each index */
for (i = 0; i < 256 * 3; i++)
I915_WRITE8(VGA_DACDATA, dev_priv->saveDACDATA[i]);
}
int i915_save_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
#if defined(__FreeBSD__)
dev_priv->saveLBB = (u8) pci_read_config(dev->device, LBB, 1);
#else
pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
#endif
/* Display arbitration control */
dev_priv->saveDSPARB = I915_READ(DSPARB);
/* Pipe & plane A info */
dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
dev_priv->saveFPA0 = I915_READ(FPA0);
dev_priv->saveFPA1 = I915_READ(FPA1);
dev_priv->saveDPLL_A = I915_READ(DPLL_A);
if (IS_I965G(dev))
dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
dev_priv->saveHSYNC_A = I915_READ(HSYNC_A);
dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
dev_priv->saveDSPACNTR = I915_READ(DSPACNTR);
dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE);
dev_priv->saveDSPASIZE = I915_READ(DSPASIZE);
dev_priv->saveDSPAPOS = I915_READ(DSPAPOS);
dev_priv->saveDSPAADDR = I915_READ(DSPAADDR);
if (IS_I965G(dev)) {
dev_priv->saveDSPASURF = I915_READ(DSPASURF);
dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
}
i915_save_palette(dev, PIPE_A);
dev_priv->savePIPEASTAT = I915_READ(PIPEASTAT);
/* Pipe & plane B info */
dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
dev_priv->saveFPB0 = I915_READ(FPB0);
dev_priv->saveFPB1 = I915_READ(FPB1);
dev_priv->saveDPLL_B = I915_READ(DPLL_B);
if (IS_I965G(dev))
dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
dev_priv->saveHSYNC_B = I915_READ(HSYNC_B);
dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B);
dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR);
dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE);
dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
dev_priv->saveDSPBADDR = I915_READ(DSPBADDR);
if (IS_I965GM(dev) || IS_GM45(dev)) {
dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
}
i915_save_palette(dev, PIPE_B);
dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT);
/* CRT state */
dev_priv->saveADPA = I915_READ(ADPA);
/* LVDS state */
dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
if (IS_I965G(dev))
dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
if (IS_MOBILE(dev) && !IS_I830(dev))
dev_priv->saveLVDS = I915_READ(LVDS);
if (!IS_I830(dev) && !IS_845G(dev))
dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR);
/* FIXME: save TV & SDVO state */
/* FBC state */
dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
/* Interrupt state */
dev_priv->saveIIR = I915_READ(IIR);
dev_priv->saveIER = I915_READ(IER);
dev_priv->saveIMR = I915_READ(IMR);
/* VGA state */
dev_priv->saveVGA0 = I915_READ(VGA0);
dev_priv->saveVGA1 = I915_READ(VGA1);
dev_priv->saveVGA_PD = I915_READ(VGA_PD);
dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
/* Clock gating state */
dev_priv->saveD_STATE = I915_READ(D_STATE);
dev_priv->saveCG_2D_DIS = I915_READ(CG_2D_DIS);
/* Cache mode state */
dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
/* Memory Arbitration state */
dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
/* Scratch space */
for (i = 0; i < 16; i++) {
dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2));
dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
}
for (i = 0; i < 3; i++)
dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
i915_save_vga(dev);
return 0;
}
int i915_restore_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
#if defined(__FreeBSD__)
pci_write_config(dev->device, LBB, dev_priv->saveLBB, 1);
#else
pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
#endif
I915_WRITE(DSPARB, dev_priv->saveDSPARB);
/* Pipe & plane A info */
/* Prime the clock */
if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
I915_WRITE(DPLL_A, dev_priv->saveDPLL_A &
~DPLL_VCO_ENABLE);
DRM_UDELAY(150);
}
I915_WRITE(FPA0, dev_priv->saveFPA0);
I915_WRITE(FPA1, dev_priv->saveFPA1);
/* Actually enable it */
I915_WRITE(DPLL_A, dev_priv->saveDPLL_A);
DRM_UDELAY(150);
if (IS_I965G(dev))
I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
DRM_UDELAY(150);
/* Restore mode */
I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A);
I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A);
I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A);
I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
/* Restore plane info */
I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);
I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS);
I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR);
I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
if (IS_I965G(dev)) {
I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
}
I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF);
i915_restore_palette(dev, PIPE_A);
/* Enable the plane */
I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR);
I915_WRITE(DSPAADDR, I915_READ(DSPAADDR));
/* Pipe & plane B info */
if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
I915_WRITE(DPLL_B, dev_priv->saveDPLL_B &
~DPLL_VCO_ENABLE);
DRM_UDELAY(150);
}
I915_WRITE(FPB0, dev_priv->saveFPB0);
I915_WRITE(FPB1, dev_priv->saveFPB1);
/* Actually enable it */
I915_WRITE(DPLL_B, dev_priv->saveDPLL_B);
DRM_UDELAY(150);
if (IS_I965G(dev))
I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
DRM_UDELAY(150);
/* Restore mode */
I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);
I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B);
I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B);
I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
/* Restore plane info */
I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE);
I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS);
I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR);
I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
if (IS_I965G(dev)) {
I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
}
I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF);
i915_restore_palette(dev, PIPE_B);
/* Enable the plane */
I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
I915_WRITE(DSPBADDR, I915_READ(DSPBADDR));
/* CRT state */
I915_WRITE(ADPA, dev_priv->saveADPA);
/* LVDS state */
if (IS_I965G(dev))
I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
if (IS_MOBILE(dev) && !IS_I830(dev))
I915_WRITE(LVDS, dev_priv->saveLVDS);
if (!IS_I830(dev) && !IS_845G(dev))
I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
/* FIXME: restore TV & SDVO state */
/* FBC info */
I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
/* VGA state */
I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
I915_WRITE(VGA0, dev_priv->saveVGA0);
I915_WRITE(VGA1, dev_priv->saveVGA1);
I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
DRM_UDELAY(150);
/* Clock gating state */
I915_WRITE (D_STATE, dev_priv->saveD_STATE);
I915_WRITE (CG_2D_DIS, dev_priv->saveCG_2D_DIS);
/* Cache mode state */
I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
/* Memory arbitration state */
I915_WRITE (MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);
for (i = 0; i < 16; i++) {
I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]);
I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i]);
}
for (i = 0; i < 3; i++)
I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
i915_restore_vga(dev);
return 0;
}
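
The two entry points above are intended to be driven from the driver's power-management hooks. A rough sketch of that wiring, with hypothetical callback names (the actual hookup lives outside this file and is not part of this diff):

static int i915_suspend_sketch(struct drm_device *dev)
{
	if (!dev || !dev->dev_private)
		return -ENODEV;

	i915_save_state(dev);		/* capture display, VGA and clock state */
	return 0;
}

static int i915_resume_sketch(struct drm_device *dev)
{
	if (!dev || !dev->dev_private)
		return -ENODEV;

	i915_restore_state(dev);	/* replay the saved registers */
	return 0;
}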

@ -131,7 +131,7 @@ int nouveau_fifo_init(struct drm_device *dev)
/* No cmdbuf object */ /* No cmdbuf object */
NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE, 0x00000000); NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE, 0x00000000);
NV_WRITE(NV03_PFIFO_CACHE0_PUSH0, 0x00000000); NV_WRITE(NV03_PFIFO_CACHE0_PUSH0, 0x00000000);
NV_WRITE(NV03_PFIFO_CACHE0_PULL0, 0x00000000); NV_WRITE(NV04_PFIFO_CACHE0_PULL0, 0x00000000);
NV_WRITE(NV04_PFIFO_SIZE, 0x0000FFFF); NV_WRITE(NV04_PFIFO_SIZE, 0x0000FFFF);
NV_WRITE(NV04_PFIFO_CACHE1_HASH, 0x0000FFFF); NV_WRITE(NV04_PFIFO_CACHE1_HASH, 0x0000FFFF);
NV_WRITE(NV04_PFIFO_CACHE0_PULL1, 0x00000001); NV_WRITE(NV04_PFIFO_CACHE0_PULL1, 0x00000001);

@ -36,7 +36,6 @@
#include "nouveau_drv.h" #include "nouveau_drv.h"
#include "nv50_kms_wrapper.h" #include "nv50_kms_wrapper.h"
static struct mem_block * static struct mem_block *
split_block(struct mem_block *p, uint64_t start, uint64_t size, split_block(struct mem_block *p, uint64_t start, uint64_t size,
struct drm_file *file_priv) struct drm_file *file_priv)

@ -122,8 +122,8 @@ nv04_fifo_save_context(struct nouveau_channel *chan)
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t tmp; uint32_t tmp;
RAMFC_WR(DMA_PUT, NV04_PFIFO_CACHE1_DMA_PUT); RAMFC_WR(DMA_PUT, NV_READ(NV04_PFIFO_CACHE1_DMA_PUT));
RAMFC_WR(DMA_GET, NV04_PFIFO_CACHE1_DMA_GET); RAMFC_WR(DMA_GET, NV_READ(NV04_PFIFO_CACHE1_DMA_GET));
tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16; tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16;
tmp |= NV_READ(NV04_PFIFO_CACHE1_DMA_INSTANCE); tmp |= NV_READ(NV04_PFIFO_CACHE1_DMA_INSTANCE);

@ -77,6 +77,9 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
return -EFAULT; return -EFAULT;
} }
box.x2--; /* Hardware expects inclusive bottom-right corner */
box.y2--;
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) { if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
box.x1 = (box.x1) & box.x1 = (box.x1) &
R300_CLIPRECT_MASK; R300_CLIPRECT_MASK;
@ -95,8 +98,8 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
R300_CLIPRECT_MASK; R300_CLIPRECT_MASK;
box.y2 = (box.y2 + R300_CLIPRECT_OFFSET) & box.y2 = (box.y2 + R300_CLIPRECT_OFFSET) &
R300_CLIPRECT_MASK; R300_CLIPRECT_MASK;
} }
OUT_RING((box.x1 << R300_CLIPRECT_X_SHIFT) | OUT_RING((box.x1 << R300_CLIPRECT_X_SHIFT) |
(box.y1 << R300_CLIPRECT_Y_SHIFT)); (box.y1 << R300_CLIPRECT_Y_SHIFT));
OUT_RING((box.x2 << R300_CLIPRECT_X_SHIFT) | OUT_RING((box.x2 << R300_CLIPRECT_X_SHIFT) |
@ -136,6 +139,18 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
ADVANCE_RING(); ADVANCE_RING();
} }
/* flush cache and wait idle clean after cliprect change */
BEGIN_RING(2);
OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
OUT_RING(R300_RB3D_DC_FLUSH);
ADVANCE_RING();
BEGIN_RING(2);
OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
ADVANCE_RING();
/* set flush flag */
dev_priv->track_flush |= RADEON_FLUSH_EMITED;
return 0; return 0;
} }
@ -166,13 +181,13 @@ void r300_init_reg_flags(struct drm_device *dev)
ADD_RANGE(0x21DC, 1); ADD_RANGE(0x21DC, 1);
ADD_RANGE(R300_VAP_UNKNOWN_221C, 1); ADD_RANGE(R300_VAP_UNKNOWN_221C, 1);
ADD_RANGE(R300_VAP_CLIP_X_0, 4); ADD_RANGE(R300_VAP_CLIP_X_0, 4);
ADD_RANGE(R300_VAP_PVS_WAITIDLE, 1); ADD_RANGE(R300_VAP_PVS_STATE_FLUSH_REG, 1);
ADD_RANGE(R300_VAP_UNKNOWN_2288, 1); ADD_RANGE(R300_VAP_UNKNOWN_2288, 1);
ADD_RANGE(R300_VAP_OUTPUT_VTX_FMT_0, 2); ADD_RANGE(R300_VAP_OUTPUT_VTX_FMT_0, 2);
ADD_RANGE(R300_VAP_PVS_CNTL_1, 3); ADD_RANGE(R300_VAP_PVS_CNTL_1, 3);
ADD_RANGE(R300_GB_ENABLE, 1); ADD_RANGE(R300_GB_ENABLE, 1);
ADD_RANGE(R300_GB_MSPOS0, 5); ADD_RANGE(R300_GB_MSPOS0, 5);
ADD_RANGE(R300_TX_CNTL, 1); ADD_RANGE(R300_TX_INVALTAGS, 1);
ADD_RANGE(R300_TX_ENABLE, 1); ADD_RANGE(R300_TX_ENABLE, 1);
ADD_RANGE(0x4200, 4); ADD_RANGE(0x4200, 4);
ADD_RANGE(0x4214, 1); ADD_RANGE(0x4214, 1);
@ -190,7 +205,7 @@ void r300_init_reg_flags(struct drm_device *dev)
ADD_RANGE(0x42C0, 2); ADD_RANGE(0x42C0, 2);
ADD_RANGE(R300_RS_CNTL_0, 2); ADD_RANGE(R300_RS_CNTL_0, 2);
ADD_RANGE(0x43A4, 2); ADD_RANGE(R300_SC_HYPERZ, 2);
ADD_RANGE(0x43E8, 1); ADD_RANGE(0x43E8, 1);
ADD_RANGE(0x46A4, 5); ADD_RANGE(0x46A4, 5);
@ -209,14 +224,12 @@ void r300_init_reg_flags(struct drm_device *dev)
ADD_RANGE(0x4E50, 9); ADD_RANGE(0x4E50, 9);
ADD_RANGE(0x4E88, 1); ADD_RANGE(0x4E88, 1);
ADD_RANGE(0x4EA0, 2); ADD_RANGE(0x4EA0, 2);
ADD_RANGE(R300_RB3D_ZSTENCIL_CNTL_0, 3); ADD_RANGE(R300_ZB_CNTL, 3);
ADD_RANGE(R300_RB3D_ZSTENCIL_FORMAT, 4); ADD_RANGE(R300_ZB_FORMAT, 4);
ADD_RANGE_MARK(R300_RB3D_DEPTHOFFSET, 1, MARK_CHECK_OFFSET); /* check offset */ ADD_RANGE_MARK(R300_ZB_DEPTHOFFSET, 1, MARK_CHECK_OFFSET); /* check offset */
ADD_RANGE(R300_RB3D_DEPTHPITCH, 1); ADD_RANGE(R300_ZB_DEPTHPITCH, 1);
ADD_RANGE(0x4F28, 1); ADD_RANGE(R300_ZB_DEPTHCLEARVALUE, 1);
ADD_RANGE(0x4F30, 2); ADD_RANGE(R300_ZB_ZMASK_OFFSET, 13);
ADD_RANGE(0x4F44, 1);
ADD_RANGE(0x4F54, 1);
ADD_RANGE(R300_TX_FILTER_0, 16); ADD_RANGE(R300_TX_FILTER_0, 16);
ADD_RANGE(R300_TX_FILTER1_0, 16); ADD_RANGE(R300_TX_FILTER1_0, 16);
@ -229,7 +242,7 @@ void r300_init_reg_flags(struct drm_device *dev)
ADD_RANGE(R300_TX_BORDER_COLOR_0, 16); ADD_RANGE(R300_TX_BORDER_COLOR_0, 16);
/* Sporadic registers used as primitives are emitted */ /* Sporadic registers used as primitives are emitted */
ADD_RANGE(R300_RB3D_ZCACHE_CTLSTAT, 1); ADD_RANGE(R300_ZB_ZCACHE_CTLSTAT, 1);
ADD_RANGE(R300_RB3D_DSTCACHE_CTLSTAT, 1); ADD_RANGE(R300_RB3D_DSTCACHE_CTLSTAT, 1);
ADD_RANGE(R300_VAP_INPUT_ROUTE_0_0, 8); ADD_RANGE(R300_VAP_INPUT_ROUTE_0_0, 8);
ADD_RANGE(R300_VAP_INPUT_ROUTE_1_0, 8); ADD_RANGE(R300_VAP_INPUT_ROUTE_1_0, 8);
@ -243,6 +256,7 @@ void r300_init_reg_flags(struct drm_device *dev)
ADD_RANGE(R500_RS_INST_0, 16); ADD_RANGE(R500_RS_INST_0, 16);
ADD_RANGE(R500_RB3D_COLOR_CLEAR_VALUE_AR, 2); ADD_RANGE(R500_RB3D_COLOR_CLEAR_VALUE_AR, 2);
ADD_RANGE(R500_RB3D_CONSTANT_COLOR_AR, 2); ADD_RANGE(R500_RB3D_CONSTANT_COLOR_AR, 2);
ADD_RANGE(R500_ZB_FIFO_SIZE, 2);
} else { } else {
ADD_RANGE(R300_PFS_CNTL_0, 3); ADD_RANGE(R300_PFS_CNTL_0, 3);
ADD_RANGE(R300_PFS_NODE_0, 4); ADD_RANGE(R300_PFS_NODE_0, 4);
@ -390,15 +404,28 @@ static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
if (sz * 16 > cmdbuf->bufsz) if (sz * 16 > cmdbuf->bufsz)
return -EINVAL; return -EINVAL;
BEGIN_RING(5 + sz * 4); /* VAP is very sensitive so we purge cache before we program it
/* Wait for VAP to come to senses.. */ * and we also flush its state before & after */
/* there is no need to emit it multiple times, (only once before VAP is programmed, BEGIN_RING(6);
but this optimization is for later */ OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
OUT_RING_REG(R300_VAP_PVS_WAITIDLE, 0); OUT_RING(R300_RB3D_DC_FLUSH);
OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
OUT_RING(CP_PACKET0(R300_VAP_PVS_STATE_FLUSH_REG, 0));
OUT_RING(0);
ADVANCE_RING();
/* set flush flag */
dev_priv->track_flush |= RADEON_FLUSH_EMITED;
BEGIN_RING(3 + sz * 4);
OUT_RING_REG(R300_VAP_PVS_UPLOAD_ADDRESS, addr); OUT_RING_REG(R300_VAP_PVS_UPLOAD_ADDRESS, addr);
OUT_RING(CP_PACKET0_TABLE(R300_VAP_PVS_UPLOAD_DATA, sz * 4 - 1)); OUT_RING(CP_PACKET0_TABLE(R300_VAP_PVS_UPLOAD_DATA, sz * 4 - 1));
OUT_RING_TABLE((int *)cmdbuf->buf, sz * 4); OUT_RING_TABLE((int *)cmdbuf->buf, sz * 4);
ADVANCE_RING();
BEGIN_RING(2);
OUT_RING(CP_PACKET0(R300_VAP_PVS_STATE_FLUSH_REG, 0));
OUT_RING(0);
ADVANCE_RING(); ADVANCE_RING();
cmdbuf->buf += sz * 16; cmdbuf->buf += sz * 16;
@ -426,6 +453,15 @@ static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv,
OUT_RING_TABLE((int *)cmdbuf->buf, 8); OUT_RING_TABLE((int *)cmdbuf->buf, 8);
ADVANCE_RING(); ADVANCE_RING();
BEGIN_RING(4);
OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
OUT_RING(R300_RB3D_DC_FLUSH);
OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
ADVANCE_RING();
/* set flush flag */
dev_priv->track_flush |= RADEON_FLUSH_EMITED;
cmdbuf->buf += 8 * 4; cmdbuf->buf += 8 * 4;
cmdbuf->bufsz -= 8 * 4; cmdbuf->bufsz -= 8 * 4;
@ -545,22 +581,23 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
return 0; return 0;
} }
static __inline__ int r300_emit_indx_buffer(drm_radeon_private_t *dev_priv, static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv,
drm_radeon_kcmd_buffer_t *cmdbuf) drm_radeon_kcmd_buffer_t *cmdbuf)
{ {
u32 *cmd = (u32 *) cmdbuf->buf; u32 *cmd;
int count, ret; int count;
int expected_count;
RING_LOCALS; RING_LOCALS;
count=(cmd[0]>>16) & 0x3fff; cmd = (u32 *) cmdbuf->buf;
count = (cmd[0]>>16) & 0x3fff;
expected_count = cmd[1] >> 16;
if (!(cmd[1] & R300_VAP_VF_CNTL__INDEX_SIZE_32bit))
expected_count = (expected_count+1)/2;
if ((cmd[1] & 0x8000ffff) != 0x80000810) { if (count && count != expected_count) {
DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]); DRM_ERROR("3D_DRAW_INDX_2: packet size %i, expected %i\n",
return -EINVAL; count, expected_count);
}
ret = !radeon_check_offset(dev_priv, cmd[2]);
if (ret) {
DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]);
return -EINVAL; return -EINVAL;
} }
@ -572,6 +609,50 @@ static __inline__ int r300_emit_indx_buffer(drm_radeon_private_t *dev_priv,
cmdbuf->buf += (count+2)*4; cmdbuf->buf += (count+2)*4;
cmdbuf->bufsz -= (count+2)*4; cmdbuf->bufsz -= (count+2)*4;
if (!count) {
drm_r300_cmd_header_t header;
if (cmdbuf->bufsz < 4*4 + sizeof(header)) {
DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER, but stream is too short.\n");
return -EINVAL;
}
header.u = *(unsigned int *)cmdbuf->buf;
cmdbuf->buf += sizeof(header);
cmdbuf->bufsz -= sizeof(header);
cmd = (u32 *) cmdbuf->buf;
if (header.header.cmd_type != R300_CMD_PACKET3 ||
header.packet3.packet != R300_CMD_PACKET3_RAW ||
cmd[0] != CP_PACKET3(RADEON_CP_INDX_BUFFER, 2)) {
DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER.\n");
return -EINVAL;
}
if ((cmd[1] & 0x8000ffff) != 0x80000810) {
DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]);
return -EINVAL;
}
if (!radeon_check_offset(dev_priv, cmd[2])) {
DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]);
return -EINVAL;
}
if (cmd[3] != expected_count) {
DRM_ERROR("INDX_BUFFER: buffer size %i, expected %i\n",
cmd[3], expected_count);
return -EINVAL;
}
BEGIN_RING(4);
OUT_RING(cmd[0]);
OUT_RING_TABLE((int *)(cmdbuf->buf + 4), 3);
ADVANCE_RING();
cmdbuf->buf += 4*4;
cmdbuf->bufsz -= 4*4;
}
return 0; return 0;
} }
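/*
 * Not part of the patch: the index-count arithmetic used by the checker
 * above, pulled out for clarity. With 16-bit indices two of them are
 * packed per dword, so the dword count is rounded up; 32-bit indices map
 * one to one.
 */
static int r300_index_dwords(int nr_indices, int index_size_32bit)
{
	return index_size_32bit ? nr_indices : (nr_indices + 1) / 2;
}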
@ -615,11 +696,22 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
case RADEON_CNTL_BITBLT_MULTI: case RADEON_CNTL_BITBLT_MULTI:
return r300_emit_bitblt_multi(dev_priv, cmdbuf); return r300_emit_bitblt_multi(dev_priv, cmdbuf);
case RADEON_CP_INDX_BUFFER: /* DRAW_INDX_2 without INDX_BUFFER seems to lock up the gpu */ case RADEON_CP_INDX_BUFFER:
return r300_emit_indx_buffer(dev_priv, cmdbuf); DRM_ERROR("packet3 INDX_BUFFER without preceding 3D_DRAW_INDX_2 is illegal.\n");
case RADEON_CP_3D_DRAW_IMMD_2: /* triggers drawing using in-packet vertex data */ return -EINVAL;
case RADEON_CP_3D_DRAW_VBUF_2: /* triggers drawing of vertex buffers setup elsewhere */ case RADEON_CP_3D_DRAW_IMMD_2:
case RADEON_CP_3D_DRAW_INDX_2: /* triggers drawing using indices to vertex buffer */ /* triggers drawing using in-packet vertex data */
case RADEON_CP_3D_DRAW_VBUF_2:
/* triggers drawing of vertex buffers setup elsewhere */
dev_priv->track_flush &= ~(RADEON_FLUSH_EMITED |
RADEON_PURGE_EMITED);
break;
case RADEON_CP_3D_DRAW_INDX_2:
/* triggers drawing using indices to vertex buffer */
/* whenever we send vertex we clear flush & purge */
dev_priv->track_flush &= ~(RADEON_FLUSH_EMITED |
RADEON_PURGE_EMITED);
return r300_emit_draw_indx_2(dev_priv, cmdbuf);
case RADEON_WAIT_FOR_IDLE: case RADEON_WAIT_FOR_IDLE:
case RADEON_CP_NOP: case RADEON_CP_NOP:
/* these packets are safe */ /* these packets are safe */
@ -715,16 +807,53 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
*/ */
static __inline__ void r300_pacify(drm_radeon_private_t *dev_priv) static __inline__ void r300_pacify(drm_radeon_private_t *dev_priv)
{ {
uint32_t cache_z, cache_3d, cache_2d;
RING_LOCALS; RING_LOCALS;
BEGIN_RING(6); cache_z = R300_ZC_FLUSH;
OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); cache_2d = R300_DC_FLUSH_2D;
OUT_RING(R300_RB3D_DSTCACHE_UNKNOWN_0A); cache_3d = R300_DC_FLUSH_3D;
OUT_RING(CP_PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0)); if (!(dev_priv->track_flush & RADEON_PURGE_EMITED)) {
OUT_RING(R300_RB3D_ZCACHE_UNKNOWN_03); /* we can purge, primitive where draw since last purge */
OUT_RING(CP_PACKET3(RADEON_CP_NOP, 0)); cache_z |= R300_ZC_FREE;
OUT_RING(0x0); cache_2d |= R300_DC_FREE_2D;
cache_3d |= R300_DC_FREE_3D;
}
/* flush & purge zbuffer */
BEGIN_RING(2);
OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0));
OUT_RING(cache_z);
ADVANCE_RING(); ADVANCE_RING();
/* flush & purge 3d */
BEGIN_RING(2);
OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
OUT_RING(cache_3d);
ADVANCE_RING();
/* flush & purge texture */
BEGIN_RING(2);
OUT_RING(CP_PACKET0(R300_TX_INVALTAGS, 0));
OUT_RING(0);
ADVANCE_RING();
/* FIXME: is this one really needed ? */
BEGIN_RING(2);
OUT_RING(CP_PACKET0(R300_RB3D_AARESOLVE_CTL, 0));
OUT_RING(0);
ADVANCE_RING();
BEGIN_RING(2);
OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
ADVANCE_RING();
/* flush & purge 2d through E2 as RB2D will trigger lockup */
BEGIN_RING(4);
OUT_RING(CP_PACKET0(R300_DSTCACHE_CTLSTAT, 0));
OUT_RING(cache_2d);
OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
OUT_RING(RADEON_WAIT_2D_IDLECLEAN |
RADEON_WAIT_HOST_IDLECLEAN);
ADVANCE_RING();
/* set flush & purge flags */
dev_priv->track_flush |= RADEON_FLUSH_EMITED | RADEON_PURGE_EMITED;
} }
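/*
 * Sketch only, not in the patch: how the RADEON_FLUSH_EMITED flag set above
 * is meant to be consumed elsewhere -- skip the flush when one has already
 * been emitted and no draw has cleared the flag since. The function name is
 * hypothetical.
 */
static void r300_flush_if_needed(drm_radeon_private_t *dev_priv)
{
	RING_LOCALS;

	if (dev_priv->track_flush & RADEON_FLUSH_EMITED)
		return;		/* nothing drawn since the last flush */

	BEGIN_RING(2);
	OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	OUT_RING(R300_RB3D_DC_FLUSH);
	ADVANCE_RING();

	dev_priv->track_flush |= RADEON_FLUSH_EMITED;
}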
/** /**
@ -906,8 +1035,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
DRM_DEBUG("\n"); DRM_DEBUG("\n");
/* See the comment above r300_emit_begin3d for why this call must be here, /* pacify */
* and what the cleanup gotos are for. */
r300_pacify(dev_priv); r300_pacify(dev_priv);
if (cmdbuf->nbox <= R300_SIMULTANEOUS_CLIPRECTS) { if (cmdbuf->nbox <= R300_SIMULTANEOUS_CLIPRECTS) {

@ -320,7 +320,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
* Therefore, I suspect writing zero to 0x2284 synchronizes the engine and * Therefore, I suspect writing zero to 0x2284 synchronizes the engine and
* avoids bugs caused by still running shaders reading bad data from memory. * avoids bugs caused by still running shaders reading bad data from memory.
*/ */
#define R300_VAP_PVS_WAITIDLE 0x2284 /* GUESS */ #define R300_VAP_PVS_STATE_FLUSH_REG 0x2284
/* Absolutely no clue what this register is about. */ /* Absolutely no clue what this register is about. */
#define R300_VAP_UNKNOWN_2288 0x2288 #define R300_VAP_UNKNOWN_2288 0x2288
@ -516,7 +516,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
/* gap */ /* gap */
/* Zero to flush caches. */ /* Zero to flush caches. */
#define R300_TX_CNTL 0x4100 #define R300_TX_INVALTAGS 0x4100
#define R300_TX_FLUSH 0x0 #define R300_TX_FLUSH 0x0
/* The upper enable bits are guessed, based on fglrx reported limits. */ /* The upper enable bits are guessed, based on fglrx reported limits. */
@ -705,6 +705,27 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
# define R300_RS_ROUTE_1_UNKNOWN11 (1 << 11) # define R300_RS_ROUTE_1_UNKNOWN11 (1 << 11)
/* END: Rasterization / Interpolators - many guesses */ /* END: Rasterization / Interpolators - many guesses */
/* Hierarchical Z Enable */
#define R300_SC_HYPERZ 0x43a4
# define R300_SC_HYPERZ_DISABLE (0 << 0)
# define R300_SC_HYPERZ_ENABLE (1 << 0)
# define R300_SC_HYPERZ_MIN (0 << 1)
# define R300_SC_HYPERZ_MAX (1 << 1)
# define R300_SC_HYPERZ_ADJ_256 (0 << 2)
# define R300_SC_HYPERZ_ADJ_128 (1 << 2)
# define R300_SC_HYPERZ_ADJ_64 (2 << 2)
# define R300_SC_HYPERZ_ADJ_32 (3 << 2)
# define R300_SC_HYPERZ_ADJ_16 (4 << 2)
# define R300_SC_HYPERZ_ADJ_8 (5 << 2)
# define R300_SC_HYPERZ_ADJ_4 (6 << 2)
# define R300_SC_HYPERZ_ADJ_2 (7 << 2)
# define R300_SC_HYPERZ_HZ_Z0MIN_NO (0 << 5)
# define R300_SC_HYPERZ_HZ_Z0MIN (1 << 5)
# define R300_SC_HYPERZ_HZ_Z0MAX_NO (0 << 6)
# define R300_SC_HYPERZ_HZ_Z0MAX (1 << 6)
#define R300_SC_EDGERULE 0x43a8
/* BEGIN: Scissors and cliprects */ /* BEGIN: Scissors and cliprects */
/* There are four clipping rectangles. Their corner coordinates are inclusive. /* There are four clipping rectangles. Their corner coordinates are inclusive.
@ -1344,6 +1365,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
#define R300_RB3D_COLORPITCH2 0x4E40 /* GUESS */ #define R300_RB3D_COLORPITCH2 0x4E40 /* GUESS */
#define R300_RB3D_COLORPITCH3 0x4E44 /* GUESS */ #define R300_RB3D_COLORPITCH3 0x4E44 /* GUESS */
#define R300_RB3D_AARESOLVE_CTL 0x4E88
/* gap */ /* gap */
/* Guess by Vladimir. /* Guess by Vladimir.
@ -1358,19 +1380,14 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
* for this. * for this.
* Bit (1<<8) is the "test" bit. so plain write is 6 - vd * Bit (1<<8) is the "test" bit. so plain write is 6 - vd
*/ */
#define R300_RB3D_ZSTENCIL_CNTL_0 0x4F00 #define R300_ZB_CNTL 0x4F00
# define R300_RB3D_Z_DISABLED_1 0x00000010 # define R300_STENCIL_ENABLE (1 << 0)
# define R300_RB3D_Z_DISABLED_2 0x00000014 # define R300_Z_ENABLE (1 << 1)
# define R300_RB3D_Z_TEST 0x00000012 # define R300_Z_WRITE_ENABLE (1 << 2)
# define R300_RB3D_Z_TEST_AND_WRITE 0x00000016 # define R300_Z_SIGNED_COMPARE (1 << 3)
# define R300_RB3D_Z_WRITE_ONLY 0x00000006 # define R300_STENCIL_FRONT_BACK (1 << 4)
# define R300_RB3D_Z_TEST 0x00000012 #define R300_ZB_ZSTENCILCNTL 0x4f04
# define R300_RB3D_Z_TEST_AND_WRITE 0x00000016
# define R300_RB3D_Z_WRITE_ONLY 0x00000006
# define R300_RB3D_STENCIL_ENABLE 0x00000001
#define R300_RB3D_ZSTENCIL_CNTL_1 0x4F04
/* functions */ /* functions */
# define R300_ZS_NEVER 0 # define R300_ZS_NEVER 0
# define R300_ZS_LESS 1 # define R300_ZS_LESS 1
@ -1390,52 +1407,166 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
# define R300_ZS_INVERT 5 # define R300_ZS_INVERT 5
# define R300_ZS_INCR_WRAP 6 # define R300_ZS_INCR_WRAP 6
# define R300_ZS_DECR_WRAP 7 # define R300_ZS_DECR_WRAP 7
# define R300_Z_FUNC_SHIFT 0
/* front and back refer to operations done for front /* front and back refer to operations done for front
and back faces, i.e. separate stencil function support */ and back faces, i.e. separate stencil function support */
# define R300_RB3D_ZS1_DEPTH_FUNC_SHIFT 0 # define R300_S_FRONT_FUNC_SHIFT 3
# define R300_RB3D_ZS1_FRONT_FUNC_SHIFT 3 # define R300_S_FRONT_SFAIL_OP_SHIFT 6
# define R300_RB3D_ZS1_FRONT_FAIL_OP_SHIFT 6 # define R300_S_FRONT_ZPASS_OP_SHIFT 9
# define R300_RB3D_ZS1_FRONT_ZPASS_OP_SHIFT 9 # define R300_S_FRONT_ZFAIL_OP_SHIFT 12
# define R300_RB3D_ZS1_FRONT_ZFAIL_OP_SHIFT 12 # define R300_S_BACK_FUNC_SHIFT 15
# define R300_RB3D_ZS1_BACK_FUNC_SHIFT 15 # define R300_S_BACK_SFAIL_OP_SHIFT 18
# define R300_RB3D_ZS1_BACK_FAIL_OP_SHIFT 18 # define R300_S_BACK_ZPASS_OP_SHIFT 21
# define R300_RB3D_ZS1_BACK_ZPASS_OP_SHIFT 21 # define R300_S_BACK_ZFAIL_OP_SHIFT 24
# define R300_RB3D_ZS1_BACK_ZFAIL_OP_SHIFT 24
#define R300_RB3D_ZSTENCIL_CNTL_2 0x4F08 #define R300_ZB_STENCILREFMASK 0x4f08
# define R300_RB3D_ZS2_STENCIL_REF_SHIFT 0 # define R300_STENCILREF_SHIFT 0
# define R300_RB3D_ZS2_STENCIL_MASK 0xFF # define R300_STENCILREF_MASK 0x000000ff
# define R300_RB3D_ZS2_STENCIL_MASK_SHIFT 8 # define R300_STENCILMASK_SHIFT 8
# define R300_RB3D_ZS2_STENCIL_WRITE_MASK_SHIFT 16 # define R300_STENCILMASK_MASK 0x0000ff00
# define R300_STENCILWRITEMASK_SHIFT 16
# define R300_STENCILWRITEMASK_MASK 0x00ff0000
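/*
 * Illustrative helper, not part of the patch: packing the three stencil
 * fields defined above into an R300_ZB_STENCILREFMASK value.
 */
static inline u32 r300_zb_stencilrefmask(u32 ref, u32 mask, u32 writemask)
{
	return ((ref << R300_STENCILREF_SHIFT) & R300_STENCILREF_MASK) |
	       ((mask << R300_STENCILMASK_SHIFT) & R300_STENCILMASK_MASK) |
	       ((writemask << R300_STENCILWRITEMASK_SHIFT) &
		R300_STENCILWRITEMASK_MASK);
}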
/* gap */ /* gap */
#define R300_RB3D_ZSTENCIL_FORMAT 0x4F10 #define R300_ZB_FORMAT 0x4f10
# define R300_DEPTH_FORMAT_16BIT_INT_Z (0 << 0) # define R300_DEPTHFORMAT_16BIT_INT_Z (0 << 0)
# define R300_DEPTH_FORMAT_24BIT_INT_Z (2 << 0) # define R300_DEPTHFORMAT_16BIT_13E3 (1 << 0)
/* 16 bit format or some aditional bit ? */ # define R300_DEPTHFORMAT_24BIT_INT_Z_8BIT_STENCIL (2 << 0)
# define R300_DEPTH_FORMAT_UNK32 (32 << 0) /* reserved up to (15 << 0) */
# define R300_INVERT_13E3_LEADING_ONES (0 << 4)
# define R300_INVERT_13E3_LEADING_ZEROS (1 << 4)
#define R300_RB3D_EARLY_Z 0x4F14 #define R300_ZB_ZTOP 0x4F14
# define R300_EARLY_Z_DISABLE (0 << 0) # define R300_ZTOP_DISABLE (0 << 0)
# define R300_EARLY_Z_ENABLE (1 << 0) # define R300_ZTOP_ENABLE (1 << 0)
/* gap */ /* gap */
#define R300_RB3D_ZCACHE_CTLSTAT 0x4F18 /* GUESS */ #define R300_ZB_ZCACHE_CTLSTAT 0x4f18
# define R300_RB3D_ZCACHE_UNKNOWN_01 0x1 # define R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_NO_EFFECT (0 << 0)
# define R300_RB3D_ZCACHE_UNKNOWN_03 0x3 # define R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_FLUSH_AND_FREE (1 << 0)
# define R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_NO_EFFECT (0 << 1)
# define R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_FREE (1 << 1)
# define R300_ZB_ZCACHE_CTLSTAT_ZC_BUSY_IDLE (0 << 31)
# define R300_ZB_ZCACHE_CTLSTAT_ZC_BUSY_BUSY (1 << 31)
#define R300_ZB_BW_CNTL 0x4f1c
# define R300_HIZ_DISABLE (0 << 0)
# define R300_HIZ_ENABLE (1 << 0)
# define R300_HIZ_MIN (0 << 1)
# define R300_HIZ_MAX (1 << 1)
# define R300_FAST_FILL_DISABLE (0 << 2)
# define R300_FAST_FILL_ENABLE (1 << 2)
# define R300_RD_COMP_DISABLE (0 << 3)
# define R300_RD_COMP_ENABLE (1 << 3)
# define R300_WR_COMP_DISABLE (0 << 4)
# define R300_WR_COMP_ENABLE (1 << 4)
# define R300_ZB_CB_CLEAR_RMW (0 << 5)
# define R300_ZB_CB_CLEAR_CACHE_LINEAR (1 << 5)
# define R300_FORCE_COMPRESSED_STENCIL_VALUE_DISABLE (0 << 6)
# define R300_FORCE_COMPRESSED_STENCIL_VALUE_ENABLE (1 << 6)
# define R500_ZEQUAL_OPTIMIZE_ENABLE (0 << 7)
# define R500_ZEQUAL_OPTIMIZE_DISABLE (1 << 7)
# define R500_SEQUAL_OPTIMIZE_ENABLE (0 << 8)
# define R500_SEQUAL_OPTIMIZE_DISABLE (1 << 8)
# define R500_BMASK_ENABLE (0 << 10)
# define R500_BMASK_DISABLE (1 << 10)
# define R500_HIZ_EQUAL_REJECT_DISABLE (0 << 11)
# define R500_HIZ_EQUAL_REJECT_ENABLE (1 << 11)
# define R500_HIZ_FP_EXP_BITS_DISABLE (0 << 12)
# define R500_HIZ_FP_EXP_BITS_1 (1 << 12)
# define R500_HIZ_FP_EXP_BITS_2 (2 << 12)
# define R500_HIZ_FP_EXP_BITS_3 (3 << 12)
# define R500_HIZ_FP_EXP_BITS_4 (4 << 12)
# define R500_HIZ_FP_EXP_BITS_5 (5 << 12)
# define R500_HIZ_FP_INVERT_LEADING_ONES (0 << 15)
# define R500_HIZ_FP_INVERT_LEADING_ZEROS (1 << 15)
# define R500_TILE_OVERWRITE_RECOMPRESSION_ENABLE (0 << 16)
# define R500_TILE_OVERWRITE_RECOMPRESSION_DISABLE (1 << 16)
# define R500_CONTIGUOUS_6XAA_SAMPLES_ENABLE (0 << 17)
# define R500_CONTIGUOUS_6XAA_SAMPLES_DISABLE (1 << 17)
# define R500_PEQ_PACKING_DISABLE (0 << 18)
# define R500_PEQ_PACKING_ENABLE (1 << 18)
# define R500_COVERED_PTR_MASKING_DISABLE (0 << 18)
# define R500_COVERED_PTR_MASKING_ENABLE (1 << 18)
/* gap */ /* gap */
#define R300_RB3D_DEPTHOFFSET 0x4F20 /* Z Buffer Address Offset.
#define R300_RB3D_DEPTHPITCH 0x4F24 * Bits 31 to 5 are used for aligned Z buffer address offset for macro tiles.
# define R300_DEPTHPITCH_MASK 0x00001FF8 /* GUESS */ */
# define R300_DEPTH_TILE_ENABLE (1 << 16) /* GUESS */ #define R300_ZB_DEPTHOFFSET 0x4f20
# define R300_DEPTH_MICROTILE_ENABLE (1 << 17) /* GUESS */
# define R300_DEPTH_ENDIAN_NO_SWAP (0 << 18) /* GUESS */ /* Z Buffer Pitch and Endian Control */
# define R300_DEPTH_ENDIAN_WORD_SWAP (1 << 18) /* GUESS */ #define R300_ZB_DEPTHPITCH 0x4f24
# define R300_DEPTH_ENDIAN_DWORD_SWAP (2 << 18) /* GUESS */ # define R300_DEPTHPITCH_MASK 0x00003FFC
# define R300_DEPTHMACROTILE_DISABLE (0 << 16)
# define R300_DEPTHMACROTILE_ENABLE (1 << 16)
# define R300_DEPTHMICROTILE_LINEAR (0 << 17)
# define R300_DEPTHMICROTILE_TILED (1 << 17)
# define R300_DEPTHMICROTILE_TILED_SQUARE (2 << 17)
# define R300_DEPTHENDIAN_NO_SWAP (0 << 18)
# define R300_DEPTHENDIAN_WORD_SWAP (1 << 18)
# define R300_DEPTHENDIAN_DWORD_SWAP (2 << 18)
# define R300_DEPTHENDIAN_HALF_DWORD_SWAP (3 << 18)
/* Z Buffer Clear Value */
#define R300_ZB_DEPTHCLEARVALUE 0x4f28
#define R300_ZB_ZMASK_OFFSET 0x4f30
#define R300_ZB_ZMASK_PITCH 0x4f34
#define R300_ZB_ZMASK_WRINDEX 0x4f38
#define R300_ZB_ZMASK_DWORD 0x4f3c
#define R300_ZB_ZMASK_RDINDEX 0x4f40
/* Hierarchical Z Memory Offset */
#define R300_ZB_HIZ_OFFSET 0x4f44
/* Hierarchical Z Write Index */
#define R300_ZB_HIZ_WRINDEX 0x4f48
/* Hierarchical Z Data */
#define R300_ZB_HIZ_DWORD 0x4f4c
/* Hierarchical Z Read Index */
#define R300_ZB_HIZ_RDINDEX 0x4f50
/* Hierarchical Z Pitch */
#define R300_ZB_HIZ_PITCH 0x4f54
/* Z Buffer Z Pass Counter Data */
#define R300_ZB_ZPASS_DATA 0x4f58
/* Z Buffer Z Pass Counter Address */
#define R300_ZB_ZPASS_ADDR 0x4f5c
/* Depth buffer X and Y coordinate offset */
#define R300_ZB_DEPTHXY_OFFSET 0x4f60
# define R300_DEPTHX_OFFSET_SHIFT 1
# define R300_DEPTHX_OFFSET_MASK 0x000007FE
# define R300_DEPTHY_OFFSET_SHIFT 17
# define R300_DEPTHY_OFFSET_MASK 0x07FE0000
/* Sets the fifo sizes */
#define R500_ZB_FIFO_SIZE 0x4fd0
# define R500_OP_FIFO_SIZE_FULL (0 << 0)
# define R500_OP_FIFO_SIZE_HALF (1 << 0)
# define R500_OP_FIFO_SIZE_QUATER (2 << 0)
# define R500_OP_FIFO_SIZE_EIGTHS (4 << 0)
/* Stencil Reference Value and Mask for backfacing quads */
/* R300_ZB_STENCILREFMASK handles front face */
#define R500_ZB_STENCILREFMASK_BF 0x4fd4
# define R500_STENCILREF_SHIFT 0
# define R500_STENCILREF_MASK 0x000000ff
# define R500_STENCILMASK_SHIFT 8
# define R500_STENCILMASK_MASK 0x0000ff00
# define R500_STENCILWRITEMASK_SHIFT 16
# define R500_STENCILWRITEMASK_MASK 0x00ff0000
/* BEGIN: Vertex program instruction set */ /* BEGIN: Vertex program instruction set */

@ -40,6 +40,7 @@
#define RADEON_FIFO_DEBUG 0 #define RADEON_FIFO_DEBUG 0
static int radeon_do_cleanup_cp(struct drm_device * dev); static int radeon_do_cleanup_cp(struct drm_device * dev);
static void radeon_do_cp_start(drm_radeon_private_t * dev_priv);
static u32 R500_READ_MCIND(drm_radeon_private_t *dev_priv, int addr) static u32 R500_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
{ {
@ -144,8 +145,12 @@ static void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base)
} else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) { } else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) {
R500_WRITE_MCIND(R520_MC_AGP_BASE, agp_base_lo); R500_WRITE_MCIND(R520_MC_AGP_BASE, agp_base_lo);
R500_WRITE_MCIND(R520_MC_AGP_BASE_2, agp_base_hi); R500_WRITE_MCIND(R520_MC_AGP_BASE_2, agp_base_hi);
} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) {
RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo);
RADEON_WRITE(RS480_AGP_BASE_2, agp_base_hi);
} else { } else {
RADEON_WRITE(RADEON_MC_AGP_LOCATION, agp_base_lo); RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo);
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R200) if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R200)
RADEON_WRITE(RADEON_AGP_BASE_2, agp_base_hi); RADEON_WRITE(RADEON_AGP_BASE_2, agp_base_hi);
} }
@ -290,23 +295,8 @@ static int radeon_do_pixcache_flush(drm_radeon_private_t * dev_priv)
DRM_UDELAY(1); DRM_UDELAY(1);
} }
} else { } else {
/* 3D */ /* don't flush or purge cache here or lockup */
tmp = RADEON_READ(R300_RB3D_DSTCACHE_CTLSTAT); return 0;
tmp |= RADEON_RB3D_DC_FLUSH_ALL;
RADEON_WRITE(R300_RB3D_DSTCACHE_CTLSTAT, tmp);
/* 2D */
tmp = RADEON_READ(RADEON_RB2D_DSTCACHE_CTLSTAT);
tmp |= RADEON_RB3D_DC_FLUSH_ALL;
RADEON_WRITE(RADEON_RB3D_DSTCACHE_CTLSTAT, tmp);
for (i = 0; i < dev_priv->usec_timeout; i++) {
if (!(RADEON_READ(RADEON_RB2D_DSTCACHE_CTLSTAT)
& RADEON_RB3D_DC_BUSY)) {
return 0;
}
DRM_UDELAY(1);
}
} }
#if RADEON_FIFO_DEBUG #if RADEON_FIFO_DEBUG
@ -329,6 +319,9 @@ static int radeon_do_wait_for_fifo(drm_radeon_private_t * dev_priv, int entries)
return 0; return 0;
DRM_UDELAY(1); DRM_UDELAY(1);
} }
DRM_INFO("wait for fifo failed status : 0x%08X 0x%08X\n",
RADEON_READ(RADEON_RBBM_STATUS),
RADEON_READ(R300_VAP_CNTL_STATUS));
#if RADEON_FIFO_DEBUG #if RADEON_FIFO_DEBUG
DRM_ERROR("failed!\n"); DRM_ERROR("failed!\n");
@ -355,6 +348,9 @@ static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv)
} }
DRM_UDELAY(1); DRM_UDELAY(1);
} }
DRM_INFO("wait idle failed status : 0x%08X 0x%08X\n",
RADEON_READ(RADEON_RBBM_STATUS),
RADEON_READ(R300_VAP_CNTL_STATUS));
#if RADEON_FIFO_DEBUG #if RADEON_FIFO_DEBUG
DRM_ERROR("failed!\n"); DRM_ERROR("failed!\n");
@ -448,6 +444,7 @@ static void radeon_cp_load_microcode(drm_radeon_private_t * dev_priv)
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV350) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV350) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV380) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV380) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) { ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) {
DRM_INFO("Loading R300 Microcode\n"); DRM_INFO("Loading R300 Microcode\n");
for (i = 0; i < 256; i++) { for (i = 0; i < 256; i++) {
@ -536,14 +533,20 @@ static void radeon_do_cp_start(drm_radeon_private_t * dev_priv)
dev_priv->cp_running = 1; dev_priv->cp_running = 1;
BEGIN_RING(6); BEGIN_RING(8);
/* isync can only be written through the CP on r5xx, so write it here */
OUT_RING(CP_PACKET0(RADEON_ISYNC_CNTL, 0));
OUT_RING(RADEON_ISYNC_ANY2D_IDLE3D |
RADEON_ISYNC_ANY3D_IDLE2D |
RADEON_ISYNC_WAIT_IDLEGUI |
RADEON_ISYNC_CPSCRATCH_IDLEGUI);
RADEON_PURGE_CACHE(); RADEON_PURGE_CACHE();
RADEON_PURGE_ZCACHE(); RADEON_PURGE_ZCACHE();
RADEON_WAIT_UNTIL_IDLE(); RADEON_WAIT_UNTIL_IDLE();
ADVANCE_RING(); ADVANCE_RING();
COMMIT_RING(); COMMIT_RING();
dev_priv->track_flush |= RADEON_FLUSH_EMITED | RADEON_PURGE_EMITED;
} }
/* Reset the Command Processor. This will not flush any pending /* Reset the Command Processor. This will not flush any pending
@ -858,14 +861,7 @@ static void radeon_set_igpgart(drm_radeon_private_t * dev_priv, int on)
IGP_WRITE_MCIND(RS480_AGP_MODE_CNTL, ((1 << RS480_REQ_TYPE_SNOOP_SHIFT) | IGP_WRITE_MCIND(RS480_AGP_MODE_CNTL, ((1 << RS480_REQ_TYPE_SNOOP_SHIFT) |
RS480_REQ_TYPE_SNOOP_DIS)); RS480_REQ_TYPE_SNOOP_DIS));
if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) { radeon_write_agp_base(dev_priv, dev_priv->gart_vm_start);
IGP_WRITE_MCIND(RS690_MC_AGP_BASE,
(unsigned int)dev_priv->gart_vm_start);
IGP_WRITE_MCIND(RS690_MC_AGP_BASE_2, 0);
} else {
RADEON_WRITE(RADEON_AGP_BASE, (unsigned int)dev_priv->gart_vm_start);
RADEON_WRITE(RS480_AGP_BASE_2, 0);
}
dev_priv->gart_size = 32*1024*1024; dev_priv->gart_size = 32*1024*1024;
temp = (((dev_priv->gart_vm_start - 1 + dev_priv->gart_size) & temp = (((dev_priv->gart_vm_start - 1 + dev_priv->gart_size) &
@ -1064,7 +1060,7 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
dev_priv->depth_clear.rb3d_cntl = (RADEON_PLANE_MASK_ENABLE | dev_priv->depth_clear.rb3d_cntl = (RADEON_PLANE_MASK_ENABLE |
(dev_priv->color_fmt << 10) | (dev_priv->color_fmt << 10) |
(dev_priv->chip_family < CHIP_R200 ? RADEON_ZBLOCK16 : 0)); (dev_priv->chip_family < CHIP_R200 ? RADEON_ZBLOCK16 : 0));
dev_priv->depth_clear.rb3d_zstencilcntl = dev_priv->depth_clear.rb3d_zstencilcntl =
(dev_priv->depth_fmt | (dev_priv->depth_fmt |
RADEON_Z_TEST_ALWAYS | RADEON_Z_TEST_ALWAYS |
@ -1258,7 +1254,6 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
dev_priv->gart_info.table_mask = DMA_BIT_MASK(32); dev_priv->gart_info.table_mask = DMA_BIT_MASK(32);
/* if we have an offset set from userspace */ /* if we have an offset set from userspace */
if (dev_priv->pcigart_offset_set) { if (dev_priv->pcigart_offset_set) {
/* if it came from userspace - remap it */ /* if it came from userspace - remap it */
if (dev_priv->pcigart_offset_set == 1) { if (dev_priv->pcigart_offset_set == 1) {
dev_priv->gart_info.bus_addr = dev_priv->gart_info.bus_addr =
@ -1409,6 +1404,7 @@ static int radeon_do_resume_cp(struct drm_device * dev)
radeon_cp_init_ring_buffer(dev, dev_priv); radeon_cp_init_ring_buffer(dev, dev_priv);
radeon_do_engine_reset(dev); radeon_do_engine_reset(dev);
radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);
DRM_DEBUG("radeon_do_resume_cp() complete\n"); DRM_DEBUG("radeon_do_resume_cp() complete\n");

@ -39,7 +39,7 @@
#define DRIVER_NAME "radeon" #define DRIVER_NAME "radeon"
#define DRIVER_DESC "ATI Radeon" #define DRIVER_DESC "ATI Radeon"
#define DRIVER_DATE "20080528" #define DRIVER_DATE "20080613"
/* Interface history: /* Interface history:
* *
@ -286,6 +286,9 @@ struct drm_radeon_master_private {
drm_radeon_sarea_t *sarea_priv; drm_radeon_sarea_t *sarea_priv;
}; };
#define RADEON_FLUSH_EMITED (1 << 0)
#define RADEON_PURGE_EMITED (1 << 1)
typedef struct drm_radeon_private { typedef struct drm_radeon_private {
drm_radeon_ring_buffer_t ring; drm_radeon_ring_buffer_t ring;
@ -360,7 +363,6 @@ typedef struct drm_radeon_private {
struct radeon_surface surfaces[RADEON_MAX_SURFACES]; struct radeon_surface surfaces[RADEON_MAX_SURFACES];
struct radeon_virt_surface virt_surfaces[2 * RADEON_MAX_SURFACES]; struct radeon_virt_surface virt_surfaces[2 * RADEON_MAX_SURFACES];
u32 scratch_ages[5]; u32 scratch_ages[5];
unsigned int crtc_last_cnt; unsigned int crtc_last_cnt;
@ -370,12 +372,9 @@ typedef struct drm_radeon_private {
uint32_t flags; /* see radeon_chip_flags */ uint32_t flags; /* see radeon_chip_flags */
unsigned long fb_aper_offset; unsigned long fb_aper_offset;
int num_gb_pipes;
struct radeon_mm_info mm; struct radeon_mm_info mm;
drm_local_map_t *mmio; drm_local_map_t *mmio;
uint32_t chip_family;
unsigned long pcigart_offset; unsigned long pcigart_offset;
unsigned int pcigart_offset_set; unsigned int pcigart_offset_set;
@ -393,6 +392,9 @@ typedef struct drm_radeon_private {
enum radeon_pll_errata pll_errata; enum radeon_pll_errata pll_errata;
int num_gb_pipes;
int track_flush;
uint32_t chip_family; /* extract from flags */
} drm_radeon_private_t; } drm_radeon_private_t;
typedef struct drm_radeon_buf_priv { typedef struct drm_radeon_buf_priv {
@ -453,6 +455,7 @@ extern void radeon_mem_release(struct drm_file *file_priv,
struct mem_block *heap); struct mem_block *heap);
/* radeon_irq.c */ /* radeon_irq.c */
extern void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state);
extern int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv);
@ -746,11 +749,6 @@ extern int r300_do_cp_cmdbuf(struct drm_device *dev,
#define RADEON_PP_TXFILTER_1 0x1c6c #define RADEON_PP_TXFILTER_1 0x1c6c
#define RADEON_PP_TXFILTER_2 0x1c84 #define RADEON_PP_TXFILTER_2 0x1c84
#define RADEON_RB2D_DSTCACHE_CTLSTAT 0x342c
# define RADEON_RB2D_DC_FLUSH (3 << 0)
# define RADEON_RB2D_DC_FREE (3 << 2)
# define RADEON_RB2D_DC_FLUSH_ALL 0xf
# define RADEON_RB2D_DC_BUSY (1 << 31)
#define RADEON_RB3D_CNTL 0x1c3c #define RADEON_RB3D_CNTL 0x1c3c
# define RADEON_ALPHA_BLEND_ENABLE (1 << 0) # define RADEON_ALPHA_BLEND_ENABLE (1 << 0)
# define RADEON_PLANE_MASK_ENABLE (1 << 1) # define RADEON_PLANE_MASK_ENABLE (1 << 1)
@ -776,9 +774,10 @@ extern int r300_do_cp_cmdbuf(struct drm_device *dev,
#define R300_ZB_ZCACHE_CTLSTAT 0x4f18 #define R300_ZB_ZCACHE_CTLSTAT 0x4f18
# define R300_ZC_FLUSH (1 << 0) # define R300_ZC_FLUSH (1 << 0)
# define R300_ZC_FREE (1 << 1) # define R300_ZC_FREE (1 << 1)
# define R300_ZC_FLUSH_ALL 0x3
# define R300_ZC_BUSY (1 << 31) # define R300_ZC_BUSY (1 << 31)
#define R300_RB3D_DSTCACHE_CTLSTAT 0x4e4c #define R300_RB3D_DSTCACHE_CTLSTAT 0x4e4c
# define R300_RB3D_DC_FLUSH (2 << 0)
# define R300_RB3D_DC_FREE (2 << 2)
# define R300_RB3D_DC_FINISH (1 << 4) # define R300_RB3D_DC_FINISH (1 << 4)
#define RADEON_RB3D_ZSTENCILCNTL 0x1c2c #define RADEON_RB3D_ZSTENCILCNTL 0x1c2c
# define RADEON_Z_TEST_MASK (7 << 4) # define RADEON_Z_TEST_MASK (7 << 4)
@ -1325,21 +1324,21 @@ do { \
#define RADEON_FLUSH_CACHE() do { \ #define RADEON_FLUSH_CACHE() do { \
if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \ if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \
OUT_RING( CP_PACKET0( RADEON_RB3D_DSTCACHE_CTLSTAT, 0 ) ); \ OUT_RING(CP_PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); \
OUT_RING( RADEON_RB3D_DC_FLUSH ); \ OUT_RING(RADEON_RB3D_DC_FLUSH); \
} else { \ } else { \
OUT_RING( CP_PACKET0( R300_RB3D_DSTCACHE_CTLSTAT, 0 ) ); \ OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); \
OUT_RING( RADEON_RB3D_DC_FLUSH ); \ OUT_RING(R300_RB3D_DC_FLUSH); \
} \ } \
} while (0) } while (0)
#define RADEON_PURGE_CACHE() do { \ #define RADEON_PURGE_CACHE() do { \
if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \ if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \
OUT_RING( CP_PACKET0( RADEON_RB3D_DSTCACHE_CTLSTAT, 0 ) ); \ OUT_RING(CP_PACKET0( RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); \
OUT_RING( RADEON_RB3D_DC_FLUSH_ALL ); \ OUT_RING(RADEON_RB3D_DC_FLUSH | RADEON_RB3D_DC_FREE); \
} else { \ } else { \
OUT_RING( CP_PACKET0( R300_RB3D_DSTCACHE_CTLSTAT, 0 ) ); \ OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); \
OUT_RING( RADEON_RB3D_DC_FLUSH_ALL ); \ OUT_RING(R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE ); \
} \ } \
} while (0) } while (0)
@ -1355,11 +1354,11 @@ do { \
#define RADEON_PURGE_ZCACHE() do { \ #define RADEON_PURGE_ZCACHE() do { \
if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \ if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \
OUT_RING( CP_PACKET0( RADEON_RB3D_ZCACHE_CTLSTAT, 0 ) ); \ OUT_RING(CP_PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0)); \
OUT_RING( RADEON_RB3D_ZC_FLUSH_ALL ); \ OUT_RING(RADEON_RB3D_ZC_FLUSH | RADEON_RB3D_ZC_FREE); \
} else { \ } else { \
OUT_RING( CP_PACKET0( R300_RB3D_ZCACHE_CTLSTAT, 0 ) ); \ OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0)); \
OUT_RING( R300_ZC_FLUSH_ALL ); \ OUT_RING(R300_ZC_FLUSH | R300_ZC_FREE); \
} \ } \
} while (0) } while (0)
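
The rewritten macros above still expand to bare OUT_RING() pairs, so callers supply the ring bracketing themselves, as the radeon_do_cp_start() hunk earlier in this diff does. A minimal, hypothetical caller for reference (each macro emits two dwords, hence BEGIN_RING(4)):

static void radeon_emit_flush_and_idle(drm_radeon_private_t *dev_priv)
{
	RING_LOCALS;

	BEGIN_RING(4);
	RADEON_FLUSH_CACHE();		/* 2 dwords */
	RADEON_WAIT_UNTIL_IDLE();	/* 2 dwords */
	ADVANCE_RING();
	COMMIT_RING();
}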

@ -35,7 +35,7 @@
#include "radeon_drm.h" #include "radeon_drm.h"
#include "radeon_drv.h" #include "radeon_drv.h"
static void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state) void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state)
{ {
drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_private_t *dev_priv = dev->dev_private;
@ -255,35 +255,27 @@ static int radeon_wait_irq(struct drm_device * dev, int swi_nr)
u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc) u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc)
{ {
drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_private_t *dev_priv = dev->dev_private;
u32 crtc_cnt_reg, crtc_status_reg;
if (!dev_priv) { if (!dev_priv) {
DRM_ERROR("called with no initialization\n"); DRM_ERROR("called with no initialization\n");
return -EINVAL; return -EINVAL;
} }
if (crtc < 0 || crtc > 1) {
DRM_ERROR("Invalid crtc %d\n", crtc);
return -EINVAL;
}
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) { if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) {
if (crtc == 0) { if (crtc == 0)
crtc_cnt_reg = R500_D1CRTC_FRAME_COUNT; return RADEON_READ(R500_D1CRTC_FRAME_COUNT);
crtc_status_reg = R500_D1CRTC_STATUS; else
} else if (crtc == 1) { return RADEON_READ(R500_D2CRTC_FRAME_COUNT);
crtc_cnt_reg = R500_D2CRTC_FRAME_COUNT;
crtc_status_reg = R500_D2CRTC_STATUS;
} else
return -EINVAL;
return RADEON_READ(crtc_cnt_reg) + (RADEON_READ(crtc_status_reg) & 1);
} else { } else {
if (crtc == 0) { if (crtc == 0)
crtc_cnt_reg = RADEON_CRTC_CRNT_FRAME; return RADEON_READ(RADEON_CRTC_CRNT_FRAME);
crtc_status_reg = RADEON_CRTC_STATUS; else
} else if (crtc == 1) { return RADEON_READ(RADEON_CRTC2_CRNT_FRAME);
crtc_cnt_reg = RADEON_CRTC2_CRNT_FRAME;
crtc_status_reg = RADEON_CRTC2_STATUS;
} else {
return -EINVAL;
}
return RADEON_READ(crtc_cnt_reg) + (RADEON_READ(crtc_status_reg) & 1);
} }
} }
@ -382,27 +374,8 @@ void radeon_driver_irq_uninstall(struct drm_device * dev)
int radeon_vblank_crtc_get(struct drm_device *dev) int radeon_vblank_crtc_get(struct drm_device *dev)
{ {
drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private; drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
u32 flag;
u32 value;
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) { return dev_priv->vblank_crtc;
flag = RADEON_READ(R500_DxMODE_INT_MASK);
value = 0;
if (flag & R500_D1MODE_INT_MASK)
value |= DRM_RADEON_VBLANK_CRTC1;
if (flag & R500_D2MODE_INT_MASK)
value |= DRM_RADEON_VBLANK_CRTC2;
} else {
flag = RADEON_READ(RADEON_GEN_INT_CNTL);
value = 0;
if (flag & RADEON_CRTC_VBLANK_MASK)
value |= DRM_RADEON_VBLANK_CRTC1;
if (flag & RADEON_CRTC2_VBLANK_MASK)
value |= DRM_RADEON_VBLANK_CRTC2;
}
return value;
} }
int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value) int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value)

@ -88,7 +88,7 @@ static struct mem_block *alloc_block(struct mem_block *heap, int size,
list_for_each(p, heap) { list_for_each(p, heap) {
int start = (p->start + mask) & ~mask; int start = (p->start + mask) & ~mask;
if (p->file_priv == 0 && start + size <= p->start + p->size) if (p->file_priv == NULL && start + size <= p->start + p->size)
return split_block(p, start, size, file_priv); return split_block(p, start, size, file_priv);
} }
@ -113,7 +113,7 @@ static void free_block(struct mem_block *p)
/* Assumes a single contiguous range. Needs a special file_priv in /* Assumes a single contiguous range. Needs a special file_priv in
* 'heap' to stop it being subsumed. * 'heap' to stop it being subsumed.
*/ */
if (p->next->file_priv == 0) { if (p->next->file_priv == NULL) {
struct mem_block *q = p->next; struct mem_block *q = p->next;
p->size += q->size; p->size += q->size;
p->next = q->next; p->next = q->next;
@ -121,7 +121,7 @@ static void free_block(struct mem_block *p)
drm_free(q, sizeof(*q), DRM_MEM_BUFS); drm_free(q, sizeof(*q), DRM_MEM_BUFS);
} }
if (p->prev->file_priv == 0) { if (p->prev->file_priv == NULL) {
struct mem_block *q = p->prev; struct mem_block *q = p->prev;
q->size += p->size; q->size += p->size;
q->next = p->next; q->next = p->next;
@ -174,7 +174,7 @@ void radeon_mem_release(struct drm_file *file_priv, struct mem_block *heap)
* 'heap' to stop it being subsumed. * 'heap' to stop it being subsumed.
*/ */
list_for_each(p, heap) { list_for_each(p, heap) {
while (p->file_priv == 0 && p->next->file_priv == 0) { while (p->file_priv == NULL && p->next->file_priv == NULL) {
struct mem_block *q = p->next; struct mem_block *q = p->next;
p->size += q->size; p->size += q->size;
p->next = q->next; p->next = q->next;

@ -123,11 +123,15 @@ struct xgi_state_info {
#define DRM_XGI_FREE 2 #define DRM_XGI_FREE 2
#define DRM_XGI_SUBMIT_CMDLIST 3 #define DRM_XGI_SUBMIT_CMDLIST 3
#define DRM_XGI_STATE_CHANGE 4 #define DRM_XGI_STATE_CHANGE 4
#define DRM_XGI_SET_FENCE 5
#define DRM_XGI_WAIT_FENCE 6
#define XGI_IOCTL_BOOTSTRAP DRM_IOWR(DRM_COMMAND_BASE + DRM_XGI_BOOTSTRAP, struct xgi_bootstrap) #define XGI_IOCTL_BOOTSTRAP DRM_IOWR(DRM_COMMAND_BASE + DRM_XGI_BOOTSTRAP, struct xgi_bootstrap)
#define XGI_IOCTL_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_XGI_ALLOC, struct xgi_mem_alloc) #define XGI_IOCTL_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_XGI_ALLOC, struct xgi_mem_alloc)
#define XGI_IOCTL_FREE DRM_IOW(DRM_COMMAND_BASE + DRM_XGI_FREE, __u32) #define XGI_IOCTL_FREE DRM_IOW(DRM_COMMAND_BASE + DRM_XGI_FREE, __u32)
#define XGI_IOCTL_SUBMIT_CMDLIST DRM_IOW(DRM_COMMAND_BASE + DRM_XGI_SUBMIT_CMDLIST, struct xgi_cmd_info) #define XGI_IOCTL_SUBMIT_CMDLIST DRM_IOW(DRM_COMMAND_BASE + DRM_XGI_SUBMIT_CMDLIST, struct xgi_cmd_info)
#define XGI_IOCTL_STATE_CHANGE DRM_IOW(DRM_COMMAND_BASE + DRM_XGI_STATE_CHANGE, struct xgi_state_info) #define XGI_IOCTL_STATE_CHANGE DRM_IOW(DRM_COMMAND_BASE + DRM_XGI_STATE_CHANGE, struct xgi_state_info)
#define XGI_IOCTL_SET_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_XGI_SET_FENCE, u32)
#define XGI_IOCTL_WAIT_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_XGI_WAIT_FENCE, u32)
#endif /* _XGI_DRM_H_ */ #endif /* _XGI_DRM_H_ */
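
The two new ioctls follow the usual libdrm command pattern. A hypothetical userspace round trip (the u32 fence payload semantics are an assumption; only the ioctl numbers come from this header):

#include <stdint.h>
#include "xf86drm.h"
#include "xgi_drm.h"

int xgi_fence_roundtrip(int fd)
{
	uint32_t fence = 0;
	int ret;

	/* ask the kernel to emit a fence and return its id */
	ret = drmCommandWriteRead(fd, DRM_XGI_SET_FENCE, &fence, sizeof(fence));
	if (ret)
		return ret;

	/* block until the hardware has passed that fence */
	return drmCommandWriteRead(fd, DRM_XGI_WAIT_FENCE, &fence, sizeof(fence));
}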