commit 1d2bb68d28
Merge commit 'origin/master' into drm-gem

Conflicts:
    linux-core/Makefile.kernel
    shared-core/i915_dma.c
    shared-core/i915_drv.h
    shared-core/i915_irq.c

@@ -34,76 +34,125 @@
#include "drmP.h"
|
||||
|
||||
#define ATI_PCIGART_PAGE_SIZE 4096 /* PCI GART page size */
|
||||
#define ATI_PCIGART_PAGE_MASK (~(ATI_PCIGART_PAGE_SIZE-1))
|
||||
|
||||
#define ATI_PCIE_WRITE 0x4
|
||||
#define ATI_PCIE_READ 0x8
|
||||
|
||||
static int drm_ati_alloc_pcigart_table(struct drm_device *dev,
|
||||
struct drm_ati_pcigart_info *gart_info)
|
||||
{
|
||||
dev->sg->dmah = drm_pci_alloc(dev, gart_info->table_size,
|
||||
PAGE_SIZE,
|
||||
gart_info->table_mask);
|
||||
if (dev->sg->dmah == NULL)
|
||||
return ENOMEM;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void drm_ati_free_pcigart_table(struct drm_device *dev,
|
||||
struct drm_ati_pcigart_info *gart_info)
|
||||
{
|
||||
drm_pci_free(dev, dev->sg->dmah);
|
||||
dev->sg->dmah = NULL;
|
||||
}
|
||||
|
||||
int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
|
||||
{
|
||||
/* we need to support large memory configurations */
|
||||
if (dev->sg == NULL) {
|
||||
DRM_ERROR("no scatter/gather memory!\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (gart_info->bus_addr) {
|
||||
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
|
||||
gart_info->bus_addr = 0;
|
||||
if (dev->sg->dmah)
|
||||
drm_ati_free_pcigart_table(dev, gart_info);
|
||||
}
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
int drm_ati_pcigart_init(struct drm_device *dev,
|
||||
struct drm_ati_pcigart_info *gart_info)
|
||||
{
|
||||
unsigned long pages;
|
||||
u32 *pci_gart = NULL, page_base;
|
||||
int i, j;
|
||||
|
||||
void *address = NULL;
|
||||
unsigned long pages;
|
||||
u32 *pci_gart, page_base;
|
||||
dma_addr_t bus_address = 0;
|
||||
int i, j, ret = 0;
|
||||
int max_pages;
|
||||
dma_addr_t entry_addr;
|
||||
|
||||
/* we need to support large memory configurations */
|
||||
if (dev->sg == NULL) {
|
||||
DRM_ERROR( "no scatter/gather memory!\n" );
|
||||
return 0;
|
||||
DRM_ERROR("no scatter/gather memory!\n");
|
||||
goto done;
|
||||
}
|
||||
|
||||
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
|
||||
/* GART table in system memory */
|
||||
dev->sg->dmah = drm_pci_alloc(dev, gart_info->table_size, 0,
|
||||
0xfffffffful);
|
||||
if (dev->sg->dmah == NULL) {
|
||||
DRM_ERROR("cannot allocate PCI GART table!\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
gart_info->addr = (void *)dev->sg->dmah->vaddr;
|
||||
gart_info->bus_addr = dev->sg->dmah->busaddr;
|
||||
pci_gart = (u32 *)dev->sg->dmah->vaddr;
|
||||
} else {
|
||||
/* GART table in framebuffer memory */
|
||||
pci_gart = gart_info->addr;
|
||||
}
|
||||
|
||||
pages = DRM_MIN(dev->sg->pages, gart_info->table_size / sizeof(u32));
|
||||
DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n");
|
||||
|
||||
bzero(pci_gart, gart_info->table_size);
|
||||
ret = drm_ati_alloc_pcigart_table(dev, gart_info);
|
||||
if (ret) {
|
||||
DRM_ERROR("cannot allocate PCI GART page!\n");
|
||||
goto done;
|
||||
}
|
||||
|
||||
address = (void *)dev->sg->dmah->vaddr;
|
||||
bus_address = dev->sg->dmah->busaddr;
|
||||
} else {
|
||||
address = gart_info->addr;
|
||||
bus_address = gart_info->bus_addr;
|
||||
DRM_DEBUG("PCI: Gart Table: VRAM %08X mapped at %08lX\n",
|
||||
(unsigned int)bus_address, (unsigned long)address);
|
||||
}
|
||||
|
||||
pci_gart = (u32 *) address;
|
||||
|
||||
max_pages = (gart_info->table_size / sizeof(u32));
|
||||
pages = (dev->sg->pages <= max_pages)
|
||||
? dev->sg->pages : max_pages;
|
||||
|
||||
memset(pci_gart, 0, max_pages * sizeof(u32));
|
||||
|
||||
KASSERT(PAGE_SIZE >= ATI_PCIGART_PAGE_SIZE, ("page size too small"));
|
||||
|
||||
for ( i = 0 ; i < pages ; i++ ) {
|
||||
page_base = (u32) dev->sg->busaddr[i];
|
||||
|
||||
for (i = 0; i < pages; i++) {
|
||||
entry_addr = dev->sg->busaddr[i];
|
||||
for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) {
|
||||
page_base = (u32) entry_addr & ATI_PCIGART_PAGE_MASK;
|
||||
switch(gart_info->gart_reg_if) {
|
||||
case DRM_ATI_GART_IGP:
|
||||
*pci_gart = cpu_to_le32(page_base | 0xc);
|
||||
page_base |= (upper_32_bits(entry_addr) & 0xff) << 4;
|
||||
page_base |= 0xc;
|
||||
break;
|
||||
case DRM_ATI_GART_PCIE:
|
||||
*pci_gart = cpu_to_le32((page_base >> 8) | 0xc);
|
||||
page_base >>= 8;
|
||||
page_base |= (upper_32_bits(entry_addr) & 0xff) << 24;
|
||||
page_base |= ATI_PCIE_READ | ATI_PCIE_WRITE;
|
||||
break;
|
||||
default:
|
||||
*pci_gart = cpu_to_le32(page_base);
|
||||
case DRM_ATI_GART_PCI:
|
||||
break;
|
||||
}
|
||||
*pci_gart = cpu_to_le32(page_base);
|
||||
pci_gart++;
|
||||
page_base += ATI_PCIGART_PAGE_SIZE;
|
||||
entry_addr += ATI_PCIGART_PAGE_SIZE;
|
||||
}
|
||||
}
|
||||
|
||||
DRM_MEMORYBARRIER();
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
int drm_ati_pcigart_cleanup(struct drm_device *dev,
|
||||
struct drm_ati_pcigart_info *gart_info)
|
||||
{
|
||||
if (dev->sg == NULL) {
|
||||
DRM_ERROR( "no scatter/gather memory!\n" );
|
||||
return 0;
|
||||
}
|
||||
|
||||
drm_pci_free(dev, dev->sg->dmah);
|
||||
|
||||
return 1;
|
||||
ret = 1;
|
||||
|
||||
done:
|
||||
gart_info->addr = address;
|
||||
gart_info->bus_addr = bus_address;
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@@ -632,7 +632,7 @@ struct drm_ati_pcigart_info {
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : (1ULL<<(n)) - 1)
#endif

#define upper_32_bits(_val) (((u64)(_val)) >> 32)
#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))

struct drm_driver_info {
    int (*load)(struct drm_device *, unsigned long flags);
@@ -733,11 +733,13 @@ struct drm_device {

    /* Locks */
#if defined(__FreeBSD__) && __FreeBSD_version > 500000
    struct mtx vbl_lock;    /* protects vblank operations */
    struct mtx dma_lock;    /* protects dev->dma */
    struct mtx irq_lock;    /* protects irq condition checks */
    struct mtx dev_lock;    /* protects everything else */
#endif
    DRM_SPINTYPE drw_lock;
    DRM_SPINTYPE tsk_lock;

    /* Usage Counters */
    int open_count;         /* Outstanding files open */
@@ -785,25 +787,21 @@ struct drm_device {

    atomic_t context_flag;  /* Context swapping flag */
    int last_context;       /* Last current context */
    int vblank_disable_allowed;
    wait_queue_head_t *vbl_queue;   /* vblank wait queue */
    atomic_t *_vblank_count;        /* number of VBLANK interrupts */
                                    /* (driver must alloc the right number of counters) */
    struct mtx vbl_lock;
    struct drm_vbl_sig_list *vbl_sigs;  /* signal list to send on VBLANK */
    atomic_t vbl_signal_pending;    /* number of signals pending on all crtcs*/
    atomic_t *vblank_refcount;      /* number of users of vblank interrupts per crtc */
    u32 *last_vblank;               /* protected by dev->vbl_lock, used */
                                    /* for wraparound handling */

    u32 *vblank_offset;             /* used to track how many vblanks */
    int *vblank_enabled;            /* so we don't call enable more than */
                                    /* once per disable */
    u32 *vblank_premodeset;         /* were lost during modeset */
    int *vblank_inmodeset;          /* Display driver is setting mode */
    struct callout vblank_disable_timer;
    unsigned long max_vblank_count; /* size of vblank counter register */
    u32 max_vblank_count;           /* size of vblank counter register */
    int num_crtcs;
    atomic_t vbl_received;
    atomic_t vbl_received2;

#ifdef __FreeBSD__
    struct sigio *buf_sigio;        /* Processes waiting for SIGIO */
@@ -933,7 +931,6 @@ void drm_handle_vblank(struct drm_device *dev, int crtc);
u32 drm_vblank_count(struct drm_device *dev, int crtc);
int drm_vblank_get(struct drm_device *dev, int crtc);
void drm_vblank_put(struct drm_device *dev, int crtc);
void drm_update_vblank_count(struct drm_device *dev, int crtc);
int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
int drm_vblank_init(struct drm_device *dev, int num_crtcs);
void drm_vbl_send_signals(struct drm_device *dev, int crtc);
@@ -1090,6 +1087,8 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size,
                                size_t align, dma_addr_t maxaddr);
void drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah);

#define drm_core_ioremap_wc drm_core_ioremap

/* Inline replacements for DRM_IOREMAP macros */
static __inline__ void
drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev)
@@ -832,12 +832,12 @@ int drm_addbufs_sg(struct drm_device *dev, drm_buf_desc_t *request)
    if (request->count < 0 || request->count > 4096)
        return EINVAL;

    DRM_SPINLOCK(&dev->dma_lock);

    order = drm_order(request->size);
    if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
        return EINVAL;

    DRM_SPINLOCK(&dev->dma_lock);

    /* No more allocations after first buffer-using ioctl. */
    if (dev->buf_use != 0) {
        DRM_SPINUNLOCK(&dev->dma_lock);
@@ -125,6 +125,7 @@ static drm_ioctl_desc_t drm_ioctls[256] = {
    DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

    DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0),
    DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
    DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_draw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
};

@@ -202,8 +203,11 @@ int drm_attach(device_t nbdev, drm_pci_id_list_t *idlist)
              DRM_DEV_MODE,
              "dri/card%d", unit);
#if __FreeBSD_version >= 500000
    mtx_init(&dev->dev_lock, "drm device", NULL, MTX_DEF);
    mtx_init(&dev->dev_lock, "drmdev", NULL, MTX_DEF);
    mtx_init(&dev->irq_lock, "drmirq", NULL, MTX_DEF);
    mtx_init(&dev->vbl_lock, "drmvbl", NULL, MTX_DEF);
    mtx_init(&dev->drw_lock, "drmdrw", NULL, MTX_DEF);
    mtx_init(&dev->tsk_lock, "drmtsk", NULL, MTX_DEF);
#endif

    id_entry = drm_find_description(pci_get_vendor(dev->device),
@@ -542,6 +546,8 @@ static int drm_load(struct drm_device *dev)
    /* Shared code returns -errno. */
    retcode = -dev->driver.load(dev,
                                dev->id_entry->driver_private);
    if (pci_enable_busmaster(dev->device))
        DRM_ERROR("Request to enable bus-master failed.\n");
    DRM_UNLOCK();
    if (retcode != 0)
        goto error;
@@ -594,6 +600,9 @@ error:
#ifdef __FreeBSD__
    destroy_dev(dev->devnode);
#if __FreeBSD_version >= 500000
    mtx_destroy(&dev->drw_lock);
    mtx_destroy(&dev->irq_lock);
    mtx_destroy(&dev->vbl_lock);
    mtx_destroy(&dev->dev_lock);
#endif
#endif
@@ -649,7 +658,14 @@ static void drm_unload(struct drm_device *dev)
    delete_unrhdr(dev->drw_unrhdr);

    drm_mem_uninit();

    if (pci_disable_busmaster(dev->device))
        DRM_ERROR("Request to disable bus-master failed.\n");

#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
    mtx_destroy(&dev->drw_lock);
    mtx_destroy(&dev->irq_lock);
    mtx_destroy(&dev->vbl_lock);
    mtx_destroy(&dev->dev_lock);
#endif
}
@ -66,6 +66,129 @@ drm_irq_handler_wrap(DRM_IRQ_ARGS)
|
|||
}
|
||||
#endif
|
||||
|
||||
static void vblank_disable_fn(void *arg)
|
||||
{
|
||||
struct drm_device *dev = (struct drm_device *)arg;
|
||||
int i;
|
||||
|
||||
if (callout_pending(&dev->vblank_disable_timer)) {
|
||||
/* callout was reset */
|
||||
return;
|
||||
}
|
||||
if (!callout_active(&dev->vblank_disable_timer)) {
|
||||
/* callout was stopped */
|
||||
return;
|
||||
}
|
||||
callout_deactivate(&dev->vblank_disable_timer);
|
||||
|
||||
if (!dev->vblank_disable_allowed)
|
||||
return;
|
||||
|
||||
for (i = 0; i < dev->num_crtcs; i++) {
|
||||
if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
|
||||
dev->vblank_enabled[i]) {
|
||||
DRM_DEBUG("disabling vblank on crtc %d\n", i);
|
||||
dev->last_vblank[i] =
|
||||
dev->driver.get_vblank_counter(dev, i);
|
||||
dev->driver.disable_vblank(dev, i);
|
||||
dev->vblank_enabled[i] = 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void drm_vblank_cleanup(struct drm_device *dev)
|
||||
{
|
||||
unsigned long irqflags;
|
||||
|
||||
/* Bail if the driver didn't call drm_vblank_init() */
|
||||
if (dev->num_crtcs == 0)
|
||||
return;
|
||||
|
||||
DRM_SPINLOCK_IRQSAVE(&dev->vbl_lock, irqflags);
|
||||
callout_stop(&dev->vblank_disable_timer);
|
||||
DRM_SPINUNLOCK_IRQRESTORE(&dev->vbl_lock, irqflags);
|
||||
|
||||
callout_drain(&dev->vblank_disable_timer);
|
||||
|
||||
vblank_disable_fn((void *)dev);
|
||||
|
||||
drm_free(dev->vbl_queue, sizeof(*dev->vbl_queue) * dev->num_crtcs,
|
||||
DRM_MEM_DRIVER);
|
||||
drm_free(dev->vbl_sigs, sizeof(*dev->vbl_sigs) * dev->num_crtcs,
|
||||
DRM_MEM_DRIVER);
|
||||
drm_free(dev->_vblank_count, sizeof(*dev->_vblank_count) *
|
||||
dev->num_crtcs, DRM_MEM_DRIVER);
|
||||
drm_free(dev->vblank_refcount, sizeof(*dev->vblank_refcount) *
|
||||
dev->num_crtcs, DRM_MEM_DRIVER);
|
||||
drm_free(dev->vblank_enabled, sizeof(*dev->vblank_enabled) *
|
||||
dev->num_crtcs, DRM_MEM_DRIVER);
|
||||
drm_free(dev->last_vblank, sizeof(*dev->last_vblank) * dev->num_crtcs,
|
||||
DRM_MEM_DRIVER);
|
||||
drm_free(dev->vblank_inmodeset, sizeof(*dev->vblank_inmodeset) *
|
||||
dev->num_crtcs, DRM_MEM_DRIVER);
|
||||
|
||||
dev->num_crtcs = 0;
|
||||
}
|
||||
|
||||
int drm_vblank_init(struct drm_device *dev, int num_crtcs)
|
||||
{
|
||||
int i, ret = ENOMEM;
|
||||
|
||||
callout_init_mtx(&dev->vblank_disable_timer, &dev->vbl_lock, 0);
|
||||
atomic_set(&dev->vbl_signal_pending, 0);
|
||||
dev->num_crtcs = num_crtcs;
|
||||
|
||||
dev->vbl_queue = drm_alloc(sizeof(wait_queue_head_t) * num_crtcs,
|
||||
DRM_MEM_DRIVER);
|
||||
if (!dev->vbl_queue)
|
||||
goto err;
|
||||
|
||||
dev->vbl_sigs = drm_alloc(sizeof(struct drm_vbl_sig) * num_crtcs,
|
||||
DRM_MEM_DRIVER);
|
||||
if (!dev->vbl_sigs)
|
||||
goto err;
|
||||
|
||||
dev->_vblank_count = drm_alloc(sizeof(atomic_t) * num_crtcs,
|
||||
DRM_MEM_DRIVER);
|
||||
if (!dev->_vblank_count)
|
||||
goto err;
|
||||
|
||||
dev->vblank_refcount = drm_alloc(sizeof(atomic_t) * num_crtcs,
|
||||
DRM_MEM_DRIVER);
|
||||
if (!dev->vblank_refcount)
|
||||
goto err;
|
||||
|
||||
dev->vblank_enabled = drm_calloc(num_crtcs, sizeof(int),
|
||||
DRM_MEM_DRIVER);
|
||||
if (!dev->vblank_enabled)
|
||||
goto err;
|
||||
|
||||
dev->last_vblank = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER);
|
||||
if (!dev->last_vblank)
|
||||
goto err;
|
||||
|
||||
dev->vblank_inmodeset = drm_calloc(num_crtcs, sizeof(int),
|
||||
DRM_MEM_DRIVER);
|
||||
if (!dev->vblank_inmodeset)
|
||||
goto err;
|
||||
|
||||
/* Zero per-crtc vblank stuff */
|
||||
for (i = 0; i < num_crtcs; i++) {
|
||||
DRM_INIT_WAITQUEUE(&dev->vbl_queue[i]);
|
||||
TAILQ_INIT(&dev->vbl_sigs[i]);
|
||||
atomic_set(&dev->_vblank_count[i], 0);
|
||||
atomic_set(&dev->vblank_refcount[i], 0);
|
||||
}
|
||||
|
||||
dev->vblank_disable_allowed = 0;
|
||||
|
||||
return 0;
|
||||
|
||||
err:
|
||||
drm_vblank_cleanup(dev);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int drm_irq_install(struct drm_device *dev)
|
||||
{
|
||||
int retcode;
|
||||
|
@ -87,8 +210,6 @@ int drm_irq_install(struct drm_device *dev)
|
|||
|
||||
dev->context_flag = 0;
|
||||
|
||||
DRM_SPININIT(&dev->irq_lock, "DRM IRQ lock");
|
||||
|
||||
/* Before installing handler */
|
||||
dev->driver.irq_preinstall(dev);
|
||||
DRM_UNLOCK();
|
||||
|
@ -143,7 +264,6 @@ err:
|
|||
dev->irqrid = 0;
|
||||
}
|
||||
#endif
|
||||
DRM_SPINUNINIT(&dev->irq_lock);
|
||||
DRM_UNLOCK();
|
||||
return retcode;
|
||||
}
|
||||
|
@ -175,7 +295,7 @@ int drm_irq_uninstall(struct drm_device *dev)
|
|||
#elif defined(__NetBSD__) || defined(__OpenBSD__)
|
||||
pci_intr_disestablish(&dev->pa.pa_pc, dev->irqh);
|
||||
#endif
|
||||
DRM_SPINUNINIT(&dev->irq_lock);
|
||||
drm_vblank_cleanup(dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -208,27 +328,35 @@ int drm_control(struct drm_device *dev, void *data, struct drm_file *file_priv)
|
|||
}
|
||||
}
|
||||
|
||||
static void vblank_disable_fn(void *arg)
|
||||
{
|
||||
struct drm_device *dev = (struct drm_device *)arg;
|
||||
unsigned long irqflags;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < dev->num_crtcs; i++) {
|
||||
DRM_SPINLOCK_IRQSAVE(&dev->vbl_lock, irqflags);
|
||||
if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
|
||||
dev->vblank_enabled[i]) {
|
||||
dev->driver.disable_vblank(dev, i);
|
||||
dev->vblank_enabled[i] = 0;
|
||||
}
|
||||
DRM_SPINUNLOCK_IRQRESTORE(&dev->vbl_lock, irqflags);
|
||||
}
|
||||
}
|
||||
|
||||
u32 drm_vblank_count(struct drm_device *dev, int crtc)
|
||||
{
|
||||
return atomic_read(&dev->_vblank_count[crtc]) +
|
||||
dev->vblank_offset[crtc];
|
||||
return atomic_read(&dev->_vblank_count[crtc]);
|
||||
}
|
||||
|
||||
static void drm_update_vblank_count(struct drm_device *dev, int crtc)
|
||||
{
|
||||
u32 cur_vblank, diff;
|
||||
|
||||
/*
|
||||
* Interrupts were disabled prior to this call, so deal with counter
|
||||
* wrap if needed.
|
||||
* NOTE! It's possible we lost a full dev->max_vblank_count events
|
||||
* here if the register is small or we had vblank interrupts off for
|
||||
* a long time.
|
||||
*/
|
||||
cur_vblank = dev->driver.get_vblank_counter(dev, crtc);
|
||||
diff = cur_vblank - dev->last_vblank[crtc];
|
||||
if (cur_vblank < dev->last_vblank[crtc]) {
|
||||
diff += dev->max_vblank_count;
|
||||
|
||||
DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
|
||||
crtc, dev->last_vblank[crtc], cur_vblank, diff);
|
||||
}
|
||||
|
||||
DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
|
||||
crtc, diff);
|
||||
|
||||
atomic_add(diff, &dev->_vblank_count[crtc]);
|
||||
}
|
||||
|
||||
int drm_vblank_get(struct drm_device *dev, int crtc)
|
||||
|
@ -244,8 +372,10 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
|
|||
ret = dev->driver.enable_vblank(dev, crtc);
|
||||
if (ret)
|
||||
atomic_dec(&dev->vblank_refcount[crtc]);
|
||||
else
|
||||
else {
|
||||
dev->vblank_enabled[crtc] = 1;
|
||||
drm_update_vblank_count(dev, crtc);
|
||||
}
|
||||
}
|
||||
DRM_SPINUNLOCK_IRQRESTORE(&dev->vbl_lock, irqflags);
|
||||
|
||||
|
@ -254,71 +384,59 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
|
|||
|
||||
void drm_vblank_put(struct drm_device *dev, int crtc)
|
||||
{
|
||||
unsigned long irqflags;
|
||||
|
||||
DRM_SPINLOCK_IRQSAVE(&dev->vbl_lock, irqflags);
|
||||
/* Last user schedules interrupt disable */
|
||||
atomic_subtract_acq_int(&dev->vblank_refcount[crtc], 1);
|
||||
if (dev->vblank_refcount[crtc] == 0)
|
||||
callout_reset(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ,
|
||||
callout_reset(&dev->vblank_disable_timer, 5 * DRM_HZ,
|
||||
(timeout_t *)vblank_disable_fn, (void *)dev);
|
||||
}
|
||||
|
||||
void drm_handle_vblank(struct drm_device *dev, int crtc)
|
||||
{
|
||||
drm_update_vblank_count(dev, crtc);
|
||||
DRM_WAKEUP(&dev->vbl_queue[crtc]);
|
||||
drm_vbl_send_signals(dev, crtc);
|
||||
}
|
||||
|
||||
void drm_update_vblank_count(struct drm_device *dev, int crtc)
|
||||
{
|
||||
unsigned long irqflags;
|
||||
u32 cur_vblank, diff;
|
||||
|
||||
/*
|
||||
* Interrupts were disabled prior to this call, so deal with counter
|
||||
* wrap if needed.
|
||||
* NOTE! It's possible we lost a full dev->max_vblank_count events
|
||||
* here if the register is small or we had vblank interrupts off for
|
||||
* a long time.
|
||||
*/
|
||||
cur_vblank = dev->driver.get_vblank_counter(dev, crtc);
|
||||
DRM_SPINLOCK_IRQSAVE(&dev->vbl_lock, irqflags);
|
||||
if (cur_vblank < dev->last_vblank[crtc]) {
|
||||
diff = dev->max_vblank_count -
|
||||
dev->last_vblank[crtc];
|
||||
diff += cur_vblank;
|
||||
} else {
|
||||
diff = cur_vblank - dev->last_vblank[crtc];
|
||||
}
|
||||
dev->last_vblank[crtc] = cur_vblank;
|
||||
DRM_SPINUNLOCK_IRQRESTORE(&dev->vbl_lock, irqflags);
|
||||
|
||||
atomic_add(diff, &dev->_vblank_count[crtc]);
|
||||
}
|
||||
|
||||
int drm_modeset_ctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv)
|
||||
{
|
||||
struct drm_modeset_ctl *modeset = data;
|
||||
unsigned long irqflags;
|
||||
int crtc, ret = 0;
|
||||
u32 new;
|
||||
|
||||
/* If drm_vblank_init() hasn't been called yet, just no-op */
|
||||
if (!dev->num_crtcs)
|
||||
goto out;
|
||||
|
||||
crtc = modeset->crtc;
|
||||
if (crtc >= dev->num_crtcs) {
|
||||
ret = -EINVAL;
|
||||
ret = EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* To avoid all the problems that might happen if interrupts
|
||||
* were enabled/disabled around or between these calls, we just
|
||||
* have the kernel take a reference on the CRTC (just once though
|
||||
* to avoid corrupting the count if multiple, mismatch calls occur),
|
||||
* so that interrupts remain enabled in the interim.
|
||||
*/
|
||||
switch (modeset->cmd) {
|
||||
case _DRM_PRE_MODESET:
|
||||
dev->vblank_premodeset[crtc] =
|
||||
dev->driver.get_vblank_counter(dev, crtc);
|
||||
if (!dev->vblank_inmodeset[crtc]) {
|
||||
dev->vblank_inmodeset[crtc] = 1;
|
||||
drm_vblank_get(dev, crtc);
|
||||
}
|
||||
break;
|
||||
case _DRM_POST_MODESET:
|
||||
new = dev->driver.get_vblank_counter(dev, crtc);
|
||||
dev->vblank_offset[crtc] = dev->vblank_premodeset[crtc] - new;
|
||||
if (dev->vblank_inmodeset[crtc]) {
|
||||
DRM_SPINLOCK_IRQSAVE(&dev->vbl_lock, irqflags);
|
||||
dev->vblank_disable_allowed = 1;
|
||||
dev->vblank_inmodeset[crtc] = 0;
|
||||
DRM_SPINUNLOCK_IRQRESTORE(&dev->vbl_lock, irqflags);
|
||||
drm_vblank_put(dev, crtc);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
ret = -EINVAL;
|
||||
ret = EINVAL;
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -329,7 +447,6 @@ out:
|
|||
int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv)
|
||||
{
|
||||
drm_wait_vblank_t *vblwait = data;
|
||||
struct timeval now;
|
||||
int ret = 0;
|
||||
int flags, seq, crtc;
|
||||
|
||||
|
@ -350,7 +467,9 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
|
|||
if (crtc >= dev->num_crtcs)
|
||||
return EINVAL;
|
||||
|
||||
drm_update_vblank_count(dev, crtc);
|
||||
ret = drm_vblank_get(dev, crtc);
|
||||
if (ret)
|
||||
return ret;
|
||||
seq = drm_vblank_count(dev, crtc);
|
||||
|
||||
switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
|
||||
|
@ -360,7 +479,8 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
|
|||
case _DRM_VBLANK_ABSOLUTE:
|
||||
break;
|
||||
default:
|
||||
return EINVAL;
|
||||
ret = EINVAL;
|
||||
goto done;
|
||||
}
|
||||
|
||||
if ((flags & _DRM_VBLANK_NEXTONMISS) &&
|
||||
|
@ -381,124 +501,33 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
|
|||
|
||||
vblwait->reply.sequence = atomic_read(&dev->vbl_received);
|
||||
|
||||
DRM_SPINLOCK(&dev->irq_lock);
|
||||
DRM_SPINLOCK(&dev->vbl_lock);
|
||||
TAILQ_INSERT_HEAD(&dev->vbl_sig_list, vbl_sig, link);
|
||||
DRM_SPINUNLOCK(&dev->irq_lock);
|
||||
DRM_SPINUNLOCK(&dev->vbl_lock);
|
||||
ret = 0;
|
||||
#endif
|
||||
ret = EINVAL;
|
||||
} else {
|
||||
unsigned long cur_vblank;
|
||||
|
||||
DRM_LOCK();
|
||||
/* shared code returns -errno */
|
||||
|
||||
ret = drm_vblank_get(dev, crtc);
|
||||
if (ret)
|
||||
return ret;
|
||||
DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
|
||||
(((cur_vblank = drm_vblank_count(dev, crtc))
|
||||
- vblwait->request.sequence) <= (1 << 23)));
|
||||
drm_vblank_put(dev, crtc);
|
||||
((drm_vblank_count(dev, crtc)
|
||||
- vblwait->request.sequence) <= (1 << 23)));
|
||||
DRM_UNLOCK();
|
||||
|
||||
microtime(&now);
|
||||
vblwait->reply.tval_sec = now.tv_sec;
|
||||
vblwait->reply.tval_usec = now.tv_usec;
|
||||
if (ret != EINTR) {
|
||||
struct timeval now;
|
||||
|
||||
microtime(&now);
|
||||
vblwait->reply.tval_sec = now.tv_sec;
|
||||
vblwait->reply.tval_usec = now.tv_usec;
|
||||
vblwait->reply.sequence = drm_vblank_count(dev, crtc);
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void drm_vblank_cleanup(struct drm_device *dev)
|
||||
{
|
||||
/* Bail if the driver didn't call drm_vblank_init() */
|
||||
if (dev->num_crtcs == 0)
|
||||
return;
|
||||
|
||||
callout_stop(&dev->vblank_disable_timer);
|
||||
|
||||
vblank_disable_fn((void *)dev);
|
||||
|
||||
drm_free(dev->vbl_queue, sizeof(*dev->vbl_queue) * dev->num_crtcs,
|
||||
DRM_MEM_DRIVER);
|
||||
drm_free(dev->vbl_sigs, sizeof(*dev->vbl_sigs) * dev->num_crtcs,
|
||||
DRM_MEM_DRIVER);
|
||||
drm_free(dev->_vblank_count, sizeof(*dev->_vblank_count) *
|
||||
dev->num_crtcs, DRM_MEM_DRIVER);
|
||||
drm_free(dev->vblank_refcount, sizeof(*dev->vblank_refcount) *
|
||||
dev->num_crtcs, DRM_MEM_DRIVER);
|
||||
drm_free(dev->vblank_enabled, sizeof(*dev->vblank_enabled) *
|
||||
dev->num_crtcs, DRM_MEM_DRIVER);
|
||||
drm_free(dev->last_vblank, sizeof(*dev->last_vblank) * dev->num_crtcs,
|
||||
DRM_MEM_DRIVER);
|
||||
drm_free(dev->vblank_premodeset, sizeof(*dev->vblank_premodeset) *
|
||||
dev->num_crtcs, DRM_MEM_DRIVER);
|
||||
drm_free(dev->vblank_offset, sizeof(*dev->vblank_offset) * dev->num_crtcs,
|
||||
DRM_MEM_DRIVER);
|
||||
|
||||
dev->num_crtcs = 0;
|
||||
}
|
||||
|
||||
int drm_vblank_init(struct drm_device *dev, int num_crtcs)
|
||||
{
|
||||
int i, ret = -ENOMEM;
|
||||
|
||||
callout_init(&dev->vblank_disable_timer, 0);
|
||||
DRM_SPININIT(&dev->vbl_lock, "drm_vblk");
|
||||
atomic_set(&dev->vbl_signal_pending, 0);
|
||||
dev->num_crtcs = num_crtcs;
|
||||
|
||||
dev->vbl_queue = drm_alloc(sizeof(wait_queue_head_t) * num_crtcs,
|
||||
DRM_MEM_DRIVER);
|
||||
if (!dev->vbl_queue)
|
||||
goto err;
|
||||
|
||||
dev->vbl_sigs = drm_alloc(sizeof(struct drm_vbl_sig) * num_crtcs,
|
||||
DRM_MEM_DRIVER);
|
||||
if (!dev->vbl_sigs)
|
||||
goto err;
|
||||
|
||||
dev->_vblank_count = drm_alloc(sizeof(atomic_t) * num_crtcs,
|
||||
DRM_MEM_DRIVER);
|
||||
if (!dev->_vblank_count)
|
||||
goto err;
|
||||
|
||||
dev->vblank_refcount = drm_alloc(sizeof(atomic_t) * num_crtcs,
|
||||
DRM_MEM_DRIVER);
|
||||
if (!dev->vblank_refcount)
|
||||
goto err;
|
||||
|
||||
dev->vblank_enabled = drm_calloc(num_crtcs, sizeof(int),
|
||||
DRM_MEM_DRIVER);
|
||||
if (!dev->vblank_enabled)
|
||||
goto err;
|
||||
|
||||
dev->last_vblank = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER);
|
||||
if (!dev->last_vblank)
|
||||
goto err;
|
||||
|
||||
dev->vblank_premodeset = drm_calloc(num_crtcs, sizeof(u32),
|
||||
DRM_MEM_DRIVER);
|
||||
if (!dev->vblank_premodeset)
|
||||
goto err;
|
||||
|
||||
dev->vblank_offset = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER);
|
||||
if (!dev->vblank_offset)
|
||||
goto err;
|
||||
|
||||
/* Zero per-crtc vblank stuff */
|
||||
for (i = 0; i < num_crtcs; i++) {
|
||||
DRM_INIT_WAITQUEUE(&dev->vbl_queue[i]);
|
||||
TAILQ_INIT(&dev->vbl_sigs[i]);
|
||||
atomic_set(&dev->_vblank_count[i], 0);
|
||||
atomic_set(&dev->vblank_refcount[i], 0);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
err:
|
||||
drm_vblank_cleanup(dev);
|
||||
done:
|
||||
drm_vblank_put(dev, crtc);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -530,45 +559,53 @@ void drm_vbl_send_signals(struct drm_device *dev, int crtc )
|
|||
}
|
||||
#endif
|
||||
|
||||
void drm_handle_vblank(struct drm_device *dev, int crtc)
|
||||
{
|
||||
atomic_inc(&dev->_vblank_count[crtc]);
|
||||
DRM_WAKEUP(&dev->vbl_queue[crtc]);
|
||||
drm_vbl_send_signals(dev, crtc);
|
||||
}
|
||||
|
||||
static void drm_locked_task(void *context, int pending __unused)
|
||||
{
|
||||
struct drm_device *dev = context;
|
||||
|
||||
DRM_LOCK();
|
||||
for (;;) {
|
||||
int ret;
|
||||
DRM_SPINLOCK(&dev->tsk_lock);
|
||||
|
||||
if (drm_lock_take(&dev->lock.hw_lock->lock,
|
||||
DRM_KERNEL_CONTEXT))
|
||||
{
|
||||
dev->lock.file_priv = NULL; /* kernel owned */
|
||||
dev->lock.lock_time = jiffies;
|
||||
atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
|
||||
break; /* Got lock */
|
||||
}
|
||||
|
||||
/* Contention */
|
||||
#if defined(__FreeBSD__) && __FreeBSD_version > 500000
|
||||
ret = mtx_sleep((void *)&dev->lock.lock_queue, &dev->dev_lock,
|
||||
PZERO | PCATCH, "drmlk2", 0);
|
||||
#else
|
||||
ret = tsleep((void *)&dev->lock.lock_queue, PZERO | PCATCH,
|
||||
"drmlk2", 0);
|
||||
#endif
|
||||
if (ret != 0)
|
||||
return;
|
||||
DRM_LOCK(); /* XXX drm_lock_take() should do it's own locking */
|
||||
if (dev->locked_task_call == NULL ||
|
||||
drm_lock_take(&dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT) == 0) {
|
||||
DRM_UNLOCK();
|
||||
DRM_SPINUNLOCK(&dev->tsk_lock);
|
||||
return;
|
||||
}
|
||||
|
||||
dev->lock.file_priv = NULL; /* kernel owned */
|
||||
dev->lock.lock_time = jiffies;
|
||||
atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
|
||||
|
||||
DRM_UNLOCK();
|
||||
|
||||
dev->locked_task_call(dev);
|
||||
|
||||
drm_lock_free(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT);
|
||||
|
||||
dev->locked_task_call = NULL;
|
||||
|
||||
DRM_SPINUNLOCK(&dev->tsk_lock);
|
||||
}
|
||||
|
||||
void
|
||||
drm_locked_tasklet(struct drm_device *dev,
|
||||
void (*tasklet)(struct drm_device *dev))
|
||||
{
|
||||
DRM_SPINLOCK(&dev->tsk_lock);
|
||||
if (dev->locked_task_call != NULL) {
|
||||
DRM_SPINUNLOCK(&dev->tsk_lock);
|
||||
return;
|
||||
}
|
||||
|
||||
dev->locked_task_call = tasklet;
|
||||
DRM_SPINUNLOCK(&dev->tsk_lock);
|
||||
taskqueue_enqueue(taskqueue_swi, &dev->locked_task);
|
||||
}
|
||||
|
|
|
@@ -180,6 +180,13 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
        _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) != lock->context)
        return EINVAL;

    DRM_SPINLOCK(&dev->tsk_lock);
    if (dev->locked_task_call != NULL) {
        dev->locked_task_call(dev);
        dev->locked_task_call = NULL;
    }
    DRM_SPINUNLOCK(&dev->tsk_lock);

    atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);

    DRM_LOCK();
@@ -71,6 +71,7 @@ drm_pci_alloc(struct drm_device *dev, size_t size,
        return NULL;

#ifdef __FreeBSD__
    DRM_UNLOCK();
    ret = bus_dma_tag_create(NULL, align, 0, /* tag, align, boundary */
        maxaddr, BUS_SPACE_MAXADDR, /* lowaddr, highaddr */
        NULL, NULL, /* filtfunc, filtfuncargs */
@@ -79,6 +80,7 @@ drm_pci_alloc(struct drm_device *dev, size_t size,
        &dmah->tag);
    if (ret != 0) {
        free(dmah, M_DRM);
        DRM_LOCK();
        return NULL;
    }

@@ -87,9 +89,10 @@ drm_pci_alloc(struct drm_device *dev, size_t size,
    if (ret != 0) {
        bus_dma_tag_destroy(dmah->tag);
        free(dmah, M_DRM);
        DRM_LOCK();
        return NULL;
    }

    DRM_LOCK();
    ret = bus_dmamap_load(dmah->tag, dmah->map, dmah->vaddr, size,
        drm_pci_busdma_callback, dmah, 0);
    if (ret != 0) {
@@ -3,7 +3,7 @@
.PATH: ${.CURDIR}/..
KMOD = i915
NO_MAN = YES
SRCS = i915_dma.c i915_drv.c i915_irq.c i915_mem.c
SRCS = i915_dma.c i915_drv.c i915_irq.c i915_mem.c i915_suspend.c
SRCS += device_if.h bus_if.h pci_if.h opt_drm.h
CFLAGS += ${DEBUG_FLAGS} -I. -I..
@@ -40,10 +40,38 @@ static drm_pci_id_list_t i915_pciidlist[] = {
    i915_PCI_IDS
};

static int i915_suspend(device_t nbdev)
{
    struct drm_device *dev = device_get_softc(nbdev);
    struct drm_i915_private *dev_priv = dev->dev_private;

    if (!dev || !dev_priv) {
        DRM_ERROR("dev: 0x%lx, dev_priv: 0x%lx\n",
            (unsigned long) dev, (unsigned long) dev_priv);
        DRM_ERROR("DRM not initialized, aborting suspend.\n");
        return -ENODEV;
    }

    i915_save_state(dev);

    return (bus_generic_suspend(nbdev));
}

static int i915_resume(device_t nbdev)
{
    struct drm_device *dev = device_get_softc(nbdev);

    i915_restore_state(dev);

    return (bus_generic_resume(nbdev));
}

static void i915_configure(struct drm_device *dev)
{
    dev->driver.buf_priv_size = 1; /* No dev_priv */
    dev->driver.buf_priv_size = sizeof(drm_i915_private_t);
    dev->driver.load = i915_driver_load;
    dev->driver.unload = i915_driver_unload;
    dev->driver.firstopen = i915_driver_firstopen;
    dev->driver.preclose = i915_driver_preclose;
    dev->driver.lastclose = i915_driver_lastclose;
    dev->driver.device_is_agp = i915_driver_device_is_agp;
@@ -94,6 +122,8 @@ static device_method_t i915_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe, i915_probe),
    DEVMETHOD(device_attach, i915_attach),
    DEVMETHOD(device_suspend, i915_suspend),
    DEVMETHOD(device_resume, i915_resume),
    DEVMETHOD(device_detach, drm_detach),

    { 0, 0 }
@@ -0,0 +1 @@
../shared-core/i915_suspend.c
@@ -0,0 +1 @@
../shared-core/radeon_microcode.h
@@ -20,7 +20,8 @@ r128-objs := r128_drv.o r128_cce.o r128_state.o r128_irq.o
mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
i810-objs := i810_drv.o i810_dma.o
i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \
        i915_buffer.o i915_compat.o i915_execbuf.o \
        i915_buffer.o i915_compat.o i915_execbuf.o i915_suspend.o \
        i915_opregion.o \
        i915_gem.o i915_gem_debug.o i915_gem_proc.o i915_gem_tiling.o
nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \
        nouveau_object.o nouveau_irq.o nouveau_notifier.o nouveau_swmthd.o \
@@ -39,6 +39,28 @@
#define ATI_PCIE_WRITE 0x4
#define ATI_PCIE_READ 0x8

static __inline__ void gart_insert_page_into_table(struct drm_ati_pcigart_info *gart_info, dma_addr_t addr, u32 *pci_gart)
{
    u32 page_base;

    page_base = (u32)addr & ATI_PCIGART_PAGE_MASK;
    switch(gart_info->gart_reg_if) {
    case DRM_ATI_GART_IGP:
        page_base |= (upper_32_bits(addr) & 0xff) << 4;
        page_base |= 0xc;
        break;
    case DRM_ATI_GART_PCIE:
        page_base >>= 8;
        page_base |= (upper_32_bits(addr) & 0xff) << 24;
        page_base |= ATI_PCIE_READ | ATI_PCIE_WRITE;
        break;
    default:
    case DRM_ATI_GART_PCI:
        break;
    }
    *pci_gart = cpu_to_le32(page_base);
}

static int drm_ati_alloc_pcigart_table(struct drm_device *dev,
                                       struct drm_ati_pcigart_info *gart_info)
{
@@ -80,7 +102,7 @@ int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info
    for (i = 0; i < pages; i++) {
        if (!entry->busaddr[i])
            break;
        pci_unmap_single(dev->pdev, entry->busaddr[i],
        pci_unmap_page(dev->pdev, entry->busaddr[i],
                PAGE_SIZE, PCI_DMA_TODEVICE);
    }

@@ -104,7 +126,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
    struct drm_sg_mem *entry = dev->sg;
    void *address = NULL;
    unsigned long pages;
    u32 *pci_gart, page_base;
    u32 *pci_gart;
    dma_addr_t bus_address = 0;
    int i, j, ret = 0;
    int max_pages;
@@ -143,10 +165,8 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga

    for (i = 0; i < pages; i++) {
        /* we need to support large memory configurations */
        entry->busaddr[i] = pci_map_single(dev->pdev,
                        page_address(entry->
                                pagelist[i]),
                        PAGE_SIZE, PCI_DMA_TODEVICE);
        entry->busaddr[i] = pci_map_page(dev->pdev, entry->pagelist[i],
                        0, PAGE_SIZE, PCI_DMA_TODEVICE);
        if (entry->busaddr[i] == 0) {
            DRM_ERROR("unable to map PCIGART pages!\n");
            drm_ati_pcigart_cleanup(dev, gart_info);
@@ -157,22 +177,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga

        entry_addr = entry->busaddr[i];
        for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) {
            page_base = (u32) entry_addr & ATI_PCIGART_PAGE_MASK;
            switch(gart_info->gart_reg_if) {
            case DRM_ATI_GART_IGP:
                page_base |= (upper_32_bits(entry_addr) & 0xff) << 4;
                page_base |= 0xc;
                break;
            case DRM_ATI_GART_PCIE:
                page_base >>= 8;
                page_base |= (upper_32_bits(entry_addr) & 0xff) << 24;
                page_base |= ATI_PCIE_READ | ATI_PCIE_WRITE;
                break;
            default:
            case DRM_ATI_GART_PCI:
                break;
            }
            *pci_gart = cpu_to_le32(page_base);
            gart_insert_page_into_table(gart_info, entry_addr, pci_gart);
            pci_gart++;
            entry_addr += ATI_PCIGART_PAGE_SIZE;
        }
@@ -909,6 +909,14 @@ struct drm_device {
    /** \name VBLANK IRQ support */
    /*@{ */

    /*
     * At load time, disabling the vblank interrupt won't be allowed since
     * old clients may not call the modeset ioctl and therefore misbehave.
     * Once the modeset ioctl *has* been called though, we can safely
     * disable them when unused.
     */
    int vblank_disable_allowed;

    wait_queue_head_t *vbl_queue;   /**< VBLANK wait queue */
    atomic_t *_vblank_count;        /**< number of VBLANK interrupts (driver must alloc the right number of counters) */
    spinlock_t vbl_lock;
@@ -917,13 +925,12 @@ struct drm_device {
    atomic_t *vblank_refcount;      /* number of users of vblank interrupts per crtc */
    u32 *last_vblank;               /* protected by dev->vbl_lock, used */
                                    /* for wraparound handling */
    u32 *vblank_offset;             /* used to track how many vblanks */
    int *vblank_enabled;            /* so we don't call enable more than
                                       once per disable */
    u32 *vblank_premodeset;         /* were lost during modeset */
    int *vblank_inmodeset;          /* Display driver is setting mode */
    struct timer_list vblank_disable_timer;

    unsigned long max_vblank_count; /**< size of vblank counter register */
    u32 max_vblank_count;           /**< size of vblank counter register */
    spinlock_t tasklet_lock;        /**< For drm_locked_tasklet */
    void (*locked_tasklet_func)(struct drm_device *dev);

@@ -1241,7 +1248,6 @@ extern int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *
extern int drm_vblank_wait(struct drm_device * dev, unsigned int *vbl_seq);
extern void drm_locked_tasklet(struct drm_device *dev, void(*func)(struct drm_device*));
extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
extern void drm_update_vblank_count(struct drm_device *dev, int crtc);
extern void drm_handle_vblank(struct drm_device *dev, int crtc);
extern int drm_vblank_get(struct drm_device *dev, int crtc);
extern void drm_vblank_put(struct drm_device *dev, int crtc);
@@ -1420,6 +1426,7 @@ void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);

extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev);
extern void drm_core_ioremap_wc(struct drm_map *map, struct drm_device *dev);
extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev);

static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev,
@@ -597,7 +597,7 @@ static int drm_agp_bind_ttm(struct drm_ttm_backend *backend,
    int snooped = (bo_mem->flags & DRM_BO_FLAG_CACHED) && !(bo_mem->flags & DRM_BO_FLAG_CACHED_MAPPED);

    DRM_DEBUG("drm_agp_bind_ttm\n");
    mem->is_flushed = TRUE;
    mem->is_flushed = true;
    mem->type = AGP_USER_MEMORY;
    /* CACHED MAPPED implies not snooped memory */
    if (snooped)
@@ -696,7 +696,7 @@ struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev)
    agp_be->mem = NULL;

    agp_be->bridge = dev->agp->bridge;
    agp_be->populated = FALSE;
    agp_be->populated = false;
    agp_be->backend.func = &agp_ttm_backend;
    agp_be->backend.dev = dev;
@@ -362,4 +362,8 @@ extern struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
                                     unsigned long address, int *type);
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26))
#define drm_core_ioremap_wc drm_core_ioremap
#endif

#endif
@@ -617,9 +617,10 @@ long drm_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
    if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
        && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls))
        ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
    else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE))
    else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
        ioctl = &drm_ioctls[nr];
    else {
        cmd = ioctl->cmd;
    } else {
        retcode = -EINVAL;
        goto err_i1;
    }
@@ -635,6 +636,7 @@ long drm_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
        goto err_i1;
    }
#endif

    func = ioctl->func;
    /* is there a local override? */
    if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl)
@@ -659,7 +661,7 @@ long drm_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
        retcode = func(dev, kdata, file_priv);
    }

    if ((retcode == 0) && (cmd & IOC_OUT)) {
    if (cmd & IOC_OUT) {
        if (copy_to_user((void __user *)arg, kdata,
                         _IOC_SIZE(cmd)) != 0)
            retcode = -EACCES;
@ -77,10 +77,16 @@ static void vblank_disable_fn(unsigned long arg)
|
|||
unsigned long irqflags;
|
||||
int i;
|
||||
|
||||
if (!dev->vblank_disable_allowed)
|
||||
return;
|
||||
|
||||
for (i = 0; i < dev->num_crtcs; i++) {
|
||||
spin_lock_irqsave(&dev->vbl_lock, irqflags);
|
||||
if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
|
||||
dev->vblank_enabled[i]) {
|
||||
DRM_DEBUG("disabling vblank on crtc %d\n", i);
|
||||
dev->last_vblank[i] =
|
||||
dev->driver->get_vblank_counter(dev, i);
|
||||
dev->driver->disable_vblank(dev, i);
|
||||
dev->vblank_enabled[i] = 0;
|
||||
}
|
||||
|
@ -110,10 +116,8 @@ static void drm_vblank_cleanup(struct drm_device *dev)
|
|||
dev->num_crtcs, DRM_MEM_DRIVER);
|
||||
drm_free(dev->last_vblank, sizeof(*dev->last_vblank) * dev->num_crtcs,
|
||||
DRM_MEM_DRIVER);
|
||||
drm_free(dev->vblank_premodeset, sizeof(*dev->vblank_premodeset) *
|
||||
drm_free(dev->vblank_inmodeset, sizeof(*dev->vblank_inmodeset) *
|
||||
dev->num_crtcs, DRM_MEM_DRIVER);
|
||||
drm_free(dev->vblank_offset, sizeof(*dev->vblank_offset) * dev->num_crtcs,
|
||||
DRM_MEM_DRIVER);
|
||||
|
||||
dev->num_crtcs = 0;
|
||||
}
|
||||
|
@ -158,13 +162,9 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
|
|||
if (!dev->last_vblank)
|
||||
goto err;
|
||||
|
||||
dev->vblank_premodeset = drm_calloc(num_crtcs, sizeof(u32),
|
||||
DRM_MEM_DRIVER);
|
||||
if (!dev->vblank_premodeset)
|
||||
goto err;
|
||||
|
||||
dev->vblank_offset = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER);
|
||||
if (!dev->vblank_offset)
|
||||
dev->vblank_inmodeset = drm_calloc(num_crtcs, sizeof(int),
|
||||
DRM_MEM_DRIVER);
|
||||
if (!dev->vblank_inmodeset)
|
||||
goto err;
|
||||
|
||||
/* Zero per-crtc vblank stuff */
|
||||
|
@ -175,6 +175,8 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
|
|||
atomic_set(&dev->vblank_refcount[i], 0);
|
||||
}
|
||||
|
||||
dev->vblank_disable_allowed = 0;
|
||||
|
||||
return 0;
|
||||
|
||||
err:
|
||||
|
@ -335,8 +337,7 @@ int drm_control(struct drm_device *dev, void *data,
|
|||
*/
|
||||
u32 drm_vblank_count(struct drm_device *dev, int crtc)
|
||||
{
|
||||
return atomic_read(&dev->_vblank_count[crtc]) +
|
||||
dev->vblank_offset[crtc];
|
||||
return atomic_read(&dev->_vblank_count[crtc]);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_vblank_count);
|
||||
|
||||
|
@ -349,10 +350,15 @@ EXPORT_SYMBOL(drm_vblank_count);
|
|||
* (specified by @crtc). Deal with wraparound, if it occurred, and
|
||||
* update the last read value so we can deal with wraparound on the next
|
||||
* call if necessary.
|
||||
*
|
||||
* Only necessary when going from off->on, to account for frames we
|
||||
* didn't get an interrupt for.
|
||||
*
|
||||
* Note: caller must hold dev->vbl_lock since this reads & writes
|
||||
* device vblank fields.
|
||||
*/
|
||||
void drm_update_vblank_count(struct drm_device *dev, int crtc)
|
||||
static void drm_update_vblank_count(struct drm_device *dev, int crtc)
|
||||
{
|
||||
unsigned long irqflags;
|
||||
u32 cur_vblank, diff;
|
||||
|
||||
/*
|
||||
|
@ -363,20 +369,19 @@ void drm_update_vblank_count(struct drm_device *dev, int crtc)
|
|||
* a long time.
|
||||
*/
|
||||
cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
|
||||
spin_lock_irqsave(&dev->vbl_lock, irqflags);
|
||||
diff = cur_vblank - dev->last_vblank[crtc];
|
||||
if (cur_vblank < dev->last_vblank[crtc]) {
|
||||
diff = dev->max_vblank_count -
|
||||
dev->last_vblank[crtc];
|
||||
diff += cur_vblank;
|
||||
} else {
|
||||
diff = cur_vblank - dev->last_vblank[crtc];
|
||||
diff += dev->max_vblank_count;
|
||||
|
||||
DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
|
||||
crtc, dev->last_vblank[crtc], cur_vblank, diff);
|
||||
}
|
||||
dev->last_vblank[crtc] = cur_vblank;
|
||||
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
|
||||
|
||||
DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
|
||||
crtc, diff);
|
||||
|
||||
atomic_add(diff, &dev->_vblank_count[crtc]);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_update_vblank_count);
|
||||
|
||||
/**
|
||||
* drm_vblank_get - get a reference count on vblank events
|
||||
|
@ -384,9 +389,7 @@ EXPORT_SYMBOL(drm_update_vblank_count);
|
|||
* @crtc: which CRTC to own
|
||||
*
|
||||
* Acquire a reference count on vblank events to avoid having them disabled
|
||||
* while in use. Note callers will probably want to update the master counter
|
||||
* using drm_update_vblank_count() above before calling this routine so that
|
||||
* wakeups occur on the right vblank event.
|
||||
* while in use.
|
||||
*
|
||||
* RETURNS
|
||||
* Zero on success, nonzero on failure.
|
||||
|
@ -396,15 +399,17 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
|
|||
unsigned long irqflags;
|
||||
int ret = 0;
|
||||
|
||||
spin_lock_irqsave(&dev->vbl_lock, irqflags);
|
||||
spin_lock_irqsave(&dev->vbl_lock, irqflags);
|
||||
/* Going from 0->1 means we have to enable interrupts again */
|
||||
if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1 &&
|
||||
!dev->vblank_enabled[crtc]) {
|
||||
ret = dev->driver->enable_vblank(dev, crtc);
|
||||
if (ret)
|
||||
atomic_dec(&dev->vblank_refcount[crtc]);
|
||||
else
|
||||
else {
|
||||
dev->vblank_enabled[crtc] = 1;
|
||||
drm_update_vblank_count(dev, crtc);
|
||||
}
|
||||
}
|
||||
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
|
||||
|
||||
|
@ -434,13 +439,21 @@ EXPORT_SYMBOL(drm_vblank_put);
|
|||
*
|
||||
* Applications should call the %_DRM_PRE_MODESET and %_DRM_POST_MODESET
|
||||
* ioctls around modesetting so that any lost vblank events are accounted for.
|
||||
*
|
||||
* Generally the counter will reset across mode sets. If interrupts are
|
||||
* enabled around this call, we don't have to do anything since the counter
|
||||
* will have already been incremented.
|
||||
*/
|
||||
int drm_modeset_ctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv)
|
||||
{
|
||||
struct drm_modeset_ctl *modeset = data;
|
||||
unsigned long irqflags;
|
||||
int crtc, ret = 0;
|
||||
u32 new;
|
||||
|
||||
/* If drm_vblank_init() hasn't been called yet, just no-op */
|
||||
if (!dev->num_crtcs)
|
||||
goto out;
|
||||
|
||||
crtc = modeset->crtc;
|
||||
if (crtc >= dev->num_crtcs) {
|
||||
|
@ -448,14 +461,28 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
|
|||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* To avoid all the problems that might happen if interrupts
|
||||
* were enabled/disabled around or between these calls, we just
|
||||
* have the kernel take a reference on the CRTC (just once though
|
||||
* to avoid corrupting the count if multiple, mismatch calls occur),
|
||||
* so that interrupts remain enabled in the interim.
|
||||
*/
|
||||
switch (modeset->cmd) {
|
||||
case _DRM_PRE_MODESET:
|
||||
dev->vblank_premodeset[crtc] =
|
||||
dev->driver->get_vblank_counter(dev, crtc);
|
||||
if (!dev->vblank_inmodeset[crtc]) {
|
||||
dev->vblank_inmodeset[crtc] = 1;
|
||||
drm_vblank_get(dev, crtc);
|
||||
}
|
||||
break;
|
||||
case _DRM_POST_MODESET:
|
||||
new = dev->driver->get_vblank_counter(dev, crtc);
|
||||
dev->vblank_offset[crtc] = dev->vblank_premodeset[crtc] - new;
|
||||
if (dev->vblank_inmodeset[crtc]) {
|
||||
spin_lock_irqsave(&dev->vbl_lock, irqflags);
|
||||
dev->vblank_disable_allowed = 1;
|
||||
dev->vblank_inmodeset[crtc] = 0;
|
||||
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
|
||||
drm_vblank_put(dev, crtc);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
ret = -EINVAL;
|
||||
|
@ -489,7 +516,6 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
|
|||
struct drm_file *file_priv)
|
||||
{
|
||||
union drm_wait_vblank *vblwait = data;
|
||||
struct timeval now;
|
||||
int ret = 0;
|
||||
unsigned int flags, seq, crtc;
|
||||
|
||||
|
@ -510,7 +536,9 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
|
|||
if (crtc >= dev->num_crtcs)
|
||||
return -EINVAL;
|
||||
|
||||
drm_update_vblank_count(dev, crtc);
|
||||
ret = drm_vblank_get(dev, crtc);
|
||||
if (ret)
|
||||
return ret;
|
||||
seq = drm_vblank_count(dev, crtc);
|
||||
|
||||
switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
|
||||
|
@ -520,7 +548,8 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
|
|||
case _DRM_VBLANK_ABSOLUTE:
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
ret = -EINVAL;
|
||||
goto done;
|
||||
}
|
||||
|
||||
if ((flags & _DRM_VBLANK_NEXTONMISS) &&
|
||||
|
@ -553,15 +582,18 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
|
|||
|
||||
if (atomic_read(&dev->vbl_signal_pending) >= 100) {
|
||||
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
|
||||
return -EBUSY;
|
||||
ret = -EBUSY;
|
||||
goto done;
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
|
||||
|
||||
vbl_sig = drm_calloc(1, sizeof(struct drm_vbl_sig),
|
||||
DRM_MEM_DRIVER);
|
||||
if (!vbl_sig)
|
||||
return -ENOMEM;
|
||||
if (!vbl_sig) {
|
||||
ret = -ENOMEM;
|
||||
goto done;
|
||||
}
|
||||
|
||||
ret = drm_vblank_get(dev, crtc);
|
||||
if (ret) {
|
||||
|
@ -584,23 +616,23 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
|
|||
|
||||
vblwait->reply.sequence = seq;
|
||||
} else {
|
||||
unsigned long cur_vblank;
|
||||
|
||||
ret = drm_vblank_get(dev, crtc);
|
||||
if (ret)
|
||||
return ret;
|
||||
DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
|
||||
(((cur_vblank = drm_vblank_count(dev, crtc))
|
||||
((drm_vblank_count(dev, crtc)
|
||||
- vblwait->request.sequence) <= (1 << 23)));
|
||||
drm_vblank_put(dev, crtc);
|
||||
do_gettimeofday(&now);
|
||||
|
||||
vblwait->reply.tval_sec = now.tv_sec;
|
||||
vblwait->reply.tval_usec = now.tv_usec;
|
||||
vblwait->reply.sequence = cur_vblank;
|
||||
if (ret != -EINTR) {
|
||||
struct timeval now;
|
||||
|
||||
do_gettimeofday(&now);
|
||||
|
||||
vblwait->reply.tval_sec = now.tv_sec;
|
||||
vblwait->reply.tval_usec = now.tv_usec;
|
||||
vblwait->reply.sequence = drm_vblank_count(dev, crtc);
|
||||
}
|
||||
}
|
||||
|
||||
done:
|
||||
done:
|
||||
drm_vblank_put(dev, crtc);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -654,7 +686,7 @@ static void drm_vbl_send_signals(struct drm_device * dev, int crtc)
|
|||
*/
|
||||
void drm_handle_vblank(struct drm_device *dev, int crtc)
|
||||
{
|
||||
drm_update_vblank_count(dev, crtc);
|
||||
atomic_inc(&dev->_vblank_count[crtc]);
|
||||
DRM_WAKEUP(&dev->vbl_queue[crtc]);
|
||||
drm_vbl_send_signals(dev, crtc);
|
||||
}
|
||||
|
|
|
@@ -105,14 +105,19 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
              ret ? "interrupted" : "has lock");
    if (ret) return ret;

    sigemptyset(&dev->sigmask);
    sigaddset(&dev->sigmask, SIGSTOP);
    sigaddset(&dev->sigmask, SIGTSTP);
    sigaddset(&dev->sigmask, SIGTTIN);
    sigaddset(&dev->sigmask, SIGTTOU);
    dev->sigdata.context = lock->context;
    dev->sigdata.lock = dev->lock.hw_lock;
    block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
    /* don't set the block all signals on the master process for now
     * really probably not the correct answer but lets us debug xkb
     * xserver for now */
    if (!file_priv->master) {
        sigemptyset(&dev->sigmask);
        sigaddset(&dev->sigmask, SIGSTOP);
        sigaddset(&dev->sigmask, SIGTSTP);
        sigaddset(&dev->sigmask, SIGTTIN);
        sigaddset(&dev->sigmask, SIGTTOU);
        dev->sigdata.context = lock->context;
        dev->sigdata.lock = dev->lock.hw_lock;
        block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
    }

    if (dev->driver->dma_ready && (lock->flags & _DRM_LOCK_READY))
        dev->driver->dma_ready(dev);
@@ -350,6 +350,15 @@ void drm_core_ioremap(struct drm_map *map, struct drm_device *dev)
}
EXPORT_SYMBOL_GPL(drm_core_ioremap);


#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
void drm_core_ioremap_wc(struct drm_map *map, struct drm_device *dev)
{
    map->handle = ioremap_wc(map->offset, map->size);
}
EXPORT_SYMBOL_GPL(drm_core_ioremap_wc);
#endif

void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev)
{
    if (!map->handle || !map->size)
@ -63,211 +63,9 @@ static struct drm_bo_driver i915_bo_driver = {
|
|||
};
|
||||
#endif
|
||||
|
||||
enum pipe {
|
||||
PIPE_A = 0,
|
||||
PIPE_B,
|
||||
};
|
||||
|
||||
static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
if (pipe == PIPE_A)
|
||||
return (I915_READ(DPLL_A) & DPLL_VCO_ENABLE);
|
||||
else
|
||||
return (I915_READ(DPLL_B) & DPLL_VCO_ENABLE);
|
||||
}
|
||||
|
||||
static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
|
||||
u32 *array;
|
||||
int i;
|
||||
|
||||
if (!i915_pipe_enabled(dev, pipe))
|
||||
return;
|
||||
|
||||
if (pipe == PIPE_A)
|
||||
array = dev_priv->save_palette_a;
|
||||
else
|
||||
array = dev_priv->save_palette_b;
|
||||
|
||||
for(i = 0; i < 256; i++)
|
||||
array[i] = I915_READ(reg + (i << 2));
|
||||
}
|
||||
|
||||
static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
|
||||
u32 *array;
|
||||
int i;
|
||||
|
||||
if (!i915_pipe_enabled(dev, pipe))
|
||||
return;
|
||||
|
||||
if (pipe == PIPE_A)
|
||||
array = dev_priv->save_palette_a;
|
||||
else
|
||||
array = dev_priv->save_palette_b;
|
||||
|
||||
for(i = 0; i < 256; i++)
|
||||
I915_WRITE(reg + (i << 2), array[i]);
|
||||
}
|
||||
|
||||
static u8 i915_read_indexed(u16 index_port, u16 data_port, u8 reg)
|
||||
{
|
||||
outb(reg, index_port);
|
||||
return inb(data_port);
|
||||
}
|
||||
|
||||
static u8 i915_read_ar(u16 st01, u8 reg, u16 palette_enable)
|
||||
{
|
||||
inb(st01);
|
||||
outb(palette_enable | reg, VGA_AR_INDEX);
|
||||
return inb(VGA_AR_DATA_READ);
|
||||
}
|
||||
|
||||
static void i915_write_ar(u8 st01, u8 reg, u8 val, u16 palette_enable)
|
||||
{
|
||||
inb(st01);
|
||||
outb(palette_enable | reg, VGA_AR_INDEX);
|
||||
outb(val, VGA_AR_DATA_WRITE);
|
||||
}
|
||||
|
||||
static void i915_write_indexed(u16 index_port, u16 data_port, u8 reg, u8 val)
|
||||
{
|
||||
outb(reg, index_port);
|
||||
outb(val, data_port);
|
||||
}
|
||||
|
||||
static void i915_save_vga(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
int i;
|
||||
u16 cr_index, cr_data, st01;
|
||||
|
||||
/* VGA color palette registers */
|
||||
dev_priv->saveDACMASK = inb(VGA_DACMASK);
|
||||
/* DACCRX automatically increments during read */
|
||||
outb(0, VGA_DACRX);
|
||||
/* Read 3 bytes of color data from each index */
|
||||
for (i = 0; i < 256 * 3; i++)
|
||||
dev_priv->saveDACDATA[i] = inb(VGA_DACDATA);
|
||||
|
||||
/* MSR bits */
|
||||
dev_priv->saveMSR = inb(VGA_MSR_READ);
|
||||
if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
|
||||
cr_index = VGA_CR_INDEX_CGA;
|
||||
cr_data = VGA_CR_DATA_CGA;
|
||||
st01 = VGA_ST01_CGA;
|
||||
} else {
|
||||
cr_index = VGA_CR_INDEX_MDA;
|
||||
cr_data = VGA_CR_DATA_MDA;
|
||||
st01 = VGA_ST01_MDA;
|
||||
}
|
||||
|
||||
/* CRT controller regs */
|
||||
i915_write_indexed(cr_index, cr_data, 0x11,
|
||||
i915_read_indexed(cr_index, cr_data, 0x11) &
|
||||
(~0x80));
|
||||
for (i = 0; i <= 0x24; i++)
|
||||
dev_priv->saveCR[i] =
|
||||
i915_read_indexed(cr_index, cr_data, i);
|
||||
/* Make sure we don't turn off CR group 0 writes */
|
||||
dev_priv->saveCR[0x11] &= ~0x80;
|
||||
|
||||
/* Attribute controller registers */
|
||||
inb(st01);
|
||||
dev_priv->saveAR_INDEX = inb(VGA_AR_INDEX);
|
||||
for (i = 0; i <= 0x14; i++)
|
||||
dev_priv->saveAR[i] = i915_read_ar(st01, i, 0);
|
||||
inb(st01);
|
||||
outb(dev_priv->saveAR_INDEX, VGA_AR_INDEX);
|
||||
inb(st01);
|
||||
|
||||
/* Graphics controller registers */
|
||||
for (i = 0; i < 9; i++)
|
||||
dev_priv->saveGR[i] =
|
||||
i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, i);
|
||||
|
||||
dev_priv->saveGR[0x10] =
|
||||
i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x10);
|
||||
dev_priv->saveGR[0x11] =
|
||||
i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x11);
|
||||
dev_priv->saveGR[0x18] =
|
||||
i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x18);
|
||||
|
||||
/* Sequencer registers */
|
||||
for (i = 0; i < 8; i++)
|
||||
dev_priv->saveSR[i] =
|
||||
i915_read_indexed(VGA_SR_INDEX, VGA_SR_DATA, i);
|
||||
}
|
||||
|
||||
static void i915_restore_vga(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
int i;
|
||||
u16 cr_index, cr_data, st01;
|
||||
|
||||
/* MSR bits */
|
||||
outb(dev_priv->saveMSR, VGA_MSR_WRITE);
|
||||
if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
|
||||
cr_index = VGA_CR_INDEX_CGA;
|
||||
cr_data = VGA_CR_DATA_CGA;
|
||||
st01 = VGA_ST01_CGA;
|
||||
} else {
|
||||
cr_index = VGA_CR_INDEX_MDA;
|
||||
cr_data = VGA_CR_DATA_MDA;
|
||||
st01 = VGA_ST01_MDA;
|
||||
}
|
||||
|
||||
/* Sequencer registers, don't write SR07 */
|
||||
for (i = 0; i < 7; i++)
|
||||
i915_write_indexed(VGA_SR_INDEX, VGA_SR_DATA, i,
|
||||
dev_priv->saveSR[i]);
|
||||
|
||||
/* CRT controller regs */
|
||||
/* Enable CR group 0 writes */
|
||||
i915_write_indexed(cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]);
|
||||
for (i = 0; i <= 0x24; i++)
|
||||
i915_write_indexed(cr_index, cr_data, i, dev_priv->saveCR[i]);
|
||||
|
||||
/* Graphics controller regs */
|
||||
for (i = 0; i < 9; i++)
|
||||
i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, i,
|
||||
dev_priv->saveGR[i]);
|
||||
|
||||
i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x10,
|
||||
dev_priv->saveGR[0x10]);
|
||||
i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x11,
|
||||
dev_priv->saveGR[0x11]);
|
||||
i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x18,
|
||||
dev_priv->saveGR[0x18]);
|
||||
|
||||
/* Attribute controller registers */
|
||||
inb(st01); /* switch back to index mode */
|
||||
for (i = 0; i <= 0x14; i++)
|
||||
i915_write_ar(st01, i, dev_priv->saveAR[i], 0);
|
||||
inb(st01); /* switch back to index mode */
|
||||
outb(dev_priv->saveAR_INDEX | 0x20, VGA_AR_INDEX);
|
||||
inb(st01);
|
||||
|
||||
/* VGA color palette registers */
|
||||
outb(dev_priv->saveDACMASK, VGA_DACMASK);
|
||||
	/* DACWX automatically increments during write */
	outb(0, VGA_DACWX);
	/* Write 3 bytes of color data to each index */
	for (i = 0; i < 256 * 3; i++)
		outb(dev_priv->saveDACDATA[i], VGA_DACDATA);
|
||||
|
||||
}
|
||||
|
||||
static int i915_suspend(struct drm_device *dev, pm_message_t state)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	if (!dev || !dev_priv) {
		printk(KERN_ERR "dev: %p, dev_priv: %p\n", dev, dev_priv);
@ -279,122 +77,10 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
|
|||
return 0;
|
||||
|
||||
pci_save_state(dev->pdev);
|
||||
pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
|
||||
|
||||
/* Display arbitration control */
|
||||
dev_priv->saveDSPARB = I915_READ(DSPARB);
|
||||
i915_save_state(dev);
|
||||
|
||||
/* Pipe & plane A info */
|
||||
dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
|
||||
dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
|
||||
dev_priv->saveFPA0 = I915_READ(FPA0);
|
||||
dev_priv->saveFPA1 = I915_READ(FPA1);
|
||||
dev_priv->saveDPLL_A = I915_READ(DPLL_A);
|
||||
if (IS_I965G(dev))
|
||||
dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
|
||||
dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
|
||||
dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
|
||||
dev_priv->saveHSYNC_A = I915_READ(HSYNC_A);
|
||||
dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
|
||||
dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
|
||||
dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
|
||||
dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
|
||||
|
||||
dev_priv->saveDSPACNTR = I915_READ(DSPACNTR);
|
||||
dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE);
|
||||
dev_priv->saveDSPASIZE = I915_READ(DSPASIZE);
|
||||
dev_priv->saveDSPAPOS = I915_READ(DSPAPOS);
|
||||
dev_priv->saveDSPABASE = I915_READ(DSPABASE);
|
||||
if (IS_I965G(dev)) {
|
||||
dev_priv->saveDSPASURF = I915_READ(DSPASURF);
|
||||
dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
|
||||
}
|
||||
i915_save_palette(dev, PIPE_A);
|
||||
dev_priv->savePIPEASTAT = I915_READ(I915REG_PIPEASTAT);
|
||||
|
||||
/* Pipe & plane B info */
|
||||
dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
|
||||
dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
|
||||
dev_priv->saveFPB0 = I915_READ(FPB0);
|
||||
dev_priv->saveFPB1 = I915_READ(FPB1);
|
||||
dev_priv->saveDPLL_B = I915_READ(DPLL_B);
|
||||
if (IS_I965G(dev))
|
||||
dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
|
||||
dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
|
||||
dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
|
||||
dev_priv->saveHSYNC_B = I915_READ(HSYNC_B);
|
||||
dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
|
||||
dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
|
||||
dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
|
||||
dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B);
|
||||
|
||||
dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR);
|
||||
dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE);
|
||||
dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
|
||||
dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
|
||||
dev_priv->saveDSPBBASE = I915_READ(DSPBBASE);
|
||||
if (IS_I965GM(dev) || IS_IGD_GM(dev)) {
|
||||
dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
|
||||
dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
|
||||
}
|
||||
i915_save_palette(dev, PIPE_B);
|
||||
dev_priv->savePIPEBSTAT = I915_READ(I915REG_PIPEBSTAT);
|
||||
|
||||
/* CRT state */
|
||||
dev_priv->saveADPA = I915_READ(ADPA);
|
||||
|
||||
/* LVDS state */
|
||||
dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
|
||||
dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
|
||||
dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
|
||||
if (IS_I965G(dev))
|
||||
dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
|
||||
if (IS_MOBILE(dev) && !IS_I830(dev))
|
||||
dev_priv->saveLVDS = I915_READ(LVDS);
|
||||
if (!IS_I830(dev) && !IS_845G(dev))
|
||||
dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
|
||||
dev_priv->saveLVDSPP_ON = I915_READ(LVDSPP_ON);
|
||||
dev_priv->saveLVDSPP_OFF = I915_READ(LVDSPP_OFF);
|
||||
dev_priv->savePP_CYCLE = I915_READ(PP_CYCLE);
|
||||
|
||||
/* FIXME: save TV & SDVO state */
|
||||
|
||||
/* FBC state */
|
||||
dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
|
||||
dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
|
||||
dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
|
||||
dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
|
||||
|
||||
/* Interrupt state */
|
||||
dev_priv->saveIIR = I915_READ(I915REG_INT_IDENTITY_R);
|
||||
dev_priv->saveIER = I915_READ(I915REG_INT_ENABLE_R);
|
||||
dev_priv->saveIMR = I915_READ(I915REG_INT_MASK_R);
|
||||
|
||||
/* VGA state */
|
||||
dev_priv->saveVCLK_DIVISOR_VGA0 = I915_READ(VCLK_DIVISOR_VGA0);
|
||||
dev_priv->saveVCLK_DIVISOR_VGA1 = I915_READ(VCLK_DIVISOR_VGA1);
|
||||
dev_priv->saveVCLK_POST_DIV = I915_READ(VCLK_POST_DIV);
|
||||
dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
|
||||
|
||||
/* Clock gating state */
|
||||
dev_priv->saveD_STATE = I915_READ(D_STATE);
|
||||
dev_priv->saveDSPCLK_GATE_D = I915_READ(DSPCLK_GATE_D);
|
||||
|
||||
/* Cache mode state */
|
||||
dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
|
||||
|
||||
/* Memory Arbitration state */
|
||||
dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
|
||||
|
||||
/* Scratch space */
|
||||
for (i = 0; i < 16; i++) {
|
||||
dev_priv->saveSWF0[i] = I915_READ(SWF0 + (i << 2));
|
||||
dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
|
||||
}
|
||||
for (i = 0; i < 3; i++)
|
||||
dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
|
||||
|
||||
i915_save_vga(dev);
|
||||
intel_opregion_free(dev);
|
||||
|
||||
if (state.event == PM_EVENT_SUSPEND) {
|
||||
/* Shut down the device */
|
||||
|
@ -407,154 +93,15 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
|
|||
|
||||
static int i915_resume(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
int i;
|
||||
|
||||
pci_set_power_state(dev->pdev, PCI_D0);
|
||||
pci_restore_state(dev->pdev);
|
||||
if (pci_enable_device(dev->pdev))
|
||||
return -1;
|
||||
pci_set_master(dev->pdev);
|
||||
|
||||
pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
|
||||
i915_restore_state(dev);
|
||||
|
||||
I915_WRITE(DSPARB, dev_priv->saveDSPARB);
|
||||
|
||||
/* Pipe & plane A info */
|
||||
/* Prime the clock */
|
||||
if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
|
||||
I915_WRITE(DPLL_A, dev_priv->saveDPLL_A &
|
||||
~DPLL_VCO_ENABLE);
|
||||
udelay(150);
|
||||
}
|
||||
I915_WRITE(FPA0, dev_priv->saveFPA0);
|
||||
I915_WRITE(FPA1, dev_priv->saveFPA1);
|
||||
/* Actually enable it */
|
||||
I915_WRITE(DPLL_A, dev_priv->saveDPLL_A);
|
||||
udelay(150);
|
||||
if (IS_I965G(dev))
|
||||
I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
|
||||
udelay(150);
|
||||
|
||||
/* Restore mode */
|
||||
I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A);
|
||||
I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A);
|
||||
I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A);
|
||||
I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
|
||||
I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
|
||||
I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
|
||||
I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
|
||||
|
||||
/* Restore plane info */
|
||||
I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);
|
||||
I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS);
|
||||
I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
|
||||
I915_WRITE(DSPABASE, dev_priv->saveDSPABASE);
|
||||
I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
|
||||
if (IS_I965G(dev)) {
|
||||
I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
|
||||
I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
|
||||
}
|
||||
|
||||
I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF);
|
||||
|
||||
i915_restore_palette(dev, PIPE_A);
|
||||
/* Enable the plane */
|
||||
I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR);
|
||||
I915_WRITE(DSPABASE, I915_READ(DSPABASE));
|
||||
|
||||
/* Pipe & plane B info */
|
||||
if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
|
||||
I915_WRITE(DPLL_B, dev_priv->saveDPLL_B &
|
||||
~DPLL_VCO_ENABLE);
|
||||
udelay(150);
|
||||
}
|
||||
I915_WRITE(FPB0, dev_priv->saveFPB0);
|
||||
I915_WRITE(FPB1, dev_priv->saveFPB1);
|
||||
/* Actually enable it */
|
||||
I915_WRITE(DPLL_B, dev_priv->saveDPLL_B);
|
||||
udelay(150);
|
||||
if (IS_I965G(dev))
|
||||
I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
|
||||
udelay(150);
|
||||
|
||||
/* Restore mode */
|
||||
I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);
|
||||
I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B);
|
||||
I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B);
|
||||
I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
|
||||
I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
|
||||
I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
|
||||
I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
|
||||
|
||||
/* Restore plane info */
|
||||
I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE);
|
||||
I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS);
|
||||
I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
|
||||
I915_WRITE(DSPBBASE, dev_priv->saveDSPBBASE);
|
||||
I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
|
||||
if (IS_I965G(dev)) {
|
||||
I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
|
||||
I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
|
||||
}
|
||||
|
||||
I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF);
|
||||
|
||||
i915_restore_palette(dev, PIPE_B);
|
||||
/* Enable the plane */
|
||||
I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
|
||||
I915_WRITE(DSPBBASE, I915_READ(DSPBBASE));
|
||||
|
||||
/* CRT state */
|
||||
I915_WRITE(ADPA, dev_priv->saveADPA);
|
||||
|
||||
/* LVDS state */
|
||||
if (IS_I965G(dev))
|
||||
I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
|
||||
if (IS_MOBILE(dev) && !IS_I830(dev))
|
||||
I915_WRITE(LVDS, dev_priv->saveLVDS);
|
||||
if (!IS_I830(dev) && !IS_845G(dev))
|
||||
I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
|
||||
|
||||
I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
|
||||
I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
|
||||
I915_WRITE(LVDSPP_ON, dev_priv->saveLVDSPP_ON);
|
||||
I915_WRITE(LVDSPP_OFF, dev_priv->saveLVDSPP_OFF);
|
||||
I915_WRITE(PP_CYCLE, dev_priv->savePP_CYCLE);
|
||||
I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
|
||||
|
||||
/* FIXME: restore TV & SDVO state */
|
||||
|
||||
/* FBC info */
|
||||
I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
|
||||
I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
|
||||
I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
|
||||
I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
|
||||
|
||||
/* VGA state */
|
||||
I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
|
||||
I915_WRITE(VCLK_DIVISOR_VGA0, dev_priv->saveVCLK_DIVISOR_VGA0);
|
||||
I915_WRITE(VCLK_DIVISOR_VGA1, dev_priv->saveVCLK_DIVISOR_VGA1);
|
||||
I915_WRITE(VCLK_POST_DIV, dev_priv->saveVCLK_POST_DIV);
|
||||
udelay(150);
|
||||
|
||||
/* Clock gating state */
|
||||
I915_WRITE (D_STATE, dev_priv->saveD_STATE);
|
||||
I915_WRITE (DSPCLK_GATE_D, dev_priv->saveDSPCLK_GATE_D);
|
||||
|
||||
/* Cache mode state */
|
||||
I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
|
||||
|
||||
/* Memory arbitration state */
|
||||
I915_WRITE (MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);
|
||||
|
||||
for (i = 0; i < 16; i++) {
|
||||
I915_WRITE(SWF0 + (i << 2), dev_priv->saveSWF0[i]);
|
||||
I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i+7]);
|
||||
}
|
||||
for (i = 0; i < 3; i++)
|
||||
I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
|
||||
|
||||
i915_restore_vga(dev);
|
||||
intel_opregion_init(dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@@ -46,7 +46,7 @@ static inline void i915_initiate_rwflush(struct drm_i915_private *dev_priv,
	dev_priv->flush_sequence = (uint32_t) READ_BREADCRUMB(dev_priv);
	dev_priv->flush_flags = fc->pending_flush;
	dev_priv->saved_flush_status = READ_HWSP(dev_priv, 0);
	I915_WRITE(I915REG_INSTPM, (1 << 5) | (1 << 21));
	I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
	dev_priv->flush_pending = 1;
	fc->pending_flush &= ~DRM_I915_FENCE_TYPE_RW;
}

@@ -561,11 +561,11 @@ i915_add_request(struct drm_device *dev, uint32_t flush_domains)
	dev_priv->mm.next_gem_seqno++;

	BEGIN_LP_RING(4);
	OUT_RING(CMD_STORE_DWORD_IDX);
	OUT_RING(I915_GEM_HWS_INDEX << STORE_DWORD_INDEX_SHIFT);
	OUT_RING(MI_STORE_DWORD_INDEX);
	OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	OUT_RING(seqno);

	OUT_RING(GFX_OP_USER_INTERRUPT);
	OUT_RING(MI_USER_INTERRUPT);
	ADVANCE_LP_RING();

	DRM_DEBUG("%d\n", seqno);

@@ -591,7 +591,7 @@ uint32_t
i915_retire_commands(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t cmd = CMD_MI_FLUSH | MI_NO_WRITE_FLUSH;
	uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	uint32_t flush_domains = 0;
	RING_LOCALS;

@@ -818,7 +818,7 @@ i915_gem_flush(struct drm_device *dev,
	 * are flushed at any MI_FLUSH.
	 */

	cmd = CMD_MI_FLUSH | MI_NO_WRITE_FLUSH;
	cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	if ((invalidate_domains|flush_domains) &
	    I915_GEM_DOMAIN_RENDER)
		cmd &= ~MI_NO_WRITE_FLUSH;

@@ -2381,14 +2381,14 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;

	/* Stop the ring if it's running. */
	I915_WRITE(LP_RING + RING_LEN, 0);
	I915_WRITE(LP_RING + RING_HEAD, 0);
	I915_WRITE(LP_RING + RING_TAIL, 0);
	I915_WRITE(LP_RING + RING_START, 0);
	I915_WRITE(PRB0_CTL, 0);
	I915_WRITE(PRB0_HEAD, 0);
	I915_WRITE(PRB0_TAIL, 0);
	I915_WRITE(PRB0_START, 0);

	/* Initialize the ring. */
	I915_WRITE(LP_RING + RING_START, obj_priv->gtt_offset);
	I915_WRITE(LP_RING + RING_LEN,
	I915_WRITE(PRB0_START, obj_priv->gtt_offset);
	I915_WRITE(PRB0_CTL,
		   ((obj->size - 4096) & RING_NR_PAGES) |
		   RING_NO_REPORT |
		   RING_VALID);

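The PRB0_CTL write above packs the ring size into the control register. A worked example (illustration only, field layout assumed from the RING_NR_PAGES encoding used here): for a page-aligned ring, (size - 4096) equals (number of pages - 1) shifted into the page-count field.

	u32 size = 128 * 1024;				/* 32 pages of 4 KiB */
	u32 ctl = ((size - 4096) & RING_NR_PAGES) |	/* 0x1f000: pages - 1 = 31 */
		  RING_NO_REPORT | RING_VALID;		/* no head reporting, ring enabled */

	I915_WRITE(PRB0_CTL, ctl);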
@@ -220,15 +220,15 @@ static int i915_interrupt_info(char *buf, char **start, off_t offset,
	*start = &buf[offset];
	*eof = 0;
	DRM_PROC_PRINT("Interrupt enable: %08x\n",
		       I915_READ(I915REG_INT_ENABLE_R));
		       I915_READ(IER));
	DRM_PROC_PRINT("Interrupt identity: %08x\n",
		       I915_READ(I915REG_INT_IDENTITY_R));
		       I915_READ(IIR));
	DRM_PROC_PRINT("Interrupt mask: %08x\n",
		       I915_READ(I915REG_INT_MASK_R));
		       I915_READ(IMR));
	DRM_PROC_PRINT("Pipe A stat: %08x\n",
		       I915_READ(I915REG_PIPEASTAT));
		       I915_READ(PIPEASTAT));
	DRM_PROC_PRINT("Pipe B stat: %08x\n",
		       I915_READ(I915REG_PIPEBSTAT));
		       I915_READ(PIPEBSTAT));
	DRM_PROC_PRINT("Interrupts received: %d\n",
		       atomic_read(&dev_priv->irq_received));
	DRM_PROC_PRINT("Current sequence: %d\n",

@@ -0,0 +1,387 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2008 Intel Corporation <hong.liu@intel.com>
|
||||
* Copyright 2008 Red Hat <mjg@redhat.com>
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NON-INFRINGEMENT. IN NO EVENT SHALL INTEL AND/OR ITS SUPPLIERS BE
|
||||
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/acpi.h>

#include "drmP.h"
#include "i915_drm.h"
#include "i915_drv.h"

#define PCI_ASLE 0xe4
#define PCI_ASLS 0xfc

#define OPREGION_SZ (8*1024)
#define OPREGION_HEADER_OFFSET 0
#define OPREGION_ACPI_OFFSET 0x100
#define OPREGION_SWSCI_OFFSET 0x200
#define OPREGION_ASLE_OFFSET 0x300
#define OPREGION_VBT_OFFSET 0x1000

#define OPREGION_SIGNATURE "IntelGraphicsMem"
#define MBOX_ACPI (1<<0)
#define MBOX_SWSCI (1<<1)
#define MBOX_ASLE (1<<2)

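The offsets above carve the 8 KiB OpRegion into its mailboxes. A layout sketch (illustration only; it mirrors what intel_opregion_init() does further down, with asls read from the PCI_ASLS config dword):

	void *base = ioremap(asls, OPREGION_SZ);
	struct opregion_header *header = base + OPREGION_HEADER_OFFSET;
	struct opregion_acpi *acpi = base + OPREGION_ACPI_OFFSET;	/* mailbox #1 */
	struct opregion_swsci *swsci = base + OPREGION_SWSCI_OFFSET;	/* mailbox #2 */
	struct opregion_asle *asle = base + OPREGION_ASLE_OFFSET;	/* mailbox #3 */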
||||
/* _DOD id definitions */
|
||||
#define OUTPUT_CONNECTOR_MSK 0xf000
|
||||
#define OUTPUT_CONNECTOR_OFFSET 12
|
||||
|
||||
#define OUTPUT_PORT_MSK 0x00f0
|
||||
#define OUTPUT_PORT_OFFSET 4
|
||||
#define OUTPUT_PORT_ANALOG 0
|
||||
#define OUTPUT_PORT_LVDS 1
|
||||
#define OUTPUT_PORT_SDVOB 2
|
||||
#define OUTPUT_PORT_SDVOC 3
|
||||
#define OUTPUT_PORT_TV 4
|
||||
|
||||
#define OUTPUT_DISPLAY_MSK 0x0f00
|
||||
#define OUTPUT_DISPLAY_OFFSET 8
|
||||
#define OUTPUT_DISPLAY_OTHER 0
|
||||
#define OUTPUT_DISPLAY_VGA 1
|
||||
#define OUTPUT_DISPLAY_TV 2
|
||||
#define OUTPUT_DISPLAY_DIGI 3
|
||||
#define OUTPUT_DISPLAY_FLAT_PANEL 4
|
||||
|
||||
/* predefined id for integrated LVDS and VGA connector */
|
||||
#define OUTPUT_INT_LVDS 0x00000110
|
||||
#define OUTPUT_INT_VGA 0x80000100
|
||||
|
||||
struct opregion_header {
|
||||
u8 signature[16];
|
||||
u32 size;
|
||||
u32 opregion_ver;
|
||||
u8 bios_ver[32];
|
||||
u8 vbios_ver[16];
|
||||
u8 driver_ver[16];
|
||||
u32 mboxes;
|
||||
u8 reserved[164];
|
||||
} __attribute__((packed));
|
||||
|
||||
/* OpRegion mailbox #1: public ACPI methods */
|
||||
struct opregion_acpi {
|
||||
u32 drdy; /* driver readiness */
|
||||
u32 csts; /* notification status */
|
||||
u32 cevt; /* current event */
|
||||
u8 rsvd1[20];
|
||||
u32 didl[8]; /* supported display devices ID list */
|
||||
u32 cpdl[8]; /* currently presented display list */
|
||||
u32 cadl[8]; /* currently active display list */
|
||||
u32 nadl[8]; /* next active devices list */
|
||||
u32 aslp; /* ASL sleep time-out */
|
||||
u32 tidx; /* toggle table index */
|
||||
u32 chpd; /* current hotplug enable indicator */
|
||||
u32 clid; /* current lid state*/
|
||||
u32 cdck; /* current docking state */
|
||||
u32 sxsw; /* Sx state resume */
|
||||
u32 evts; /* ASL supported events */
|
||||
u32 cnot; /* current OS notification */
|
||||
u32 nrdy; /* driver status */
|
||||
u8 rsvd2[60];
|
||||
} __attribute__((packed));
|
||||
|
||||
/* OpRegion mailbox #2: SWSCI */
|
||||
struct opregion_swsci {
|
||||
u32 scic; /* SWSCI command|status|data */
|
||||
u32 parm; /* command parameters */
|
||||
u32 dslp; /* driver sleep time-out */
|
||||
u8 rsvd[244];
|
||||
} __attribute__((packed));
|
||||
|
||||
/* OpRegion mailbox #3: ASLE */
struct opregion_asle {
	u32 ardy;	/* driver readiness */
	u32 aslc;	/* ASLE interrupt command */
	u32 tche;	/* technology enabled indicator */
	u32 alsi;	/* current ALS illuminance reading */
	u32 bclp;	/* backlight brightness to set */
	u32 pfit;	/* panel fitting state */
	u32 cblv;	/* current brightness level */
	u16 bclm[20];	/* backlight level duty cycle mapping table */
	u32 cpfm;	/* current panel fitting mode */
	u32 epfm;	/* enabled panel fitting modes */
	u8 plut[74];	/* panel LUT and identifier */
	u32 pfmb;	/* PWM freq and min brightness */
	u8 rsvd[102];
} __attribute__((packed));

/* ASLE irq request bits */
#define ASLE_SET_ALS_ILLUM (1 << 0)
#define ASLE_SET_BACKLIGHT (1 << 1)
#define ASLE_SET_PFIT (1 << 2)
#define ASLE_SET_PWM_FREQ (1 << 3)
#define ASLE_REQ_MSK 0xf

/* response bits of ASLE irq request */
#define ASLE_ALS_ILLUM_FAIL (2<<10)
#define ASLE_BACKLIGHT_FAIL (2<<12)
#define ASLE_PFIT_FAIL (2<<14)
#define ASLE_PWM_FREQ_FAIL (2<<16)

/* ASLE backlight brightness to set */
#define ASLE_BCLP_VALID (1<<31)
#define ASLE_BCLP_MSK (~(1<<31))

/* ASLE panel fitting request */
#define ASLE_PFIT_VALID (1<<31)
#define ASLE_PFIT_CENTER (1<<0)
#define ASLE_PFIT_STRETCH_TEXT (1<<1)
#define ASLE_PFIT_STRETCH_GFX (1<<2)

/* PWM frequency and minimum brightness */
#define ASLE_PFMB_BRIGHTNESS_MASK (0xff)
#define ASLE_PFMB_BRIGHTNESS_VALID (1<<8)
#define ASLE_PFMB_PWM_MASK (0x7ffffe00)
#define ASLE_PFMB_PWM_VALID (1<<31)

#define ASLE_CBLV_VALID (1<<31)

static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct opregion_asle *asle = dev_priv->opregion.asle;
	u32 blc_pwm_ctl;

	if (!(bclp & ASLE_BCLP_VALID))
		return ASLE_BACKLIGHT_FAIL;

	bclp &= ASLE_BCLP_MSK;
	if (bclp < 0 || bclp > 255)
		return ASLE_BACKLIGHT_FAIL;

	blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
	blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK;
	I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | ((bclp * 0x101) - 1));
	asle->cblv = (bclp * 0x64) / 0xff | ASLE_CBLV_VALID;

	return 0;
}

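asle_set_backlight() above scales the firmware's 0..255 request into the 16-bit backlight duty-cycle field and reports the result back as a percentage. A worked example of that arithmetic (numbers only, for illustration):

	u32 bclp = 128;				/* request: roughly half brightness */
	u32 duty = bclp * 0x101 - 1;		/* 128 * 257 - 1 = 0x807f, about 50% of 0xffff */
	u32 cblv = (bclp * 0x64) / 0xff;	/* 128 * 100 / 255 = 50 percent */

	cblv |= ASLE_CBLV_VALID;		/* tell the firmware the reported level is valid */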
||||
static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
if (pfmb & ASLE_PFMB_PWM_VALID) {
|
||||
u32 blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
|
||||
u32 pwm = pfmb & ASLE_PFMB_PWM_MASK;
|
||||
blc_pwm_ctl &= BACKLIGHT_DUTY_CYCLE_MASK;
|
||||
pwm = pwm >> 9;
|
||||
// FIXME - what do we do with the PWM?
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
|
||||
{
|
||||
if (!(pfit & ASLE_PFIT_VALID))
|
||||
return ASLE_PFIT_FAIL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
void opregion_asle_intr(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct opregion_asle *asle = dev_priv->opregion.asle;
|
||||
u32 asle_stat = 0;
|
||||
u32 asle_req;
|
||||
|
||||
if (!asle)
|
||||
return;
|
||||
|
||||
asle_req = asle->aslc & ASLE_REQ_MSK;
|
||||
|
||||
if (!asle_req) {
|
||||
DRM_DEBUG("non asle set request??\n");
|
||||
return;
|
||||
}
|
||||
|
||||
if (asle_req & ASLE_SET_ALS_ILLUM)
|
||||
asle_stat |= asle_set_als_illum(dev, asle->alsi);
|
||||
|
||||
if (asle_req & ASLE_SET_BACKLIGHT)
|
||||
asle_stat |= asle_set_backlight(dev, asle->bclp);
|
||||
|
||||
if (asle_req & ASLE_SET_PFIT)
|
||||
asle_stat |= asle_set_pfit(dev, asle->pfit);
|
||||
|
||||
if (asle_req & ASLE_SET_PWM_FREQ)
|
||||
asle_stat |= asle_set_pwm_freq(dev, asle->pfmb);
|
||||
|
||||
asle->aslc = asle_stat;
|
||||
}
|
||||
|
||||
#define ASLE_ALS_EN (1<<0)
|
||||
#define ASLE_BLC_EN (1<<1)
|
||||
#define ASLE_PFIT_EN (1<<2)
|
||||
#define ASLE_PFMB_EN (1<<3)
|
||||
|
||||
void opregion_enable_asle(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct opregion_asle *asle = dev_priv->opregion.asle;
|
||||
|
||||
if (asle) {
|
||||
if (IS_MOBILE(dev)) {
|
||||
u32 pipeb_stats = I915_READ(PIPEBSTAT);
|
||||
/* Some hardware uses the legacy backlight controller
|
||||
to signal interrupts, so we need to set up pipe B
|
||||
to generate an IRQ on writes */
|
||||
pipeb_stats |= I915_LEGACY_BLC_EVENT_ENABLE;
|
||||
I915_WRITE(PIPEBSTAT, pipeb_stats);
|
||||
|
||||
dev_priv->irq_mask_reg &=
|
||||
~I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
|
||||
}
|
||||
|
||||
dev_priv->irq_mask_reg &= ~I915_ASLE_INTERRUPT;
|
||||
|
||||
asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
|
||||
ASLE_PFMB_EN;
|
||||
asle->ardy = 1;
|
||||
}
|
||||
}
|
||||
|
||||
#define ACPI_EV_DISPLAY_SWITCH (1<<0)
|
||||
#define ACPI_EV_LID (1<<1)
|
||||
#define ACPI_EV_DOCK (1<<2)
|
||||
|
||||
static struct intel_opregion *system_opregion;
|
||||
|
||||
int intel_opregion_video_event(struct notifier_block *nb, unsigned long val,
|
||||
void *data)
|
||||
{
|
||||
/* The only video events relevant to opregion are 0x80. These indicate
|
||||
either a docking event, lid switch or display switch request. In
|
||||
Linux, these are handled by the dock, button and video drivers.
|
||||
We might want to fix the video driver to be opregion-aware in
|
||||
future, but right now we just indicate to the firmware that the
|
||||
request has been handled */
|
||||
|
||||
struct opregion_acpi *acpi;
|
||||
|
||||
if (!system_opregion)
|
||||
return NOTIFY_DONE;
|
||||
|
||||
acpi = system_opregion->acpi;
|
||||
acpi->csts = 0;
|
||||
|
||||
return NOTIFY_OK;
|
||||
}
|
||||
|
||||
static struct notifier_block intel_opregion_notifier = {
|
||||
.notifier_call = intel_opregion_video_event,
|
||||
};
|
||||
|
||||
int intel_opregion_init(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_opregion *opregion = &dev_priv->opregion;
|
||||
void *base;
|
||||
u32 asls, mboxes;
|
||||
int err = 0;
|
||||
|
||||
pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
|
||||
DRM_DEBUG("graphic opregion physical addr: 0x%x\n", asls);
|
||||
if (asls == 0) {
|
||||
DRM_DEBUG("ACPI OpRegion not supported!\n");
|
||||
return -ENOTSUPP;
|
||||
}
|
||||
|
||||
base = ioremap(asls, OPREGION_SZ);
|
||||
if (!base)
|
||||
return -ENOMEM;
|
||||
|
||||
opregion->header = base;
|
||||
if (memcmp(opregion->header->signature, OPREGION_SIGNATURE, 16)) {
|
||||
DRM_DEBUG("opregion signature mismatch\n");
|
||||
err = -EINVAL;
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
mboxes = opregion->header->mboxes;
|
||||
if (mboxes & MBOX_ACPI) {
|
||||
DRM_DEBUG("Public ACPI methods supported\n");
|
||||
opregion->acpi = base + OPREGION_ACPI_OFFSET;
|
||||
} else {
|
||||
DRM_DEBUG("Public ACPI methods not supported\n");
|
||||
err = -ENOTSUPP;
|
||||
goto err_out;
|
||||
}
|
||||
opregion->enabled = 1;
|
||||
|
||||
if (mboxes & MBOX_SWSCI) {
|
||||
DRM_DEBUG("SWSCI supported\n");
|
||||
opregion->swsci = base + OPREGION_SWSCI_OFFSET;
|
||||
}
|
||||
if (mboxes & MBOX_ASLE) {
|
||||
DRM_DEBUG("ASLE supported\n");
|
||||
opregion->asle = base + OPREGION_ASLE_OFFSET;
|
||||
}
|
||||
|
||||
/* Notify BIOS we are ready to handle ACPI video ext notifs.
|
||||
* Right now, all the events are handled by the ACPI video module.
|
||||
* We don't actually need to do anything with them. */
|
||||
opregion->acpi->csts = 0;
|
||||
opregion->acpi->drdy = 1;
|
||||
|
||||
system_opregion = opregion;
|
||||
register_acpi_notifier(&intel_opregion_notifier);
|
||||
|
||||
return 0;
|
||||
|
||||
err_out:
|
||||
iounmap(opregion->header);
|
||||
opregion->header = NULL;
|
||||
return err;
|
||||
}
|
||||
|
||||
void intel_opregion_free(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_opregion *opregion = &dev_priv->opregion;
|
||||
|
||||
if (!opregion->enabled)
|
||||
return;
|
||||
|
||||
opregion->acpi->drdy = 0;
|
||||
|
||||
system_opregion = NULL;
|
||||
unregister_acpi_notifier(&intel_opregion_notifier);
|
||||
|
||||
/* just clear all opregion memory pointers now */
|
||||
iounmap(opregion->header);
|
||||
opregion->header = NULL;
|
||||
opregion->acpi = NULL;
|
||||
opregion->swsci = NULL;
|
||||
opregion->asle = NULL;
|
||||
|
||||
opregion->enabled = 0;
|
||||
}
|
|
@ -0,0 +1 @@
|
|||
../shared-core/i915_suspend.c
|
|
@@ -52,6 +52,28 @@ static int dri_library_name(struct drm_device * dev, char * buf)
			 "r300"));
}

static int radeon_suspend(struct drm_device *dev, pm_message_t state)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;

	/* Disable *all* interrupts */
	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690)
		RADEON_WRITE(R500_DxMODE_INT_MASK, 0);
	RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
	return 0;
}

static int radeon_resume(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;

	/* Restore interrupt registers */
	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690)
		RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg);
	RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
	return 0;
}

static struct pci_device_id pciidlist[] = {
	radeon_PCI_IDS
};

@ -69,6 +91,8 @@ static struct drm_driver driver = {
|
|||
.postclose = radeon_driver_postclose,
|
||||
.lastclose = radeon_driver_lastclose,
|
||||
.unload = radeon_driver_unload,
|
||||
.suspend = radeon_suspend,
|
||||
.resume = radeon_resume,
|
||||
.get_vblank_counter = radeon_get_vblank_counter,
|
||||
.enable_vblank = radeon_enable_vblank,
|
||||
.disable_vblank = radeon_disable_vblank,
|
||||
|
|
|
@ -135,7 +135,7 @@ int xgi_submit_cmdlist(struct drm_device * dev, void * data,
|
|||
DRM_DEBUG("info->cmdring.last_ptr != NULL\n");
|
||||
|
||||
if (pCmdInfo->type == BTYPE_3D) {
|
||||
xgi_emit_flush(info, FALSE);
|
||||
xgi_emit_flush(info, false);
|
||||
}
|
||||
|
||||
info->cmdring.last_ptr[1] = cpu_to_le32(begin[1]);
|
||||
|
@ -148,7 +148,9 @@ int xgi_submit_cmdlist(struct drm_device * dev, void * data,
|
|||
}
|
||||
|
||||
info->cmdring.last_ptr = xgi_find_pcie_virt(info, pCmdInfo->hw_addr);
|
||||
#ifdef XGI_HAVE_FENCE
|
||||
drm_fence_flush_old(info->dev, 0, info->next_sequence);
|
||||
#endif /* XGI_HAVE_FENCE */
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -214,7 +216,7 @@ void xgi_cmdlist_cleanup(struct xgi_info * info)
|
|||
* list chain with a flush command.
|
||||
*/
|
||||
if (info->cmdring.last_ptr != NULL) {
|
||||
xgi_emit_flush(info, FALSE);
|
||||
xgi_emit_flush(info, false);
|
||||
xgi_emit_nop(info);
|
||||
}
|
||||
|
||||
|
@ -322,5 +324,5 @@ void xgi_emit_irq(struct xgi_info * info)
|
|||
if (info->cmdring.last_ptr == NULL)
|
||||
return;
|
||||
|
||||
xgi_emit_flush(info, TRUE);
|
||||
xgi_emit_flush(info, true);
|
||||
}
|
||||
|
|
|
@ -37,7 +37,9 @@ static struct pci_device_id pciidlist[] = {
|
|||
xgi_PCI_IDS
|
||||
};
|
||||
|
||||
#ifdef XGI_HAVE_FENCE
|
||||
extern struct drm_fence_driver xgi_fence_driver;
|
||||
#endif /* XGI_HAVE_FENCE */
|
||||
|
||||
int xgi_bootstrap(struct drm_device *, void *, struct drm_file *);
|
||||
|
||||
|
@ -47,6 +49,8 @@ static struct drm_ioctl_desc xgi_ioctls[] = {
|
|||
DRM_IOCTL_DEF(DRM_XGI_FREE, xgi_free_ioctl, DRM_AUTH),
|
||||
DRM_IOCTL_DEF(DRM_XGI_SUBMIT_CMDLIST, xgi_submit_cmdlist, DRM_AUTH),
|
||||
DRM_IOCTL_DEF(DRM_XGI_STATE_CHANGE, xgi_state_change_ioctl, DRM_AUTH|DRM_MASTER),
|
||||
DRM_IOCTL_DEF(DRM_XGI_SET_FENCE, xgi_set_fence_ioctl, DRM_AUTH),
|
||||
DRM_IOCTL_DEF(DRM_XGI_WAIT_FENCE, xgi_wait_fence_ioctl, DRM_AUTH),
|
||||
};
|
||||
|
||||
static const int xgi_max_ioctl = DRM_ARRAY_SIZE(xgi_ioctls);
|
||||
|
@ -58,6 +62,7 @@ static void xgi_driver_lastclose(struct drm_device * dev);
|
|||
static void xgi_reclaim_buffers_locked(struct drm_device * dev,
|
||||
struct drm_file * filp);
|
||||
static irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS);
|
||||
static int xgi_kern_isr_postinstall(struct drm_device * dev);
|
||||
|
||||
|
||||
static struct drm_driver driver = {
|
||||
|
@ -70,7 +75,7 @@ static struct drm_driver driver = {
|
|||
.lastclose = xgi_driver_lastclose,
|
||||
.dma_quiescent = NULL,
|
||||
.irq_preinstall = NULL,
|
||||
.irq_postinstall = NULL,
|
||||
.irq_postinstall = xgi_kern_isr_postinstall,
|
||||
.irq_uninstall = NULL,
|
||||
.irq_handler = xgi_kern_isr,
|
||||
.reclaim_buffers = drm_core_reclaim_buffers,
|
||||
|
@ -100,7 +105,9 @@ static struct drm_driver driver = {
|
|||
.remove = __devexit_p(drm_cleanup_pci),
|
||||
},
|
||||
|
||||
#ifdef XGI_HAVE_FENCE
|
||||
.fence_driver = &xgi_fence_driver,
|
||||
#endif /* XGI_HAVE_FENCE */
|
||||
|
||||
.name = DRIVER_NAME,
|
||||
.desc = DRIVER_DESC,
|
||||
|
@ -307,8 +314,8 @@ void xgi_driver_lastclose(struct drm_device * dev)
|
|||
|| info->pcie_heap_initialized) {
|
||||
drm_sman_cleanup(&info->sman);
|
||||
|
||||
info->fb_heap_initialized = FALSE;
|
||||
info->pcie_heap_initialized = FALSE;
|
||||
info->fb_heap_initialized = false;
|
||||
info->pcie_heap_initialized = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -355,7 +362,10 @@ irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS)
|
|||
DRM_WRITE32(info->mmio_map,
|
||||
0x2800 + M2REG_AUTO_LINK_SETTING_ADDRESS,
|
||||
cpu_to_le32(M2REG_AUTO_LINK_SETTING_COMMAND | irq_bits));
|
||||
#ifdef XGI_HAVE_FENCE
|
||||
xgi_fence_handler(dev);
|
||||
#endif /* XGI_HAVE_FENCE */
|
||||
DRM_WAKEUP(&info->fence_queue);
|
||||
return IRQ_HANDLED;
|
||||
} else {
|
||||
return IRQ_NONE;
|
||||
|
@ -363,6 +373,15 @@ irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS)
|
|||
}
|
||||
|
||||
|
||||
int xgi_kern_isr_postinstall(struct drm_device * dev)
|
||||
{
|
||||
struct xgi_info *info = dev->dev_private;
|
||||
|
||||
DRM_INIT_WAITQUEUE(&info->fence_queue);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
int xgi_driver_load(struct drm_device *dev, unsigned long flags)
|
||||
{
|
||||
struct xgi_info *info = drm_alloc(sizeof(*info), DRM_MEM_DRIVER);
|
||||
|
|
|
@ -35,11 +35,11 @@
|
|||
|
||||
#define DRIVER_NAME "xgi"
|
||||
#define DRIVER_DESC "XGI XP5 / XP10 / XG47"
|
||||
#define DRIVER_DATE "20071003"
|
||||
#define DRIVER_DATE "20080612"
|
||||
|
||||
#define DRIVER_MAJOR 1
|
||||
#define DRIVER_MINOR 1
|
||||
#define DRIVER_PATCHLEVEL 3
|
||||
#define DRIVER_MINOR 2
|
||||
#define DRIVER_PATCHLEVEL 0
|
||||
|
||||
#include "xgi_cmdlist.h"
|
||||
#include "xgi_drm.h"
|
||||
|
@ -74,6 +74,7 @@ struct xgi_info {
|
|||
struct xgi_cmdring_info cmdring;
|
||||
|
||||
DRM_SPINTYPE fence_lock;
|
||||
wait_queue_head_t fence_queue;
|
||||
unsigned complete_sequence;
|
||||
unsigned next_sequence;
|
||||
};
|
||||
|
@ -86,7 +87,7 @@ extern int xgi_fb_heap_init(struct xgi_info * info);
|
|||
extern int xgi_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc,
|
||||
struct drm_file * filp);
|
||||
|
||||
extern int xgi_free(struct xgi_info * info, unsigned long index,
|
||||
extern int xgi_free(struct xgi_info * info, unsigned int index,
|
||||
struct drm_file * filp);
|
||||
|
||||
extern int xgi_pcie_heap_init(struct xgi_info * info);
|
||||
|
@ -98,12 +99,24 @@ extern void xgi_disable_mmio(struct xgi_info * info);
|
|||
extern void xgi_enable_ge(struct xgi_info * info);
|
||||
extern void xgi_disable_ge(struct xgi_info * info);
|
||||
|
||||
/* TTM-style fences.
|
||||
*/
|
||||
#ifdef XGI_HAVE_FENCE
|
||||
extern void xgi_poke_flush(struct drm_device * dev, uint32_t class);
|
||||
extern int xgi_fence_emit_sequence(struct drm_device * dev, uint32_t class,
|
||||
uint32_t flags, uint32_t * sequence, uint32_t * native_type);
|
||||
extern void xgi_fence_handler(struct drm_device * dev);
|
||||
extern int xgi_fence_has_irq(struct drm_device *dev, uint32_t class,
|
||||
uint32_t flags);
|
||||
#endif /* XGI_HAVE_FENCE */
|
||||
|
||||
|
||||
/* Non-TTM-style fences.
|
||||
*/
|
||||
extern int xgi_set_fence_ioctl(struct drm_device * dev, void * data,
|
||||
struct drm_file * filp);
|
||||
extern int xgi_wait_fence_ioctl(struct drm_device * dev, void * data,
|
||||
struct drm_file * filp);
|
||||
|
||||
extern int xgi_alloc_ioctl(struct drm_device * dev, void * data,
|
||||
struct drm_file * filp);
|
||||
|
|
|
@ -93,7 +93,7 @@ int xgi_alloc_ioctl(struct drm_device * dev, void * data,
|
|||
}
|
||||
|
||||
|
||||
int xgi_free(struct xgi_info * info, unsigned long index,
|
||||
int xgi_free(struct xgi_info * info, unsigned int index,
|
||||
struct drm_file * filp)
|
||||
{
|
||||
int err;
|
||||
|
@ -111,7 +111,7 @@ int xgi_free_ioctl(struct drm_device * dev, void * data,
|
|||
{
|
||||
struct xgi_info *info = dev->dev_private;
|
||||
|
||||
return xgi_free(info, *(unsigned long *) data, filp);
|
||||
return xgi_free(info, *(unsigned int *) data, filp);
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -30,6 +30,76 @@
|
|||
#include "xgi_misc.h"
|
||||
#include "xgi_cmdlist.h"
|
||||
|
||||
static int xgi_low_level_fence_emit(struct drm_device *dev, u32 *sequence)
|
||||
{
|
||||
struct xgi_info *const info = dev->dev_private;
|
||||
|
||||
if (info == NULL) {
|
||||
DRM_ERROR("called with no initialization\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
DRM_SPINLOCK(&info->fence_lock);
|
||||
info->next_sequence++;
|
||||
if (info->next_sequence > BEGIN_BEGIN_IDENTIFICATION_MASK) {
|
||||
info->next_sequence = 1;
|
||||
}
|
||||
|
||||
*sequence = (u32) info->next_sequence;
|
||||
DRM_SPINUNLOCK(&info->fence_lock);
|
||||
|
||||
|
||||
xgi_emit_irq(info);
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define GET_BEGIN_ID(i) (le32_to_cpu(DRM_READ32((i)->mmio_map, 0x2820)) \
			 & BEGIN_BEGIN_IDENTIFICATION_MASK)

static int xgi_low_level_fence_wait(struct drm_device *dev, unsigned *sequence)
{
	struct xgi_info *const info = dev->dev_private;
	unsigned int cur_fence;
	int ret = 0;

	if (info == NULL) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	/* Assume that the user has missed the current sequence number
	 * by about a day rather than she wants to wait for years
	 * using fences.
	 */
	DRM_WAIT_ON(ret, info->fence_queue, 3 * DRM_HZ,
		    ((((cur_fence = GET_BEGIN_ID(info))
		       - *sequence) & BEGIN_BEGIN_IDENTIFICATION_MASK)
		     <= (1 << 18)));

	info->complete_sequence = cur_fence;
	*sequence = cur_fence;

	return ret;
}
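The DRM_WAIT_ON() condition above compares sequence numbers that wrap at BEGIN_BEGIN_IDENTIFICATION_MASK, so it uses a masked subtraction and a window instead of a plain greater-or-equal test. A self-contained sketch of that wrap-safe check (the mask and window values are assumptions standing in for the driver's constants):

#include <stdbool.h>
#include <stdint.h>

#define SEQ_MASK   0x00ffffffu	/* assumed counter width */
#define SEQ_WINDOW (1u << 18)	/* how far past the target still counts as "reached" */

static bool fence_passed(uint32_t current, uint32_t target)
{
	/* (current - target), reduced to the counter width, is small once the
	 * target has been reached, even if the hardware counter wrapped. */
	return ((current - target) & SEQ_MASK) <= SEQ_WINDOW;
}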
|
||||
|
||||
int xgi_set_fence_ioctl(struct drm_device * dev, void * data,
|
||||
struct drm_file * filp)
|
||||
{
|
||||
(void) filp;
|
||||
return xgi_low_level_fence_emit(dev, (u32 *) data);
|
||||
}
|
||||
|
||||
|
||||
int xgi_wait_fence_ioctl(struct drm_device * dev, void * data,
|
||||
struct drm_file * filp)
|
||||
{
|
||||
(void) filp;
|
||||
return xgi_low_level_fence_wait(dev, (u32 *) data);
|
||||
}
|
||||
|
||||
|
||||
#ifdef XGI_HAVE_FENCE
|
||||
static void xgi_fence_poll(struct drm_device * dev, uint32_t class,
|
||||
uint32_t waiting_types)
|
||||
{
|
||||
|
@ -68,25 +138,18 @@ int xgi_fence_emit_sequence(struct drm_device * dev, uint32_t class,
|
|||
uint32_t flags, uint32_t * sequence,
|
||||
uint32_t * native_type)
|
||||
{
|
||||
struct xgi_info * info = dev->dev_private;
|
||||
int err;
|
||||
|
||||
if ((info == NULL) || (class != 0))
|
||||
(void) flags;
|
||||
|
||||
if (class != 0)
|
||||
return -EINVAL;
|
||||
|
||||
err = xgi_low_level_fence_emit(dev, sequence);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
DRM_SPINLOCK(&info->fence_lock);
|
||||
info->next_sequence++;
|
||||
if (info->next_sequence > BEGIN_BEGIN_IDENTIFICATION_MASK) {
|
||||
info->next_sequence = 1;
|
||||
}
|
||||
DRM_SPINUNLOCK(&info->fence_lock);
|
||||
|
||||
|
||||
xgi_emit_irq(info);
|
||||
|
||||
*sequence = (uint32_t) info->next_sequence;
|
||||
*native_type = DRM_FENCE_TYPE_EXE;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -120,3 +183,4 @@ struct drm_fence_driver xgi_fence_driver = {
|
|||
.wait = NULL
|
||||
};
|
||||
|
||||
#endif /* XGI_HAVE_FENCE */
|
||||
|
|
|
@ -46,41 +46,41 @@ static bool xgi_validate_signal(struct drm_map * map)
|
|||
check = le16_to_cpu(DRM_READ16(map, 0x2360));
|
||||
|
||||
if ((check & 0x3f) != ((check & 0x3f00) >> 8)) {
|
||||
return FALSE;
|
||||
return false;
|
||||
}
|
||||
|
||||
/* Check RO channel */
|
||||
DRM_WRITE8(map, 0x235c, 0x83);
|
||||
check = le16_to_cpu(DRM_READ16(map, 0x2360));
|
||||
if ((check & 0x0f) != ((check & 0xf0) >> 4)) {
|
||||
return FALSE;
|
||||
return false;
|
||||
}
|
||||
|
||||
/* Check RW channel */
|
||||
DRM_WRITE8(map, 0x235c, 0x88);
|
||||
check = le16_to_cpu(DRM_READ16(map, 0x2360));
|
||||
if ((check & 0x0f) != ((check & 0xf0) >> 4)) {
|
||||
return FALSE;
|
||||
return false;
|
||||
}
|
||||
|
||||
/* Check RO channel outstanding */
|
||||
DRM_WRITE8(map, 0x235c, 0x8f);
|
||||
check = le16_to_cpu(DRM_READ16(map, 0x2360));
|
||||
if (0 != (check & 0x3ff)) {
|
||||
return FALSE;
|
||||
return false;
|
||||
}
|
||||
|
||||
/* Check RW channel outstanding */
|
||||
DRM_WRITE8(map, 0x235c, 0x90);
|
||||
check = le16_to_cpu(DRM_READ16(map, 0x2360));
|
||||
if (0 != (check & 0x3ff)) {
|
||||
return FALSE;
|
||||
return false;
|
||||
}
|
||||
|
||||
/* No pending PCIE request. GE stall. */
|
||||
}
|
||||
|
||||
return TRUE;
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
|
@ -138,7 +138,7 @@ static void xgi_ge_hang_reset(struct drm_map * map)
|
|||
bool xgi_ge_irq_handler(struct xgi_info * info)
|
||||
{
|
||||
const u32 int_status = le32_to_cpu(DRM_READ32(info->mmio_map, 0x2810));
|
||||
bool is_support_auto_reset = FALSE;
|
||||
bool is_support_auto_reset = false;
|
||||
|
||||
/* Check GE on/off */
|
||||
if (0 == (0xffffc0f0 & int_status)) {
|
||||
|
@ -179,15 +179,15 @@ bool xgi_ge_irq_handler(struct xgi_info * info)
|
|||
cpu_to_le32((int_status & ~0x01) | 0x04000000));
|
||||
}
|
||||
|
||||
return TRUE;
|
||||
return true;
|
||||
}
|
||||
|
||||
return FALSE;
|
||||
return false;
|
||||
}
|
||||
|
||||
bool xgi_crt_irq_handler(struct xgi_info * info)
|
||||
{
|
||||
bool ret = FALSE;
|
||||
bool ret = false;
|
||||
u8 save_3ce = DRM_READ8(info->mmio_map, 0x3ce);
|
||||
|
||||
/* CRT1 interrupt just happened
|
||||
|
@ -205,7 +205,7 @@ bool xgi_crt_irq_handler(struct xgi_info * info)
|
|||
op3cf_3d = IN3CFB(info->mmio_map, 0x3d);
|
||||
OUT3CFB(info->mmio_map, 0x3d, (op3cf_3d | 0x04));
|
||||
OUT3CFB(info->mmio_map, 0x3d, (op3cf_3d & ~0x04));
|
||||
ret = TRUE;
|
||||
ret = true;
|
||||
}
|
||||
DRM_WRITE8(info->mmio_map, 0x3ce, save_3ce);
|
||||
|
||||
|
@ -214,7 +214,7 @@ bool xgi_crt_irq_handler(struct xgi_info * info)
|
|||
|
||||
bool xgi_dvi_irq_handler(struct xgi_info * info)
|
||||
{
|
||||
bool ret = FALSE;
|
||||
bool ret = false;
|
||||
const u8 save_3ce = DRM_READ8(info->mmio_map, 0x3ce);
|
||||
|
||||
/* DVI interrupt just happened
|
||||
|
@ -242,7 +242,7 @@ bool xgi_dvi_irq_handler(struct xgi_info * info)
|
|||
OUT3C5B(info->mmio_map, 0x39, (op3cf_39 & ~0x01));
|
||||
OUT3C5B(info->mmio_map, 0x39, (op3cf_39 | 0x01));
|
||||
|
||||
ret = TRUE;
|
||||
ret = true;
|
||||
}
|
||||
DRM_WRITE8(info->mmio_map, 0x3ce, save_3ce);
|
||||
|
||||
|
|
|
@ -1049,7 +1049,7 @@ struct drm_gem_open {
|
|||
#define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, struct drm_agp_binding)
|
||||
#define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, struct drm_agp_binding)
|
||||
|
||||
#define DRM_IOCTL_SG_ALLOC DRM_IOW( 0x38, struct drm_scatter_gather)
|
||||
#define DRM_IOCTL_SG_ALLOC DRM_IOWR(0x38, struct drm_scatter_gather)
|
||||
#define DRM_IOCTL_SG_FREE DRM_IOW( 0x39, struct drm_scatter_gather)
|
||||
|
||||
#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, union drm_wait_vblank)
|
||||
|
|
|
@ -79,7 +79,6 @@
|
|||
0x1002 0x5460 CHIP_RV380|RADEON_IS_MOBILITY "ATI Radeon Mobility X300 M22"
|
||||
0x1002 0x5462 CHIP_RV380|RADEON_IS_MOBILITY "ATI Radeon Mobility X600 SE M24C"
|
||||
0x1002 0x5464 CHIP_RV380|RADEON_IS_MOBILITY "ATI FireGL M22 GL 5464"
|
||||
0x1002 0x5657 CHIP_RV380|RADEON_NEW_MEMMAP "ATI Radeon RV370 X550XTX"
|
||||
0x1002 0x5548 CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R423 X800"
|
||||
0x1002 0x5549 CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R423 X800 Pro"
|
||||
0x1002 0x554A CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R423 X800 XT PE"
|
||||
|
@ -97,9 +96,10 @@
|
|||
0x1002 0x564F CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Radeon Mobility X700 XL M26"
|
||||
0x1002 0x5652 CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Radeon Mobility X700 M26"
|
||||
0x1002 0x5653 CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Radeon Mobility X700 M26"
|
||||
0x1002 0x5657 CHIP_RV410|RADEON_NEW_MEMMAP "ATI Radeon X550XTX"
|
||||
0x1002 0x5834 CHIP_RS300|RADEON_IS_IGP "ATI Radeon RS300 9100 IGP"
|
||||
0x1002 0x5835 CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY "ATI Radeon RS300 Mobility IGP"
|
||||
0x1002 0x5954 CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART "ATI RS480 XPRESS 200G"
|
||||
0x1002 0x5954 CHIP_RS480|RADEON_IS_IGP|RADEON_IS_IGPGART "ATI RS480 XPRESS 200G"
|
||||
0x1002 0x5955 CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART "ATI Radeon XPRESS 200M 5955"
|
||||
0x1002 0x5974 CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART "ATI Radeon RS482 XPRESS 200"
|
||||
0x1002 0x5975 CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART "ATI Radeon RS485 XPRESS 1100 IGP"
|
||||
|
@ -109,8 +109,10 @@
|
|||
0x1002 0x5964 CHIP_RV280 "ATI Radeon RV280 9200 SE"
|
||||
0x1002 0x5965 CHIP_RV280 "ATI FireMV 2200 PCI"
|
||||
0x1002 0x5969 CHIP_RV100 "ATI ES1000 RN50"
|
||||
0x1002 0x5a61 CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART "ATI Radeon RC410 XPRESS 200"
|
||||
0x1002 0x5a62 CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART "ATI Radeon RC410 XPRESS 200M"
|
||||
0x1002 0x5a41 CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART "ATI Radeon XPRESS 200 5A41 (PCIE)"
|
||||
0x1002 0x5a42 CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART "ATI Radeon XPRESS 200M 5A42 (PCIE)"
|
||||
0x1002 0x5a61 CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART "ATI Radeon RC410 XPRESS 200"
|
||||
0x1002 0x5a62 CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART "ATI Radeon RC410 XPRESS 200M"
|
||||
0x1002 0x5b60 CHIP_RV380|RADEON_NEW_MEMMAP "ATI Radeon RV370 X300 SE"
|
||||
0x1002 0x5b62 CHIP_RV380|RADEON_NEW_MEMMAP "ATI Radeon RV370 X600 Pro"
|
||||
0x1002 0x5b63 CHIP_RV380|RADEON_NEW_MEMMAP "ATI Radeon RV370 X550"
|
||||
|
@ -392,7 +394,10 @@
|
|||
0x8086 0x29C2 CHIP_I9XX|CHIP_I915 "Intel G33"
|
||||
0x8086 0x29B2 CHIP_I9XX|CHIP_I915 "Intel Q35"
|
||||
0x8086 0x29D2 CHIP_I9XX|CHIP_I915 "Intel Q33"
|
||||
0x8086 0x2A42 CHIP_I9XX|CHIP_I965 "Intel Integrated Graphics Device"
|
||||
0x8086 0x2A42 CHIP_I9XX|CHIP_I965 "Mobile Intel® GM45 Express Chipset"
|
||||
0x8086 0x2E02 CHIP_I9XX|CHIP_I965 "Intel Integrated Graphics Device"
|
||||
0x8086 0x2E12 CHIP_I9XX|CHIP_I965 "Intel Q45/Q43"
|
||||
0x8086 0x2E22 CHIP_I9XX|CHIP_I965 "Intel G45/G43"
|
||||
|
||||
[imagine]
|
||||
0x105d 0x2309 IMAGINE_128 "Imagine 128"
|
||||
|
|
|
@@ -40,14 +40,14 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
	u32 last_head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
	u32 acthd_reg = IS_I965G(dev) ? I965REG_ACTHD : I915REG_ACTHD;
	u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
	u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
	u32 last_acthd = I915_READ(acthd_reg);
	u32 acthd;
	int i;

	for (i = 0; i < 100000; i++) {
		ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
		ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
		acthd = I915_READ(acthd_reg);
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)

@@ -136,8 +136,8 @@ void i915_kernel_lost_context(struct drm_device * dev)
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);

	ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
	ring->tail = I915_READ(LP_RING + RING_TAIL) & TAIL_ADDR;
	ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
	ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
	ring->space = ring->head - (ring->tail + 8);
	if (ring->space < 0)
		ring->space += ring->Size;

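Both hunks above recompute ring->space as head minus (tail + 8), adding the ring size when the result goes negative. A self-contained sketch of that circular-buffer free-space calculation (the small gap keeps a completely full ring from looking identical to an empty one, where head equals tail):

#include <stdint.h>

static int32_t ring_space(uint32_t head, uint32_t tail, uint32_t size)
{
	int32_t space = (int32_t)head - (int32_t)(tail + 8);

	if (space < 0)		/* tail is ahead of head, so free space wraps around */
		space += (int32_t)size;
	return space;
}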
@ -542,8 +542,8 @@ void i915_emit_breadcrumb(struct drm_device *dev)
|
|||
dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
|
||||
|
||||
BEGIN_LP_RING(4);
|
||||
OUT_RING(CMD_STORE_DWORD_IDX);
|
||||
OUT_RING(5 << STORE_DWORD_INDEX_SHIFT);
|
||||
OUT_RING(MI_STORE_DWORD_INDEX);
|
||||
OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
|
||||
OUT_RING(dev_priv->counter);
|
||||
OUT_RING(0);
|
||||
ADVANCE_LP_RING();
|
||||
|
@ -553,7 +553,7 @@ void i915_emit_breadcrumb(struct drm_device *dev)
|
|||
int i915_emit_mi_flush(struct drm_device *dev, uint32_t flush)
|
||||
{
|
||||
drm_i915_private_t *dev_priv = dev->dev_private;
|
||||
uint32_t flush_cmd = CMD_MI_FLUSH;
|
||||
uint32_t flush_cmd = MI_FLUSH;
|
||||
RING_LOCALS;
|
||||
|
||||
flush_cmd |= flush;
|
||||
|
@ -1032,7 +1032,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
|
|||
dev_priv->hw_status_page = dev_priv->hws_map.handle;
|
||||
|
||||
memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
|
||||
I915_WRITE(0x02080, dev_priv->status_gfx_addr);
|
||||
I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
|
||||
DRM_DEBUG("load hws 0x2080 with gfx mem 0x%x\n",
|
||||
dev_priv->status_gfx_addr);
|
||||
DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
|
||||
|
@ -1041,9 +1041,9 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
|
|||
|
||||
int i915_driver_load(struct drm_device *dev, unsigned long flags)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv;
|
||||
unsigned long base, size;
|
||||
int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1;
|
||||
int ret = 0, num_pipes = 2, mmio_bar = IS_I9XX(dev) ? 0 : 1;
|
||||
|
||||
/* i915 has 4 more counters */
|
||||
dev->counters += 4;
|
||||
|
@ -1074,27 +1074,76 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
|
|||
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
|
||||
intel_init_chipset_flush_compat(dev);
|
||||
#endif
|
||||
intel_opregion_init(dev);
|
||||
#endif
|
||||
|
||||
/* Init HWS
|
||||
*/
|
||||
if (!I915_NEED_GFX_HWS(dev)) {
|
||||
/* Init HWS */
|
||||
if (!I915_NEED_GFX_HWS(dev)) {
|
||||
ret = i915_init_hardware_status(dev);
|
||||
if(ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
I915_WRITE16(HWSTAM, 0xeffe);
|
||||
I915_WRITE16(IMR, 0x0);
|
||||
I915_WRITE16(IER, 0x0);
|
||||
|
||||
DRM_SPININIT(&dev_priv->swaps_lock, "swap");
|
||||
INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
|
||||
dev_priv->swaps_pending = 0;
|
||||
|
||||
DRM_SPININIT(&dev_priv->user_irq_lock, "userirq");
|
||||
dev_priv->user_irq_refcount = 0;
|
||||
dev_priv->irq_mask_reg = ~0;
|
||||
|
||||
ret = drm_vblank_init(dev, num_pipes);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
|
||||
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
|
||||
|
||||
i915_enable_interrupt(dev);
|
||||
DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
|
||||
|
||||
/*
|
||||
* Initialize the hardware status page IRQ location.
|
||||
*/
|
||||
|
||||
I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int i915_driver_unload(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u32 temp;
|
||||
|
||||
if (dev_priv) {
|
||||
dev_priv->vblank_pipe = 0;
|
||||
|
||||
dev_priv->irq_enabled = 0;
|
||||
I915_WRITE(HWSTAM, 0xffffffff);
|
||||
I915_WRITE(IMR, 0xffffffff);
|
||||
I915_WRITE(IER, 0x0);
|
||||
|
||||
temp = I915_READ(PIPEASTAT);
|
||||
I915_WRITE(PIPEASTAT, temp);
|
||||
temp = I915_READ(PIPEBSTAT);
|
||||
I915_WRITE(PIPEBSTAT, temp);
|
||||
temp = I915_READ(IIR);
|
||||
I915_WRITE(IIR, temp);
|
||||
}
|
||||
|
||||
i915_free_hardware_status(dev);
|
||||
|
||||
drm_rmmap(dev, dev_priv->mmio_map);
|
||||
|
||||
#ifdef __linux__
|
||||
intel_opregion_free(dev);
|
||||
#endif
|
||||
|
||||
drm_free(dev->dev_private, sizeof(drm_i915_private_t),
|
||||
DRM_MEM_DRIVER);
|
||||
#ifdef __linux__
|
||||
|
|
File diff suppressed because it is too large
|
@ -45,8 +45,8 @@ i915_enable_irq(drm_i915_private_t *dev_priv, uint32_t mask)
|
|||
{
|
||||
if ((dev_priv->irq_mask_reg & mask) != 0) {
|
||||
dev_priv->irq_mask_reg &= ~mask;
|
||||
I915_WRITE(I915REG_INT_MASK_R, dev_priv->irq_mask_reg);
|
||||
(void) I915_READ(I915REG_INT_MASK_R);
|
||||
I915_WRITE(IMR, dev_priv->irq_mask_reg);
|
||||
(void) I915_READ(IMR);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -55,8 +55,8 @@ i915_disable_irq(drm_i915_private_t *dev_priv, uint32_t mask)
|
|||
{
|
||||
if ((dev_priv->irq_mask_reg & mask) != mask) {
|
||||
dev_priv->irq_mask_reg |= mask;
|
||||
I915_WRITE(I915REG_INT_MASK_R, dev_priv->irq_mask_reg);
|
||||
(void) I915_READ(I915REG_INT_MASK_R);
|
||||
I915_WRITE(IMR, dev_priv->irq_mask_reg);
|
||||
(void) I915_READ(IMR);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -189,11 +189,11 @@ static void i915_vblank_tasklet(struct drm_device *dev)
|
|||
u32 ropcpp = (0xcc << 16) | ((cpp - 1) << 24);
|
||||
RING_LOCALS;
|
||||
|
||||
if (sarea_priv->front_tiled) {
|
||||
if (IS_I965G(dev) && sarea_priv->front_tiled) {
|
||||
cmd |= XY_SRC_COPY_BLT_DST_TILED;
|
||||
dst_pitch >>= 2;
|
||||
}
|
||||
if (sarea_priv->back_tiled) {
|
||||
if (IS_I965G(dev) && sarea_priv->back_tiled) {
|
||||
cmd |= XY_SRC_COPY_BLT_SRC_TILED;
|
||||
src_pitch >>= 2;
|
||||
}
|
||||
|
@ -388,28 +388,7 @@ static void i915_vblank_tasklet(struct drm_device *dev)
|
|||
drm_free(swap_hit, sizeof(*swap_hit), DRM_MEM_DRIVER);
|
||||
}
|
||||
}
|
||||
#if 0
|
||||
static int i915_in_vblank(struct drm_device *dev, int pipe)
|
||||
{
|
||||
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
|
||||
unsigned long pipedsl, vblank, vtotal;
|
||||
unsigned long vbl_start, vbl_end, cur_line;
|
||||
|
||||
pipedsl = pipe ? PIPEBDSL : PIPEADSL;
|
||||
vblank = pipe ? VBLANK_B : VBLANK_A;
|
||||
vtotal = pipe ? VTOTAL_B : VTOTAL_A;
|
||||
|
||||
vbl_start = I915_READ(vblank) & VBLANK_START_MASK;
|
||||
vbl_end = (I915_READ(vblank) >> VBLANK_END_SHIFT) & VBLANK_END_MASK;
|
||||
|
||||
cur_line = I915_READ(pipedsl);
|
||||
|
||||
if (cur_line >= vbl_start)
|
||||
return 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
u32 i915_get_vblank_counter(struct drm_device *dev, int plane)
|
||||
{
|
||||
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
|
||||
|
@ -443,22 +422,6 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int plane)
|
|||
|
||||
count = (high1 << 8) | low;
|
||||
|
||||
/*
|
||||
* If we're in the middle of the vblank period, the
|
||||
* above regs won't have been updated yet, so return
|
||||
* an incremented count to stay accurate
|
||||
*/
|
||||
#if 0
|
||||
if (i915_in_vblank(dev, pipe))
|
||||
count++;
|
||||
#endif
|
||||
/* The count may be reset by another driver (e.g. the 2D driver);
 * when it reads zero we have no way to tell whether it wrapped or
 * was reset, so make a rough guess.
 */
|
||||
if (count == 0 && dev->last_vblank[pipe] < dev->max_vblank_count/2)
|
||||
dev->last_vblank[pipe] = 0;
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
|
@ -471,23 +434,16 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
|
|||
int vblank = 0;
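/* With MSI enabled, all interrupts are masked (IMR is written to ~0)
 * while the handler runs; IMR is restored from irq_mask_reg before
 * returning. */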
|
||||
|
||||
if (dev->pdev->msi_enabled)
|
||||
I915_WRITE(I915REG_INT_MASK_R, ~0);
|
||||
iir = I915_READ(I915REG_INT_IDENTITY_R);
|
||||
I915_WRITE(IMR, ~0);
|
||||
iir = I915_READ(IIR);
|
||||
#if 0
|
||||
DRM_DEBUG("flag=%08x\n", iir);
|
||||
#endif
|
||||
atomic_inc(&dev_priv->irq_received);
|
||||
if (iir == 0) {
|
||||
DRM_DEBUG ("iir 0x%08x im 0x%08x ie 0x%08x pipea 0x%08x pipeb 0x%08x\n",
|
||||
iir,
|
||||
I915_READ(I915REG_INT_MASK_R),
|
||||
I915_READ(I915REG_INT_ENABLE_R),
|
||||
I915_READ(I915REG_PIPEASTAT),
|
||||
I915_READ(I915REG_PIPEBSTAT));
|
||||
if (dev->pdev->msi_enabled) {
|
||||
I915_WRITE(I915REG_INT_MASK_R,
|
||||
dev_priv->irq_mask_reg);
|
||||
(void) I915_READ(I915REG_INT_MASK_R);
|
||||
I915_WRITE(IMR, dev_priv->irq_mask_reg);
|
||||
(void) I915_READ(IMR);
|
||||
}
|
||||
return IRQ_NONE;
|
||||
}
|
||||
|
@ -497,22 +453,52 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
|
|||
* we may get extra interrupts.
|
||||
*/
|
||||
if (iir & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT) {
|
||||
pipea_stats = I915_READ(I915REG_PIPEASTAT);
|
||||
I915_WRITE(I915REG_PIPEASTAT, pipea_stats);
|
||||
pipea_stats = I915_READ(PIPEASTAT);
|
||||
if (pipea_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
|
||||
PIPE_VBLANK_INTERRUPT_STATUS))
|
||||
{
|
||||
vblank++;
|
||||
drm_handle_vblank(dev, i915_get_plane(dev, 0));
|
||||
}
|
||||
I915_WRITE(PIPEASTAT, pipea_stats);
|
||||
}
|
||||
if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) {
|
||||
pipeb_stats = I915_READ(I915REG_PIPEBSTAT);
|
||||
I915_WRITE(I915REG_PIPEBSTAT, pipeb_stats);
|
||||
pipeb_stats = I915_READ(PIPEBSTAT);
|
||||
/* Ack the event */
|
||||
I915_WRITE(PIPEBSTAT, pipeb_stats);
|
||||
|
||||
/* The vblank interrupt gets enabled even if we didn't ask for
|
||||
it, so make sure it's shut down again */
|
||||
if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B))
|
||||
pipeb_stats &= ~(I915_VBLANK_INTERRUPT_ENABLE);
|
||||
|
||||
if (pipeb_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
|
||||
PIPE_VBLANK_INTERRUPT_STATUS))
|
||||
{
|
||||
vblank++;
|
||||
drm_handle_vblank(dev, i915_get_plane(dev, 1));
|
||||
}
|
||||
|
||||
#ifdef __linux__
|
||||
if (pipeb_stats & I915_LEGACY_BLC_EVENT_ENABLE)
|
||||
opregion_asle_intr(dev);
|
||||
#endif
|
||||
I915_WRITE(PIPEBSTAT, pipeb_stats);
|
||||
}
|
||||
|
||||
I915_WRITE(I915REG_INT_IDENTITY_R, iir);
|
||||
if (dev->pdev->msi_enabled)
|
||||
I915_WRITE(I915REG_INT_MASK_R, dev_priv->irq_mask_reg);
|
||||
(void) I915_READ(I915REG_INT_IDENTITY_R); /* Flush posted writes */
|
||||
#ifdef __linux__
|
||||
if (iir & I915_ASLE_INTERRUPT)
|
||||
opregion_asle_intr(dev);
|
||||
#endif
|
||||
|
||||
if (dev_priv->sarea_priv)
|
||||
dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
|
||||
|
||||
I915_WRITE(IIR, iir);
|
||||
if (dev->pdev->msi_enabled)
|
||||
I915_WRITE(IMR, dev_priv->irq_mask_reg);
|
||||
(void) I915_READ(IIR); /* Flush posted writes */
|
||||
|
||||
if (iir & I915_USER_INTERRUPT) {
|
||||
dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
|
||||
DRM_WAKEUP(&dev_priv->irq_queue);
|
||||
|
@ -521,16 +507,6 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
|
|||
#endif
|
||||
}
|
||||
|
||||
if (pipea_stats & (I915_START_VBLANK_INTERRUPT_STATUS|
|
||||
I915_VBLANK_INTERRUPT_STATUS)) {
|
||||
vblank = 1;
|
||||
drm_handle_vblank(dev, i915_get_plane(dev, 0));
|
||||
}
|
||||
if (pipeb_stats & (I915_START_VBLANK_INTERRUPT_STATUS|
|
||||
I915_VBLANK_INTERRUPT_STATUS)) {
|
||||
vblank = 1;
|
||||
drm_handle_vblank(dev, i915_get_plane(dev, 1));
|
||||
}
|
||||
if (vblank) {
|
||||
if (dev_priv->swaps_pending > 0)
|
||||
drm_locked_tasklet(dev, i915_vblank_tasklet);
|
||||
|
@ -552,7 +528,7 @@ int i915_emit_irq(struct drm_device *dev)
|
|||
|
||||
BEGIN_LP_RING(2);
|
||||
OUT_RING(0);
|
||||
OUT_RING(GFX_OP_USER_INTERRUPT);
|
||||
OUT_RING(MI_USER_INTERRUPT);
|
||||
ADVANCE_LP_RING();
|
||||
|
||||
return dev_priv->counter;
|
||||
|
@ -664,11 +640,11 @@ int i915_enable_vblank(struct drm_device *dev, int plane)
|
|||
|
||||
switch (pipe) {
|
||||
case 0:
|
||||
pipestat_reg = I915REG_PIPEASTAT;
|
||||
pipestat_reg = PIPEASTAT;
|
||||
mask_reg |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
|
||||
break;
|
||||
case 1:
|
||||
pipestat_reg = I915REG_PIPEBSTAT;
|
||||
pipestat_reg = PIPEBSTAT;
|
||||
mask_reg |= I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
|
||||
break;
|
||||
default:
|
||||
|
@ -685,14 +661,14 @@ int i915_enable_vblank(struct drm_device *dev, int plane)
|
|||
* but
|
||||
*/
|
||||
if (IS_I965G (dev))
|
||||
pipestat |= I915_START_VBLANK_INTERRUPT_ENABLE;
|
||||
pipestat |= PIPE_START_VBLANK_INTERRUPT_ENABLE;
|
||||
else
|
||||
pipestat |= I915_VBLANK_INTERRUPT_ENABLE;
|
||||
pipestat |= PIPE_VBLANK_INTERRUPT_ENABLE;
|
||||
/*
|
||||
* Clear any pending status
|
||||
*/
|
||||
pipestat |= (I915_START_VBLANK_INTERRUPT_STATUS |
|
||||
I915_VBLANK_INTERRUPT_STATUS);
|
||||
pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
|
||||
PIPE_VBLANK_INTERRUPT_STATUS);
|
||||
I915_WRITE(pipestat_reg, pipestat);
|
||||
}
|
||||
DRM_SPINLOCK(&dev_priv->user_irq_lock);
|
||||
|
@ -712,11 +688,11 @@ void i915_disable_vblank(struct drm_device *dev, int plane)
|
|||
|
||||
switch (pipe) {
|
||||
case 0:
|
||||
pipestat_reg = I915REG_PIPEASTAT;
|
||||
pipestat_reg = PIPEASTAT;
|
||||
mask_reg |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
|
||||
break;
|
||||
case 1:
|
||||
pipestat_reg = I915REG_PIPEBSTAT;
|
||||
pipestat_reg = PIPEBSTAT;
|
||||
mask_reg |= I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
|
||||
break;
|
||||
default:
|
||||
|
@ -728,42 +704,36 @@ void i915_disable_vblank(struct drm_device *dev, int plane)
|
|||
DRM_SPINLOCK(&dev_priv->user_irq_lock);
|
||||
i915_disable_irq(dev_priv, mask_reg);
|
||||
DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
|
||||
|
||||
if (pipestat_reg)
|
||||
{
|
||||
pipestat = I915_READ (pipestat_reg);
|
||||
pipestat &= ~(I915_START_VBLANK_INTERRUPT_ENABLE |
|
||||
I915_VBLANK_INTERRUPT_ENABLE);
|
||||
pipestat &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
|
||||
PIPE_VBLANK_INTERRUPT_ENABLE);
|
||||
/*
|
||||
* Clear any pending status
|
||||
*/
|
||||
pipestat |= (I915_START_VBLANK_INTERRUPT_STATUS |
|
||||
I915_VBLANK_INTERRUPT_STATUS);
|
||||
pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
|
||||
PIPE_VBLANK_INTERRUPT_STATUS);
|
||||
I915_WRITE(pipestat_reg, pipestat);
|
||||
(void) I915_READ(pipestat_reg);
|
||||
}
|
||||
}
|
||||
|
||||
static void i915_enable_interrupt (struct drm_device *dev)
|
||||
void i915_enable_interrupt (struct drm_device *dev)
|
||||
{
|
||||
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
|
||||
|
||||
dev_priv->irq_mask_reg = ~0;
|
||||
I915_WRITE(I915REG_INT_MASK_R, dev_priv->irq_mask_reg);
|
||||
I915_WRITE(I915REG_INT_ENABLE_R, I915_INTERRUPT_ENABLE_MASK);
|
||||
(void) I915_READ (I915REG_INT_ENABLE_R);
|
||||
dev_priv->irq_enabled = 1;
|
||||
}
|
||||
|
||||
static void i915_disable_interrupt (struct drm_device *dev)
|
||||
{
|
||||
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
|
||||
|
||||
I915_WRITE(I915REG_HWSTAM, 0xffffffff);
|
||||
I915_WRITE(I915REG_INT_MASK_R, 0xffffffff);
|
||||
I915_WRITE(I915REG_INT_ENABLE_R, 0);
|
||||
I915_WRITE(I915REG_INT_IDENTITY_R, 0xffffffff);
|
||||
(void) I915_READ (I915REG_INT_IDENTITY_R);
|
||||
dev_priv->irq_enabled = 0;
|
||||
dev_priv->irq_mask_reg = ~0;
|
||||
I915_WRITE(IMR, dev_priv->irq_mask_reg);
|
||||
I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK);
|
||||
(void) I915_READ (IER);
|
||||
|
||||
#ifdef __linux__
|
||||
opregion_enable_asle(dev);
|
||||
#endif
|
||||
|
||||
dev_priv->irq_enabled = 1;
|
||||
}
|
||||
|
||||
/* Set the vblank monitor pipe
|
||||
|
@ -772,20 +742,12 @@ int i915_vblank_pipe_set(struct drm_device *dev, void *data,
|
|||
struct drm_file *file_priv)
|
||||
{
|
||||
drm_i915_private_t *dev_priv = dev->dev_private;
|
||||
drm_i915_vblank_pipe_t *pipe = data;
|
||||
|
||||
if (!dev_priv) {
|
||||
DRM_ERROR("called with no initialization\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (pipe->pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) {
|
||||
DRM_ERROR("called with invalid pipe 0x%x\n", pipe->pipe);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
dev_priv->vblank_pipe = pipe->pipe;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -794,20 +756,13 @@ int i915_vblank_pipe_get(struct drm_device *dev, void *data,
|
|||
{
|
||||
drm_i915_private_t *dev_priv = dev->dev_private;
|
||||
drm_i915_vblank_pipe_t *pipe = data;
|
||||
u32 flag = 0;
|
||||
|
||||
if (!dev_priv) {
|
||||
DRM_ERROR("called with no initialization\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (dev_priv->irq_enabled)
|
||||
flag = ~dev_priv->irq_mask_reg;
|
||||
pipe->pipe = 0;
|
||||
if (flag & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT)
|
||||
pipe->pipe |= DRM_I915_VBLANK_PIPE_A;
|
||||
if (flag & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)
|
||||
pipe->pipe |= DRM_I915_VBLANK_PIPE_B;
|
||||
pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -868,7 +823,13 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
|
|||
|
||||
DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock, irqflags);
|
||||
|
||||
drm_update_vblank_count(dev, pipe);
|
||||
/*
|
||||
* We take the ref here and put it when the swap actually completes
|
||||
* in the tasklet.
|
||||
*/
|
||||
ret = drm_vblank_get(dev, pipe);
|
||||
if (ret)
|
||||
return ret;
|
||||
curseq = drm_vblank_count(dev, pipe);
|
||||
|
||||
if (seqtype == _DRM_VBLANK_RELATIVE)
|
||||
|
@ -879,6 +840,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
|
|||
swap->sequence = curseq + 1;
|
||||
} else {
|
||||
DRM_DEBUG("Missed target sequence\n");
|
||||
drm_vblank_put(dev, pipe);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
@ -900,6 +862,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
|
|||
irqflags);
|
||||
DRM_DEBUG("Invalid drawable ID %d\n",
|
||||
swap->drawable);
|
||||
drm_vblank_put(dev, pipe);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
@ -907,6 +870,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
|
|||
|
||||
DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock, irqflags);
|
||||
|
||||
drm_vblank_put(dev, pipe);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
@ -930,6 +894,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
|
|||
|
||||
if (dev_priv->swaps_pending >= 100) {
|
||||
DRM_DEBUG("Too many swaps queued\n");
|
||||
drm_vblank_put(dev, pipe);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
|
@ -937,17 +902,12 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
|
|||
|
||||
if (!vbl_swap) {
|
||||
DRM_ERROR("Failed to allocate memory to queue swap\n");
|
||||
drm_vblank_put(dev, pipe);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
DRM_DEBUG("\n");
|
||||
|
||||
ret = drm_vblank_get(dev, pipe);
|
||||
if (ret) {
|
||||
drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
|
||||
return ret;
|
||||
}
|
||||
|
||||
vbl_swap->drw_id = swap->drawable;
|
||||
vbl_swap->plane = plane;
|
||||
vbl_swap->sequence = swap->sequence;
|
||||
|
@ -970,58 +930,15 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
|
|||
*/
|
||||
void i915_driver_irq_preinstall(struct drm_device * dev)
|
||||
{
|
||||
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
|
||||
|
||||
atomic_set(&dev_priv->irq_received, 0);
|
||||
I915_WRITE(I915REG_HWSTAM, 0xffff);
|
||||
I915_WRITE(I915REG_INT_ENABLE_R, 0x0);
|
||||
I915_WRITE(I915REG_INT_MASK_R, 0xffffffff);
|
||||
I915_WRITE(I915REG_INT_IDENTITY_R, 0xffffffff);
|
||||
(void) I915_READ(I915REG_INT_IDENTITY_R);
|
||||
return;
|
||||
}
|
||||
|
||||
int i915_driver_irq_postinstall(struct drm_device * dev)
|
||||
{
|
||||
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
|
||||
int ret, num_pipes = 2;
|
||||
|
||||
DRM_SPININIT(&dev_priv->swaps_lock, "swap");
|
||||
INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
|
||||
dev_priv->swaps_pending = 0;
|
||||
|
||||
DRM_SPININIT(&dev_priv->user_irq_lock, "userirq");
|
||||
dev_priv->user_irq_refcount = 0;
|
||||
dev_priv->irq_mask_reg = 0;
|
||||
|
||||
ret = drm_vblank_init(dev, num_pipes);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
|
||||
|
||||
i915_enable_interrupt(dev);
|
||||
DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
|
||||
|
||||
/*
|
||||
* Initialize the hardware status page IRQ location.
|
||||
*/
|
||||
|
||||
I915_WRITE(I915REG_INSTPM, (1 << 5) | (1 << 21));
|
||||
return 0;
|
||||
}
|
||||
|
||||
void i915_driver_irq_uninstall(struct drm_device * dev)
|
||||
{
|
||||
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
|
||||
u32 temp;
|
||||
|
||||
if (!dev_priv)
|
||||
return;
|
||||
|
||||
i915_disable_interrupt (dev);
|
||||
|
||||
temp = I915_READ(I915REG_PIPEASTAT);
|
||||
I915_WRITE(I915REG_PIPEASTAT, temp);
|
||||
temp = I915_READ(I915REG_PIPEBSTAT);
|
||||
I915_WRITE(I915REG_PIPEBSTAT, temp);
|
||||
return;
|
||||
}
|
||||
|
|
|
@ -0,0 +1,520 @@
|
|||
/* i915_suspend.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
|
||||
*/
|
||||
/*
|
||||
*
|
||||
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
||||
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
|
||||
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
|
||||
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
|
||||
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
||||
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "drmP.h"
|
||||
#include "drm.h"
|
||||
#include "i915_drm.h"
|
||||
#include "i915_drv.h"
|
||||
|
||||
static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
if (pipe == PIPE_A)
|
||||
return (I915_READ(DPLL_A) & DPLL_VCO_ENABLE);
|
||||
else
|
||||
return (I915_READ(DPLL_B) & DPLL_VCO_ENABLE);
|
||||
}
|
||||
|
||||
static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
|
||||
u32 *array;
|
||||
int i;
|
||||
|
||||
if (!i915_pipe_enabled(dev, pipe))
|
||||
return;
|
||||
|
||||
if (pipe == PIPE_A)
|
||||
array = dev_priv->save_palette_a;
|
||||
else
|
||||
array = dev_priv->save_palette_b;
|
||||
|
||||
for(i = 0; i < 256; i++)
|
||||
array[i] = I915_READ(reg + (i << 2));
|
||||
}
|
||||
|
||||
static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
|
||||
u32 *array;
|
||||
int i;
|
||||
|
||||
if (!i915_pipe_enabled(dev, pipe))
|
||||
return;
|
||||
|
||||
if (pipe == PIPE_A)
|
||||
array = dev_priv->save_palette_a;
|
||||
else
|
||||
array = dev_priv->save_palette_b;
|
||||
|
||||
for(i = 0; i < 256; i++)
|
||||
I915_WRITE(reg + (i << 2), array[i]);
|
||||
}
|
||||
|
||||
static u8 i915_read_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
I915_WRITE8(index_port, reg);
|
||||
return I915_READ8(data_port);
|
||||
}
|
||||
|
||||
static u8 i915_read_ar(struct drm_device *dev, u16 st01, u8 reg, u16 palette_enable)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
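/* Reading ST01 resets the attribute controller's index/data flip-flop,
 * so the VGA_AR_INDEX write below is interpreted as an index. */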
|
||||
|
||||
I915_READ8(st01);
|
||||
I915_WRITE8(VGA_AR_INDEX, palette_enable | reg);
|
||||
return I915_READ8(VGA_AR_DATA_READ);
|
||||
}
|
||||
|
||||
static void i915_write_ar(struct drm_device *dev, u16 st01, u8 reg, u8 val, u16 palette_enable)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
I915_READ8(st01);
|
||||
I915_WRITE8(VGA_AR_INDEX, palette_enable | reg);
|
||||
I915_WRITE8(VGA_AR_DATA_WRITE, val);
|
||||
}
|
||||
|
||||
static void i915_write_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg, u8 val)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
I915_WRITE8(index_port, reg);
|
||||
I915_WRITE8(data_port, val);
|
||||
}
|
||||
|
||||
static void i915_save_vga(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
int i;
|
||||
u16 cr_index, cr_data, st01;
|
||||
|
||||
/* VGA color palette registers */
|
||||
dev_priv->saveDACMASK = I915_READ8(VGA_DACMASK);
|
||||
/* DACCRX automatically increments during read */
|
||||
I915_WRITE8(VGA_DACRX, 0);
|
||||
/* Read 3 bytes of color data from each index */
|
||||
for (i = 0; i < 256 * 3; i++)
|
||||
dev_priv->saveDACDATA[i] = I915_READ8(VGA_DACDATA);
|
||||
|
||||
/* MSR bits */
|
||||
dev_priv->saveMSR = I915_READ8(VGA_MSR_READ);
|
||||
if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
|
||||
cr_index = VGA_CR_INDEX_CGA;
|
||||
cr_data = VGA_CR_DATA_CGA;
|
||||
st01 = VGA_ST01_CGA;
|
||||
} else {
|
||||
cr_index = VGA_CR_INDEX_MDA;
|
||||
cr_data = VGA_CR_DATA_MDA;
|
||||
st01 = VGA_ST01_MDA;
|
||||
}
|
||||
|
||||
/* CRT controller regs */
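/* Clearing bit 7 of CR11 below unlocks CRTC registers 0-7, which are
 * write-protected while that bit is set. */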
|
||||
i915_write_indexed(dev, cr_index, cr_data, 0x11,
|
||||
i915_read_indexed(dev, cr_index, cr_data, 0x11) &
|
||||
(~0x80));
|
||||
for (i = 0; i <= 0x24; i++)
|
||||
dev_priv->saveCR[i] =
|
||||
i915_read_indexed(dev, cr_index, cr_data, i);
|
||||
/* Make sure we don't turn off CR group 0 writes */
|
||||
dev_priv->saveCR[0x11] &= ~0x80;
|
||||
|
||||
/* Attribute controller registers */
|
||||
I915_READ8(st01);
|
||||
dev_priv->saveAR_INDEX = I915_READ8(VGA_AR_INDEX);
|
||||
for (i = 0; i <= 0x14; i++)
|
||||
dev_priv->saveAR[i] = i915_read_ar(dev, st01, i, 0);
|
||||
I915_READ8(st01);
|
||||
I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX);
|
||||
I915_READ8(st01);
|
||||
|
||||
/* Graphics controller registers */
|
||||
for (i = 0; i < 9; i++)
|
||||
dev_priv->saveGR[i] =
|
||||
i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i);
|
||||
|
||||
dev_priv->saveGR[0x10] =
|
||||
i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10);
|
||||
dev_priv->saveGR[0x11] =
|
||||
i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11);
|
||||
dev_priv->saveGR[0x18] =
|
||||
i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18);
|
||||
|
||||
/* Sequencer registers */
|
||||
for (i = 0; i < 8; i++)
|
||||
dev_priv->saveSR[i] =
|
||||
i915_read_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i);
|
||||
}
|
||||
|
||||
static void i915_restore_vga(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
int i;
|
||||
u16 cr_index, cr_data, st01;
|
||||
|
||||
/* MSR bits */
|
||||
I915_WRITE8(VGA_MSR_WRITE, dev_priv->saveMSR);
|
||||
if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
|
||||
cr_index = VGA_CR_INDEX_CGA;
|
||||
cr_data = VGA_CR_DATA_CGA;
|
||||
st01 = VGA_ST01_CGA;
|
||||
} else {
|
||||
cr_index = VGA_CR_INDEX_MDA;
|
||||
cr_data = VGA_CR_DATA_MDA;
|
||||
st01 = VGA_ST01_MDA;
|
||||
}
|
||||
|
||||
/* Sequencer registers, don't write SR07 */
|
||||
for (i = 0; i < 7; i++)
|
||||
i915_write_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i,
|
||||
dev_priv->saveSR[i]);
|
||||
|
||||
/* CRT controller regs */
|
||||
/* Enable CR group 0 writes */
|
||||
i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]);
|
||||
for (i = 0; i <= 0x24; i++)
|
||||
i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->saveCR[i]);
|
||||
|
||||
/* Graphics controller regs */
|
||||
for (i = 0; i < 9; i++)
|
||||
i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i,
|
||||
dev_priv->saveGR[i]);
|
||||
|
||||
i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10,
|
||||
dev_priv->saveGR[0x10]);
|
||||
i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11,
|
||||
dev_priv->saveGR[0x11]);
|
||||
i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18,
|
||||
dev_priv->saveGR[0x18]);
|
||||
|
||||
/* Attribute controller registers */
|
||||
I915_READ8(st01); /* switch back to index mode */
|
||||
for (i = 0; i <= 0x14; i++)
|
||||
i915_write_ar(dev, st01, i, dev_priv->saveAR[i], 0);
|
||||
I915_READ8(st01); /* switch back to index mode */
|
||||
I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX | 0x20);
|
||||
I915_READ8(st01);
|
||||
|
||||
/* VGA color palette registers */
|
||||
I915_WRITE8(VGA_DACMASK, dev_priv->saveDACMASK);
|
||||
/* DACWX automatically increments during writes */
|
||||
I915_WRITE8(VGA_DACWX, 0);
|
||||
/* Write 3 bytes of color data to each index */
|
||||
for (i = 0; i < 256 * 3; i++)
|
||||
I915_WRITE8(VGA_DACDATA, dev_priv->saveDACDATA[i]);
|
||||
|
||||
}
|
||||
|
||||
int i915_save_state(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
int i;
|
||||
|
||||
#if defined(__FreeBSD__)
|
||||
dev_priv->saveLBB = (u8) pci_read_config(dev->device, LBB, 1);
|
||||
#else
|
||||
pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
|
||||
#endif
|
||||
|
||||
/* Display arbitration control */
|
||||
dev_priv->saveDSPARB = I915_READ(DSPARB);
|
||||
|
||||
/* Pipe & plane A info */
|
||||
dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
|
||||
dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
|
||||
dev_priv->saveFPA0 = I915_READ(FPA0);
|
||||
dev_priv->saveFPA1 = I915_READ(FPA1);
|
||||
dev_priv->saveDPLL_A = I915_READ(DPLL_A);
|
||||
if (IS_I965G(dev))
|
||||
dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
|
||||
dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
|
||||
dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
|
||||
dev_priv->saveHSYNC_A = I915_READ(HSYNC_A);
|
||||
dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
|
||||
dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
|
||||
dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
|
||||
dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
|
||||
|
||||
dev_priv->saveDSPACNTR = I915_READ(DSPACNTR);
|
||||
dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE);
|
||||
dev_priv->saveDSPASIZE = I915_READ(DSPASIZE);
|
||||
dev_priv->saveDSPAPOS = I915_READ(DSPAPOS);
|
||||
dev_priv->saveDSPAADDR = I915_READ(DSPAADDR);
|
||||
if (IS_I965G(dev)) {
|
||||
dev_priv->saveDSPASURF = I915_READ(DSPASURF);
|
||||
dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
|
||||
}
|
||||
i915_save_palette(dev, PIPE_A);
|
||||
dev_priv->savePIPEASTAT = I915_READ(PIPEASTAT);
|
||||
|
||||
/* Pipe & plane B info */
|
||||
dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
|
||||
dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
|
||||
dev_priv->saveFPB0 = I915_READ(FPB0);
|
||||
dev_priv->saveFPB1 = I915_READ(FPB1);
|
||||
dev_priv->saveDPLL_B = I915_READ(DPLL_B);
|
||||
if (IS_I965G(dev))
|
||||
dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
|
||||
dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
|
||||
dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
|
||||
dev_priv->saveHSYNC_B = I915_READ(HSYNC_B);
|
||||
dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
|
||||
dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
|
||||
dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
|
||||
dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B);
|
||||
|
||||
dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR);
|
||||
dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE);
|
||||
dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
|
||||
dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
|
||||
dev_priv->saveDSPBADDR = I915_READ(DSPBADDR);
|
||||
if (IS_I965GM(dev) || IS_GM45(dev)) {
|
||||
dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
|
||||
dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
|
||||
}
|
||||
i915_save_palette(dev, PIPE_B);
|
||||
dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT);
|
||||
|
||||
/* CRT state */
|
||||
dev_priv->saveADPA = I915_READ(ADPA);
|
||||
|
||||
/* LVDS state */
|
||||
dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
|
||||
dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
|
||||
dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
|
||||
if (IS_I965G(dev))
|
||||
dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
|
||||
if (IS_MOBILE(dev) && !IS_I830(dev))
|
||||
dev_priv->saveLVDS = I915_READ(LVDS);
|
||||
if (!IS_I830(dev) && !IS_845G(dev))
|
||||
dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
|
||||
dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
|
||||
dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
|
||||
dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR);
|
||||
|
||||
/* FIXME: save TV & SDVO state */
|
||||
|
||||
/* FBC state */
|
||||
dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
|
||||
dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
|
||||
dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
|
||||
dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
|
||||
|
||||
/* Interrupt state */
|
||||
dev_priv->saveIIR = I915_READ(IIR);
|
||||
dev_priv->saveIER = I915_READ(IER);
|
||||
dev_priv->saveIMR = I915_READ(IMR);
|
||||
|
||||
/* VGA state */
|
||||
dev_priv->saveVGA0 = I915_READ(VGA0);
|
||||
dev_priv->saveVGA1 = I915_READ(VGA1);
|
||||
dev_priv->saveVGA_PD = I915_READ(VGA_PD);
|
||||
dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
|
||||
|
||||
/* Clock gating state */
|
||||
dev_priv->saveD_STATE = I915_READ(D_STATE);
|
||||
dev_priv->saveCG_2D_DIS = I915_READ(CG_2D_DIS);
|
||||
|
||||
/* Cache mode state */
|
||||
dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
|
||||
|
||||
/* Memory Arbitration state */
|
||||
dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
|
||||
|
||||
/* Scratch space */
|
||||
for (i = 0; i < 16; i++) {
|
||||
dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2));
|
||||
dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
|
||||
}
|
||||
for (i = 0; i < 3; i++)
|
||||
dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
|
||||
|
||||
i915_save_vga(dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int i915_restore_state(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
int i;
|
||||
|
||||
#if defined(__FreeBSD__)
|
||||
pci_write_config(dev->device, LBB, dev_priv->saveLBB, 1);
|
||||
#else
|
||||
pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
|
||||
#endif
|
||||
|
||||
I915_WRITE(DSPARB, dev_priv->saveDSPARB);
|
||||
|
||||
/* Pipe & plane A info */
|
||||
/* Prime the clock */
|
||||
if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
|
||||
I915_WRITE(DPLL_A, dev_priv->saveDPLL_A &
|
||||
~DPLL_VCO_ENABLE);
|
||||
DRM_UDELAY(150);
|
||||
}
|
||||
I915_WRITE(FPA0, dev_priv->saveFPA0);
|
||||
I915_WRITE(FPA1, dev_priv->saveFPA1);
|
||||
/* Actually enable it */
|
||||
I915_WRITE(DPLL_A, dev_priv->saveDPLL_A);
|
||||
DRM_UDELAY(150);
|
||||
if (IS_I965G(dev))
|
||||
I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
|
||||
DRM_UDELAY(150);
|
||||
|
||||
/* Restore mode */
|
||||
I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A);
|
||||
I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A);
|
||||
I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A);
|
||||
I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
|
||||
I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
|
||||
I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
|
||||
I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
|
||||
|
||||
/* Restore plane info */
|
||||
I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);
|
||||
I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS);
|
||||
I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
|
||||
I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR);
|
||||
I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
|
||||
if (IS_I965G(dev)) {
|
||||
I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
|
||||
I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
|
||||
}
|
||||
|
||||
I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF);
|
||||
|
||||
i915_restore_palette(dev, PIPE_A);
|
||||
/* Enable the plane */
|
||||
I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR);
|
||||
I915_WRITE(DSPAADDR, I915_READ(DSPAADDR));
|
||||
|
||||
/* Pipe & plane B info */
|
||||
if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
|
||||
I915_WRITE(DPLL_B, dev_priv->saveDPLL_B &
|
||||
~DPLL_VCO_ENABLE);
|
||||
DRM_UDELAY(150);
|
||||
}
|
||||
I915_WRITE(FPB0, dev_priv->saveFPB0);
|
||||
I915_WRITE(FPB1, dev_priv->saveFPB1);
|
||||
/* Actually enable it */
|
||||
I915_WRITE(DPLL_B, dev_priv->saveDPLL_B);
|
||||
DRM_UDELAY(150);
|
||||
if (IS_I965G(dev))
|
||||
I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
|
||||
DRM_UDELAY(150);
|
||||
|
||||
/* Restore mode */
|
||||
I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);
|
||||
I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B);
|
||||
I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B);
|
||||
I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
|
||||
I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
|
||||
I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
|
||||
I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
|
||||
|
||||
/* Restore plane info */
|
||||
I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE);
|
||||
I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS);
|
||||
I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
|
||||
I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR);
|
||||
I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
|
||||
if (IS_I965G(dev)) {
|
||||
I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
|
||||
I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
|
||||
}
|
||||
|
||||
I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF);
|
||||
|
||||
i915_restore_palette(dev, PIPE_B);
|
||||
/* Enable the plane */
|
||||
I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
|
||||
I915_WRITE(DSPBADDR, I915_READ(DSPBADDR));
|
||||
|
||||
/* CRT state */
|
||||
I915_WRITE(ADPA, dev_priv->saveADPA);
|
||||
|
||||
/* LVDS state */
|
||||
if (IS_I965G(dev))
|
||||
I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
|
||||
if (IS_MOBILE(dev) && !IS_I830(dev))
|
||||
I915_WRITE(LVDS, dev_priv->saveLVDS);
|
||||
if (!IS_I830(dev) && !IS_845G(dev))
|
||||
I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
|
||||
|
||||
I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
|
||||
I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
|
||||
I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
|
||||
I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
|
||||
I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
|
||||
I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
|
||||
|
||||
/* FIXME: restore TV & SDVO state */
|
||||
|
||||
/* FBC info */
|
||||
I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
|
||||
I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
|
||||
I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
|
||||
I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
|
||||
|
||||
/* VGA state */
|
||||
I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
|
||||
I915_WRITE(VGA0, dev_priv->saveVGA0);
|
||||
I915_WRITE(VGA1, dev_priv->saveVGA1);
|
||||
I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
|
||||
DRM_UDELAY(150);
|
||||
|
||||
/* Clock gating state */
|
||||
I915_WRITE (D_STATE, dev_priv->saveD_STATE);
|
||||
I915_WRITE (CG_2D_DIS, dev_priv->saveCG_2D_DIS);
|
||||
|
||||
/* Cache mode state */
|
||||
I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
|
||||
|
||||
/* Memory arbitration state */
|
||||
I915_WRITE (MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);
|
||||
|
||||
for (i = 0; i < 16; i++) {
|
||||
I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]);
|
||||
I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i]);
|
||||
}
|
||||
for (i = 0; i < 3; i++)
|
||||
I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
|
||||
|
||||
i915_restore_vga(dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -25,7 +25,7 @@
|
|||
#ifndef __NOUVEAU_DRM_H__
|
||||
#define __NOUVEAU_DRM_H__
|
||||
|
||||
#define NOUVEAU_DRM_HEADER_PATCHLEVEL 10
|
||||
#define NOUVEAU_DRM_HEADER_PATCHLEVEL 11
|
||||
|
||||
struct drm_nouveau_channel_alloc {
|
||||
uint32_t fb_ctxdma_handle;
|
||||
|
@ -85,12 +85,16 @@ struct drm_nouveau_gpuobj_free {
|
|||
#define NOUVEAU_MEM_PINNED 0x00000040
|
||||
#define NOUVEAU_MEM_USER_BACKED 0x00000080
|
||||
#define NOUVEAU_MEM_MAPPED 0x00000100
|
||||
#define NOUVEAU_MEM_INSTANCE 0x00000200 /* internal */
|
||||
#define NOUVEAU_MEM_NOTIFIER 0x00000400 /* internal */
|
||||
#define NOUVEAU_MEM_NOVM 0x00000800 /* internal */
|
||||
#define NOUVEAU_MEM_TILE 0x00000200
|
||||
#define NOUVEAU_MEM_TILE_ZETA 0x00000400
|
||||
#define NOUVEAU_MEM_INSTANCE 0x01000000 /* internal */
|
||||
#define NOUVEAU_MEM_NOTIFIER 0x02000000 /* internal */
|
||||
#define NOUVEAU_MEM_NOVM 0x04000000 /* internal */
|
||||
#define NOUVEAU_MEM_USER 0x08000000 /* internal */
|
||||
#define NOUVEAU_MEM_INTERNAL (NOUVEAU_MEM_INSTANCE | \
|
||||
NOUVEAU_MEM_NOTIFIER | \
|
||||
NOUVEAU_MEM_NOVM)
|
||||
NOUVEAU_MEM_NOVM | \
|
||||
NOUVEAU_MEM_USER)
|
||||
|
||||
struct drm_nouveau_mem_alloc {
|
||||
int flags;
|
||||
|
@ -105,6 +109,13 @@ struct drm_nouveau_mem_free {
|
|||
int flags;
|
||||
};
|
||||
|
||||
struct drm_nouveau_mem_tile {
|
||||
uint64_t offset;
|
||||
uint64_t delta;
|
||||
uint64_t size;
|
||||
int flags;
|
||||
};
|
||||
|
||||
/* FIXME : maybe unify {GET,SET}PARAMs */
|
||||
#define NOUVEAU_GETPARAM_PCI_VENDOR 3
|
||||
#define NOUVEAU_GETPARAM_PCI_DEVICE 4
|
||||
|
@ -166,5 +177,6 @@ struct drm_nouveau_sarea {
|
|||
#define DRM_NOUVEAU_GPUOBJ_FREE 0x07
|
||||
#define DRM_NOUVEAU_MEM_ALLOC 0x08
|
||||
#define DRM_NOUVEAU_MEM_FREE 0x09
|
||||
#define DRM_NOUVEAU_MEM_TILE 0x0a
|
||||
|
||||
#endif /* __NOUVEAU_DRM_H__ */
|
||||
|
|
|
@ -34,7 +34,7 @@
|
|||
|
||||
#define DRIVER_MAJOR 0
|
||||
#define DRIVER_MINOR 0
|
||||
#define DRIVER_PATCHLEVEL 10
|
||||
#define DRIVER_PATCHLEVEL 11
|
||||
|
||||
#define NOUVEAU_FAMILY 0x0000FFFF
|
||||
#define NOUVEAU_FLAGS 0xFFFF0000
|
||||
|
@ -350,7 +350,7 @@ extern int nouveau_mem_init_heap(struct mem_block **, uint64_t start,
|
|||
uint64_t size);
|
||||
extern struct mem_block *nouveau_mem_alloc_block(struct mem_block *,
|
||||
uint64_t size, int align2,
|
||||
struct drm_file *);
|
||||
struct drm_file *, int tail);
|
||||
extern void nouveau_mem_takedown(struct mem_block **heap);
|
||||
extern void nouveau_mem_free_block(struct mem_block *);
|
||||
extern uint64_t nouveau_mem_fb_amount(struct drm_device *);
|
||||
|
@ -359,6 +359,8 @@ extern int nouveau_ioctl_mem_alloc(struct drm_device *, void *data,
|
|||
struct drm_file *);
|
||||
extern int nouveau_ioctl_mem_free(struct drm_device *, void *data,
|
||||
struct drm_file *);
|
||||
extern int nouveau_ioctl_mem_tile(struct drm_device *, void *data,
|
||||
struct drm_file *);
|
||||
extern struct mem_block* nouveau_mem_alloc(struct drm_device *,
|
||||
int alignment, uint64_t size,
|
||||
int flags, struct drm_file *);
|
||||
|
|
|
@ -593,6 +593,7 @@ struct drm_ioctl_desc nouveau_ioctls[] = {
|
|||
DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
|
||||
DRM_IOCTL_DEF(DRM_NOUVEAU_MEM_ALLOC, nouveau_ioctl_mem_alloc, DRM_AUTH),
|
||||
DRM_IOCTL_DEF(DRM_NOUVEAU_MEM_FREE, nouveau_ioctl_mem_free, DRM_AUTH),
|
||||
DRM_IOCTL_DEF(DRM_NOUVEAU_MEM_TILE, nouveau_ioctl_mem_tile, DRM_AUTH),
|
||||
};
|
||||
|
||||
int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);
|
||||
|
|
|
@ -35,8 +35,9 @@
|
|||
#include "drm_sarea.h"
|
||||
#include "nouveau_drv.h"
|
||||
|
||||
static struct mem_block *split_block(struct mem_block *p, uint64_t start, uint64_t size,
|
||||
struct drm_file *file_priv)
|
||||
static struct mem_block *
|
||||
split_block(struct mem_block *p, uint64_t start, uint64_t size,
|
||||
struct drm_file *file_priv)
|
||||
{
|
||||
/* Maybe cut off the start of an existing block */
|
||||
if (start > p->start) {
|
||||
|
@ -77,10 +78,9 @@ out:
|
|||
return p;
|
||||
}
|
||||
|
||||
struct mem_block *nouveau_mem_alloc_block(struct mem_block *heap,
|
||||
uint64_t size,
|
||||
int align2,
|
||||
struct drm_file *file_priv)
|
||||
struct mem_block *
|
||||
nouveau_mem_alloc_block(struct mem_block *heap, uint64_t size,
|
||||
int align2, struct drm_file *file_priv, int tail)
|
||||
{
|
||||
struct mem_block *p;
|
||||
uint64_t mask = (1 << align2) - 1;
|
||||
|
@ -88,10 +88,22 @@ struct mem_block *nouveau_mem_alloc_block(struct mem_block *heap,
|
|||
if (!heap)
|
||||
return NULL;
|
||||
|
||||
list_for_each(p, heap) {
|
||||
uint64_t start = (p->start + mask) & ~mask;
|
||||
if (p->file_priv == 0 && start + size <= p->start + p->size)
|
||||
return split_block(p, start, size, file_priv);
|
||||
if (tail) {
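/* Tail allocation: walk the heap backwards and carve the block out of
 * the aligned end of the first free region that is large enough. This
 * path is taken when NOUVEAU_MEM_USER is not set (internal
 * allocations). */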
|
||||
list_for_each_prev(p, heap) {
|
||||
uint64_t start = ((p->start + p->size) - size) & ~mask;
|
||||
|
||||
if (p->file_priv == 0 && start >= p->start &&
|
||||
start + size <= p->start + p->size)
|
||||
return split_block(p, start, size, file_priv);
|
||||
}
|
||||
} else {
|
||||
list_for_each(p, heap) {
|
||||
uint64_t start = (p->start + mask) & ~mask;
|
||||
|
||||
if (p->file_priv == 0 &&
|
||||
start + size <= p->start + p->size)
|
||||
return split_block(p, start, size, file_priv);
|
||||
}
|
||||
}
|
||||
|
||||
return NULL;
|
||||
|
@ -563,13 +575,13 @@ int nouveau_mem_init(struct drm_device *dev)
|
|||
return 0;
|
||||
}
|
||||
|
||||
struct mem_block* nouveau_mem_alloc(struct drm_device *dev, int alignment,
|
||||
uint64_t size, int flags,
|
||||
struct drm_file *file_priv)
|
||||
struct mem_block *
|
||||
nouveau_mem_alloc(struct drm_device *dev, int alignment, uint64_t size,
|
||||
int flags, struct drm_file *file_priv)
|
||||
{
|
||||
struct mem_block *block;
|
||||
int type;
|
||||
struct drm_nouveau_private *dev_priv = dev->dev_private;
|
||||
struct mem_block *block;
|
||||
int type, tail = !(flags & NOUVEAU_MEM_USER);
|
||||
|
||||
/*
|
||||
* Make things easier on ourselves: all allocations are page-aligned.
|
||||
|
@ -581,8 +593,11 @@ struct mem_block* nouveau_mem_alloc(struct drm_device *dev, int alignment,
|
|||
/* Align allocation sizes to 64KiB blocks on G8x. We use a 64KiB
|
||||
* page size in the GPU VM.
|
||||
*/
|
||||
if (flags & NOUVEAU_MEM_FB && dev_priv->card_type >= NV_50)
|
||||
size = (size + (64 * 1024)) & ~((64 * 1024) - 1);
|
||||
if (flags & NOUVEAU_MEM_FB && dev_priv->card_type >= NV_50) {
|
||||
size = (size + 65535) & ~65535;
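/* Note: alignment here is a power-of-two exponent (the heap code masks
 * with (1 << align2) - 1), so forcing it to at least 16 below means
 * 2^16 = 64KiB alignment. */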
|
||||
if (alignment < 16)
|
||||
alignment = 16;
|
||||
}
|
||||
|
||||
/*
|
||||
* Warn about 0 sized allocations, but let it go through. It'll return 1 page
|
||||
|
@ -600,14 +615,14 @@ struct mem_block* nouveau_mem_alloc(struct drm_device *dev, int alignment,
|
|||
#define NOUVEAU_MEM_ALLOC_AGP {\
|
||||
type=NOUVEAU_MEM_AGP;\
|
||||
block = nouveau_mem_alloc_block(dev_priv->agp_heap, size,\
|
||||
alignment, file_priv); \
|
||||
alignment, file_priv, tail); \
|
||||
if (block) goto alloc_ok;\
|
||||
}
|
||||
|
||||
#define NOUVEAU_MEM_ALLOC_PCI {\
|
||||
type = NOUVEAU_MEM_PCI;\
|
||||
block = nouveau_mem_alloc_block(dev_priv->pci_heap, size, \
|
||||
alignment, file_priv); \
|
||||
alignment, file_priv, tail); \
|
||||
if ( block ) goto alloc_ok;\
|
||||
}
|
||||
|
||||
|
@ -616,11 +631,11 @@ struct mem_block* nouveau_mem_alloc(struct drm_device *dev, int alignment,
|
|||
if (!(flags&NOUVEAU_MEM_MAPPED)) {\
|
||||
block = nouveau_mem_alloc_block(dev_priv->fb_nomap_heap,\
|
||||
size, alignment, \
|
||||
file_priv); \
|
||||
file_priv, tail); \
|
||||
if (block) goto alloc_ok;\
|
||||
}\
|
||||
block = nouveau_mem_alloc_block(dev_priv->fb_heap, size,\
|
||||
alignment, file_priv);\
|
||||
alignment, file_priv, tail);\
|
||||
if (block) goto alloc_ok;\
|
||||
}
|
||||
|
||||
|
@ -644,6 +659,7 @@ alloc_ok:
|
|||
struct nouveau_gpuobj *pt = dev_priv->vm_vram_pt;
|
||||
unsigned offset = block->start;
|
||||
unsigned count = block->size / 65536;
|
||||
unsigned tile = 0;
|
||||
|
||||
if (!pt) {
|
||||
DRM_ERROR("vm alloc without vm pt\n");
|
||||
|
@ -651,11 +667,22 @@ alloc_ok:
|
|||
return NULL;
|
||||
}
|
||||
|
||||
/* This tiling setup is *not* what NVIDIA does, but both the
 * 2D and 3D engines seem happy with this simpler method.
 * We should look into why NVIDIA does it differently at some
 * point.
 */
|
||||
if (flags & NOUVEAU_MEM_TILE) {
|
||||
if (flags & NOUVEAU_MEM_TILE_ZETA)
|
||||
tile = 0x00002800;
|
||||
else
|
||||
tile = 0x00007000;
|
||||
}
|
||||
|
||||
while (count--) {
|
||||
unsigned pte = offset / 65536;
|
||||
|
||||
INSTANCE_WR(pt, (pte * 2) + 0, offset | 1);
|
||||
INSTANCE_WR(pt, (pte * 2) + 1, 0x00000000);
|
||||
INSTANCE_WR(pt, (pte * 2) + 1, 0x00000000 | tile);
|
||||
offset += 65536;
|
||||
}
|
||||
} else {
|
||||
|
@ -738,8 +765,11 @@ out_free:
|
|||
* Ioctls
|
||||
*/
|
||||
|
||||
int nouveau_ioctl_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv)
|
||||
int
|
||||
nouveau_ioctl_mem_alloc(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv)
|
||||
{
|
||||
struct drm_nouveau_private *dev_priv = dev->dev_private;
|
||||
struct drm_nouveau_mem_alloc *alloc = data;
|
||||
struct mem_block *block;
|
||||
|
||||
|
@ -748,18 +778,23 @@ int nouveau_ioctl_mem_alloc(struct drm_device *dev, void *data, struct drm_file
|
|||
if (alloc->flags & NOUVEAU_MEM_INTERNAL)
|
||||
return -EINVAL;
|
||||
|
||||
block=nouveau_mem_alloc(dev, alloc->alignment, alloc->size,
|
||||
alloc->flags, file_priv);
|
||||
block = nouveau_mem_alloc(dev, alloc->alignment, alloc->size,
|
||||
alloc->flags | NOUVEAU_MEM_USER, file_priv);
|
||||
if (!block)
|
||||
return -ENOMEM;
|
||||
alloc->map_handle=block->map_handle;
|
||||
alloc->offset=block->start;
|
||||
alloc->flags=block->flags;
|
||||
|
||||
if (dev_priv->card_type >= NV_50 && alloc->flags & NOUVEAU_MEM_FB)
|
||||
alloc->offset += 512*1024*1024;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int nouveau_ioctl_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
|
||||
int
|
||||
nouveau_ioctl_mem_free(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv)
|
||||
{
|
||||
struct drm_nouveau_private *dev_priv = dev->dev_private;
|
||||
struct drm_nouveau_mem_free *memfree = data;
|
||||
|
@ -767,6 +802,9 @@ int nouveau_ioctl_mem_free(struct drm_device *dev, void *data, struct drm_file *
|
|||
|
||||
NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
|
||||
|
||||
if (dev_priv->card_type >= NV_50 && memfree->flags & NOUVEAU_MEM_FB)
|
||||
memfree->offset -= 512*1024*1024;
|
||||
|
||||
block=NULL;
|
||||
if (memfree->flags & NOUVEAU_MEM_FB)
|
||||
block = find_block(dev_priv->fb_heap, memfree->offset);
|
||||
|
@ -782,3 +820,53 @@ int nouveau_ioctl_mem_free(struct drm_device *dev, void *data, struct drm_file *
|
|||
nouveau_mem_free(dev, block);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int
|
||||
nouveau_ioctl_mem_tile(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv)
|
||||
{
|
||||
struct drm_nouveau_private *dev_priv = dev->dev_private;
|
||||
struct drm_nouveau_mem_tile *memtile = data;
|
||||
struct mem_block *block = NULL;
|
||||
|
||||
NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
|
||||
|
||||
if (dev_priv->card_type < NV_50)
|
||||
return -EINVAL;
|
||||
|
||||
if (memtile->flags & NOUVEAU_MEM_FB) {
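/* VRAM offsets returned to userspace on NV50 are biased by 512MiB
 * (see nouveau_ioctl_mem_alloc), so undo that before looking the
 * block up in the heap. */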
|
||||
memtile->offset -= 512*1024*1024;
|
||||
block = find_block(dev_priv->fb_heap, memtile->offset);
|
||||
}
|
||||
|
||||
if (!block)
|
||||
return -EINVAL;
|
||||
|
||||
if (block->file_priv != file_priv)
|
||||
return -EPERM;
|
||||
|
||||
{
|
||||
struct nouveau_gpuobj *pt = dev_priv->vm_vram_pt;
|
||||
unsigned offset = block->start + memtile->delta;
|
||||
unsigned count = memtile->size / 65536;
|
||||
unsigned tile = 0;
|
||||
|
||||
if (memtile->flags & NOUVEAU_MEM_TILE) {
|
||||
if (memtile->flags & NOUVEAU_MEM_TILE_ZETA)
|
||||
tile = 0x00002800;
|
||||
else
|
||||
tile = 0x00007000;
|
||||
}
|
||||
|
||||
while (count--) {
|
||||
unsigned pte = offset / 65536;
|
||||
|
||||
INSTANCE_WR(pt, (pte * 2) + 0, offset | 1);
|
||||
INSTANCE_WR(pt, (pte * 2) + 1, 0x00000000 | tile);
|
||||
offset += 65536;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -94,7 +94,7 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
|
|||
}
|
||||
|
||||
mem = nouveau_mem_alloc_block(chan->notifier_heap, count*32, 0,
|
||||
(struct drm_file *)-2);
|
||||
(struct drm_file *)-2, 0);
|
||||
if (!mem) {
|
||||
DRM_ERROR("Channel %d notifier block full\n", chan->id);
|
||||
return -ENOMEM;
|
||||
|
|
|
@ -248,7 +248,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
|
|||
/* Allocate a chunk of the PRAMIN aperture */
|
||||
gpuobj->im_pramin = nouveau_mem_alloc_block(pramin, size,
|
||||
drm_order(align),
|
||||
(struct drm_file *)-2);
|
||||
(struct drm_file *)-2, 0);
|
||||
if (!gpuobj->im_pramin) {
|
||||
nouveau_gpuobj_del(dev, &gpuobj);
|
||||
return -ENOMEM;
|
||||
|
@ -1036,8 +1036,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
|
|||
/* VRAM ctxdma */
|
||||
if (dev_priv->card_type >= NV_50) {
|
||||
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
|
||||
512*1024*1024,
|
||||
dev_priv->fb_available_size,
|
||||
0, 0x100000000ULL,
|
||||
NV_DMA_ACCESS_RW,
|
||||
NV_DMA_TARGET_AGP, &vram);
|
||||
if (ret) {
|
||||
|
@ -1059,6 +1058,9 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
|
|||
}
|
||||
|
||||
/* TT memory ctxdma */
|
||||
if (dev_priv->card_type >= NV_50) {
|
||||
tt = vram;
|
||||
} else
|
||||
if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
|
||||
ret = nouveau_gpuobj_gart_dma_new(chan, 0,
|
||||
dev_priv->gart_info.aper_size,
|
||||
|
|
|
@ -289,6 +289,7 @@ void
|
|||
nv50_fifo_destroy_context(struct nouveau_channel *chan)
|
||||
{
|
||||
struct drm_device *dev = chan->dev;
|
||||
struct drm_nouveau_private *dev_priv = dev->dev_private;
|
||||
|
||||
DRM_DEBUG("ch%d\n", chan->id);
|
||||
|
||||
|
@ -298,6 +299,9 @@ nv50_fifo_destroy_context(struct nouveau_channel *chan)
|
|||
if (chan->id == 0)
|
||||
nv50_fifo_channel_disable(dev, 127, 0);
|
||||
|
||||
if ((NV_READ(NV03_PFIFO_CACHE1_PUSH1) & 0xffff) == chan->id)
|
||||
NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, 127);
|
||||
|
||||
nouveau_gpuobj_ref_del(dev, &chan->ramfc);
|
||||
}
|
||||
|
||||
|
|
|
@ -136,6 +136,18 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
|
|||
ADVANCE_RING();
|
||||
}
|
||||
|
||||
/* flush caches and wait for idle-clean after a cliprect change */
|
||||
BEGIN_RING(2);
|
||||
OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
|
||||
OUT_RING(R300_RB3D_DC_FLUSH);
|
||||
ADVANCE_RING();
|
||||
BEGIN_RING(2);
|
||||
OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
|
||||
OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
|
||||
ADVANCE_RING();
|
||||
/* set flush flag */
|
||||
dev_priv->track_flush |= RADEON_FLUSH_EMITED;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -166,13 +178,13 @@ void r300_init_reg_flags(struct drm_device *dev)
|
|||
ADD_RANGE(0x21DC, 1);
|
||||
ADD_RANGE(R300_VAP_UNKNOWN_221C, 1);
|
||||
ADD_RANGE(R300_VAP_CLIP_X_0, 4);
|
||||
ADD_RANGE(R300_VAP_PVS_WAITIDLE, 1);
|
||||
ADD_RANGE(R300_VAP_PVS_STATE_FLUSH_REG, 1);
|
||||
ADD_RANGE(R300_VAP_UNKNOWN_2288, 1);
|
||||
ADD_RANGE(R300_VAP_OUTPUT_VTX_FMT_0, 2);
|
||||
ADD_RANGE(R300_VAP_PVS_CNTL_1, 3);
|
||||
ADD_RANGE(R300_GB_ENABLE, 1);
|
||||
ADD_RANGE(R300_GB_MSPOS0, 5);
|
||||
ADD_RANGE(R300_TX_CNTL, 1);
|
||||
ADD_RANGE(R300_TX_INVALTAGS, 1);
|
||||
ADD_RANGE(R300_TX_ENABLE, 1);
|
||||
ADD_RANGE(0x4200, 4);
|
||||
ADD_RANGE(0x4214, 1);
|
||||
|
@ -190,7 +202,7 @@ void r300_init_reg_flags(struct drm_device *dev)
|
|||
ADD_RANGE(0x42C0, 2);
|
||||
ADD_RANGE(R300_RS_CNTL_0, 2);
|
||||
|
||||
ADD_RANGE(0x43A4, 2);
|
||||
ADD_RANGE(R300_SC_HYPERZ, 2);
|
||||
ADD_RANGE(0x43E8, 1);
|
||||
|
||||
ADD_RANGE(0x46A4, 5);
|
||||
|
@ -209,14 +221,12 @@ void r300_init_reg_flags(struct drm_device *dev)
|
|||
ADD_RANGE(0x4E50, 9);
|
||||
ADD_RANGE(0x4E88, 1);
|
||||
ADD_RANGE(0x4EA0, 2);
|
||||
ADD_RANGE(R300_RB3D_ZSTENCIL_CNTL_0, 3);
|
||||
ADD_RANGE(R300_RB3D_ZSTENCIL_FORMAT, 4);
|
||||
ADD_RANGE_MARK(R300_RB3D_DEPTHOFFSET, 1, MARK_CHECK_OFFSET); /* check offset */
|
||||
ADD_RANGE(R300_RB3D_DEPTHPITCH, 1);
|
||||
ADD_RANGE(0x4F28, 1);
|
||||
ADD_RANGE(0x4F30, 2);
|
||||
ADD_RANGE(0x4F44, 1);
|
||||
ADD_RANGE(0x4F54, 1);
|
||||
ADD_RANGE(R300_ZB_CNTL, 3);
|
||||
ADD_RANGE(R300_ZB_FORMAT, 4);
|
||||
ADD_RANGE_MARK(R300_ZB_DEPTHOFFSET, 1, MARK_CHECK_OFFSET); /* check offset */
|
||||
ADD_RANGE(R300_ZB_DEPTHPITCH, 1);
|
||||
ADD_RANGE(R300_ZB_DEPTHCLEARVALUE, 1);
|
||||
ADD_RANGE(R300_ZB_ZMASK_OFFSET, 13);
|
||||
|
||||
ADD_RANGE(R300_TX_FILTER_0, 16);
|
||||
ADD_RANGE(R300_TX_FILTER1_0, 16);
|
||||
|
@ -229,7 +239,7 @@ void r300_init_reg_flags(struct drm_device *dev)
|
|||
ADD_RANGE(R300_TX_BORDER_COLOR_0, 16);
|
||||
|
||||
/* Sporadic registers used as primitives are emitted */
|
||||
ADD_RANGE(R300_RB3D_ZCACHE_CTLSTAT, 1);
|
||||
ADD_RANGE(R300_ZB_ZCACHE_CTLSTAT, 1);
|
||||
ADD_RANGE(R300_RB3D_DSTCACHE_CTLSTAT, 1);
|
||||
ADD_RANGE(R300_VAP_INPUT_ROUTE_0_0, 8);
|
||||
ADD_RANGE(R300_VAP_INPUT_ROUTE_1_0, 8);
|
||||
|
@ -243,6 +253,7 @@ void r300_init_reg_flags(struct drm_device *dev)
|
|||
ADD_RANGE(R500_RS_INST_0, 16);
|
||||
ADD_RANGE(R500_RB3D_COLOR_CLEAR_VALUE_AR, 2);
|
||||
ADD_RANGE(R500_RB3D_CONSTANT_COLOR_AR, 2);
|
||||
ADD_RANGE(R500_ZB_FIFO_SIZE, 2);
|
||||
} else {
|
||||
ADD_RANGE(R300_PFS_CNTL_0, 3);
|
||||
ADD_RANGE(R300_PFS_NODE_0, 4);
|
||||
|
@ -390,15 +401,28 @@ static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
|
|||
if (sz * 16 > cmdbuf->bufsz)
|
||||
return -EINVAL;
|
||||
|
||||
BEGIN_RING(5 + sz * 4);
|
||||
/* Wait for VAP to come to senses.. */
|
||||
/* there is no need to emit it multiple times, (only once before VAP is programmed,
|
||||
but this optimization is for later */
|
||||
OUT_RING_REG(R300_VAP_PVS_WAITIDLE, 0);
|
||||
/* VAP is very sensitive so we purge cache before we program it
|
||||
* and we also flush its state before & after */
|
||||
BEGIN_RING(6);
|
||||
OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
|
||||
OUT_RING(R300_RB3D_DC_FLUSH);
|
||||
OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
|
||||
OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
|
||||
OUT_RING(CP_PACKET0(R300_VAP_PVS_STATE_FLUSH_REG, 0));
|
||||
OUT_RING(0);
|
||||
ADVANCE_RING();
|
||||
/* set flush flag */
|
||||
dev_priv->track_flush |= RADEON_FLUSH_EMITED;
|
||||
|
||||
BEGIN_RING(3 + sz * 4);
|
||||
OUT_RING_REG(R300_VAP_PVS_UPLOAD_ADDRESS, addr);
|
||||
OUT_RING(CP_PACKET0_TABLE(R300_VAP_PVS_UPLOAD_DATA, sz * 4 - 1));
|
||||
OUT_RING_TABLE((int *)cmdbuf->buf, sz * 4);
|
||||
ADVANCE_RING();
|
||||
|
||||
BEGIN_RING(2);
|
||||
OUT_RING(CP_PACKET0(R300_VAP_PVS_STATE_FLUSH_REG, 0));
|
||||
OUT_RING(0);
|
||||
ADVANCE_RING();
|
||||
|
||||
cmdbuf->buf += sz * 16;
|
||||
|
@ -426,6 +450,15 @@ static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv,
|
|||
OUT_RING_TABLE((int *)cmdbuf->buf, 8);
|
||||
ADVANCE_RING();
|
||||
|
||||
BEGIN_RING(4);
|
||||
OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
|
||||
OUT_RING(R300_RB3D_DC_FLUSH);
|
||||
OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
|
||||
OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
|
||||
ADVANCE_RING();
|
||||
/* set flush flag */
|
||||
dev_priv->track_flush |= RADEON_FLUSH_EMITED;
|
||||
|
||||
cmdbuf->buf += 8 * 4;
|
||||
cmdbuf->bufsz -= 8 * 4;
|
||||
|
||||
|
@ -545,22 +578,23 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static __inline__ int r300_emit_indx_buffer(drm_radeon_private_t *dev_priv,
|
||||
drm_radeon_kcmd_buffer_t *cmdbuf)
|
||||
static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv,
|
||||
drm_radeon_kcmd_buffer_t *cmdbuf)
|
||||
{
|
||||
u32 *cmd = (u32 *) cmdbuf->buf;
|
||||
int count, ret;
|
||||
u32 *cmd;
|
||||
int count;
|
||||
int expected_count;
|
||||
RING_LOCALS;
|
||||
|
||||
count=(cmd[0]>>16) & 0x3fff;
|
||||
cmd = (u32 *) cmdbuf->buf;
|
||||
count = (cmd[0]>>16) & 0x3fff;
|
||||
expected_count = cmd[1] >> 16;
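/* cmd[1] appears to be the VAP_VF_CNTL word of the draw packet: its
 * upper half holds the vertex count. 16-bit indices are packed two per
 * dword, so halve (rounding up) to get the expected dword count. */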
|
||||
if (!(cmd[1] & R300_VAP_VF_CNTL__INDEX_SIZE_32bit))
|
||||
expected_count = (expected_count+1)/2;
|
||||
|
||||
if ((cmd[1] & 0x8000ffff) != 0x80000810) {
|
||||
DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]);
|
||||
return -EINVAL;
|
||||
}
|
||||
ret = !radeon_check_offset(dev_priv, cmd[2]);
|
||||
if (ret) {
|
||||
DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]);
|
||||
if (count && count != expected_count) {
|
||||
DRM_ERROR("3D_DRAW_INDX_2: packet size %i, expected %i\n",
|
||||
count, expected_count);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
@ -572,6 +606,50 @@ static __inline__ int r300_emit_indx_buffer(drm_radeon_private_t *dev_priv,
|
|||
cmdbuf->buf += (count+2)*4;
|
||||
cmdbuf->bufsz -= (count+2)*4;
|
||||
|
||||
if (!count) {
|
||||
drm_r300_cmd_header_t header;
|
||||
|
||||
if (cmdbuf->bufsz < 4*4 + sizeof(header)) {
|
||||
DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER, but stream is too short.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
header.u = *(unsigned int *)cmdbuf->buf;
|
||||
|
||||
cmdbuf->buf += sizeof(header);
|
||||
cmdbuf->bufsz -= sizeof(header);
|
||||
cmd = (u32 *) cmdbuf->buf;
|
||||
|
||||
if (header.header.cmd_type != R300_CMD_PACKET3 ||
|
||||
header.packet3.packet != R300_CMD_PACKET3_RAW ||
|
||||
cmd[0] != CP_PACKET3(RADEON_CP_INDX_BUFFER, 2)) {
|
||||
DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if ((cmd[1] & 0x8000ffff) != 0x80000810) {
|
||||
DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (!radeon_check_offset(dev_priv, cmd[2])) {
|
||||
DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (cmd[3] != expected_count) {
|
||||
DRM_ERROR("INDX_BUFFER: buffer size %i, expected %i\n",
|
||||
cmd[3], expected_count);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
BEGIN_RING(4);
|
||||
OUT_RING(cmd[0]);
|
||||
OUT_RING_TABLE((int *)(cmdbuf->buf + 4), 3);
|
||||
ADVANCE_RING();
|
||||
|
||||
cmdbuf->buf += 4*4;
|
||||
cmdbuf->bufsz -= 4*4;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@@ -615,11 +693,22 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
case RADEON_CNTL_BITBLT_MULTI:
return r300_emit_bitblt_multi(dev_priv, cmdbuf);

case RADEON_CP_INDX_BUFFER: /* DRAW_INDX_2 without INDX_BUFFER seems to lock up the gpu */
return r300_emit_indx_buffer(dev_priv, cmdbuf);
case RADEON_CP_3D_DRAW_IMMD_2: /* triggers drawing using in-packet vertex data */
case RADEON_CP_3D_DRAW_VBUF_2: /* triggers drawing of vertex buffers setup elsewhere */
case RADEON_CP_3D_DRAW_INDX_2: /* triggers drawing using indices to vertex buffer */
case RADEON_CP_INDX_BUFFER:
DRM_ERROR("packet3 INDX_BUFFER without preceding 3D_DRAW_INDX_2 is illegal.\n");
return -EINVAL;
case RADEON_CP_3D_DRAW_IMMD_2:
/* triggers drawing using in-packet vertex data */
case RADEON_CP_3D_DRAW_VBUF_2:
/* triggers drawing of vertex buffers setup elsewhere */
dev_priv->track_flush &= ~(RADEON_FLUSH_EMITED |
RADEON_PURGE_EMITED);
break;
case RADEON_CP_3D_DRAW_INDX_2:
/* triggers drawing using indices to vertex buffer */
/* whenever we send vertex we clear flush & purge */
dev_priv->track_flush &= ~(RADEON_FLUSH_EMITED |
RADEON_PURGE_EMITED);
return r300_emit_draw_indx_2(dev_priv, cmdbuf);
case RADEON_WAIT_FOR_IDLE:
case RADEON_CP_NOP:
/* these packets are safe */

@@ -715,16 +804,53 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
*/
static __inline__ void r300_pacify(drm_radeon_private_t *dev_priv)
{
uint32_t cache_z, cache_3d, cache_2d;
RING_LOCALS;

BEGIN_RING(6);
OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
OUT_RING(R300_RB3D_DSTCACHE_UNKNOWN_0A);
OUT_RING(CP_PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
OUT_RING(R300_RB3D_ZCACHE_UNKNOWN_03);
OUT_RING(CP_PACKET3(RADEON_CP_NOP, 0));
OUT_RING(0x0);
cache_z = R300_ZC_FLUSH;
cache_2d = R300_RB2D_DC_FLUSH;
cache_3d = R300_RB3D_DC_FLUSH;
if (!(dev_priv->track_flush & RADEON_PURGE_EMITED)) {
/* we can purge, primitive where draw since last purge */
cache_z |= R300_ZC_FREE;
cache_2d |= R300_RB2D_DC_FREE;
cache_3d |= R300_RB3D_DC_FREE;
}

/* flush & purge zbuffer */
BEGIN_RING(2);
OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0));
OUT_RING(cache_z);
ADVANCE_RING();
/* flush & purge 3d */
BEGIN_RING(2);
OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
OUT_RING(cache_3d);
ADVANCE_RING();
/* flush & purge texture */
BEGIN_RING(2);
OUT_RING(CP_PACKET0(R300_TX_INVALTAGS, 0));
OUT_RING(0);
ADVANCE_RING();
/* FIXME: is this one really needed ? */
BEGIN_RING(2);
OUT_RING(CP_PACKET0(R300_RB3D_AARESOLVE_CTL, 0));
OUT_RING(0);
ADVANCE_RING();
BEGIN_RING(2);
OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
ADVANCE_RING();
/* flush & purge 2d through E2 as RB2D will trigger lockup */
BEGIN_RING(4);
OUT_RING(CP_PACKET0(R300_DSTCACHE_CTLSTAT, 0));
OUT_RING(cache_2d);
OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
OUT_RING(RADEON_WAIT_2D_IDLECLEAN |
RADEON_WAIT_HOST_IDLECLEAN);
ADVANCE_RING();
/* set flush & purge flags */
dev_priv->track_flush |= RADEON_FLUSH_EMITED | RADEON_PURGE_EMITED;
}

/**
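r300_pacify() above always flushes the Z, 3D, texture and 2D caches, but only adds the *_FREE (purge) bits when no purge has been emitted since the last draw, and it routes the 2D flush through R300_DSTCACHE_CTLSTAT ("E2", 0x1714) because hitting RB2D directly can lock the chip up. The standalone mock below shows just the bookkeeping half of that logic; the flag and bit values are illustrative, not the kernel definitions.

/* Standalone mock of the flush/purge bookkeeping used by r300_pacify(). */
#include <stdint.h>
#include <stdio.h>

#define FLUSH_EMITED (1u << 0)   /* illustrative values only */
#define PURGE_EMITED (1u << 1)

#define ZC_FLUSH (1u << 0)
#define ZC_FREE  (1u << 1)

struct track { uint32_t flags; };

/* Mirror of the decision in r300_pacify(): always flush, but only add the
 * "free" (purge) bit when nothing has been purged since the last draw. */
static uint32_t zcache_bits(struct track *t)
{
	uint32_t bits = ZC_FLUSH;

	if (!(t->flags & PURGE_EMITED))
		bits |= ZC_FREE;
	t->flags |= FLUSH_EMITED | PURGE_EMITED;  /* remember what was emitted */
	return bits;
}

/* A draw packet invalidates the bookkeeping again (see r300_emit_raw_packet3). */
static void on_draw(struct track *t)
{
	t->flags &= ~(FLUSH_EMITED | PURGE_EMITED);
}

int main(void)
{
	struct track t = { 0 };

	printf("0x%x\n", zcache_bits(&t));  /* flush + free       */
	printf("0x%x\n", zcache_bits(&t));  /* flush only         */
	on_draw(&t);
	printf("0x%x\n", zcache_bits(&t));  /* flush + free again */
	return 0;
}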
@@ -905,8 +1031,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,

DRM_DEBUG("\n");

/* See the comment above r300_emit_begin3d for why this call must be here,
* and what the cleanup gotos are for. */
/* pacify */
r300_pacify(dev_priv);

if (cmdbuf->nbox <= R300_SIMULTANEOUS_CLIPRECTS) {

@@ -320,7 +320,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
* Therefore, I suspect writing zero to 0x2284 synchronizes the engine and
* avoids bugs caused by still running shaders reading bad data from memory.
*/
#define R300_VAP_PVS_WAITIDLE 0x2284 /* GUESS */
#define R300_VAP_PVS_STATE_FLUSH_REG 0x2284

/* Absolutely no clue what this register is about. */
#define R300_VAP_UNKNOWN_2288 0x2288

@@ -516,7 +516,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
/* gap */

/* Zero to flush caches. */
#define R300_TX_CNTL 0x4100
#define R300_TX_INVALTAGS 0x4100
#define R300_TX_FLUSH 0x0

/* The upper enable bits are guessed, based on fglrx reported limits. */

@@ -705,6 +705,27 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
# define R300_RS_ROUTE_1_UNKNOWN11 (1 << 11)
/* END: Rasterization / Interpolators - many guesses */

/* Hierarchical Z Enable */
#define R300_SC_HYPERZ 0x43a4
# define R300_SC_HYPERZ_DISABLE (0 << 0)
# define R300_SC_HYPERZ_ENABLE (1 << 0)
# define R300_SC_HYPERZ_MIN (0 << 1)
# define R300_SC_HYPERZ_MAX (1 << 1)
# define R300_SC_HYPERZ_ADJ_256 (0 << 2)
# define R300_SC_HYPERZ_ADJ_128 (1 << 2)
# define R300_SC_HYPERZ_ADJ_64 (2 << 2)
# define R300_SC_HYPERZ_ADJ_32 (3 << 2)
# define R300_SC_HYPERZ_ADJ_16 (4 << 2)
# define R300_SC_HYPERZ_ADJ_8 (5 << 2)
# define R300_SC_HYPERZ_ADJ_4 (6 << 2)
# define R300_SC_HYPERZ_ADJ_2 (7 << 2)
# define R300_SC_HYPERZ_HZ_Z0MIN_NO (0 << 5)
# define R300_SC_HYPERZ_HZ_Z0MIN (1 << 5)
# define R300_SC_HYPERZ_HZ_Z0MAX_NO (0 << 6)
# define R300_SC_HYPERZ_HZ_Z0MAX (1 << 6)

#define R300_SC_EDGERULE 0x43a8

/* BEGIN: Scissors and cliprects */

/* There are four clipping rectangles. Their corner coordinates are inclusive.

@@ -1344,6 +1365,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
#define R300_RB3D_COLORPITCH2 0x4E40 /* GUESS */
#define R300_RB3D_COLORPITCH3 0x4E44 /* GUESS */

#define R300_RB3D_AARESOLVE_CTL 0x4E88
/* gap */

/* Guess by Vladimir.

@@ -1358,19 +1380,14 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
* for this.
* Bit (1<<8) is the "test" bit. so plain write is 6 - vd
*/
#define R300_RB3D_ZSTENCIL_CNTL_0 0x4F00
# define R300_RB3D_Z_DISABLED_1 0x00000010
# define R300_RB3D_Z_DISABLED_2 0x00000014
# define R300_RB3D_Z_TEST 0x00000012
# define R300_RB3D_Z_TEST_AND_WRITE 0x00000016
# define R300_RB3D_Z_WRITE_ONLY 0x00000006
#define R300_ZB_CNTL 0x4F00
# define R300_STENCIL_ENABLE (1 << 0)
# define R300_Z_ENABLE (1 << 1)
# define R300_Z_WRITE_ENABLE (1 << 2)
# define R300_Z_SIGNED_COMPARE (1 << 3)
# define R300_STENCIL_FRONT_BACK (1 << 4)

# define R300_RB3D_Z_TEST 0x00000012
# define R300_RB3D_Z_TEST_AND_WRITE 0x00000016
# define R300_RB3D_Z_WRITE_ONLY 0x00000006
# define R300_RB3D_STENCIL_ENABLE 0x00000001

#define R300_RB3D_ZSTENCIL_CNTL_1 0x4F04
#define R300_ZB_ZSTENCILCNTL 0x4f04
/* functions */
# define R300_ZS_NEVER 0
# define R300_ZS_LESS 1

@@ -1390,52 +1407,166 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
# define R300_ZS_INVERT 5
# define R300_ZS_INCR_WRAP 6
# define R300_ZS_DECR_WRAP 7
# define R300_Z_FUNC_SHIFT 0
/* front and back refer to operations done for front
and back faces, i.e. separate stencil function support */
# define R300_RB3D_ZS1_DEPTH_FUNC_SHIFT 0
# define R300_RB3D_ZS1_FRONT_FUNC_SHIFT 3
# define R300_RB3D_ZS1_FRONT_FAIL_OP_SHIFT 6
# define R300_RB3D_ZS1_FRONT_ZPASS_OP_SHIFT 9
# define R300_RB3D_ZS1_FRONT_ZFAIL_OP_SHIFT 12
# define R300_RB3D_ZS1_BACK_FUNC_SHIFT 15
# define R300_RB3D_ZS1_BACK_FAIL_OP_SHIFT 18
# define R300_RB3D_ZS1_BACK_ZPASS_OP_SHIFT 21
# define R300_RB3D_ZS1_BACK_ZFAIL_OP_SHIFT 24
# define R300_S_FRONT_FUNC_SHIFT 3
# define R300_S_FRONT_SFAIL_OP_SHIFT 6
# define R300_S_FRONT_ZPASS_OP_SHIFT 9
# define R300_S_FRONT_ZFAIL_OP_SHIFT 12
# define R300_S_BACK_FUNC_SHIFT 15
# define R300_S_BACK_SFAIL_OP_SHIFT 18
# define R300_S_BACK_ZPASS_OP_SHIFT 21
# define R300_S_BACK_ZFAIL_OP_SHIFT 24

#define R300_RB3D_ZSTENCIL_CNTL_2 0x4F08
# define R300_RB3D_ZS2_STENCIL_REF_SHIFT 0
# define R300_RB3D_ZS2_STENCIL_MASK 0xFF
# define R300_RB3D_ZS2_STENCIL_MASK_SHIFT 8
# define R300_RB3D_ZS2_STENCIL_WRITE_MASK_SHIFT 16
#define R300_ZB_STENCILREFMASK 0x4f08
# define R300_STENCILREF_SHIFT 0
# define R300_STENCILREF_MASK 0x000000ff
# define R300_STENCILMASK_SHIFT 8
# define R300_STENCILMASK_MASK 0x0000ff00
# define R300_STENCILWRITEMASK_SHIFT 16
# define R300_STENCILWRITEMASK_MASK 0x00ff0000

/* gap */

#define R300_RB3D_ZSTENCIL_FORMAT 0x4F10
# define R300_DEPTH_FORMAT_16BIT_INT_Z (0 << 0)
# define R300_DEPTH_FORMAT_24BIT_INT_Z (2 << 0)
/* 16 bit format or some aditional bit ? */
# define R300_DEPTH_FORMAT_UNK32 (32 << 0)
#define R300_ZB_FORMAT 0x4f10
# define R300_DEPTHFORMAT_16BIT_INT_Z (0 << 0)
# define R300_DEPTHFORMAT_16BIT_13E3 (1 << 0)
# define R300_DEPTHFORMAT_24BIT_INT_Z_8BIT_STENCIL (2 << 0)
/* reserved up to (15 << 0) */
# define R300_INVERT_13E3_LEADING_ONES (0 << 4)
# define R300_INVERT_13E3_LEADING_ZEROS (1 << 4)

#define R300_RB3D_EARLY_Z 0x4F14
# define R300_EARLY_Z_DISABLE (0 << 0)
# define R300_EARLY_Z_ENABLE (1 << 0)
#define R300_ZB_ZTOP 0x4F14
# define R300_ZTOP_DISABLE (0 << 0)
# define R300_ZTOP_ENABLE (1 << 0)

/* gap */

#define R300_RB3D_ZCACHE_CTLSTAT 0x4F18 /* GUESS */
# define R300_RB3D_ZCACHE_UNKNOWN_01 0x1
# define R300_RB3D_ZCACHE_UNKNOWN_03 0x3
#define R300_ZB_ZCACHE_CTLSTAT 0x4f18
# define R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_NO_EFFECT (0 << 0)
# define R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_FLUSH_AND_FREE (1 << 0)
# define R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_NO_EFFECT (0 << 1)
# define R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_FREE (1 << 1)
# define R300_ZB_ZCACHE_CTLSTAT_ZC_BUSY_IDLE (0 << 31)
# define R300_ZB_ZCACHE_CTLSTAT_ZC_BUSY_BUSY (1 << 31)

#define R300_ZB_BW_CNTL 0x4f1c
# define R300_HIZ_DISABLE (0 << 0)
# define R300_HIZ_ENABLE (1 << 0)
# define R300_HIZ_MIN (0 << 1)
# define R300_HIZ_MAX (1 << 1)
# define R300_FAST_FILL_DISABLE (0 << 2)
# define R300_FAST_FILL_ENABLE (1 << 2)
# define R300_RD_COMP_DISABLE (0 << 3)
# define R300_RD_COMP_ENABLE (1 << 3)
# define R300_WR_COMP_DISABLE (0 << 4)
# define R300_WR_COMP_ENABLE (1 << 4)
# define R300_ZB_CB_CLEAR_RMW (0 << 5)
# define R300_ZB_CB_CLEAR_CACHE_LINEAR (1 << 5)
# define R300_FORCE_COMPRESSED_STENCIL_VALUE_DISABLE (0 << 6)
# define R300_FORCE_COMPRESSED_STENCIL_VALUE_ENABLE (1 << 6)

# define R500_ZEQUAL_OPTIMIZE_ENABLE (0 << 7)
# define R500_ZEQUAL_OPTIMIZE_DISABLE (1 << 7)
# define R500_SEQUAL_OPTIMIZE_ENABLE (0 << 8)
# define R500_SEQUAL_OPTIMIZE_DISABLE (1 << 8)

# define R500_BMASK_ENABLE (0 << 10)
# define R500_BMASK_DISABLE (1 << 10)
# define R500_HIZ_EQUAL_REJECT_DISABLE (0 << 11)
# define R500_HIZ_EQUAL_REJECT_ENABLE (1 << 11)
# define R500_HIZ_FP_EXP_BITS_DISABLE (0 << 12)
# define R500_HIZ_FP_EXP_BITS_1 (1 << 12)
# define R500_HIZ_FP_EXP_BITS_2 (2 << 12)
# define R500_HIZ_FP_EXP_BITS_3 (3 << 12)
# define R500_HIZ_FP_EXP_BITS_4 (4 << 12)
# define R500_HIZ_FP_EXP_BITS_5 (5 << 12)
# define R500_HIZ_FP_INVERT_LEADING_ONES (0 << 15)
# define R500_HIZ_FP_INVERT_LEADING_ZEROS (1 << 15)
# define R500_TILE_OVERWRITE_RECOMPRESSION_ENABLE (0 << 16)
# define R500_TILE_OVERWRITE_RECOMPRESSION_DISABLE (1 << 16)
# define R500_CONTIGUOUS_6XAA_SAMPLES_ENABLE (0 << 17)
# define R500_CONTIGUOUS_6XAA_SAMPLES_DISABLE (1 << 17)
# define R500_PEQ_PACKING_DISABLE (0 << 18)
# define R500_PEQ_PACKING_ENABLE (1 << 18)
# define R500_COVERED_PTR_MASKING_DISABLE (0 << 18)
# define R500_COVERED_PTR_MASKING_ENABLE (1 << 18)

/* gap */

#define R300_RB3D_DEPTHOFFSET 0x4F20
#define R300_RB3D_DEPTHPITCH 0x4F24
# define R300_DEPTHPITCH_MASK 0x00001FF8 /* GUESS */
# define R300_DEPTH_TILE_ENABLE (1 << 16) /* GUESS */
# define R300_DEPTH_MICROTILE_ENABLE (1 << 17) /* GUESS */
# define R300_DEPTH_ENDIAN_NO_SWAP (0 << 18) /* GUESS */
# define R300_DEPTH_ENDIAN_WORD_SWAP (1 << 18) /* GUESS */
# define R300_DEPTH_ENDIAN_DWORD_SWAP (2 << 18) /* GUESS */
/* Z Buffer Address Offset.
* Bits 31 to 5 are used for aligned Z buffer address offset for macro tiles.
*/
#define R300_ZB_DEPTHOFFSET 0x4f20

/* Z Buffer Pitch and Endian Control */
#define R300_ZB_DEPTHPITCH 0x4f24
# define R300_DEPTHPITCH_MASK 0x00003FFC
# define R300_DEPTHMACROTILE_DISABLE (0 << 16)
# define R300_DEPTHMACROTILE_ENABLE (1 << 16)
# define R300_DEPTHMICROTILE_LINEAR (0 << 17)
# define R300_DEPTHMICROTILE_TILED (1 << 17)
# define R300_DEPTHMICROTILE_TILED_SQUARE (2 << 17)
# define R300_DEPTHENDIAN_NO_SWAP (0 << 18)
# define R300_DEPTHENDIAN_WORD_SWAP (1 << 18)
# define R300_DEPTHENDIAN_DWORD_SWAP (2 << 18)
# define R300_DEPTHENDIAN_HALF_DWORD_SWAP (3 << 18)

/* Z Buffer Clear Value */
#define R300_ZB_DEPTHCLEARVALUE 0x4f28

#define R300_ZB_ZMASK_OFFSET 0x4f30
#define R300_ZB_ZMASK_PITCH 0x4f34
#define R300_ZB_ZMASK_WRINDEX 0x4f38
#define R300_ZB_ZMASK_DWORD 0x4f3c
#define R300_ZB_ZMASK_RDINDEX 0x4f40

/* Hierarchical Z Memory Offset */
#define R300_ZB_HIZ_OFFSET 0x4f44

/* Hierarchical Z Write Index */
#define R300_ZB_HIZ_WRINDEX 0x4f48

/* Hierarchical Z Data */
#define R300_ZB_HIZ_DWORD 0x4f4c

/* Hierarchical Z Read Index */
#define R300_ZB_HIZ_RDINDEX 0x4f50

/* Hierarchical Z Pitch */
#define R300_ZB_HIZ_PITCH 0x4f54

/* Z Buffer Z Pass Counter Data */
#define R300_ZB_ZPASS_DATA 0x4f58

/* Z Buffer Z Pass Counter Address */
#define R300_ZB_ZPASS_ADDR 0x4f5c

/* Depth buffer X and Y coordinate offset */
#define R300_ZB_DEPTHXY_OFFSET 0x4f60
# define R300_DEPTHX_OFFSET_SHIFT 1
# define R300_DEPTHX_OFFSET_MASK 0x000007FE
# define R300_DEPTHY_OFFSET_SHIFT 17
# define R300_DEPTHY_OFFSET_MASK 0x07FE0000

/* Sets the fifo sizes */
#define R500_ZB_FIFO_SIZE 0x4fd0
# define R500_OP_FIFO_SIZE_FULL (0 << 0)
# define R500_OP_FIFO_SIZE_HALF (1 << 0)
# define R500_OP_FIFO_SIZE_QUATER (2 << 0)
# define R500_OP_FIFO_SIZE_EIGTHS (4 << 0)

/* Stencil Reference Value and Mask for backfacing quads */
/* R300_ZB_STENCILREFMASK handles front face */
#define R500_ZB_STENCILREFMASK_BF 0x4fd4
# define R500_STENCILREF_SHIFT 0
# define R500_STENCILREF_MASK 0x000000ff
# define R500_STENCILMASK_SHIFT 8
# define R500_STENCILMASK_MASK 0x0000ff00
# define R500_STENCILWRITEMASK_SHIFT 16
# define R500_STENCILWRITEMASK_MASK 0x00ff0000

/* BEGIN: Vertex program instruction set */

@@ -39,6 +39,7 @@
#define RADEON_FIFO_DEBUG 0

static int radeon_do_cleanup_cp(struct drm_device * dev);
static void radeon_do_cp_start(drm_radeon_private_t * dev_priv);

static u32 R500_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
{

@@ -126,8 +127,12 @@ static void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base)
} else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) {
R500_WRITE_MCIND(R520_MC_AGP_BASE, agp_base_lo);
R500_WRITE_MCIND(R520_MC_AGP_BASE_2, agp_base_hi);
} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) {
RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo);
RADEON_WRITE(RS480_AGP_BASE_2, agp_base_hi);
} else {
RADEON_WRITE(RADEON_MC_AGP_LOCATION, agp_base_lo);
RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo);
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R200)
RADEON_WRITE(RADEON_AGP_BASE_2, agp_base_hi);
}

@@ -194,23 +199,8 @@ static int radeon_do_pixcache_flush(drm_radeon_private_t * dev_priv)
DRM_UDELAY(1);
}
} else {
/* 3D */
tmp = RADEON_READ(R300_RB3D_DSTCACHE_CTLSTAT);
tmp |= RADEON_RB3D_DC_FLUSH_ALL;
RADEON_WRITE(R300_RB3D_DSTCACHE_CTLSTAT, tmp);

/* 2D */
tmp = RADEON_READ(RADEON_RB2D_DSTCACHE_CTLSTAT);
tmp |= RADEON_RB3D_DC_FLUSH_ALL;
RADEON_WRITE(RADEON_RB3D_DSTCACHE_CTLSTAT, tmp);

for (i = 0; i < dev_priv->usec_timeout; i++) {
if (!(RADEON_READ(RADEON_RB2D_DSTCACHE_CTLSTAT)
& RADEON_RB3D_DC_BUSY)) {
return 0;
}
DRM_UDELAY(1);
}
/* don't flush or purge cache here or lockup */
return 0;
}

#if RADEON_FIFO_DEBUG

@@ -233,6 +223,9 @@ static int radeon_do_wait_for_fifo(drm_radeon_private_t * dev_priv, int entries)
return 0;
DRM_UDELAY(1);
}
DRM_INFO("wait for fifo failed status : 0x%08X 0x%08X\n",
RADEON_READ(RADEON_RBBM_STATUS),
RADEON_READ(R300_VAP_CNTL_STATUS));

#if RADEON_FIFO_DEBUG
DRM_ERROR("failed!\n");

@@ -259,6 +252,9 @@ static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv)
}
DRM_UDELAY(1);
}
DRM_INFO("wait idle failed status : 0x%08X 0x%08X\n",
RADEON_READ(RADEON_RBBM_STATUS),
RADEON_READ(R300_VAP_CNTL_STATUS));

#if RADEON_FIFO_DEBUG
DRM_ERROR("failed!\n");

@@ -352,6 +348,7 @@ static void radeon_cp_load_microcode(drm_radeon_private_t * dev_priv)
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV350) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV380) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) {
DRM_INFO("Loading R300 Microcode\n");
for (i = 0; i < 256; i++) {

@@ -440,14 +437,20 @@ static void radeon_do_cp_start(drm_radeon_private_t * dev_priv)

dev_priv->cp_running = 1;

BEGIN_RING(6);

BEGIN_RING(8);
/* isync can only be written through cp on r5xx write it here */
OUT_RING(CP_PACKET0(RADEON_ISYNC_CNTL, 0));
OUT_RING(RADEON_ISYNC_ANY2D_IDLE3D |
RADEON_ISYNC_ANY3D_IDLE2D |
RADEON_ISYNC_WAIT_IDLEGUI |
RADEON_ISYNC_CPSCRATCH_IDLEGUI);
RADEON_PURGE_CACHE();
RADEON_PURGE_ZCACHE();
RADEON_WAIT_UNTIL_IDLE();

ADVANCE_RING();
COMMIT_RING();

dev_priv->track_flush |= RADEON_FLUSH_EMITED | RADEON_PURGE_EMITED;
}

/* Reset the Command Processor. This will not flush any pending

@@ -741,14 +744,7 @@ static void radeon_set_igpgart(drm_radeon_private_t * dev_priv, int on)
IGP_WRITE_MCIND(RS480_AGP_MODE_CNTL, ((1 << RS480_REQ_TYPE_SNOOP_SHIFT) |
RS480_REQ_TYPE_SNOOP_DIS));

if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) {
IGP_WRITE_MCIND(RS690_MC_AGP_BASE,
(unsigned int)dev_priv->gart_vm_start);
IGP_WRITE_MCIND(RS690_MC_AGP_BASE_2, 0);
} else {
RADEON_WRITE(RADEON_AGP_BASE, (unsigned int)dev_priv->gart_vm_start);
RADEON_WRITE(RS480_AGP_BASE_2, 0);
}
radeon_write_agp_base(dev_priv, dev_priv->gart_vm_start);

dev_priv->gart_size = 32*1024*1024;
temp = (((dev_priv->gart_vm_start - 1 + dev_priv->gart_size) &

@@ -897,17 +893,6 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
*/
dev_priv->vblank_crtc = DRM_RADEON_VBLANK_CRTC1;

switch(init->func) {
case RADEON_INIT_R200_CP:
dev_priv->microcode_version = UCODE_R200;
break;
case RADEON_INIT_R300_CP:
dev_priv->microcode_version = UCODE_R300;
break;
default:
dev_priv->microcode_version = UCODE_R100;
}

dev_priv->do_boxes = 0;
dev_priv->cp_mode = init->cp_mode;

@@ -955,8 +940,7 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
*/
dev_priv->depth_clear.rb3d_cntl = (RADEON_PLANE_MASK_ENABLE |
(dev_priv->color_fmt << 10) |
(dev_priv->microcode_version ==
UCODE_R100 ? RADEON_ZBLOCK16 : 0));
(dev_priv->chip_family < CHIP_R200 ? RADEON_ZBLOCK16 : 0));

dev_priv->depth_clear.rb3d_zstencilcntl =
(dev_priv->depth_fmt |

@@ -1162,7 +1146,7 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
dev_priv->gart_info.mapping.size =
dev_priv->gart_info.table_size;

drm_core_ioremap(&dev_priv->gart_info.mapping, dev);
drm_core_ioremap_wc(&dev_priv->gart_info.mapping, dev);
dev_priv->gart_info.addr =
dev_priv->gart_info.mapping.handle;

@@ -1296,6 +1280,7 @@ static int radeon_do_resume_cp(struct drm_device * dev)
radeon_cp_init_ring_buffer(dev, dev_priv);

radeon_do_engine_reset(dev);
radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);

DRM_DEBUG("radeon_do_resume_cp() complete\n");

@@ -1734,6 +1719,7 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)
break;
}

dev_priv->chip_family = flags & RADEON_FAMILY_MASK;
if (drm_device_is_agp(dev))
dev_priv->flags |= RADEON_IS_AGP;
else if (drm_device_is_pcie(dev))

@@ -38,7 +38,7 @@

#define DRIVER_NAME "radeon"
#define DRIVER_DESC "ATI Radeon"
#define DRIVER_DATE "20080528"
#define DRIVER_DATE "20080613"

/* Interface history:
*

@@ -124,6 +124,7 @@ enum radeon_family {
CHIP_RV380,
CHIP_R420,
CHIP_RV410,
CHIP_RS400,
CHIP_RS480,
CHIP_RS690,
CHIP_RV515,

@@ -135,12 +136,6 @@ enum radeon_family {
CHIP_LAST,
};

enum radeon_cp_microcode_version {
UCODE_R100,
UCODE_R200,
UCODE_R300,
};

/*
* Chip flags
*/

@@ -221,6 +216,9 @@ struct radeon_virt_surface {
struct drm_file *file_priv;
};

#define RADEON_FLUSH_EMITED (1 < 0)
#define RADEON_PURGE_EMITED (1 < 1)

typedef struct drm_radeon_private {

drm_radeon_ring_buffer_t ring;
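Note that, as committed, RADEON_FLUSH_EMITED and RADEON_PURGE_EMITED use the comparison operator '<' rather than a left shift, so both macros evaluate to 0 and the track_flush tests shown earlier become no-ops. The diff is reproduced as-is above; the tiny check program below only illustrates the difference and shows the shift form that was presumably intended.

#include <assert.h>

int main(void)
{
	/* As committed: '<' is a comparison, so both expressions are 0. */
	assert((1 < 0) == 0 && (1 < 1) == 0);

	/* Presumably intended bit flags. */
	enum { FLUSH_EMITED = 1 << 0, PURGE_EMITED = 1 << 1 };
	assert(FLUSH_EMITED == 0x1 && PURGE_EMITED == 0x2);
	return 0;
}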
@@ -245,8 +243,6 @@ typedef struct drm_radeon_private {

int usec_timeout;

int microcode_version;

struct {
u32 boxes;
int freelist_timeouts;

@@ -316,6 +312,8 @@ typedef struct drm_radeon_private {
unsigned long fb_aper_offset;

int num_gb_pipes;
int track_flush;
uint32_t chip_family; /* extract from flags */
} drm_radeon_private_t;

typedef struct drm_radeon_buf_priv {

@@ -375,6 +373,7 @@ extern void radeon_mem_release(struct drm_file *file_priv,
struct mem_block *heap);

/* radeon_irq.c */
extern void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state);
extern int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv);

@@ -671,11 +670,12 @@ extern int r300_do_cp_cmdbuf(struct drm_device *dev,
#define RADEON_PP_TXFILTER_1 0x1c6c
#define RADEON_PP_TXFILTER_2 0x1c84

#define RADEON_RB2D_DSTCACHE_CTLSTAT 0x342c
# define RADEON_RB2D_DC_FLUSH (3 << 0)
# define RADEON_RB2D_DC_FREE (3 << 2)
# define RADEON_RB2D_DC_FLUSH_ALL 0xf
# define RADEON_RB2D_DC_BUSY (1 << 31)
#define R300_RB2D_DSTCACHE_CTLSTAT 0x342c /* use R300_DSTCACHE_CTLSTAT */
#define R300_DSTCACHE_CTLSTAT 0x1714
# define R300_RB2D_DC_FLUSH (3 << 0)
# define R300_RB2D_DC_FREE (3 << 2)
# define R300_RB2D_DC_FLUSH_ALL 0xf
# define R300_RB2D_DC_BUSY (1 << 31)
#define RADEON_RB3D_CNTL 0x1c3c
# define RADEON_ALPHA_BLEND_ENABLE (1 << 0)
# define RADEON_PLANE_MASK_ENABLE (1 << 1)

@@ -701,7 +701,6 @@ extern int r300_do_cp_cmdbuf(struct drm_device *dev,
#define R300_ZB_ZCACHE_CTLSTAT 0x4f18
# define R300_ZC_FLUSH (1 << 0)
# define R300_ZC_FREE (1 << 1)
# define R300_ZC_FLUSH_ALL 0x3
# define R300_ZC_BUSY (1 << 31)
#define RADEON_RB3D_DSTCACHE_CTLSTAT 0x325c
# define RADEON_RB3D_DC_FLUSH (3 << 0)

@@ -709,6 +708,8 @@ extern int r300_do_cp_cmdbuf(struct drm_device *dev,
# define RADEON_RB3D_DC_FLUSH_ALL 0xf
# define RADEON_RB3D_DC_BUSY (1 << 31)
#define R300_RB3D_DSTCACHE_CTLSTAT 0x4e4c
# define R300_RB3D_DC_FLUSH (2 << 0)
# define R300_RB3D_DC_FREE (2 << 2)
# define R300_RB3D_DC_FINISH (1 << 4)
#define RADEON_RB3D_ZSTENCILCNTL 0x1c2c
# define RADEON_Z_TEST_MASK (7 << 4)

@@ -1278,21 +1279,21 @@ do { \

#define RADEON_FLUSH_CACHE() do { \
if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \
OUT_RING( CP_PACKET0( RADEON_RB3D_DSTCACHE_CTLSTAT, 0 ) ); \
OUT_RING( RADEON_RB3D_DC_FLUSH ); \
OUT_RING(CP_PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); \
OUT_RING(RADEON_RB3D_DC_FLUSH); \
} else { \
OUT_RING( CP_PACKET0( R300_RB3D_DSTCACHE_CTLSTAT, 0 ) ); \
OUT_RING( RADEON_RB3D_DC_FLUSH ); \
OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); \
OUT_RING(R300_RB3D_DC_FLUSH); \
} \
} while (0)

#define RADEON_PURGE_CACHE() do { \
if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \
OUT_RING( CP_PACKET0( RADEON_RB3D_DSTCACHE_CTLSTAT, 0 ) ); \
OUT_RING( RADEON_RB3D_DC_FLUSH_ALL ); \
OUT_RING(CP_PACKET0( RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); \
OUT_RING(RADEON_RB3D_DC_FLUSH | RADEON_RB3D_DC_FREE); \
} else { \
OUT_RING( CP_PACKET0( R300_RB3D_DSTCACHE_CTLSTAT, 0 ) ); \
OUT_RING( RADEON_RB3D_DC_FLUSH_ALL ); \
OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); \
OUT_RING(R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE ); \
} \
} while (0)

@@ -1308,11 +1309,11 @@ do { \

#define RADEON_PURGE_ZCACHE() do { \
if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \
OUT_RING( CP_PACKET0( RADEON_RB3D_ZCACHE_CTLSTAT, 0 ) ); \
OUT_RING( RADEON_RB3D_ZC_FLUSH_ALL ); \
OUT_RING(CP_PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0)); \
OUT_RING(RADEON_RB3D_ZC_FLUSH | RADEON_RB3D_ZC_FREE); \
} else { \
OUT_RING( CP_PACKET0( R300_RB3D_DSTCACHE_CTLSTAT, 0 ) ); \
OUT_RING( R300_ZC_FLUSH_ALL ); \
OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0)); \
OUT_RING(R300_ZC_FLUSH | R300_ZC_FREE); \
} \
} while (0)

@@ -35,7 +35,7 @@
#include "radeon_drm.h"
#include "radeon_drv.h"

static void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state)
void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state)
{
drm_radeon_private_t *dev_priv = dev->dev_private;

@@ -254,35 +254,27 @@ static int radeon_wait_irq(struct drm_device * dev, int swi_nr)
u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
u32 crtc_cnt_reg, crtc_status_reg;

if (!dev_priv) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}

if (crtc < 0 || crtc > 1) {
DRM_ERROR("Invalid crtc %d\n", crtc);
return -EINVAL;
}

if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) {
if (crtc == 0) {
crtc_cnt_reg = R500_D1CRTC_FRAME_COUNT;
crtc_status_reg = R500_D1CRTC_STATUS;
} else if (crtc == 1) {
crtc_cnt_reg = R500_D2CRTC_FRAME_COUNT;
crtc_status_reg = R500_D2CRTC_STATUS;
} else
return -EINVAL;
return RADEON_READ(crtc_cnt_reg) + (RADEON_READ(crtc_status_reg) & 1);

if (crtc == 0)
return RADEON_READ(R500_D1CRTC_FRAME_COUNT);
else
return RADEON_READ(R500_D2CRTC_FRAME_COUNT);
} else {
if (crtc == 0) {
crtc_cnt_reg = RADEON_CRTC_CRNT_FRAME;
crtc_status_reg = RADEON_CRTC_STATUS;
} else if (crtc == 1) {
crtc_cnt_reg = RADEON_CRTC2_CRNT_FRAME;
crtc_status_reg = RADEON_CRTC2_STATUS;
} else {
return -EINVAL;
}
return RADEON_READ(crtc_cnt_reg) + (RADEON_READ(crtc_status_reg) & 1);
if (crtc == 0)
return RADEON_READ(RADEON_CRTC_CRNT_FRAME);
else
return RADEON_READ(RADEON_CRTC2_CRNT_FRAME);
}
}

@@ -382,27 +374,8 @@ void radeon_driver_irq_uninstall(struct drm_device * dev)
int radeon_vblank_crtc_get(struct drm_device *dev)
{
drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
u32 flag;
u32 value;

if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) {
flag = RADEON_READ(R500_DxMODE_INT_MASK);
value = 0;
if (flag & R500_D1MODE_INT_MASK)
value |= DRM_RADEON_VBLANK_CRTC1;

if (flag & R500_D2MODE_INT_MASK)
value |= DRM_RADEON_VBLANK_CRTC2;
} else {
flag = RADEON_READ(RADEON_GEN_INT_CNTL);
value = 0;
if (flag & RADEON_CRTC_VBLANK_MASK)
value |= DRM_RADEON_VBLANK_CRTC1;

if (flag & RADEON_CRTC2_VBLANK_MASK)
value |= DRM_RADEON_VBLANK_CRTC2;
}
return value;
return dev_priv->vblank_crtc;
}

int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value)

@@ -88,7 +88,7 @@ static struct mem_block *alloc_block(struct mem_block *heap, int size,

list_for_each(p, heap) {
int start = (p->start + mask) & ~mask;
if (p->file_priv == 0 && start + size <= p->start + p->size)
if (p->file_priv == NULL && start + size <= p->start + p->size)
return split_block(p, start, size, file_priv);
}

@@ -113,7 +113,7 @@ static void free_block(struct mem_block *p)
/* Assumes a single contiguous range. Needs a special file_priv in
* 'heap' to stop it being subsumed.
*/
if (p->next->file_priv == 0) {
if (p->next->file_priv == NULL) {
struct mem_block *q = p->next;
p->size += q->size;
p->next = q->next;

@@ -121,7 +121,7 @@ static void free_block(struct mem_block *p)
drm_free(q, sizeof(*q), DRM_MEM_BUFS);
}

if (p->prev->file_priv == 0) {
if (p->prev->file_priv == NULL) {
struct mem_block *q = p->prev;
q->size += p->size;
q->next = p->next;

@@ -174,7 +174,7 @@ void radeon_mem_release(struct drm_file *file_priv, struct mem_block *heap)
* 'heap' to stop it being subsumed.
*/
list_for_each(p, heap) {
while (p->file_priv == 0 && p->next->file_priv == 0) {
while (p->file_priv == NULL && p->next->file_priv == NULL) {
struct mem_block *q = p->next;
p->size += q->size;
p->next = q->next;

@@ -305,8 +305,9 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
case RADEON_CP_3D_DRAW_INDX_2:
case RADEON_3D_CLEAR_HIZ:
/* safe but r200 only */
if (dev_priv->microcode_version != UCODE_R200) {
DRM_ERROR("Invalid 3d packet for r100-class chip\n");
if ((dev_priv->chip_family < CHIP_R200) ||
(dev_priv->chip_family > CHIP_RV280)) {
DRM_ERROR("Invalid 3d packet for non r200-class chip\n");
return -EINVAL;
}
break;

@@ -359,8 +360,8 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
break;

case RADEON_3D_RNDR_GEN_INDX_PRIM:
if (dev_priv->microcode_version != UCODE_R100) {
DRM_ERROR("Invalid 3d packet for r200-class chip\n");
if (dev_priv->chip_family > CHIP_RS200) {
DRM_ERROR("Invalid 3d packet for non-r100-class chip\n");
return -EINVAL;
}
if (radeon_check_and_fixup_offset(dev_priv, file_priv, &cmd[1])) {

@@ -370,8 +371,10 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
break;

case RADEON_CP_INDX_BUFFER:
if (dev_priv->microcode_version != UCODE_R200) {
DRM_ERROR("Invalid 3d packet for r100-class chip\n");
/* safe but r200 only */
if ((dev_priv->chip_family < CHIP_R200) ||
(dev_priv->chip_family > CHIP_RV280)) {
DRM_ERROR("Invalid 3d packet for non-r200-class chip\n");
return -EINVAL;
}
if ((cmd[1] & 0x8000ffff) != 0x80000810) {
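These hunks replace the old microcode_version checks with explicit chip_family ranges: r100-class is anything up to CHIP_RS200, r200-class is CHIP_R200 through CHIP_RV280. A minimal sketch of those predicates follows, using an illustrative subset of the radeon_family enum (only the relative ordering matters); the helper names are hypothetical and not part of the driver.

#include <stdio.h>

/* Illustrative subset of the radeon_family ordering, not the full enum. */
enum { CHIP_R100, CHIP_RS200, CHIP_R200, CHIP_RV280, CHIP_R300 };

static int is_r100_class(int family) { return family <= CHIP_RS200; }
static int is_r200_class(int family) { return family >= CHIP_R200 && family <= CHIP_RV280; }

int main(void)
{
	printf("R300 r200-class? %d\n", is_r200_class(CHIP_R300));   /* 0 */
	printf("RV280 r200-class? %d\n", is_r200_class(CHIP_RV280)); /* 1 */
	printf("R100 r100-class? %d\n", is_r100_class(CHIP_R100));   /* 1 */
	return 0;
}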
@@ -1015,7 +1018,7 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev,
int tileoffset, nrtilesx, nrtilesy, j;
/* it looks like r200 needs rv-style clears, at least if hierz is not enabled? */
if ((dev_priv->flags & RADEON_HAS_HIERZ)
&& !(dev_priv->microcode_version == UCODE_R200)) {
&& (dev_priv->chip_family < CHIP_R200)) {
/* FIXME : figure this out for r200 (when hierz is enabled). Or
maybe r200 actually doesn't need to put the low-res z value into
the tile cache like r100, but just needs to clear the hi-level z-buffer?

@@ -1044,7 +1047,8 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev,
ADVANCE_RING();
tileoffset += depthpixperline >> 6;
}
} else if (dev_priv->microcode_version == UCODE_R200) {
} else if ((dev_priv->chip_family >= CHIP_R200) &&
(dev_priv->chip_family <= CHIP_RV280)) {
/* works for rv250. */
/* find first macro tile (8x2 4x4 z-pixels on rv250) */
tileoffset =

@@ -1099,7 +1103,8 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev,

/* TODO don't always clear all hi-level z tiles */
if ((dev_priv->flags & RADEON_HAS_HIERZ)
&& (dev_priv->microcode_version == UCODE_R200)
&& ((dev_priv->chip_family >= CHIP_R200) &&
(dev_priv->chip_family <= CHIP_RV280))
&& (flags & RADEON_USE_HIERZ))
/* r100 and cards without hierarchical z-buffer have no high-level z-buffer */
/* FIXME : the mask supposedly contains low-res z values. So can't set

@@ -1119,8 +1124,9 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev,
* rendering a quad into just those buffers. Thus, we have to
* make sure the 3D engine is configured correctly.
*/
else if ((dev_priv->microcode_version == UCODE_R200) &&
(flags & (RADEON_DEPTH | RADEON_STENCIL))) {
else if ((dev_priv->chip_family >= CHIP_R200) &&
(dev_priv->chip_family <= CHIP_RV280) &&
(flags & (RADEON_DEPTH | RADEON_STENCIL))) {

int tempPP_CNTL;
int tempRE_CNTL;

@@ -2889,7 +2895,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file

orig_nbox = cmdbuf->nbox;

if (dev_priv->microcode_version == UCODE_R300) {
if (dev_priv->chip_family >= CHIP_R300) {
int temp;
temp = r300_do_cp_cmdbuf(dev, file_priv, cmdbuf);

@@ -123,11 +123,15 @@ struct xgi_state_info {
#define DRM_XGI_FREE 2
#define DRM_XGI_SUBMIT_CMDLIST 3
#define DRM_XGI_STATE_CHANGE 4
#define DRM_XGI_SET_FENCE 5
#define DRM_XGI_WAIT_FENCE 6

#define XGI_IOCTL_BOOTSTRAP DRM_IOWR(DRM_COMMAND_BASE + DRM_XGI_BOOTSTRAP, struct xgi_bootstrap)
#define XGI_IOCTL_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_XGI_ALLOC, struct xgi_mem_alloc)
#define XGI_IOCTL_FREE DRM_IOW(DRM_COMMAND_BASE + DRM_XGI_FREE, __u32)
#define XGI_IOCTL_SUBMIT_CMDLIST DRM_IOW(DRM_COMMAND_BASE + DRM_XGI_SUBMIT_CMDLIST, struct xgi_cmd_info)
#define XGI_IOCTL_STATE_CHANGE DRM_IOW(DRM_COMMAND_BASE + DRM_XGI_STATE_CHANGE, struct xgi_state_info)
#define XGI_IOCTL_SET_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_XGI_SET_FENCE, u32)
#define XGI_IOCTL_WAIT_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_XGI_WAIT_FENCE, u32)

#endif /* _XGI_DRM_H_ */
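The xgi_drm.h hunk adds two fence ioctls, DRM_XGI_SET_FENCE and DRM_XGI_WAIT_FENCE, both DRM_IOWR on a 32-bit value. Below is a hedged user-space sketch of driving them through libdrm's generic command helpers; the "xgi" driver name and the exact fence semantics are assumptions, not taken from this diff.

/* Hedged sketch: exercising the new XGI fence ioctls from user space. */
#include <stdint.h>
#include <stdio.h>
#include <xf86drm.h>

#define DRM_XGI_SET_FENCE  5
#define DRM_XGI_WAIT_FENCE 6

int main(void)
{
	int fd = drmOpen("xgi", NULL);   /* assumed driver name */
	uint32_t fence = 0;

	if (fd < 0)
		return 1;
	/* Both ioctls are DRM_IOWR on a 32-bit value, so the helper round-trips it. */
	if (drmCommandWriteRead(fd, DRM_XGI_SET_FENCE, &fence, sizeof(fence)) == 0 &&
	    drmCommandWriteRead(fd, DRM_XGI_WAIT_FENCE, &fence, sizeof(fence)) == 0)
		printf("fence: %u\n", fence);
	drmClose(fd);
	return 0;
}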