commit 7f269bec7e

Merge branch 'master' into modesetting-101

Conflicts:
	linux-core/Makefile.kernel
	linux-core/drm_compat.c
	linux-core/drm_fops.c
	linux-core/drm_lock.c
	shared-core/drm.h
	shared-core/i915_dma.c
	shared-core/i915_drv.h
	shared-core/i915_irq.c
@@ -57,7 +57,7 @@

 #include "xf86drm.h"

-#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
+#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
 #define DRM_MAJOR 145
 #endif

@@ -340,6 +340,11 @@ ifneq (,$(findstring i915,$(DRM_MODULES)))
 CONFIG_DRM_I915 := m
 endif

+GIT_REVISION := $(shell cd "$(DRMSRCDIR)" && git-describe --abbrev=17)
+ifneq ($(GIT_REVISION),)
+EXTRA_CFLAGS+=-D"GIT_REVISION=\"$(GIT_REVISION)\""
+endif
+
 include $(DRMSRCDIR)/Makefile.kernel

 # Depencencies
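The hunk above embeds the output of git-describe into EXTRA_CFLAGS as a GIT_REVISION string macro, so the built module can report which tree it came from. A minimal sketch of how such a -D"GIT_REVISION=\"...\"" define is typically consumed — the consumer function here is illustrative, not part of this patch:

#include <stdio.h>

#ifndef GIT_REVISION
#define GIT_REVISION "unknown"	/* fallback when git-describe produced nothing */
#endif

int main(void)
{
	/* a driver would log this from its init path instead */
	printf("drm built from git revision %s\n", GIT_REVISION);
	return 0;
}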
@@ -20,13 +20,14 @@ r128-objs := r128_drv.o r128_cce.o r128_state.o r128_irq.o
 mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
 i810-objs := i810_drv.o i810_dma.o
 i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \
-		i915_buffer.o intel_display.o intel_crt.o intel_lvds.o \
+		i915_buffer.o i915_execbuf.o \
+		intel_display.o intel_crt.o intel_lvds.o \
 		intel_sdvo.o intel_modes.o intel_i2c.o i915_init.o intel_fb.o \
 		intel_tv.o i915_compat.o intel_dvo.o dvo_ch7xxx.o \
 		dvo_ch7017.o dvo_ivch.o dvo_tfp410.o dvo_sil164.o
 nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \
 		nouveau_object.o nouveau_irq.o nouveau_notifier.o nouveau_swmthd.o \
-		nouveau_sgdma.o nouveau_dma.o nouveau_buffer.o nouveau_fence.o \
+		nouveau_sgdma.o nouveau_dma.o nouveau_bo.o nouveau_fence.o \
 		nv04_timer.o \
 		nv04_mc.o nv40_mc.o nv50_mc.o \
 		nv04_fb.o nv10_fb.o nv40_fb.o \
@@ -34,51 +34,23 @@

 #include "drmP.h"

 # define ATI_PCIGART_PAGE_SIZE		4096	/**< PCI GART page size */

-static void *drm_ati_alloc_pcigart_table(int order)
+static int drm_ati_alloc_pcigart_table(struct drm_device *dev,
+				       struct drm_ati_pcigart_info *gart_info)
 {
-	unsigned long address;
-	struct page *page;
-	int i;
+	gart_info->table_handle = drm_pci_alloc(dev, gart_info->table_size,
+						PAGE_SIZE,
+						gart_info->table_mask);
+	if (gart_info->table_handle == NULL)
+		return -ENOMEM;

-	DRM_DEBUG("%d order\n", order);
-
-	address = __get_free_pages(GFP_KERNEL | __GFP_COMP,
-				   order);
-	if (address == 0UL) {
-		return NULL;
-	}
-
-	page = virt_to_page(address);
-
-	for (i = 0; i < order; i++, page++) {
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
-		get_page(page);
-#endif
-		SetPageReserved(page);
-	}
-
-	DRM_DEBUG("returning 0x%08lx\n", address);
-	return (void *)address;
+	return 0;
 }

-static void drm_ati_free_pcigart_table(void *address, int order)
+static void drm_ati_free_pcigart_table(struct drm_device *dev,
+				       struct drm_ati_pcigart_info *gart_info)
 {
-	struct page *page;
-	int i;
-	int num_pages = 1 << order;
 	DRM_DEBUG("\n");

-	page = virt_to_page((unsigned long)address);
-
-	for (i = 0; i < num_pages; i++, page++) {
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
-		__put_page(page);
-#endif
-		ClearPageReserved(page);
-	}
-
-	free_pages((unsigned long)address, order);
+	drm_pci_free(dev, gart_info->table_handle);
+	gart_info->table_handle = NULL;
 }

 int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)

@@ -86,8 +58,7 @@ int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info
 	struct drm_sg_mem *entry = dev->sg;
 	unsigned long pages;
 	int i;
-	int order;
-	int num_pages, max_pages;
+	int max_pages;

 	/* we need to support large memory configurations */
 	if (!entry) {

@@ -95,15 +66,7 @@ int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info
 		return 0;
 	}

-	order = drm_order((gart_info->table_size + (PAGE_SIZE-1)) / PAGE_SIZE);
-	num_pages = 1 << order;
-
 	if (gart_info->bus_addr) {
-		if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
-			pci_unmap_single(dev->pdev, gart_info->bus_addr,
-					 num_pages * PAGE_SIZE,
-					 PCI_DMA_TODEVICE);
-		}

 		max_pages = (gart_info->table_size / sizeof(u32));
 		pages = (entry->pages <= max_pages)

@@ -122,10 +85,9 @@ int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info

 	if (gart_info->gart_table_location == DRM_ATI_GART_MAIN
-	    && gart_info->addr) {
+	    && gart_info->table_handle) {

-		drm_ati_free_pcigart_table(gart_info->addr, order);
-		gart_info->addr = NULL;
+		drm_ati_free_pcigart_table(dev, gart_info);
 	}

 	return 1;

@@ -137,11 +99,10 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
 	struct drm_sg_mem *entry = dev->sg;
 	void *address = NULL;
 	unsigned long pages;
-	u32 *pci_gart, page_base, bus_address = 0;
+	u32 *pci_gart, page_base;
+	dma_addr_t bus_address = 0;
 	int i, j, ret = 0;
-	int order;
 	int max_pages;
-	int num_pages;

 	if (!entry) {
 		DRM_ERROR("no scatter/gather memory!\n");

@@ -151,31 +112,14 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
 	if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
 		DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n");

-		order = drm_order((gart_info->table_size +
-				   (PAGE_SIZE-1)) / PAGE_SIZE);
-		num_pages = 1 << order;
-		address = drm_ati_alloc_pcigart_table(order);
-		if (!address) {
+		ret = drm_ati_alloc_pcigart_table(dev, gart_info);
+		if (ret) {
 			DRM_ERROR("cannot allocate PCI GART page!\n");
 			goto done;
 		}

-		if (!dev->pdev) {
-			DRM_ERROR("PCI device unknown!\n");
-			goto done;
-		}
-
-		bus_address = pci_map_single(dev->pdev, address,
-					     num_pages * PAGE_SIZE,
-					     PCI_DMA_TODEVICE);
-		if (bus_address == 0) {
-			DRM_ERROR("unable to map PCIGART pages!\n");
-			order = drm_order((gart_info->table_size +
-					   (PAGE_SIZE-1)) / PAGE_SIZE);
-			drm_ati_free_pcigart_table(address, order);
-			address = NULL;
-			goto done;
-		}
+		address = gart_info->table_handle->vaddr;
+		bus_address = gart_info->table_handle->busaddr;
 	} else {
 		address = gart_info->addr;
 		bus_address = gart_info->bus_addr;

@@ -224,12 +168,6 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
 	}
 	}

-	if (gart_info->gart_table_location == DRM_ATI_GART_MAIN)
-		dma_sync_single_for_device(&dev->pdev->dev,
-					   bus_address,
-					   max_pages * sizeof(u32),
-					   PCI_DMA_TODEVICE);
-
 	ret = 1;

 #if defined(__i386__) || defined(__x86_64__)
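The rewrite above drops the hand-rolled GART table allocator — __get_free_pages() plus per-page SetPageReserved() and a later pci_map_single() — in favour of drm_pci_alloc(), whose returned handle carries both the kernel virtual address (vaddr) and the device bus address (busaddr), so the map/unmap and page-reservation dance disappears. A minimal sketch of the same pairing using the generic DMA API rather than the DRM wrapper; the names here are illustrative, not the driver's:

#include <linux/dma-mapping.h>

struct gart_table {
	void *vaddr;		/* CPU view of the table */
	dma_addr_t busaddr;	/* device view */
	size_t size;
};

static int gart_table_alloc(struct device *dev, struct gart_table *t)
{
	/* coherent memory: no separate mapping step or page reservation */
	t->vaddr = dma_alloc_coherent(dev, t->size, &t->busaddr, GFP_KERNEL);
	return t->vaddr ? 0 : -ENOMEM;
}

static void gart_table_free(struct device *dev, struct gart_table *t)
{
	dma_free_coherent(dev, t->size, t->vaddr, t->busaddr);
	t->vaddr = NULL;
}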
@@ -52,6 +52,7 @@
 #include <linux/version.h>
 #include <linux/sched.h>
 #include <linux/smp_lock.h>	/* For (un)lock_kernel */
+#include <linux/dma-mapping.h>
 #include <linux/mm.h>
 #include <linux/pagemap.h>
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)

@@ -610,6 +611,9 @@ struct drm_ati_pcigart_info {
 	int gart_reg_if;
 	void *addr;
 	dma_addr_t bus_addr;
+	dma_addr_t table_mask;
+	dma_addr_t member_mask;
+	struct drm_dma_handle *table_handle;
 	drm_local_map_t mapping;
 	int table_size;
 };
@@ -273,32 +273,83 @@ out_err:
 	return ret;
 }

+/*
+ * Call bo->mutex locked.
+ * Returns -EBUSY if the buffer is currently rendered to or from. 0 otherwise.
+ */
+
+static int drm_bo_busy(struct drm_buffer_object *bo, int check_unfenced)
+{
+	struct drm_fence_object *fence = bo->fence;
+
+	if (check_unfenced && (bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
+		return -EBUSY;
+
+	if (fence) {
+		if (drm_fence_object_signaled(fence, bo->fence_type)) {
+			drm_fence_usage_deref_unlocked(&bo->fence);
+			return 0;
+		}
+		drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE);
+		if (drm_fence_object_signaled(fence, bo->fence_type)) {
+			drm_fence_usage_deref_unlocked(&bo->fence);
+			return 0;
+		}
+		return -EBUSY;
+	}
+	return 0;
+}
+
+static int drm_bo_check_unfenced(struct drm_buffer_object *bo)
+{
+	int ret;
+
+	mutex_lock(&bo->mutex);
+	ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
+	mutex_unlock(&bo->mutex);
+	return ret;
+}
+
+
 /*
  * Call bo->mutex locked.
  * Wait until the buffer is idle.
  */

-int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int ignore_signals,
-		int no_wait)
+int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int interruptible,
+		int no_wait, int check_unfenced)
 {
 	int ret;

 	DRM_ASSERT_LOCKED(&bo->mutex);
-
-	if (bo->fence) {
-		if (drm_fence_object_signaled(bo->fence, bo->fence_type)) {
-			drm_fence_usage_deref_unlocked(&bo->fence);
-			return 0;
-		}
+	while(unlikely(drm_bo_busy(bo, check_unfenced))) {
 		if (no_wait)
 			return -EBUSY;

-		ret = drm_fence_object_wait(bo->fence, lazy, ignore_signals,
-					    bo->fence_type);
-		if (ret)
-			return ret;
+		if (check_unfenced && (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)) {
+			mutex_unlock(&bo->mutex);
+			wait_event(bo->event_queue, !drm_bo_check_unfenced(bo));
+			mutex_lock(&bo->mutex);
+			bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED;
+		}

-		drm_fence_usage_deref_unlocked(&bo->fence);
+		if (bo->fence) {
+			struct drm_fence_object *fence;
+			uint32_t fence_type = bo->fence_type;
+
+			drm_fence_reference_unlocked(&fence, bo->fence);
+			mutex_unlock(&bo->mutex);
+
+			ret = drm_fence_object_wait(fence, lazy, !interruptible,
+						    fence_type);
+
+			drm_fence_usage_deref_unlocked(&fence);
+			mutex_lock(&bo->mutex);
+			bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED;
+			if (ret)
+				return ret;
+		}
 	}
 	return 0;
 }
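The rewritten drm_bo_wait() above may now drop bo->mutex, either in wait_event() on the unfenced list or while waiting on a referenced fence, and records that fact by setting _DRM_BO_FLAG_UNLOCKED. Every caller converted later in this merge (drm_bo_evict, drm_buffer_object_map, drm_bo_do_validate) therefore adopts the same retry idiom: clear the flag, run the pre-checks, wait, and start over if the mutex was released. A condensed sketch of that idiom, with check_bo_state() standing in for the caller-specific checks:

static int bo_wait_and_revalidate(struct drm_buffer_object *bo, int no_wait)
{
	int ret;

	do {
		bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;

		ret = check_bo_state(bo);	/* hypothetical per-caller checks */
		if (ret)
			return ret;

		/* may release bo->mutex and set _DRM_BO_FLAG_UNLOCKED */
		ret = drm_bo_wait(bo, 0, 1, no_wait, 0);
		if (ret)
			return ret;
	} while (unlikely(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED));

	return 0;
}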
@@ -314,7 +365,7 @@ static int drm_bo_expire_fence(struct drm_buffer_object *bo, int allow_errors)
 	unsigned long _end = jiffies + 3 * DRM_HZ;
 	int ret;
 	do {
-		ret = drm_bo_wait(bo, 0, 1, 0);
+		ret = drm_bo_wait(bo, 0, 0, 0, 0);
 		if (ret && allow_errors)
 			return ret;

@@ -689,24 +740,32 @@ static int drm_bo_evict(struct drm_buffer_object *bo, unsigned mem_type,
 	 * buffer mutex.
 	 */

-	if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
-		goto out;
-	if (bo->mem.mem_type != mem_type)
-		goto out;
+	do {
+		bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;

-	ret = drm_bo_wait(bo, 0, 0, no_wait);
+		if (unlikely(bo->mem.flags &
+			     (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT)))
+			goto out_unlock;
+		if (unlikely(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
+			goto out_unlock;
+		if (unlikely(bo->mem.mem_type != mem_type))
+			goto out_unlock;
+		ret = drm_bo_wait(bo, 0, 1, no_wait, 0);
+		if (ret)
+			goto out_unlock;

-	if (ret && ret != -EAGAIN) {
-		DRM_ERROR("Failed to expire fence before "
-			  "buffer eviction.\n");
-		goto out;
-	}
+	} while(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);

 	evict_mem = bo->mem;
 	evict_mem.mm_node = NULL;

 	evict_mem = bo->mem;
 	evict_mem.proposed_flags = dev->driver->bo_driver->evict_flags(bo);

 	mutex_lock(&dev->struct_mutex);
 	list_del_init(&bo->lru);
 	mutex_unlock(&dev->struct_mutex);

 	ret = drm_bo_mem_space(bo, &evict_mem, no_wait);

 	if (ret) {

@@ -724,20 +783,21 @@ static int drm_bo_evict(struct drm_buffer_object *bo, unsigned mem_type,
 		goto out;
 	}

-	DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
-			_DRM_BO_FLAG_EVICTED);
-
-out:
 	mutex_lock(&dev->struct_mutex);
 	if (evict_mem.mm_node) {
 		if (evict_mem.mm_node != bo->pinned_node)
 			drm_mm_put_block(evict_mem.mm_node);
 		evict_mem.mm_node = NULL;
 	}
 	list_del(&bo->lru);
 	drm_bo_add_to_lru(bo);
+	BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
+out_unlock:
 	mutex_unlock(&dev->struct_mutex);

+	DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
+			_DRM_BO_FLAG_EVICTED);
+
+out:
 	return ret;
 }

@@ -772,8 +832,6 @@ static int drm_bo_mem_force_space(struct drm_device *dev,
 	atomic_inc(&entry->usage);
 	mutex_unlock(&dev->struct_mutex);
 	mutex_lock(&entry->mutex);
-	BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT));
-
 	ret = drm_bo_evict(entry, mem_type, no_wait);
 	mutex_unlock(&entry->mutex);
 	drm_bo_usage_deref_unlocked(&entry);
@@ -1039,46 +1097,23 @@ EXPORT_SYMBOL(drm_lookup_buffer_object);

 /*
  * Call bo->mutex locked.
- * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
+ * Returns -EBUSY if the buffer is currently rendered to or from. 0 otherwise.
+ * Doesn't do any fence flushing as opposed to the drm_bo_busy function.
  */

-static int drm_bo_quick_busy(struct drm_buffer_object *bo)
+static int drm_bo_quick_busy(struct drm_buffer_object *bo, int check_unfenced)
 {
 	struct drm_fence_object *fence = bo->fence;

-	BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
+	if (check_unfenced && (bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
+		return -EBUSY;
+
 	if (fence) {
 		if (drm_fence_object_signaled(fence, bo->fence_type)) {
 			drm_fence_usage_deref_unlocked(&bo->fence);
 			return 0;
 		}
-		return 1;
-	}
-	return 0;
-}
-
-/*
- * Call bo->mutex locked.
- * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
- */
-
-static int drm_bo_busy(struct drm_buffer_object *bo)
-{
-	struct drm_fence_object *fence = bo->fence;
-
-	BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
-	if (fence) {
-		if (drm_fence_object_signaled(fence, bo->fence_type)) {
-			drm_fence_usage_deref_unlocked(&bo->fence);
-			return 0;
-		}
-		drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE);
-		if (drm_fence_object_signaled(fence, bo->fence_type)) {
-			drm_fence_usage_deref_unlocked(&bo->fence);
-			return 0;
-		}
-		return 1;
+		return -EBUSY;
 	}
 	return 0;
 }
@@ -1102,59 +1137,24 @@ static int drm_bo_wait_unmapped(struct drm_buffer_object *bo, int no_wait)
 {
 	int ret = 0;

-	if ((atomic_read(&bo->mapped) >= 0) && no_wait)
-		return -EBUSY;
-
-	DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
-		    atomic_read(&bo->mapped) == -1);
-
-	if (ret == -EINTR)
-		ret = -EAGAIN;
-
-	return ret;
-}
-
-static int drm_bo_check_unfenced(struct drm_buffer_object *bo)
-{
-	int ret;
-
-	mutex_lock(&bo->mutex);
-	ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
-	mutex_unlock(&bo->mutex);
-	return ret;
-}
-
-/*
- * Wait until a buffer, scheduled to be fenced moves off the unfenced list.
- * Until then, we cannot really do anything with it except delete it.
- */
-
-static int drm_bo_wait_unfenced(struct drm_buffer_object *bo, int no_wait,
-				int eagain_if_wait)
-{
-	int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
-
-	if (ret && no_wait)
-		return -EBUSY;
-	else if (!ret)
+	if (likely(atomic_read(&bo->mapped)) == 0)
 		return 0;

-	ret = 0;
-	mutex_unlock(&bo->mutex);
-	DRM_WAIT_ON (ret, bo->event_queue, 3 * DRM_HZ,
-		    !drm_bo_check_unfenced(bo));
-	mutex_lock(&bo->mutex);
-	if (ret == -EINTR)
-		return -EAGAIN;
-	ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
-	if (ret) {
-		DRM_ERROR("Timeout waiting for buffer to become fenced\n");
+	if (unlikely(no_wait))
 		return -EBUSY;
-	}
-	if (eagain_if_wait)
-		return -EAGAIN;

-	return 0;
+	do {
+		mutex_unlock(&bo->mutex);
+		ret = wait_event_interruptible(bo->event_queue,
+					       atomic_read(&bo->mapped) == 0);
+		mutex_lock(&bo->mutex);
+		bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED;
+
+		if (ret == -ERESTARTSYS)
+			ret = -EAGAIN;
+	} while((ret == 0) && atomic_read(&bo->mapped) > 0);
+
+	return ret;
 }

 /*
@@ -1162,8 +1162,8 @@ static int drm_bo_wait_unfenced(struct drm_buffer_object *bo, int no_wait,
  * Bo locked.
  */

-static void drm_bo_fill_rep_arg(struct drm_buffer_object *bo,
-				struct drm_bo_info_rep *rep)
+void drm_bo_fill_rep_arg(struct drm_buffer_object *bo,
+			 struct drm_bo_info_rep *rep)
 {
 	if (!rep)
 		return;
@@ -1189,11 +1189,12 @@ static void drm_bo_fill_rep_arg(struct drm_buffer_object *bo,
 	rep->rep_flags = 0;
 	rep->page_alignment = bo->mem.page_alignment;

-	if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) {
+	if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo, 1)) {
 		DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
 				DRM_BO_REP_BUSY);
 	}
 }
+EXPORT_SYMBOL(drm_bo_fill_rep_arg);

 /*
  * Wait for buffer idle and register that we've mapped the buffer.
@@ -1219,61 +1220,33 @@ static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle,
 		return -EINVAL;

 	mutex_lock(&bo->mutex);
-	ret = drm_bo_wait_unfenced(bo, no_wait, 0);
-	if (ret)
-		goto out;
+	do {
+		bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;

-	/*
-	 * If this returns true, we are currently unmapped.
-	 * We need to do this test, because unmapping can
-	 * be done without the bo->mutex held.
-	 */
+		ret = drm_bo_wait(bo, 0, 1, no_wait, 1);
+		if (unlikely(ret))
+			goto out;

-	while (1) {
-		if (atomic_inc_and_test(&bo->mapped)) {
-			if (no_wait && drm_bo_busy(bo)) {
-				atomic_dec(&bo->mapped);
-				ret = -EBUSY;
-				goto out;
-			}
-			ret = drm_bo_wait(bo, 0, 0, no_wait);
-			if (ret) {
-				atomic_dec(&bo->mapped);
-				goto out;
-			}
+		if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED)
+			drm_bo_evict_cached(bo);

-			if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED)
-				drm_bo_evict_cached(bo);
-
-			break;
-		} else if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED) {
-
-			/*
-			 * We are already mapped with different flags.
-			 * need to wait for unmap.
-			 */
-
-			ret = drm_bo_wait_unmapped(bo, no_wait);
-			if (ret)
-				goto out;
-
-			continue;
-		}
-		break;
-	}
+	} while (unlikely(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED));

+	atomic_inc(&bo->mapped);
 	mutex_lock(&dev->struct_mutex);
 	ret = drm_add_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
 	mutex_unlock(&dev->struct_mutex);
 	if (ret) {
-		if (atomic_add_negative(-1, &bo->mapped))
+		if (atomic_dec_and_test(&bo->mapped))
 			wake_up_all(&bo->event_queue);

 	} else
 		drm_bo_fill_rep_arg(bo, rep);
-out:

+out:
 	mutex_unlock(&bo->mutex);
 	drm_bo_usage_deref_unlocked(&bo);

 	return ret;
 }
@@ -1323,7 +1296,7 @@ static void drm_buffer_user_object_unmap(struct drm_file *file_priv,

 	BUG_ON(action != _DRM_REF_TYPE1);

-	if (atomic_add_negative(-1, &bo->mapped))
+	if (atomic_dec_and_test(&bo->mapped))
 		wake_up_all(&bo->event_queue);
 }

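The map/unmap hunks change the base of the bo->mapped counter: it used to idle at -1 (so atomic_inc_and_test() fired for the first mapper and atomic_add_negative(-1, ...) for the last unmapper), and it now idles at 0 like an ordinary reference count, as the later atomic_set(&bo->mapped, 0) hunk confirms. A small illustration of the new convention, with illustrative names rather than the driver's:

static atomic_t mapped = ATOMIC_INIT(0);	/* previously ATOMIC_INIT(-1) */

static void map_one(void)
{
	atomic_inc(&mapped);			/* 0 -> 1 on the first mapping */
}

static void unmap_one(wait_queue_head_t *event_queue)
{
	if (atomic_dec_and_test(&mapped))	/* 1 -> 0: buffer idle again */
		wake_up_all(event_queue);
}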
@@ -1339,19 +1312,8 @@ int drm_bo_move_buffer(struct drm_buffer_object *bo, uint64_t new_mem_flags,
 	struct drm_buffer_manager *bm = &dev->bm;
 	int ret = 0;
 	struct drm_bo_mem_reg mem;
-	/*
-	 * Flush outstanding fences.
-	 */

-	drm_bo_busy(bo);
-
-	/*
-	 * Wait for outstanding fences.
-	 */
-
-	ret = drm_bo_wait(bo, 0, 0, no_wait);
-	if (ret)
-		return ret;
+	BUG_ON(bo->fence != NULL);

 	mem.num_pages = bo->num_pages;
 	mem.size = mem.num_pages << PAGE_SHIFT;
@@ -1437,64 +1399,14 @@ static int drm_bo_mem_compat(struct drm_bo_mem_reg *mem)

 static int drm_buffer_object_validate(struct drm_buffer_object *bo,
 				      uint32_t fence_class,
-				      int move_unfenced, int no_wait)
+				      int move_unfenced, int no_wait,
+				      int move_buffer)
 {
 	struct drm_device *dev = bo->dev;
 	struct drm_buffer_manager *bm = &dev->bm;
-	struct drm_bo_driver *driver = dev->driver->bo_driver;
-	uint32_t ftype;
 	int ret;

-	DRM_DEBUG("Proposed flags 0x%016llx, Old flags 0x%016llx\n",
-		  (unsigned long long) bo->mem.proposed_flags,
-		  (unsigned long long) bo->mem.flags);
-
-	ret = driver->fence_type(bo, &fence_class, &ftype);
-
-	if (ret) {
-		DRM_ERROR("Driver did not support given buffer permissions\n");
-		return ret;
-	}
-
-	/*
-	 * We're switching command submission mechanism,
-	 * or cannot simply rely on the hardware serializing for us.
-	 *
-	 * Insert a driver-dependant barrier or wait for buffer idle.
-	 */
-
-	if ((fence_class != bo->fence_class) ||
-	    ((ftype ^ bo->fence_type) & bo->fence_type)) {
-
-		ret = -EINVAL;
-		if (driver->command_stream_barrier) {
-			ret = driver->command_stream_barrier(bo,
-							     fence_class,
-							     ftype,
-							     no_wait);
-		}
-		if (ret)
-			ret = drm_bo_wait(bo, 0, 0, no_wait);
-
-		if (ret)
-			return ret;
-
-	}
-
-	bo->new_fence_class = fence_class;
-	bo->new_fence_type = ftype;
-
-	ret = drm_bo_wait_unmapped(bo, no_wait);
-	if (ret) {
-		DRM_ERROR("Timed out waiting for buffer unmap.\n");
-		return ret;
-	}
-
-	/*
-	 * Check whether we need to move buffer.
-	 */
-
-	if (!drm_bo_mem_compat(&bo->mem)) {
+	if (move_buffer) {
 		ret = drm_bo_move_buffer(bo, bo->mem.proposed_flags, no_wait,
 					 move_unfenced);
 		if (ret) {
@@ -1578,6 +1490,83 @@ static int drm_buffer_object_validate(struct drm_buffer_object *bo,
 	return 0;
 }

+/*
+ * This function is called with bo->mutex locked, but may release it
+ * temporarily to wait for events.
+ */
+
+static int drm_bo_prepare_for_validate(struct drm_buffer_object *bo,
+				       uint64_t flags,
+				       uint64_t mask,
+				       uint32_t hint,
+				       uint32_t fence_class,
+				       int no_wait,
+				       int *move_buffer)
+{
+	struct drm_device *dev = bo->dev;
+	struct drm_bo_driver *driver = dev->driver->bo_driver;
+	uint32_t ftype;
+
+	int ret;
+
+	DRM_DEBUG("Proposed flags 0x%016llx, Old flags 0x%016llx\n",
+		  (unsigned long long) bo->mem.proposed_flags,
+		  (unsigned long long) bo->mem.flags);
+
+	ret = drm_bo_modify_proposed_flags (bo, flags, mask);
+	if (ret)
+		return ret;
+
+	ret = drm_bo_wait_unmapped(bo, no_wait);
+	if (ret)
+		return ret;
+
+	ret = driver->fence_type(bo, &fence_class, &ftype);
+
+	if (ret) {
+		DRM_ERROR("Driver did not support given buffer permissions.\n");
+		return ret;
+	}
+
+	/*
+	 * We're switching command submission mechanism,
+	 * or cannot simply rely on the hardware serializing for us.
+	 * Insert a driver-dependant barrier or wait for buffer idle.
+	 */
+
+	if ((fence_class != bo->fence_class) ||
+	    ((ftype ^ bo->fence_type) & bo->fence_type)) {
+
+		ret = -EINVAL;
+		if (driver->command_stream_barrier) {
+			ret = driver->command_stream_barrier(bo,
+							     fence_class,
+							     ftype,
+							     no_wait);
+		}
+		if (ret && ret != -EAGAIN)
+			ret = drm_bo_wait(bo, 0, 1, no_wait, 1);
+
+		if (ret)
+			return ret;
+	}
+
+	bo->new_fence_class = fence_class;
+	bo->new_fence_type = ftype;
+
+	/*
+	 * Check whether we need to move buffer.
+	 */
+
+	*move_buffer = 0;
+	if (!drm_bo_mem_compat(&bo->mem)) {
+		*move_buffer = 1;
+		ret = drm_bo_wait(bo, 0, 1, no_wait, 1);
+	}
+
+	return ret;
+}
+
 /**
  * drm_bo_do_validate:
 *
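Validation is thereby split into a preparation step that may sleep and temporarily release bo->mutex, and a commit step (drm_buffer_object_validate) that only performs the precomputed move. The next hunk wires this into drm_bo_do_validate(); condensed, and assuming the surrounding locking shown elsewhere in this patch, the control flow it installs is:

	do {
		bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
		ret = drm_bo_prepare_for_validate(bo, flags, mask, hint,
						  fence_class, no_wait,
						  &move_buffer);	/* may unlock */
		if (ret)
			goto out;
	} while (unlikely(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED));

	ret = drm_buffer_object_validate(bo, fence_class,
					 !(hint & DRM_BO_HINT_DONT_FENCE),
					 no_wait, move_buffer);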
@@ -1610,26 +1599,34 @@ int drm_bo_do_validate(struct drm_buffer_object *bo,
 {
 	int ret;
 	int no_wait = (hint & DRM_BO_HINT_DONT_BLOCK) != 0;
+	int move_buffer;

 	mutex_lock(&bo->mutex);
-	ret = drm_bo_wait_unfenced(bo, no_wait, 0);
+	do {
+		bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;

-	if (ret)
-		goto out;
-
-	ret = drm_bo_modify_proposed_flags (bo, flags, mask);
-	if (ret)
-		goto out;
+		ret = drm_bo_prepare_for_validate(bo, flags, mask, hint,
+						  fence_class, no_wait,
+						  &move_buffer);
+		if (ret)
+			goto out;
+
+	} while(unlikely(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED));

 	ret = drm_buffer_object_validate(bo,
 					 fence_class,
 					 !(hint & DRM_BO_HINT_DONT_FENCE),
-					 no_wait);
+					 no_wait,
+					 move_buffer);
+
+	BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
 out:
 	if (rep)
 		drm_bo_fill_rep_arg(bo, rep);

 	mutex_unlock(&bo->mutex);

 	return ret;
 }
 EXPORT_SYMBOL(drm_bo_do_validate);
@@ -1655,22 +1652,19 @@ EXPORT_SYMBOL(drm_bo_do_validate);
  * fencing mechanism. At this point, there isn't any use of this
  * from the user mode code.
  *
- * @use_old_fence_class: don't change fence class, pull it from the buffer object
- *
  * @rep: To be stuffed with the reply from validation
- *
  *
  * @bp_rep: To be stuffed with the buffer object pointer
 *
- * Perform drm_bo_do_validate on a buffer referenced by a user-space handle.
- * Some permissions checking is done on the parameters, otherwise this
- * is a thin wrapper.
+ * Perform drm_bo_do_validate on a buffer referenced by a user-space handle instead
+ * of a pointer to a buffer object. Optionally return a pointer to the buffer object.
+ * This is a convenience wrapper only.
  */

 int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
 			   uint64_t flags, uint64_t mask,
 			   uint32_t hint,
 			   uint32_t fence_class,
-			   int use_old_fence_class,
 			   struct drm_bo_info_rep *rep,
 			   struct drm_buffer_object **bo_rep)
 {
@@ -1685,17 +1679,9 @@ int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
 	if (!bo)
 		return -EINVAL;

-	if (use_old_fence_class)
-		fence_class = bo->fence_class;
-
 	/*
 	 * Only allow creator to change shared buffer mask.
 	 */

 	if (bo->base.owner != file_priv)
 		mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE);

-
 	ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class, rep);

 	if (!ret && bo_rep)
@@ -1707,6 +1693,7 @@ int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
 }
 EXPORT_SYMBOL(drm_bo_handle_validate);

+
 static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle,
 			      struct drm_bo_info_rep *rep)
 {
@@ -1721,8 +1708,12 @@ static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle,
 		return -EINVAL;

 	mutex_lock(&bo->mutex);
-	if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
-		(void)drm_bo_busy(bo);
+
+	/*
+	 * FIXME: Quick busy here?
+	 */
+
+	drm_bo_busy(bo, 1);
 	drm_bo_fill_rep_arg(bo, rep);
 	mutex_unlock(&bo->mutex);
 	drm_bo_usage_deref_unlocked(&bo);
@@ -1746,15 +1737,11 @@ static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle,
 		return -EINVAL;

 	mutex_lock(&bo->mutex);
-	ret = drm_bo_wait_unfenced(bo, no_wait, 0);
-	if (ret)
-		goto out;
-	ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 0, no_wait);
+	ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 1, no_wait, 1);
 	if (ret)
 		goto out;

 	drm_bo_fill_rep_arg(bo, rep);

 out:
 	mutex_unlock(&bo->mutex);
 	drm_bo_usage_deref_unlocked(&bo);
@@ -1791,7 +1778,7 @@ int drm_buffer_object_create(struct drm_device *dev,
 	mutex_lock(&bo->mutex);

 	atomic_set(&bo->usage, 1);
-	atomic_set(&bo->mapped, -1);
+	atomic_set(&bo->mapped, 0);
 	DRM_INIT_WAITQUEUE(&bo->event_queue);
 	INIT_LIST_HEAD(&bo->lru);
 	INIT_LIST_HEAD(&bo->pinned_lru);
@@ -1833,17 +1820,18 @@ int drm_buffer_object_create(struct drm_device *dev,
 		goto out_err;
 	}

-	ret = drm_buffer_object_validate(bo, 0, 0, hint & DRM_BO_HINT_DONT_BLOCK);
-	if (ret)
-		goto out_err;
-
 	mutex_unlock(&bo->mutex);
+	ret = drm_bo_do_validate(bo, 0, 0, hint | DRM_BO_HINT_DONT_FENCE,
+				 0, NULL);
+	if (ret)
+		goto out_err_unlocked;

 	*buf_obj = bo;
 	return 0;

 out_err:
 	mutex_unlock(&bo->mutex);
+
+out_err_unlocked:
 	drm_bo_usage_deref_unlocked(&bo);
 	return ret;
 }
@@ -1929,6 +1917,7 @@ int drm_bo_setstatus_ioctl(struct drm_device *dev,
 	struct drm_bo_map_wait_idle_arg *arg = data;
 	struct drm_bo_info_req *req = &arg->d.req;
 	struct drm_bo_info_rep *rep = &arg->d.rep;
+	struct drm_buffer_object *bo;
 	int ret;

 	if (!dev->bm.initialized) {
@@ -1936,28 +1925,29 @@ int drm_bo_setstatus_ioctl(struct drm_device *dev,
 		return -EINVAL;
 	}

-	ret = drm_bo_read_lock(&dev->bm.bm_lock);
+	ret = drm_bo_read_lock(&dev->bm.bm_lock, 1);
 	if (ret)
 		return ret;

-	/*
-	 * validate the buffer. note that 'fence_class' will be unused
-	 * as we pass use_old_fence_class=1 here. Note also that
-	 * the libdrm API doesn't pass fence_class to the kernel,
-	 * so it's a good thing it isn't used here.
-	 */
-	ret = drm_bo_handle_validate(file_priv, req->handle,
-				     req->flags,
-				     req->mask,
-				     req->hint | DRM_BO_HINT_DONT_FENCE,
-				     req->fence_class, 1,
-				     rep, NULL);
+	mutex_lock(&dev->struct_mutex);
+	bo = drm_lookup_buffer_object(file_priv, req->handle, 1);
+	mutex_unlock(&dev->struct_mutex);
+
+	if (!bo)
+		return -EINVAL;
+
+	if (bo->base.owner != file_priv)
+		req->mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE);
+
+	ret = drm_bo_do_validate(bo, req->flags, req->mask,
+				 req->hint | DRM_BO_HINT_DONT_FENCE,
+				 bo->fence_class, rep);
+
+	drm_bo_usage_deref_unlocked(&bo);

 	(void) drm_bo_read_unlock(&dev->bm.bm_lock);
-	if (ret)
-		return ret;

-	return 0;
+	return ret;
 }

 int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
@@ -2448,7 +2438,7 @@ int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_
 		return -EINVAL;
 	}

-	ret = drm_bo_write_lock(&bm->bm_lock, file_priv);
+	ret = drm_bo_write_lock(&bm->bm_lock, 1, file_priv);
 	if (ret)
 		return ret;

@@ -2499,7 +2489,7 @@ int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *f
 		return -EINVAL;
 	}

-	ret = drm_bo_write_lock(&bm->bm_lock, file_priv);
+	ret = drm_bo_write_lock(&bm->bm_lock, 0, file_priv);
 	if (ret)
 		return ret;

@@ -2547,7 +2537,7 @@ int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_
 	}

 	if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
-		ret = drm_bo_write_lock(&dev->bm.bm_lock, file_priv);
+		ret = drm_bo_write_lock(&dev->bm.bm_lock, 1, file_priv);
 		if (ret)
 			return ret;
 	}
@@ -49,7 +49,7 @@
  * unmappable regions to mappable. It's a bug to leave kernel space with the
  * read lock held.
  *
- * Both read- and write lock taking is interruptible for low signal-delivery
+ * Both read- and write lock taking may be interruptible for low signal-delivery
  * latency. The locking functions will return -EAGAIN if interrupted by a
  * signal.
  *

@@ -68,17 +68,21 @@ void drm_bo_init_lock(struct drm_bo_lock *lock)

 void drm_bo_read_unlock(struct drm_bo_lock *lock)
 {
-	if (unlikely(atomic_add_negative(-1, &lock->readers)))
-		BUG();
-	if (atomic_read(&lock->readers) == 0)
-		wake_up_interruptible(&lock->queue);
+	if (atomic_dec_and_test(&lock->readers))
+		wake_up_all(&lock->queue);
 }
 EXPORT_SYMBOL(drm_bo_read_unlock);

-int drm_bo_read_lock(struct drm_bo_lock *lock)
+int drm_bo_read_lock(struct drm_bo_lock *lock, int interruptible)
 {
 	while (unlikely(atomic_read(&lock->write_lock_pending) != 0)) {
 		int ret;
+
+		if (!interruptible) {
+			wait_event(lock->queue,
+				   atomic_read(&lock->write_lock_pending) == 0);
+			continue;
+		}
 		ret = wait_event_interruptible
 		    (lock->queue, atomic_read(&lock->write_lock_pending) == 0);
 		if (ret)

@@ -87,8 +91,13 @@ int drm_bo_read_lock(struct drm_bo_lock *lock)

 	while (unlikely(!atomic_add_unless(&lock->readers, 1, -1))) {
 		int ret;
+		if (!interruptible) {
+			wait_event(lock->queue,
+				   atomic_read(&lock->readers) != -1);
+			continue;
+		}
 		ret = wait_event_interruptible
-		    (lock->queue, atomic_add_unless(&lock->readers, 1, -1));
+		    (lock->queue, atomic_read(&lock->readers) != -1);
 		if (ret)
 			return -EAGAIN;
 	}

@@ -100,9 +109,7 @@ static int __drm_bo_write_unlock(struct drm_bo_lock *lock)
 {
 	if (unlikely(atomic_cmpxchg(&lock->readers, -1, 0) != -1))
 		return -EINVAL;
-	if (unlikely(atomic_cmpxchg(&lock->write_lock_pending, 1, 0) != 1))
-		return -EINVAL;
-	wake_up_interruptible(&lock->queue);
+	wake_up_all(&lock->queue);
 	return 0;
 }

@@ -116,21 +123,26 @@ static void drm_bo_write_lock_remove(struct drm_file *file_priv,
 	BUG_ON(ret);
 }

-int drm_bo_write_lock(struct drm_bo_lock *lock, struct drm_file *file_priv)
+int drm_bo_write_lock(struct drm_bo_lock *lock, int interruptible,
+		      struct drm_file *file_priv)
 {
 	int ret = 0;
 	struct drm_device *dev;

-	if (unlikely(atomic_cmpxchg(&lock->write_lock_pending, 0, 1) != 0))
-		return -EINVAL;
+	atomic_inc(&lock->write_lock_pending);

 	while (unlikely(atomic_cmpxchg(&lock->readers, 0, -1) != 0)) {
+		if (!interruptible) {
+			wait_event(lock->queue,
+				   atomic_read(&lock->readers) == 0);
+			continue;
+		}
 		ret = wait_event_interruptible
-		    (lock->queue, atomic_cmpxchg(&lock->readers, 0, -1) == 0);
+		    (lock->queue, atomic_read(&lock->readers) == 0);

 		if (ret) {
-			atomic_set(&lock->write_lock_pending, 0);
-			wake_up_interruptible(&lock->queue);
+			atomic_dec(&lock->write_lock_pending);
+			wake_up_all(&lock->queue);
 			return -EAGAIN;
 		}
 	}

@@ -141,6 +153,8 @@ int drm_bo_write_lock(struct drm_bo_lock *lock, struct drm_file *file_priv)
 	 * while holding it.
 	 */

+	if (atomic_dec_and_test(&lock->write_lock_pending))
+		wake_up_all(&lock->queue);
 	dev = file_priv->minor->dev;
 	mutex_lock(&dev->struct_mutex);
 	ret = drm_add_user_object(file_priv, &lock->base, 0);
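With the new interruptible argument, the bm lock gains an uninterruptible acquisition mode, and the single write_lock_pending flag becomes a counter so several pending writers can queue without tripping the old cmpxchg-based -EINVAL path. A sketch of how the two kinds of call sites in this merge use it (function names illustrative):

/* ioctl paths opt in to signal delivery and may see -EAGAIN */
static int ioctl_path(struct drm_device *dev)
{
	int ret = drm_bo_read_lock(&dev->bm.bm_lock, 1);
	if (ret)
		return ret;
	/* ... critical section ... */
	drm_bo_read_unlock(&dev->bm.bm_lock);
	return 0;
}

/* the fault handler cannot back out, so it waits uninterruptibly */
static void fault_path(struct drm_device *dev)
{
	drm_bo_read_lock(&dev->bm.bm_lock, 0);
	/* ... critical section ... */
	drm_bo_read_unlock(&dev->bm.bm_lock);
}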
@@ -357,10 +357,11 @@ int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo,
 		    bo->mem.mm_node != NULL))
 #endif
 	{
-		ret = drm_bo_wait(bo, 0, 1, 0);
-		if (ret)
-			return ret;
-
+		if (bo->fence) {
+			(void) drm_fence_object_wait(bo->fence, 0, 1,
+						     bo->fence_type);
+			drm_fence_usage_deref_unlocked(&bo->fence);
+		}
 		drm_bo_free_old_node(bo);

 		if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm != NULL)) {
@@ -213,7 +213,7 @@ static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
 	unsigned long bus_size;

 	dev = bo->dev;
-	while(drm_bo_read_lock(&dev->bm.bm_lock));
+	drm_bo_read_lock(&dev->bm.bm_lock, 0);

 	mutex_lock(&bo->mutex);

@@ -780,7 +780,7 @@ struct pci_dev * pci_get_bus_and_slot(unsigned int bus, unsigned int devfn)
 EXPORT_SYMBOL(pci_get_bus_and_slot);
 #endif

-#if defined(DRM_KMAP_ATOMIC_PROT_PFN) && defined(CONFIG_HIMEM)
+#if defined(DRM_KMAP_ATOMIC_PROT_PFN)
 #define drm_kmap_get_fixmap_pte(vaddr) \
 	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr))

@@ -807,4 +807,3 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type,

 EXPORT_SYMBOL(kmap_atomic_prot_pfn);
 #endif
-
@@ -352,6 +352,7 @@ static inline int kobject_uevent_env(struct kobject *kobj,
 #define PM_EVENT_PRETHAW 3
 #endif

+
 #if (defined(CONFIG_X86) && defined(CONFIG_X86_32) && defined(CONFIG_HIMEM))
 #define DRM_KMAP_ATOMIC_PROT_PFN
 extern void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type,

@@ -362,4 +363,8 @@ extern void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type,
 #define flush_agp_mappings() do {} while(0)
 #endif

+#ifndef DMA_BIT_MASK
+#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : (1ULL<<(n)) - 1)
+#endif
+
 #endif
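The DMA_BIT_MASK fallback above mirrors the macro newer kernels provide; the (n) == 64 special case exists because shifting a 64-bit value by 64 is undefined behaviour in C. A quick standalone check of the values it produces:

#include <stdio.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : (1ULL<<(n)) - 1)

int main(void)
{
	printf("%016llx\n", DMA_BIT_MASK(32));	/* 00000000ffffffff */
	printf("%016llx\n", DMA_BIT_MASK(40));	/* 000000ffffffffff */
	printf("%016llx\n", DMA_BIT_MASK(64));	/* ffffffffffffffff */
	return 0;
}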
@@ -445,6 +445,7 @@ int drm_fence_object_emit(struct drm_fence_object *fence, uint32_t fence_flags,
 	fence->type = type;
 	fence->waiting_types = 0;
 	fence->signaled_types = 0;
+	fence->error = 0;
 	fence->sequence = sequence;
 	fence->native_types = native_types;
 	if (list_empty(&fc->ring))

@@ -482,6 +483,7 @@ static int drm_fence_object_init(struct drm_device *dev, uint32_t fence_class,
 	fence->signaled_types = 0;
 	fence->waiting_types = 0;
 	fence->sequence = 0;
+	fence->error = 0;
 	fence->dev = dev;
 	write_unlock_irqrestore(&fm->lock, flags);
 	if (fence_flags & DRM_FENCE_FLAG_EMIT) {
@@ -418,9 +418,9 @@ int drm_release(struct inode *inode, struct file *filp)
 	 */

 	do{
-		spin_lock(&file_priv->master->lock.spinlock);
+		spin_lock_bh(&file_priv->master->lock.spinlock);
 		locked = file_priv->master->lock.idle_has_lock;
-		spin_unlock(&file_priv->master->lock.spinlock);
+		spin_unlock_bh(&file_priv->master->lock.spinlock);
 		if (locked)
 			break;
 		schedule();
@@ -528,7 +528,7 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
 	int crtc, ret = 0;
 	u32 new;

-	crtc = modeset->arg;
+	crtc = modeset->crtc;
 	if (crtc >= dev->num_crtcs) {
 		ret = -EINVAL;
 		goto out;
@@ -72,9 +72,10 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
 		return -EINVAL;

 	add_wait_queue(&master->lock.lock_queue, &entry);
-	spin_lock(&master->lock.spinlock);
+	spin_lock_bh(&master->lock.spinlock);
 	master->lock.user_waiters++;
-	spin_unlock(&master->lock.spinlock);
+	spin_unlock_bh(&master->lock.spinlock);
+
 	for (;;) {
 		__set_current_state(TASK_INTERRUPTIBLE);
 		if (!master->lock.hw_lock) {

@@ -96,9 +97,9 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
 			break;
 		}
 	}
-	spin_lock(&master->lock.spinlock);
+	spin_lock_bh(&master->lock.spinlock);
 	master->lock.user_waiters--;
-	spin_unlock(&master->lock.spinlock);
+	spin_unlock_bh(&master->lock.spinlock);
 	__set_current_state(TASK_RUNNING);
 	remove_wait_queue(&master->lock.lock_queue, &entry);

@@ -201,7 +202,7 @@ int drm_lock_take(struct drm_lock_data *lock_data,
 	unsigned int old, new, prev;
 	volatile unsigned int *lock = &lock_data->hw_lock->lock;

-	spin_lock(&lock_data->spinlock);
+	spin_lock_bh(&lock_data->spinlock);
 	do {
 		old = *lock;
 		if (old & _DRM_LOCK_HELD)

@@ -213,7 +214,7 @@ int drm_lock_take(struct drm_lock_data *lock_data,
 	}
 		prev = cmpxchg(lock, old, new);
 	} while (prev != old);
-	spin_unlock(&lock_data->spinlock);
+	spin_unlock_bh(&lock_data->spinlock);

 	if (_DRM_LOCKING_CONTEXT(old) == context) {
 		if (old & _DRM_LOCK_HELD) {

@@ -276,14 +277,14 @@ int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context)
 	unsigned int old, new, prev;
 	volatile unsigned int *lock = &lock_data->hw_lock->lock;

-	spin_lock(&lock_data->spinlock);
+	spin_lock_bh(&lock_data->spinlock);
 	if (lock_data->kernel_waiters != 0) {
 		drm_lock_transfer(lock_data, 0);
 		lock_data->idle_has_lock = 1;
-		spin_unlock(&lock_data->spinlock);
+		spin_unlock_bh(&lock_data->spinlock);
 		return 1;
 	}
-	spin_unlock(&lock_data->spinlock);
+	spin_unlock_bh(&lock_data->spinlock);

 	do {
 		old = *lock;

@@ -348,18 +349,18 @@ void drm_idlelock_take(struct drm_lock_data *lock_data)
 {
 	int ret = 0;

-	spin_lock(&lock_data->spinlock);
+	spin_lock_bh(&lock_data->spinlock);
 	lock_data->kernel_waiters++;
 	if (!lock_data->idle_has_lock) {

-		spin_unlock(&lock_data->spinlock);
+		spin_unlock_bh(&lock_data->spinlock);
 		ret = drm_lock_take(lock_data, DRM_KERNEL_CONTEXT);
-		spin_lock(&lock_data->spinlock);
+		spin_lock_bh(&lock_data->spinlock);

 		if (ret == 1)
 			lock_data->idle_has_lock = 1;
 	}
-	spin_unlock(&lock_data->spinlock);
+	spin_unlock_bh(&lock_data->spinlock);
 }
 EXPORT_SYMBOL(drm_idlelock_take);

@@ -368,7 +369,7 @@ void drm_idlelock_release(struct drm_lock_data *lock_data)
 	unsigned int old, prev;
 	volatile unsigned int *lock = &lock_data->hw_lock->lock;

-	spin_lock(&lock_data->spinlock);
+	spin_lock_bh(&lock_data->spinlock);
 	if (--lock_data->kernel_waiters == 0) {
 		if (lock_data->idle_has_lock) {
 			do {

@@ -379,7 +380,7 @@ void drm_idlelock_release(struct drm_lock_data *lock_data)
 			lock_data->idle_has_lock = 0;
 		}
 	}
-	spin_unlock(&lock_data->spinlock);
+	spin_unlock_bh(&lock_data->spinlock);
 }
 EXPORT_SYMBOL(drm_idlelock_release);

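All of the spin_lock()/spin_unlock() pairs on the DRM lock spinlock become the _bh variants here. The usual reason for such a conversion, and presumably the one that applies, is that the same lock is now also taken from bottom-half (tasklet/softirq) context, where a softirq preempting a process-context holder on the same CPU would spin forever. The shape of the pattern, as an illustrative sketch:

static DEFINE_SPINLOCK(demo_lock);

static void demo_tasklet(unsigned long data)
{
	spin_lock(&demo_lock);		/* softirq context: plain lock is fine */
	/* ... */
	spin_unlock(&demo_lock);
}

static void demo_process_context(void)
{
	spin_lock_bh(&demo_lock);	/* keep softirqs off this CPU meanwhile */
	/* ... */
	spin_unlock_bh(&demo_lock);
}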
@@ -61,35 +61,39 @@ static inline size_t drm_size_align(size_t size)

 int drm_alloc_memctl(size_t size)
 {
-	int ret = 0;
+	int ret = 0;
 	unsigned long a_size = drm_size_align(size);
-	unsigned long new_used = drm_memctl.cur_used + a_size;
+	unsigned long new_used;

 	spin_lock(&drm_memctl.lock);
-	if (unlikely(new_used > drm_memctl.high_threshold)) {
-		if (!DRM_SUSER(DRM_CURPROC) ||
-		    (new_used + drm_memctl.emer_used > drm_memctl.emer_threshold) ||
-		    (a_size > 2*PAGE_SIZE)) {
-			ret = -ENOMEM;
-			goto out;
-		}
-
-		/*
-		 * Allow small root-only allocations, even if the
-		 * high threshold is exceeded.
-		 */
-
-		new_used -= drm_memctl.high_threshold;
-		drm_memctl.emer_used += new_used;
-		a_size -= new_used;
+	new_used = drm_memctl.cur_used + a_size;
+	if (likely(new_used < drm_memctl.high_threshold)) {
+		drm_memctl.cur_used = new_used;
+		goto out;
 	}
-	drm_memctl.cur_used += a_size;
+
+	/*
+	 * Allow small allocations from root-only processes to
+	 * succeed until the emergency threshold is reached.
+	 */
+
+	new_used += drm_memctl.emer_used;
+	if (unlikely(!DRM_SUSER(DRM_CURPROC) ||
+		     (a_size > 16*PAGE_SIZE) ||
+		     (new_used > drm_memctl.emer_threshold))) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	drm_memctl.cur_used = drm_memctl.high_threshold;
+	drm_memctl.emer_used = new_used - drm_memctl.high_threshold;
 out:
 	spin_unlock(&drm_memctl.lock);
 	return ret;
 }
 EXPORT_SYMBOL(drm_alloc_memctl);


 void drm_free_memctl(size_t size)
 {
 	unsigned long a_size = drm_size_align(size);
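The restructured drm_alloc_memctl() keeps a fast path while cur_used stays below high_threshold, and lets root-only allocations of at most 16 pages (up from 2) spill into an emergency budget until emer_threshold. A userspace re-implementation of the same arithmetic, with made-up page-sized thresholds, traces the behaviour:

#include <stdio.h>

static unsigned long cur_used, emer_used;
static const unsigned long high_threshold = 100, emer_threshold = 120;

/* mirrors the new drm_alloc_memctl() logic; sizes here are whole pages */
static int alloc_memctl(unsigned long a_size, int is_root)
{
	unsigned long new_used = cur_used + a_size;

	if (new_used < high_threshold) {	/* fast path */
		cur_used = new_used;
		return 0;
	}
	new_used += emer_used;
	if (!is_root || a_size > 16 || new_used > emer_threshold)
		return -1;			/* -ENOMEM */
	cur_used = high_threshold;
	emer_used = new_used - high_threshold;
	return 0;
}

int main(void)
{
	cur_used = 99;
	printf("root 4-page alloc: %d\n", alloc_memctl(4, 1));	/* 0: succeeds */
	printf("emer_used now: %lu\n", emer_used);		/* 3 */
	printf("user 4-page alloc: %d\n", alloc_memctl(4, 0));	/* -1: denied */
	return 0;
}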
@@ -310,6 +310,8 @@ struct drm_ttm_backend {
 struct drm_ttm {
 	struct page *dummy_read_page;
 	struct page **pages;
+	long first_himem_page;
+	long last_lomem_page;
 	uint32_t page_flags;
 	unsigned long num_pages;
 	atomic_t vma_count;

@@ -317,6 +319,8 @@ struct drm_ttm {
 	int destroy;
 	uint32_t mapping_offset;
 	struct drm_ttm_backend *be;
+	unsigned long highest_lomem_entry;
+	unsigned long lowest_himem_entry;
 	enum {
 		ttm_bound,
 		ttm_evicted,

@@ -334,7 +338,7 @@ extern void drm_ttm_unbind(struct drm_ttm *ttm);
 extern void drm_ttm_evict(struct drm_ttm *ttm);
 extern void drm_ttm_fixup_caching(struct drm_ttm *ttm);
 extern struct page *drm_ttm_get_page(struct drm_ttm *ttm, int index);
-extern void drm_ttm_cache_flush(void);
+extern void drm_ttm_cache_flush(struct page *pages[], unsigned long num_pages);
 extern int drm_ttm_populate(struct drm_ttm *ttm);
 extern int drm_ttm_set_user(struct drm_ttm *ttm,
 			    struct task_struct *tsk,

@@ -512,6 +516,14 @@ struct drm_buffer_object {
 #define _DRM_BO_FLAG_UNFENCED 0x00000001
 #define _DRM_BO_FLAG_EVICTED  0x00000002

+/*
+ * This flag indicates that a flag called with bo->mutex held has
+ * temporarily released the buffer object mutex, (usually to wait for something).
+ * and thus any post-lock validation needs to be rerun.
+ */
+
+#define _DRM_BO_FLAG_UNLOCKED 0x00000004
+
 struct drm_mem_type_manager {
 	int has_type;
 	int use_type;

@@ -677,8 +689,8 @@ extern int drm_buffer_object_create(struct drm_device *dev, unsigned long size,
 				    uint32_t hint, uint32_t page_alignment,
 				    unsigned long buffer_start,
 				    struct drm_buffer_object **bo);
-extern int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int ignore_signals,
-		       int no_wait);
+extern int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int interruptible,
+		       int no_wait, int check_unfenced);
 extern int drm_bo_mem_space(struct drm_buffer_object *bo,
 			    struct drm_bo_mem_reg *mem, int no_wait);
 extern int drm_bo_move_buffer(struct drm_buffer_object *bo,

@@ -690,7 +702,7 @@ extern int drm_bo_init_mm(struct drm_device *dev, unsigned type,
 			  int kern_init);
 extern int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
 				  uint64_t flags, uint64_t mask, uint32_t hint,
-				  uint32_t fence_class, int use_old_fence_class,
+				  uint32_t fence_class,
 				  struct drm_bo_info_rep *rep,
 				  struct drm_buffer_object **bo_rep);
 extern struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,

@@ -745,6 +757,8 @@ extern int drm_bo_pfn_prot(struct drm_buffer_object *bo,
 			   unsigned long dst_offset,
 			   unsigned long *pfn,
 			   pgprot_t *prot);
+extern void drm_bo_fill_rep_arg(struct drm_buffer_object *bo,
+				struct drm_bo_info_rep *rep);


 /*

@@ -797,8 +811,10 @@ extern void drm_mem_reg_iounmap(struct drm_device *dev, struct drm_bo_mem_reg *

 extern void drm_bo_init_lock(struct drm_bo_lock *lock);
 extern void drm_bo_read_unlock(struct drm_bo_lock *lock);
-extern int drm_bo_read_lock(struct drm_bo_lock *lock);
+extern int drm_bo_read_lock(struct drm_bo_lock *lock,
+			    int interruptible);
 extern int drm_bo_write_lock(struct drm_bo_lock *lock,
+			     int interruptible,
 			     struct drm_file *file_priv);

 extern int drm_bo_write_unlock(struct drm_bo_lock *lock,
@@ -30,13 +30,48 @@

 #include "drmP.h"

+#if defined( CONFIG_X86 ) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
+static void drm_clflush_page(struct page *page)
+{
+	uint8_t *page_virtual;
+	unsigned int i;
+
+	if (unlikely(page == NULL))
+		return;
+
+	page_virtual = kmap_atomic(page, KM_USER0);
+
+	for (i=0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
+		clflush(page_virtual + i);
+
+	kunmap_atomic(page_virtual, KM_USER0);
+}
+
+static void drm_ttm_cache_flush_clflush(struct page *pages[], unsigned long num_pages)
+{
+	unsigned long i;
+
+	mb();
+	for (i=0; i < num_pages; ++i)
+		drm_clflush_page(*pages++);
+	mb();
+}
+#endif
+
 static void drm_ttm_ipi_handler(void *null)
 {
 	flush_agp_cache();
 }

-void drm_ttm_cache_flush(void)
+void drm_ttm_cache_flush(struct page *pages[], unsigned long num_pages)
 {
+
+#if defined( CONFIG_X86 ) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
+	if (cpu_has_clflush) {
+		drm_ttm_cache_flush_clflush(pages, num_pages);
+		return;
+	}
+#endif
 	if (on_each_cpu(drm_ttm_ipi_handler, NULL, 1, 1) != 0)
 		DRM_ERROR("Timed out waiting for drm cache flush.\n");
 }

@@ -114,7 +149,7 @@ static int drm_ttm_set_caching(struct drm_ttm *ttm, int noncached)
 		return 0;

 	if (noncached)
-		drm_ttm_cache_flush();
+		drm_ttm_cache_flush(ttm->pages, ttm->num_pages);

 	for (i = 0; i < ttm->num_pages; ++i) {
 		cur_page = ttm->pages + i;

@@ -228,12 +263,16 @@ struct page *drm_ttm_get_page(struct drm_ttm *ttm, int index)
 	struct page *p;
 	struct drm_buffer_manager *bm = &ttm->dev->bm;

-	p = ttm->pages[index];
-	if (!p) {
+	while(NULL == (p = ttm->pages[index])) {
 		p = drm_ttm_alloc_page();
 		if (!p)
 			return NULL;
-		ttm->pages[index] = p;
+
+		if (PageHighMem(p))
+			ttm->pages[--ttm->first_himem_page] = p;
+		else
+			ttm->pages[++ttm->last_lomem_page] = p;
+
 		++bm->cur_pages;
 	}
 	return p;

@@ -341,6 +380,8 @@ struct drm_ttm *drm_ttm_create(struct drm_device *dev, unsigned long size,

 	ttm->destroy = 0;
 	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	ttm->first_himem_page = ttm->num_pages;
+	ttm->last_lomem_page = -1;

 	ttm->page_flags = page_flags;

|
|||
return NOPFN_SIGBUS;
|
||||
|
||||
dev = bo->dev;
|
||||
err = drm_bo_read_lock(&dev->bm.bm_lock);
|
||||
err = drm_bo_read_lock(&dev->bm.bm_lock, 1);
|
||||
if (err)
|
||||
return NOPFN_REFAULT;
|
||||
|
||||
|
@ -748,12 +748,15 @@ static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
|
|||
return NOPFN_REFAULT;
|
||||
}
|
||||
|
||||
err = drm_bo_wait(bo, 0, 0, 0);
|
||||
err = drm_bo_wait(bo, 0, 1, 0, 1);
|
||||
if (err) {
|
||||
ret = (err != -EAGAIN) ? NOPFN_SIGBUS : NOPFN_REFAULT;
|
||||
bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
|
||||
|
||||
/*
|
||||
* If buffer happens to be in a non-mappable location,
|
||||
* move it to a mappable.
|
||||
|
@ -806,6 +809,7 @@ static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
|
|||
goto out_unlock;
|
||||
}
|
||||
out_unlock:
|
||||
BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
|
||||
mutex_unlock(&bo->mutex);
|
||||
drm_bo_read_unlock(&dev->bm.bm_lock);
|
||||
return ret;
|
||||
|
|
|
@@ -252,10 +252,10 @@ int i915_move(struct drm_buffer_object *bo,
 	if (old_mem->mem_type == DRM_BO_MEM_LOCAL) {
 		return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
 	} else if (new_mem->mem_type == DRM_BO_MEM_LOCAL) {
-		if (0) /*i915_move_flip(bo, evict, no_wait, new_mem)*/
+		if (1) /*i915_move_flip(bo, evict, no_wait, new_mem)*/
 			return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
 	} else {
-		if (0) /*i915_move_blit(bo, evict, no_wait, new_mem)*/
+		if (1) /*i915_move_blit(bo, evict, no_wait, new_mem)*/
 			return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
 	}
 	return 0;
@@ -285,6 +285,9 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
 	pci_save_state(dev->pdev);
 	pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);

+	/* Display arbitration control */
+	dev_priv->saveDSPARB = I915_READ(DSPARB);
+
 	/* Pipe & plane A info */
 	dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
 	dev_priv->savePIPEASRC = I915_READ(PIPEASRC);

@@ -378,6 +381,7 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
 	dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);

 	/* Clock gating state */
+	dev_priv->saveD_STATE = I915_READ(D_STATE);
 	dev_priv->saveDSPCLK_GATE_D = I915_READ(DSPCLK_GATE_D);

 	/* Cache mode state */

@@ -417,6 +421,8 @@ static int i915_resume(struct drm_device *dev)

 	pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);

+	I915_WRITE(DSPARB, dev_priv->saveDSPARB);
+
 	/* Pipe & plane A info */
 	/* Prime the clock */
 	if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {

@@ -536,6 +542,7 @@ static int i915_resume(struct drm_device *dev)
 	udelay(150);

 	/* Clock gating state */
+	I915_WRITE (D_STATE, dev_priv->saveD_STATE);
 	I915_WRITE (DSPCLK_GATE_D, dev_priv->saveDSPCLK_GATE_D);

 	/* Cache mode state */
@ -0,0 +1,921 @@
/*
 * Copyright 2003-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *	Dave Airlie
 *	Keith Packard
 *	... ?
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"

#if DRM_DEBUG_CODE
#define DRM_DEBUG_RELOCATION	(drm_debug != 0)
#else
#define DRM_DEBUG_RELOCATION	0
#endif

enum i915_buf_idle {
	I915_RELOC_UNCHECKED,
	I915_RELOC_IDLE,
	I915_RELOC_BUSY
};

struct i915_relocatee_info {
	struct drm_buffer_object *buf;
	unsigned long offset;
	uint32_t *data_page;
	unsigned page_offset;
	struct drm_bo_kmap_obj kmap;
	int is_iomem;
	int dst;
	int idle;
	int performed_ring_relocs;
#ifdef DRM_KMAP_ATOMIC_PROT_PFN
	unsigned long pfn;
	pgprot_t pg_prot;
#endif
};

struct drm_i915_validate_buffer {
	struct drm_buffer_object *buffer;
	int presumed_offset_correct;
	void __user *data;
	int ret;
	enum i915_buf_idle idle;
};

/*
 * I'd like to use MI_STORE_DATA_IMM here, but I can't make
 * it work. Seems like GART writes are broken with that
 * instruction. Also I'm not sure that MI_FLUSH will
 * act as a memory barrier for that instruction. It will
 * for this single dword 2D blit.
 */

static void i915_emit_ring_reloc(struct drm_device *dev, uint32_t offset,
				 uint32_t value)
{
	struct drm_i915_private *dev_priv =
	    (struct drm_i915_private *)dev->dev_private;

	RING_LOCALS;
	i915_kernel_lost_context(dev);
	BEGIN_LP_RING(6);
	OUT_RING((0x02 << 29) | (0x40 << 22) | (0x3 << 20) | (0x3));
	OUT_RING((0x3 << 24) | (0xF0 << 16) | (0x40));
	OUT_RING((0x1 << 16) | (0x4));
	OUT_RING(offset);
	OUT_RING(value);
	OUT_RING(0);
	ADVANCE_LP_RING();
}
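
The six magic dwords above appear to be a COLOR_BLT solid fill of exactly one
dword, using the relocation value as the fill color. A plausible decoding,
with illustrative names that are not from this tree:

/*
 * Hedged reading of the blit above; all names here are hypothetical and
 * only the numeric values come from the code.
 */
#define EX_BLT_CLIENT_2D	(0x2 << 29)	/* 2D client command */
#define EX_BLT_OP_COLOR_BLT	(0x40 << 22)	/* COLOR_BLT opcode */
#define EX_BLT_WRITE_ALPHA_RGB	(0x3 << 20)	/* write alpha and RGB */
#define EX_BLT_LEN		0x3		/* 3 additional dwords */
#define EX_BLT_DEPTH_32BPP	(0x3 << 24)
#define EX_BLT_ROP_PATCOPY	(0xF0 << 16)	/* dest = pattern color */
#define EX_BLT_DST_PITCH	0x40		/* irrelevant for one row */

static void example_emit_fill_dword(struct drm_device *dev,
				    uint32_t gtt_offset, uint32_t value)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	RING_LOCALS;

	BEGIN_LP_RING(6);
	OUT_RING(EX_BLT_CLIENT_2D | EX_BLT_OP_COLOR_BLT |
		 EX_BLT_WRITE_ALPHA_RGB | EX_BLT_LEN);
	OUT_RING(EX_BLT_DEPTH_32BPP | EX_BLT_ROP_PATCOPY | EX_BLT_DST_PITCH);
	OUT_RING((1 << 16) | 4);	/* 1 row, 4 bytes wide: one dword */
	OUT_RING(gtt_offset);		/* relocation destination */
	OUT_RING(value);		/* patched offset, as fill color */
	OUT_RING(0);			/* pad to even length */
	ADVANCE_LP_RING();
}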

static void i915_dereference_buffers_locked(struct drm_i915_validate_buffer
					    *buffers, unsigned num_buffers)
{
	while (num_buffers--)
		drm_bo_usage_deref_locked(&buffers[num_buffers].buffer);
}

int i915_apply_reloc(struct drm_file *file_priv, int num_buffers,
		     struct drm_i915_validate_buffer *buffers,
		     struct i915_relocatee_info *relocatee, uint32_t *reloc)
{
	unsigned index;
	unsigned long new_cmd_offset;
	u32 val;
	int ret, i;
	int buf_index = -1;

	/*
	 * FIXME: O(relocs * buffers) complexity.
	 */

	for (i = 0; i <= num_buffers; i++)
		if (buffers[i].buffer)
			if (reloc[2] == buffers[i].buffer->base.hash.key)
				buf_index = i;

	if (buf_index == -1) {
		DRM_ERROR("Illegal relocation buffer %08X\n", reloc[2]);
		return -EINVAL;
	}

	/*
	 * Short-circuit relocations that were correctly
	 * guessed by the client
	 */
	if (buffers[buf_index].presumed_offset_correct && !DRM_DEBUG_RELOCATION)
		return 0;

	new_cmd_offset = reloc[0];
	if (!relocatee->data_page ||
	    !drm_bo_same_page(relocatee->offset, new_cmd_offset)) {
		struct drm_bo_mem_reg *mem = &relocatee->buf->mem;

		drm_bo_kunmap(&relocatee->kmap);
		relocatee->data_page = NULL;
		relocatee->offset = new_cmd_offset;

		if (unlikely(relocatee->idle == I915_RELOC_UNCHECKED)) {
			ret = drm_bo_wait(relocatee->buf, 0, 1, 0, 0);
			if (ret)
				return ret;
			relocatee->idle = I915_RELOC_IDLE;
		}

		if (unlikely((mem->mem_type != DRM_BO_MEM_LOCAL) &&
			     (mem->flags & DRM_BO_FLAG_CACHED_MAPPED)))
			drm_bo_evict_cached(relocatee->buf);

		ret = drm_bo_kmap(relocatee->buf, new_cmd_offset >> PAGE_SHIFT,
				  1, &relocatee->kmap);
		if (ret) {
			DRM_ERROR
			    ("Could not map command buffer to apply relocs: %08lx\n",
			     new_cmd_offset);
			return ret;
		}
		relocatee->data_page = drm_bmo_virtual(&relocatee->kmap,
						       &relocatee->is_iomem);
		relocatee->page_offset = (relocatee->offset & PAGE_MASK);
	}

	val = buffers[buf_index].buffer->offset;
	index = (reloc[0] - relocatee->page_offset) >> 2;

	/* add in validate */
	val = val + reloc[1];

	if (DRM_DEBUG_RELOCATION) {
		if (buffers[buf_index].presumed_offset_correct &&
		    relocatee->data_page[index] != val) {
			DRM_DEBUG
			    ("Relocation mismatch source %d target %d buffer %d user %08x kernel %08x\n",
			     reloc[0], reloc[1], buf_index,
			     relocatee->data_page[index], val);
		}
	}

	if (relocatee->is_iomem)
		iowrite32(val, relocatee->data_page + index);
	else
		relocatee->data_page[index] = val;
	return 0;
}

int i915_process_relocs(struct drm_file *file_priv,
			uint32_t buf_handle,
			uint32_t __user **reloc_user_ptr,
			struct i915_relocatee_info *relocatee,
			struct drm_i915_validate_buffer *buffers,
			uint32_t num_buffers)
{
	int ret, reloc_stride;
	uint32_t cur_offset;
	uint32_t reloc_count;
	uint32_t reloc_type;
	uint32_t reloc_buf_size;
	uint32_t *reloc_buf = NULL;
	int i;

	/* do a copy from user from the user ptr */
	ret = get_user(reloc_count, *reloc_user_ptr);
	if (ret) {
		DRM_ERROR("Could not map relocation buffer.\n");
		goto out;
	}

	ret = get_user(reloc_type, (*reloc_user_ptr) + 1);
	if (ret) {
		DRM_ERROR("Could not map relocation buffer.\n");
		goto out;
	}

	if (reloc_type != 0) {
		DRM_ERROR("Unsupported relocation type requested\n");
		ret = -EINVAL;
		goto out;
	}

	reloc_buf_size =
	    (I915_RELOC_HEADER +
	     (reloc_count * I915_RELOC0_STRIDE)) * sizeof(uint32_t);
	reloc_buf = kmalloc(reloc_buf_size, GFP_KERNEL);
	if (!reloc_buf) {
		DRM_ERROR("Out of memory for reloc buffer\n");
		ret = -ENOMEM;
		goto out;
	}

	if (copy_from_user(reloc_buf, *reloc_user_ptr, reloc_buf_size)) {
		ret = -EFAULT;
		goto out;
	}

	/* get next relocate buffer handle */
	*reloc_user_ptr = (uint32_t *)*(unsigned long *)&reloc_buf[2];

	reloc_stride = I915_RELOC0_STRIDE * sizeof(uint32_t);	/* may be different for other types of relocs */

	DRM_DEBUG("num relocs is %d, next is %p\n", reloc_count,
		  *reloc_user_ptr);

	for (i = 0; i < reloc_count; i++) {
		cur_offset = I915_RELOC_HEADER + (i * I915_RELOC0_STRIDE);

		ret = i915_apply_reloc(file_priv, num_buffers, buffers,
				       relocatee, reloc_buf + cur_offset);
		if (ret)
			goto out;
	}

out:
	if (reloc_buf)
		kfree(reloc_buf);

	if (relocatee->data_page) {
		drm_bo_kunmap(&relocatee->kmap);
		relocatee->data_page = NULL;
	}

	return ret;
}

static int i915_exec_reloc(struct drm_file *file_priv, drm_handle_t buf_handle,
			   uint32_t __user *reloc_user_ptr,
			   struct drm_i915_validate_buffer *buffers,
			   uint32_t buf_count)
{
	struct drm_device *dev = file_priv->minor->dev;
	struct i915_relocatee_info relocatee;
	int ret = 0;
	int b;

	/*
	 * Short circuit relocations when all previous
	 * buffer offsets were correctly guessed by
	 * the client
	 */
	if (!DRM_DEBUG_RELOCATION) {
		for (b = 0; b < buf_count; b++)
			if (!buffers[b].presumed_offset_correct)
				break;

		if (b == buf_count)
			return 0;
	}

	memset(&relocatee, 0, sizeof(relocatee));
	relocatee.idle = I915_RELOC_UNCHECKED;

	mutex_lock(&dev->struct_mutex);
	relocatee.buf = drm_lookup_buffer_object(file_priv, buf_handle, 1);
	mutex_unlock(&dev->struct_mutex);
	if (!relocatee.buf) {
		DRM_DEBUG("relocatee buffer invalid %08x\n", buf_handle);
		ret = -EINVAL;
		goto out_err;
	}

	mutex_lock(&relocatee.buf->mutex);
	while (reloc_user_ptr) {
		ret =
		    i915_process_relocs(file_priv, buf_handle, &reloc_user_ptr,
					&relocatee, buffers, buf_count);
		if (ret) {
			DRM_ERROR("process relocs failed\n");
			goto out_err1;
		}
	}

out_err1:
	mutex_unlock(&relocatee.buf->mutex);
	drm_bo_usage_deref_unlocked(&relocatee.buf);
out_err:
	return ret;
}

static void i915_clear_relocatee(struct i915_relocatee_info *relocatee)
{
	if (relocatee->data_page) {
#ifndef DRM_KMAP_ATOMIC_PROT_PFN
		drm_bo_kunmap(&relocatee->kmap);
#else
		kunmap_atomic(relocatee->data_page, KM_USER0);
#endif
		relocatee->data_page = NULL;
	}
	relocatee->buf = NULL;
	relocatee->dst = ~0;
}

static int i915_update_relocatee(struct i915_relocatee_info *relocatee,
				 struct drm_i915_validate_buffer *buffers,
				 unsigned int dst, unsigned long dst_offset)
{
	int ret;

	if (unlikely(dst != relocatee->dst || NULL == relocatee->buf)) {
		i915_clear_relocatee(relocatee);
		relocatee->dst = dst;
		relocatee->buf = buffers[dst].buffer;
		relocatee->idle = buffers[dst].idle;

		/*
		 * Check for buffer idle. If the buffer is busy, revert to
		 * ring relocations.
		 */

		if (relocatee->idle == I915_RELOC_UNCHECKED) {
			preempt_enable();
			mutex_lock(&relocatee->buf->mutex);

			ret = drm_bo_wait(relocatee->buf, 0, 1, 1, 0);
			if (ret == 0)
				relocatee->idle = I915_RELOC_IDLE;
			else {
				relocatee->idle = I915_RELOC_BUSY;
				relocatee->performed_ring_relocs = 1;
			}
			mutex_unlock(&relocatee->buf->mutex);
			preempt_disable();
			buffers[dst].idle = relocatee->idle;
		}
	}

	if (relocatee->idle == I915_RELOC_BUSY)
		return 0;

	if (unlikely(dst_offset > relocatee->buf->num_pages * PAGE_SIZE)) {
		DRM_ERROR("Relocation destination out of bounds.\n");
		return -EINVAL;
	}
	if (unlikely(!drm_bo_same_page(relocatee->page_offset, dst_offset) ||
		     NULL == relocatee->data_page)) {
#ifdef DRM_KMAP_ATOMIC_PROT_PFN
		if (NULL != relocatee->data_page) {
			kunmap_atomic(relocatee->data_page, KM_USER0);
			relocatee->data_page = NULL;
		}
		ret = drm_bo_pfn_prot(relocatee->buf, dst_offset,
				      &relocatee->pfn, &relocatee->pg_prot);
		if (ret) {
			DRM_ERROR("Can't map relocation destination.\n");
			return -EINVAL;
		}
		relocatee->data_page =
		    kmap_atomic_prot_pfn(relocatee->pfn, KM_USER0,
					 relocatee->pg_prot);
#else
		if (NULL != relocatee->data_page) {
			drm_bo_kunmap(&relocatee->kmap);
			relocatee->data_page = NULL;
		}

		ret = drm_bo_kmap(relocatee->buf, dst_offset >> PAGE_SHIFT,
				  1, &relocatee->kmap);
		if (ret) {
			DRM_ERROR("Can't map relocation destination.\n");
			return ret;
		}

		relocatee->data_page = drm_bmo_virtual(&relocatee->kmap,
						       &relocatee->is_iomem);
#endif
		relocatee->page_offset = dst_offset & PAGE_MASK;
	}
	return 0;
}

static int i915_apply_post_reloc(uint32_t reloc[],
				 struct drm_i915_validate_buffer *buffers,
				 uint32_t num_buffers,
				 struct i915_relocatee_info *relocatee)
{
	uint32_t reloc_buffer = reloc[2];
	uint32_t dst_buffer = reloc[3];
	uint32_t val;
	uint32_t index;
	int ret;

	/* validate the indices before using them to index buffers[] */
	if (unlikely(reloc_buffer >= num_buffers)) {
		DRM_ERROR("Invalid reloc buffer index.\n");
		return -EINVAL;
	}
	if (unlikely(dst_buffer >= num_buffers)) {
		DRM_ERROR("Invalid dest buffer index.\n");
		return -EINVAL;
	}
	if (likely(buffers[reloc_buffer].presumed_offset_correct))
		return 0;

	ret = i915_update_relocatee(relocatee, buffers, dst_buffer, reloc[0]);
	if (unlikely(ret))
		return ret;

	val = buffers[reloc_buffer].buffer->offset;
	index = (reloc[0] - relocatee->page_offset) >> 2;
	val = val + reloc[1];

	if (relocatee->idle == I915_RELOC_BUSY) {
		i915_emit_ring_reloc(relocatee->buf->dev,
				     relocatee->buf->offset + reloc[0], val);
		return 0;
	}
#ifdef DRM_KMAP_ATOMIC_PROT_PFN
	relocatee->data_page[index] = val;
#else
	if (likely(relocatee->is_iomem))
		iowrite32(val, relocatee->data_page + index);
	else
		relocatee->data_page[index] = val;
#endif

	return 0;
}

static int i915_post_relocs(struct drm_file *file_priv,
			    uint32_t __user *new_reloc_ptr,
			    struct drm_i915_validate_buffer *buffers,
			    unsigned int num_buffers)
{
	uint32_t *reloc;
	uint32_t reloc_stride = I915_RELOC0_STRIDE * sizeof(uint32_t);
	uint32_t header_size = I915_RELOC_HEADER * sizeof(uint32_t);
	struct i915_relocatee_info relocatee;
	uint32_t reloc_type;
	uint32_t num_relocs;
	uint32_t count;
	int ret = 0;
	int i;
	int short_circuit = 1;
	uint32_t __user *reloc_ptr;
	uint64_t new_reloc_data;
	uint32_t reloc_buf_size;
	uint32_t *reloc_buf = NULL;

	for (i = 0; i < num_buffers; ++i) {
		if (unlikely(!buffers[i].presumed_offset_correct)) {
			short_circuit = 0;
			break;
		}
	}

	if (likely(short_circuit))
		return 0;

	memset(&relocatee, 0, sizeof(relocatee));

	while (new_reloc_ptr) {
		reloc_ptr = new_reloc_ptr;

		ret = get_user(num_relocs, reloc_ptr);
		if (unlikely(ret))
			goto out;
		if (unlikely(!access_ok(VERIFY_READ, reloc_ptr,
					header_size +
					num_relocs * reloc_stride)))
			return -EFAULT;

		ret = __get_user(reloc_type, reloc_ptr + 1);
		if (unlikely(ret))
			goto out;

		if (unlikely(reloc_type != 1)) {
			DRM_ERROR("Unsupported relocation type requested.\n");
			ret = -EINVAL;
			goto out;
		}

		ret = __get_user(new_reloc_data, reloc_ptr + 2);
		new_reloc_ptr = (uint32_t __user *) (unsigned long)
		    new_reloc_data;

		reloc_ptr += I915_RELOC_HEADER;

		if (num_relocs == 0)
			goto out;

		reloc_buf_size =
		    (num_relocs * I915_RELOC0_STRIDE) * sizeof(uint32_t);
		reloc_buf = kmalloc(reloc_buf_size, GFP_KERNEL);
		if (!reloc_buf) {
			DRM_ERROR("Out of memory for reloc buffer\n");
			ret = -ENOMEM;
			goto out;
		}

		if (__copy_from_user(reloc_buf, reloc_ptr, reloc_buf_size)) {
			ret = -EFAULT;
			goto out;
		}
		reloc = reloc_buf;
		preempt_disable();
		for (count = 0; count < num_relocs; ++count) {
			ret = i915_apply_post_reloc(reloc, buffers,
						    num_buffers, &relocatee);
			if (unlikely(ret)) {
				preempt_enable();
				goto out;
			}
			reloc += I915_RELOC0_STRIDE;
		}
		preempt_enable();

		if (reloc_buf) {
			kfree(reloc_buf);
			reloc_buf = NULL;
		}
		i915_clear_relocatee(&relocatee);
	}

out:
	/*
	 * Flush ring relocs so the command parser will pick them up.
	 */

	if (relocatee.performed_ring_relocs)
		(void)i915_emit_mi_flush(file_priv->minor->dev, 0);

	i915_clear_relocatee(&relocatee);
	if (reloc_buf) {
		kfree(reloc_buf);
		reloc_buf = NULL;
	}

	return ret;
}

static int i915_check_presumed(struct drm_i915_op_arg *arg,
			       struct drm_buffer_object *bo,
			       uint32_t __user *data, int *presumed_ok)
{
	struct drm_bo_op_req *req = &arg->d.req;
	uint32_t hint_offset;
	uint32_t hint = req->bo_req.hint;

	*presumed_ok = 0;

	if (!(hint & DRM_BO_HINT_PRESUMED_OFFSET))
		return 0;
	if (bo->offset == req->bo_req.presumed_offset) {
		*presumed_ok = 1;
		return 0;
	}

	/*
	 * We need to turn off the HINT_PRESUMED_OFFSET for this buffer in
	 * the user-space IOCTL argument list, since the buffer has moved,
	 * we're about to apply relocations and we might subsequently
	 * hit an -EAGAIN. In that case the argument list will be reused by
	 * user-space, but the presumed offset is no longer valid.
	 *
	 * Needless to say, this is a bit ugly.
	 */

	hint_offset = (uint32_t *)&req->bo_req.hint - (uint32_t *)arg;
	hint &= ~DRM_BO_HINT_PRESUMED_OFFSET;
	return __put_user(hint, data + hint_offset);
}
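
For reference, the client side of this contract might look as follows; a
minimal sketch, assuming only the struct drm_bo_op_req fields used above:

/*
 * Hypothetical user-space helper: record a guessed offset so the kernel
 * can skip the relocation walk when the buffer has not moved.
 */
static void example_set_presumed(struct drm_i915_op_arg *arg,
				 uint32_t handle, uint64_t last_known_offset)
{
	struct drm_bo_op_req *req = &arg->d.req;

	req->bo_req.handle = handle;
	req->bo_req.presumed_offset = last_known_offset;
	req->bo_req.hint |= DRM_BO_HINT_PRESUMED_OFFSET;
	/* If the buffer did move, i915_check_presumed() clears this hint
	 * in our argument list before any -EAGAIN rerun. */
}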

/*
 * Validate, add fence and relocate a block of bos from a userspace list
 */
int i915_validate_buffer_list(struct drm_file *file_priv,
			      unsigned int fence_class, uint64_t data,
			      struct drm_i915_validate_buffer *buffers,
			      uint32_t *num_buffers,
			      uint32_t __user **post_relocs)
{
	struct drm_i915_op_arg arg;
	struct drm_bo_op_req *req = &arg.d.req;
	int ret = 0;
	unsigned buf_count = 0;
	uint32_t buf_handle;
	uint32_t __user *reloc_user_ptr;
	struct drm_i915_validate_buffer *item = buffers;
	*post_relocs = NULL;

	do {
		if (buf_count >= *num_buffers) {
			DRM_ERROR("Buffer count exceeded %d.\n", *num_buffers);
			ret = -EINVAL;
			goto out_err;
		}
		item = buffers + buf_count;
		item->buffer = NULL;
		item->presumed_offset_correct = 0;
		item->idle = I915_RELOC_UNCHECKED;

		if (copy_from_user
		    (&arg, (void __user *)(unsigned long)data, sizeof(arg))) {
			ret = -EFAULT;
			goto out_err;
		}

		ret = 0;
		if (req->op != drm_bo_validate) {
			DRM_ERROR
			    ("Buffer object operation wasn't \"validate\".\n");
			ret = -EINVAL;
			goto out_err;
		}
		item->ret = 0;
		item->data = (void __user *)(unsigned long)data;

		buf_handle = req->bo_req.handle;
		reloc_user_ptr = (uint32_t *)(unsigned long)arg.reloc_ptr;

		/*
		 * Switch mode to post-validation relocations?
		 */

		if (unlikely((buf_count == 0) && (*post_relocs == NULL) &&
			     (reloc_user_ptr != NULL))) {
			uint32_t reloc_type;

			ret = get_user(reloc_type, reloc_user_ptr + 1);
			if (ret)
				goto out_err;

			if (reloc_type == 1)
				*post_relocs = reloc_user_ptr;
		}

		if ((*post_relocs == NULL) && (reloc_user_ptr != NULL)) {
			ret =
			    i915_exec_reloc(file_priv, buf_handle,
					    reloc_user_ptr, buffers, buf_count);
			if (ret)
				goto out_err;
			DRM_MEMORYBARRIER();
		}

		ret = drm_bo_handle_validate(file_priv, req->bo_req.handle,
					     req->bo_req.flags,
					     req->bo_req.mask, req->bo_req.hint,
					     req->bo_req.fence_class,
					     NULL, &item->buffer);
		if (ret) {
			DRM_ERROR("error on handle validate %d\n", ret);
			goto out_err;
		}

		buf_count++;

		ret = i915_check_presumed(&arg, item->buffer,
					  (uint32_t __user *)
					  (unsigned long)data,
					  &item->presumed_offset_correct);
		if (ret)
			goto out_err;

		data = arg.next;
	} while (data != 0);
out_err:
	*num_buffers = buf_count;
	item->ret = (ret != -EAGAIN) ? ret : 0;
	return ret;
}
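
A minimal user-space sketch of the list shape this function consumes,
assuming only the fields referenced above; each entry chains to the next
through 'next', and 0 terminates the walk:

static void example_chain_ops(struct drm_i915_op_arg *ops, unsigned n)
{
	unsigned i;

	for (i = 0; i < n; ++i) {
		ops[i].d.req.op = drm_bo_validate;
		ops[i].reloc_ptr = 0;	/* no relocations for this buffer */
		ops[i].next = (i + 1 < n) ?
		    (uint64_t)(unsigned long)&ops[i + 1] : 0;
	}
}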

/*
 * Remove all buffers from the unfenced list.
 * If the execbuffer operation was aborted, for example due to a signal,
 * this also makes sure that buffers retain their original state and
 * fence pointers.
 * Copy back buffer information to user-space unless we were interrupted
 * by a signal. In which case the IOCTL must be rerun.
 */

static int i915_handle_copyback(struct drm_device *dev,
				struct drm_i915_validate_buffer *buffers,
				unsigned int num_buffers, int ret)
{
	int err = ret;
	int i;
	struct drm_i915_op_arg arg;
	struct drm_buffer_object *bo;

	if (ret)
		drm_putback_buffer_objects(dev);

	if (ret != -EAGAIN) {
		for (i = 0; i < num_buffers; ++i) {
			arg.handled = 1;
			arg.d.rep.ret = buffers->ret;
			bo = buffers->buffer;
			mutex_lock(&bo->mutex);
			drm_bo_fill_rep_arg(bo, &arg.d.rep.bo_info);
			mutex_unlock(&bo->mutex);
			if (__copy_to_user(buffers->data, &arg, sizeof(arg)))
				err = -EFAULT;
			buffers++;
		}
	}

	return err;
}

/*
 * Create a fence object, and if that fails, pretend that everything is
 * OK and just idle the GPU.
 */

void i915_fence_or_sync(struct drm_file *file_priv,
			uint32_t fence_flags,
			struct drm_fence_arg *fence_arg,
			struct drm_fence_object **fence_p)
{
	struct drm_device *dev = file_priv->minor->dev;
	int ret;
	struct drm_fence_object *fence;

	ret = drm_fence_buffer_objects(dev, NULL, fence_flags, NULL, &fence);

	if (ret) {

		/*
		 * Fence creation failed.
		 * Fall back to synchronous operation and idle the engine.
		 */

		(void)i915_emit_mi_flush(dev, MI_READ_FLUSH);
		(void)i915_quiescent(dev);

		if (!(fence_flags & DRM_FENCE_FLAG_NO_USER)) {

			/*
			 * Communicate to user-space that
			 * fence creation has failed and that
			 * the engine is idle.
			 */

			fence_arg->handle = ~0;
			fence_arg->error = ret;
		}
		drm_putback_buffer_objects(dev);
		if (fence_p)
			*fence_p = NULL;
		return;
	}

	if (!(fence_flags & DRM_FENCE_FLAG_NO_USER)) {

		ret = drm_fence_add_user_object(file_priv, fence,
						fence_flags &
						DRM_FENCE_FLAG_SHAREABLE);
		if (!ret)
			drm_fence_fill_arg(fence, fence_arg);
		else {
			/*
			 * Fence user object creation failed.
			 * We must idle the engine here as well, as user-
			 * space expects a fence object to wait on. Since we
			 * have a fence object we wait for it to signal
			 * to indicate engine "sufficiently" idle.
			 */

			(void)drm_fence_object_wait(fence, 0, 1, fence->type);
			drm_fence_usage_deref_unlocked(&fence);
			fence_arg->handle = ~0;
			fence_arg->error = ret;
		}
	}

	if (fence_p)
		*fence_p = fence;
	else if (fence)
		drm_fence_usage_deref_unlocked(&fence);
}

int i915_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = (struct drm_i915_private *)
	    dev->dev_private;
	struct drm_i915_master_private *master_priv =
	    (struct drm_i915_master_private *)
	    dev->primary->master->driver_priv;
	struct drm_i915_sarea *sarea_priv = (struct drm_i915_sarea *)
	    master_priv->sarea_priv;
	struct drm_i915_execbuffer *exec_buf = data;
	struct drm_i915_batchbuffer *batch = &exec_buf->batch;
	struct drm_fence_arg *fence_arg = &exec_buf->fence_arg;
	int num_buffers;
	int ret;
	uint32_t __user *post_relocs;

	if (!dev_priv->allow_batchbuffer) {
		DRM_ERROR("Batchbuffer ioctl disabled\n");
		return -EINVAL;
	}

	if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
							batch->num_cliprects *
							sizeof(struct
							       drm_clip_rect)))
		return -EFAULT;

	if (exec_buf->num_buffers > dev_priv->max_validate_buffers)
		return -EINVAL;

	ret = drm_bo_read_lock(&dev->bm.bm_lock, 1);
	if (ret)
		return ret;

	/*
	 * The cmdbuf_mutex makes sure the validate-submit-fence
	 * operation is atomic.
	 */

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (ret) {
		drm_bo_read_unlock(&dev->bm.bm_lock);
		return -EAGAIN;
	}

	num_buffers = exec_buf->num_buffers;

	if (!dev_priv->val_bufs) {
		dev_priv->val_bufs =
		    vmalloc(sizeof(struct drm_i915_validate_buffer) *
			    dev_priv->max_validate_buffers);
	}
	if (!dev_priv->val_bufs) {
		drm_bo_read_unlock(&dev->bm.bm_lock);
		mutex_unlock(&dev_priv->cmdbuf_mutex);
		return -ENOMEM;
	}

	/* validate buffer list + fixup relocations */
	ret = i915_validate_buffer_list(file_priv, 0, exec_buf->ops_list,
					dev_priv->val_bufs, &num_buffers,
					&post_relocs);
	if (ret)
		goto out_err0;

	if (post_relocs) {
		ret = i915_post_relocs(file_priv, post_relocs,
				       dev_priv->val_bufs, num_buffers);
		if (ret)
			goto out_err0;
	}

	/* make sure all previous memory operations have passed */
	DRM_MEMORYBARRIER();

	if (!post_relocs) {
		drm_agp_chipset_flush(dev);
		batch->start =
		    dev_priv->val_bufs[num_buffers - 1].buffer->offset;
	} else {
		batch->start += dev_priv->val_bufs[0].buffer->offset;
	}

	DRM_DEBUG("i915 exec batchbuffer, start %x used %d cliprects %d\n",
		  batch->start, batch->used, batch->num_cliprects);

	ret = i915_dispatch_batchbuffer(dev, batch);
	if (ret)
		goto out_err0;
	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
	i915_fence_or_sync(file_priv, fence_arg->flags, fence_arg, NULL);

out_err0:
	ret = i915_handle_copyback(dev, dev_priv->val_bufs, num_buffers, ret);
	mutex_lock(&dev->struct_mutex);
	i915_dereference_buffers_locked(dev_priv->val_bufs, num_buffers);
	mutex_unlock(&dev->struct_mutex);
	mutex_unlock(&dev_priv->cmdbuf_mutex);
	drm_bo_read_unlock(&dev->bm.bm_lock);
	return ret;
}
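
A hypothetical caller-side sketch of the whole path, assuming the
DRM_IOCTL_I915_EXECBUFFER ioctl number and the usual libdrm headers; only
the struct fields used by the kernel code above are filled in:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include "i915_drm.h"

static void example_submit(int fd, struct drm_i915_op_arg *first_op,
			   uint32_t num_buffers, uint32_t used_bytes)
{
	struct drm_i915_execbuffer eb;

	memset(&eb, 0, sizeof(eb));
	eb.batch.used = used_bytes;
	eb.num_buffers = num_buffers;
	eb.ops_list = (uint64_t)(unsigned long)first_op;

	/* The kernel computes batch.start from the validated offset of
	 * the last buffer (classic relocs) or adds the first buffer's
	 * offset (post-validation relocs). */
	if (ioctl(fd, DRM_IOCTL_I915_EXECBUFFER, &eb) != 0)
		perror("i915 execbuffer");
}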
@ -162,11 +162,13 @@ static int i915_fence_emit_sequence(struct drm_device *dev, uint32_t class,

void i915_fence_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
	struct drm_fence_manager *fm = &dev->fm;
	struct drm_fence_class_manager *fc = &fm->fence_class[0];

	write_lock(&fm->lock);
	i915_fence_poll(dev, 0, fc->waiting_types);
	if (likely(dev_priv->fence_irq_on))
		i915_fence_poll(dev, 0, fc->waiting_types);
	write_unlock(&fm->lock);
}

@ -198,8 +198,8 @@ nouveau_bo_move_m2mf(struct drm_buffer_object *bo, int evict, int no_wait,

/* Flip pages into the GART and move if we can. */
static int
nouveau_bo_move_gart(struct drm_buffer_object *bo, int evict, int no_wait,
		     struct drm_bo_mem_reg *new_mem)
nouveau_bo_move_flipd(struct drm_buffer_object *bo, int evict, int no_wait,
		      struct drm_bo_mem_reg *new_mem)
{
	struct drm_device *dev = bo->dev;
	struct drm_bo_mem_reg tmp_mem;

@ -212,11 +212,10 @@ nouveau_bo_move_gart(struct drm_buffer_object *bo, int evict, int no_wait,
			  DRM_BO_FLAG_FORCE_CACHING);

	ret = drm_bo_mem_space(bo, &tmp_mem, no_wait);

	if (ret)
		return ret;

	ret = drm_ttm_bind (bo->ttm, &tmp_mem);
	ret = drm_ttm_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out_cleanup;

@ -234,6 +233,7 @@ out_cleanup:
		tmp_mem.mm_node = NULL;
		mutex_unlock(&dev->struct_mutex);
	}

	return ret;
}

@ -246,22 +246,19 @@ nouveau_bo_move(struct drm_buffer_object *bo, int evict, int no_wait,
	if (new_mem->mem_type == DRM_BO_MEM_LOCAL) {
		if (old_mem->mem_type == DRM_BO_MEM_LOCAL)
			return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
#if 0
		if (!nouveau_bo_move_to_gart(bo, evict, no_wait, new_mem))
#endif
		if (nouveau_bo_move_flipd(bo, evict, no_wait, new_mem))
			return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
	}
	else
	if (old_mem->mem_type == DRM_BO_MEM_LOCAL) {
#if 0
		if (nouveau_bo_move_to_gart(bo, evict, no_wait, new_mem))
#endif
		if (1 /*nouveau_bo_move_flips(bo, evict, no_wait, new_mem)*/)
			return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
	}
	else {
//		if (nouveau_bo_move_m2mf(bo, evict, no_wait, new_mem))
		if (nouveau_bo_move_m2mf(bo, evict, no_wait, new_mem))
			return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
	}

	return 0;
}

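The flipd path above is a bounce move: stage the pages through a cached
GART mapping, then move from there. A condensed sketch of that shape,
assuming the drm_bo/drm_mm helpers visible elsewhere in this diff:

static int example_bounce_move(struct drm_buffer_object *bo, int evict,
			       int no_wait, struct drm_bo_mem_reg *new_mem)
{
	struct drm_device *dev = bo->dev;
	struct drm_bo_mem_reg tmp_mem;
	int ret;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	tmp_mem.mask = DRM_BO_FLAG_MEM_TT |
	    DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING;

	ret = drm_bo_mem_space(bo, &tmp_mem, no_wait);	/* reserve GART space */
	if (ret)
		return ret;

	ret = drm_ttm_bind(bo->ttm, &tmp_mem);	/* flip the pages in */
	if (ret == 0)
		ret = drm_bo_move_ttm(bo, evict, no_wait, new_mem);

	if (tmp_mem.mm_node) {	/* give back the temporary node */
		mutex_lock(&dev->struct_mutex);
		drm_mm_put_block(tmp_mem.mm_node);
		tmp_mem.mm_node = NULL;
		mutex_unlock(&dev->struct_mutex);
	}
	return ret;
}
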
@ -86,7 +86,11 @@ static struct drm_driver driver = {
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
#ifdef GIT_REVISION
	.date = GIT_REVISION,
#else
	.date = DRIVER_DATE,
#endif
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,

@ -80,12 +80,11 @@ nouveau_fence_poll(struct drm_device *dev, uint32_t class, uint32_t waiting_type
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_fence_class_manager *fc = &dev->fm.fence_class[class];
	struct nouveau_channel *chan = dev_priv->fifos[class];
	uint32_t pending_types = 0;

	DRM_DEBUG("class=%d\n", class);
	DRM_DEBUG("pending: 0x%08x 0x%08x\n", waiting_types, fc->waiting_types);

	if (pending_types) {
	if (waiting_types & DRM_FENCE_TYPE_EXE) {
		uint32_t sequence = NV_READ(chan->ref_cnt);

		DRM_DEBUG("got 0x%08x\n", sequence);

@ -618,7 +618,7 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
	 * (Not a big limitation anyway.)
	 */

	if ((xfer->mem_stride - xfer->line_length) >= PAGE_SIZE) {
	if ((xfer->mem_stride - xfer->line_length) > 2*PAGE_SIZE) {
		DRM_ERROR("Too large system memory stride. Stride: %d, "
			  "Length: %d\n", xfer->mem_stride, xfer->line_length);
		return -EINVAL;

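The relaxed bound, restated as a predicate for illustration: up to two
pages of per-line slack between the system-memory stride and the blitted
line length are now accepted.

static inline int example_stride_ok(unsigned int mem_stride,
				    unsigned int line_length)
{
	/* mirrors the new test above */
	return (mem_stride - line_length) <= 2 * PAGE_SIZE;
}
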
@ -86,6 +86,7 @@ int xgi_pcie_heap_init(struct xgi_info * info)
		return err;
	}

	info->gart_info.table_mask = DMA_BIT_MASK(32);
	info->gart_info.gart_table_location = DRM_ATI_GART_MAIN;
	info->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
	info->gart_info.table_size = info->dev->sg->pages * sizeof(u32);

@ -565,7 +565,6 @@ union drm_wait_vblank {
enum drm_hotplug_seq_type {
	_DRM_HOTPLUG_SIGNAL = 0x00000001,	/**< Send signal instead of blocking */
};

struct drm_wait_hotplug_request {
	enum drm_hotplug_seq_type type;
	unsigned long signal;

@ -593,14 +592,15 @@ enum drm_modeset_ctl_cmd {
	_DRM_POST_MODESET = 2,
};


/**
 * DRM_IOCTL_MODESET_CTL ioctl argument type
 *
 * \sa drmModesetCtl().
 */
struct drm_modeset_ctl {
	unsigned long arg;
	enum drm_modeset_ctl_cmd cmd;
	uint32_t crtc;
	uint32_t cmd;
};

/**

@ -205,9 +205,9 @@
0x1002 0x71D6 CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon X1700 XT"
0x1002 0x71DA CHIP_RV530|RADEON_NEW_MEMMAP "ATI FireGL V5200"
0x1002 0x71DE CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon X1700"
0x1002 0x7200 CHIP_RV530|RADEON_NEW_MEMMAP "ATI Radeon X2300HD"
0x1002 0x7210 CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon HD 2300"
0x1002 0x7211 CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon HD 2300"
0x1002 0x7200 CHIP_RV515|RADEON_NEW_MEMMAP "ATI Radeon X2300HD"
0x1002 0x7210 CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon HD 2300"
0x1002 0x7211 CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon HD 2300"
0x1002 0x7240 CHIP_R580|RADEON_NEW_MEMMAP "ATI Radeon X1950"
0x1002 0x7243 CHIP_R580|RADEON_NEW_MEMMAP "ATI Radeon X1900"
0x1002 0x7244 CHIP_R580|RADEON_NEW_MEMMAP "ATI Radeon X1950"

@ -238,6 +238,7 @@
0x1002 0x7834 CHIP_RS300|RADEON_IS_IGP|RADEON_NEW_MEMMAP "ATI Radeon RS350 9000/9100 IGP"
0x1002 0x7835 CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Radeon RS350 Mobility IGP"
0x1002 0x791e CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART "ATI Radeon RS690 X1250 IGP"
0x1002 0x791f CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART "ATI Radeon RS690 X1270 IGP"

[r128]
0x1002 0x4c45 0 "ATI Rage 128 Mobility LE (PCI)"

@ -243,7 +243,7 @@ static int i915_initialize(struct drm_device * dev,
	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;

	/* Program Hardware Status Page */
	if (!IS_G33(dev)) {
	if (!I915_NEED_GFX_HWS(dev)) {
		dev_priv->status_page_dmah =
		    drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);

@ -578,8 +578,8 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
	return 0;
}

static int i915_dispatch_batchbuffer(struct drm_device * dev,
				     drm_i915_batchbuffer_t * batch)
int i915_dispatch_batchbuffer(struct drm_device * dev,
			      drm_i915_batchbuffer_t * batch)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_clip_rect __user *boxes = batch->cliprects;

@ -708,7 +708,7 @@ void i915_dispatch_flip(struct drm_device * dev, int planes, int sync)
#endif
}

static int i915_quiescent(struct drm_device *dev)
int i915_quiescent(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

@ -795,564 +795,6 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
#define DRM_DEBUG_RELOCATION	0
#endif

#ifdef I915_HAVE_BUFFER

struct i915_relocatee_info {
	struct drm_buffer_object *buf;
	unsigned long offset;
	u32 *data_page;
	unsigned page_offset;
	struct drm_bo_kmap_obj kmap;
	int is_iomem;
	int idle;
	int evicted;
};

struct drm_i915_validate_buffer {
	struct drm_buffer_object *buffer;
	struct drm_bo_info_rep rep;
	int presumed_offset_correct;
	void __user *data;
	int ret;
};

static void i915_dereference_buffers_locked(struct drm_i915_validate_buffer *buffers,
					    unsigned num_buffers)
{
	while (num_buffers--)
		drm_bo_usage_deref_locked(&buffers[num_buffers].buffer);
}

int i915_apply_reloc(struct drm_file *file_priv, int num_buffers,
		     struct drm_i915_validate_buffer *buffers,
		     struct i915_relocatee_info *relocatee,
		     uint32_t *reloc)
{
	unsigned index;
	unsigned long new_cmd_offset;
	u32 val;
	int ret, i;
	int buf_index = -1;

	/*
	 * FIXME: O(relocs * buffers) complexity.
	 */

	for (i = 0; i <= num_buffers; i++)
		if (buffers[i].buffer)
			if (reloc[2] == buffers[i].buffer->base.hash.key)
				buf_index = i;

	if (buf_index == -1) {
		DRM_ERROR("Illegal relocation buffer %08X\n", reloc[2]);
		return -EINVAL;
	}

	/*
	 * Short-circuit relocations that were correctly
	 * guessed by the client
	 */
	if (buffers[buf_index].presumed_offset_correct && !DRM_DEBUG_RELOCATION)
		return 0;

	new_cmd_offset = reloc[0];
	if (!relocatee->data_page ||
	    !drm_bo_same_page(relocatee->offset, new_cmd_offset)) {
		drm_bo_kunmap(&relocatee->kmap);
		relocatee->data_page = NULL;
		relocatee->offset = new_cmd_offset;

		if (unlikely(!relocatee->idle)) {
			ret = drm_bo_wait(relocatee->buf, 0, 0, 0);
			if (ret)
				return ret;
			relocatee->idle = 1;
		}

		ret = drm_bo_kmap(relocatee->buf, new_cmd_offset >> PAGE_SHIFT,
				  1, &relocatee->kmap);
		if (ret) {
			DRM_ERROR("Could not map command buffer to apply relocs\n %08lx", new_cmd_offset);
			return ret;
		}
		relocatee->data_page = drm_bmo_virtual(&relocatee->kmap,
						       &relocatee->is_iomem);
		relocatee->page_offset = (relocatee->offset & PAGE_MASK);

		if (!relocatee->evicted &&
		    relocatee->buf->mem.flags & DRM_BO_FLAG_CACHED_MAPPED) {
			drm_bo_evict_cached(relocatee->buf);
			relocatee->evicted = 1;
		}
	}

	val = buffers[buf_index].buffer->offset;
	index = (reloc[0] - relocatee->page_offset) >> 2;

	/* add in validate */
	val = val + reloc[1];

	if (DRM_DEBUG_RELOCATION) {
		if (buffers[buf_index].presumed_offset_correct &&
		    relocatee->data_page[index] != val) {
			DRM_DEBUG ("Relocation mismatch source %d target %d buffer %d user %08x kernel %08x\n",
				   reloc[0], reloc[1], buf_index, relocatee->data_page[index], val);
		}
	}

	if (relocatee->is_iomem)
		iowrite32(val, relocatee->data_page + index);
	else
		relocatee->data_page[index] = val;
	return 0;
}

int i915_process_relocs(struct drm_file *file_priv,
			uint32_t buf_handle,
			uint32_t __user **reloc_user_ptr,
			struct i915_relocatee_info *relocatee,
			struct drm_i915_validate_buffer *buffers,
			uint32_t num_buffers)
{
	int ret, reloc_stride;
	uint32_t cur_offset;
	uint32_t reloc_count;
	uint32_t reloc_type;
	uint32_t reloc_buf_size;
	uint32_t *reloc_buf = NULL;
	int i;

	/* do a copy from user from the user ptr */
	ret = get_user(reloc_count, *reloc_user_ptr);
	if (ret) {
		DRM_ERROR("Could not map relocation buffer.\n");
		goto out;
	}

	ret = get_user(reloc_type, (*reloc_user_ptr)+1);
	if (ret) {
		DRM_ERROR("Could not map relocation buffer.\n");
		goto out;
	}

	if (reloc_type != 0) {
		DRM_ERROR("Unsupported relocation type requested\n");
		ret = -EINVAL;
		goto out;
	}

	reloc_buf_size = (I915_RELOC_HEADER + (reloc_count * I915_RELOC0_STRIDE)) * sizeof(uint32_t);
	reloc_buf = kmalloc(reloc_buf_size, GFP_KERNEL);
	if (!reloc_buf) {
		DRM_ERROR("Out of memory for reloc buffer\n");
		ret = -ENOMEM;
		goto out;
	}

	if (copy_from_user(reloc_buf, *reloc_user_ptr, reloc_buf_size)) {
		ret = -EFAULT;
		goto out;
	}

	/* get next relocate buffer handle */
	*reloc_user_ptr = (uint32_t *)*(unsigned long *)&reloc_buf[2];

	reloc_stride = I915_RELOC0_STRIDE * sizeof(uint32_t);	/* may be different for other types of relocs */

	DRM_DEBUG("num relocs is %d, next is %p\n", reloc_count, *reloc_user_ptr);

	for (i = 0; i < reloc_count; i++) {
		cur_offset = I915_RELOC_HEADER + (i * I915_RELOC0_STRIDE);

		ret = i915_apply_reloc(file_priv, num_buffers, buffers,
				       relocatee, reloc_buf + cur_offset);
		if (ret)
			goto out;
	}

out:
	if (reloc_buf)
		kfree(reloc_buf);

	if (relocatee->data_page) {
		drm_bo_kunmap(&relocatee->kmap);
		relocatee->data_page = NULL;
	}

	return ret;
}

static int i915_exec_reloc(struct drm_file *file_priv, drm_handle_t buf_handle,
			   uint32_t __user *reloc_user_ptr,
			   struct drm_i915_validate_buffer *buffers,
			   uint32_t buf_count)
{
	struct drm_device *dev = file_priv->minor->dev;
	struct i915_relocatee_info relocatee;
	int ret = 0;
	int b;

	/*
	 * Short circuit relocations when all previous
	 * buffers offsets were correctly guessed by
	 * the client
	 */
	if (!DRM_DEBUG_RELOCATION) {
		for (b = 0; b < buf_count; b++)
			if (!buffers[b].presumed_offset_correct)
				break;

		if (b == buf_count)
			return 0;
	}

	memset(&relocatee, 0, sizeof(relocatee));

	mutex_lock(&dev->struct_mutex);
	relocatee.buf = drm_lookup_buffer_object(file_priv, buf_handle, 1);
	mutex_unlock(&dev->struct_mutex);
	if (!relocatee.buf) {
		DRM_DEBUG("relocatee buffer invalid %08x\n", buf_handle);
		ret = -EINVAL;
		goto out_err;
	}

	mutex_lock (&relocatee.buf->mutex);
	while (reloc_user_ptr) {
		ret = i915_process_relocs(file_priv, buf_handle, &reloc_user_ptr, &relocatee, buffers, buf_count);
		if (ret) {
			DRM_ERROR("process relocs failed\n");
			goto out_err1;
		}
	}

out_err1:
	mutex_unlock (&relocatee.buf->mutex);
	drm_bo_usage_deref_unlocked(&relocatee.buf);
out_err:
	return ret;
}

static int i915_check_presumed(struct drm_i915_op_arg *arg,
			       struct drm_buffer_object *bo,
			       uint32_t __user *data,
			       int *presumed_ok)
{
	struct drm_bo_op_req *req = &arg->d.req;
	uint32_t hint_offset;
	uint32_t hint = req->bo_req.hint;

	*presumed_ok = 0;

	if (!(hint & DRM_BO_HINT_PRESUMED_OFFSET))
		return 0;
	if (bo->offset == req->bo_req.presumed_offset) {
		*presumed_ok = 1;
		return 0;
	}

	/*
	 * We need to turn off the HINT_PRESUMED_OFFSET for this buffer in
	 * the user-space IOCTL argument list, since the buffer has moved,
	 * we're about to apply relocations and we might subsequently
	 * hit an -EAGAIN. In that case the argument list will be reused by
	 * user-space, but the presumed offset is no longer valid.
	 *
	 * Needless to say, this is a bit ugly.
	 */

	hint_offset = (uint32_t *)&req->bo_req.hint - (uint32_t *)arg;
	hint &= ~DRM_BO_HINT_PRESUMED_OFFSET;
	return __put_user(hint, data + hint_offset);
}


/*
 * Validate, add fence and relocate a block of bos from a userspace list
 */
int i915_validate_buffer_list(struct drm_file *file_priv,
			      unsigned int fence_class, uint64_t data,
			      struct drm_i915_validate_buffer *buffers,
			      uint32_t *num_buffers)
{
	struct drm_i915_op_arg arg;
	struct drm_bo_op_req *req = &arg.d.req;
	int ret = 0;
	unsigned buf_count = 0;
	uint32_t buf_handle;
	uint32_t __user *reloc_user_ptr;
	struct drm_i915_validate_buffer *item = buffers;

	do {
		if (buf_count >= *num_buffers) {
			DRM_ERROR("Buffer count exceeded %d\n.", *num_buffers);
			ret = -EINVAL;
			goto out_err;
		}
		item = buffers + buf_count;
		item->buffer = NULL;
		item->presumed_offset_correct = 0;

		buffers[buf_count].buffer = NULL;

		if (copy_from_user(&arg, (void __user *)(unsigned long)data, sizeof(arg))) {
			ret = -EFAULT;
			goto out_err;
		}

		ret = 0;
		if (req->op != drm_bo_validate) {
			DRM_ERROR
			    ("Buffer object operation wasn't \"validate\".\n");
			ret = -EINVAL;
			goto out_err;
		}
		item->ret = 0;
		item->data = (void __user *) (unsigned long) data;

		buf_handle = req->bo_req.handle;
		reloc_user_ptr = (uint32_t *)(unsigned long)arg.reloc_ptr;

		if (reloc_user_ptr) {
			ret = i915_exec_reloc(file_priv, buf_handle, reloc_user_ptr, buffers, buf_count);
			if (ret)
				goto out_err;
			DRM_MEMORYBARRIER();
		}

		ret = drm_bo_handle_validate(file_priv, req->bo_req.handle,
					     req->bo_req.flags, req->bo_req.mask,
					     req->bo_req.hint,
					     req->bo_req.fence_class, 0,
					     &item->rep,
					     &item->buffer);

		if (ret) {
			DRM_ERROR("error on handle validate %d\n", ret);
			goto out_err;
		}

		buf_count++;

		ret = i915_check_presumed(&arg, item->buffer,
					  (uint32_t __user *)
					  (unsigned long) data,
					  &item->presumed_offset_correct);
		if (ret)
			goto out_err;

		data = arg.next;
	} while (data != 0);
out_err:
	*num_buffers = buf_count;
	item->ret = (ret != -EAGAIN) ? ret : 0;
	return ret;
}


/*
 * Remove all buffers from the unfenced list.
 * If the execbuffer operation was aborted, for example due to a signal,
 * this also make sure that buffers retain their original state and
 * fence pointers.
 * Copy back buffer information to user-space unless we were interrupted
 * by a signal. In which case the IOCTL must be rerun.
 */

static int i915_handle_copyback(struct drm_device *dev,
				struct drm_i915_validate_buffer *buffers,
				unsigned int num_buffers, int ret)
{
	int err = ret;
	int i;
	struct drm_i915_op_arg arg;

	if (ret)
		drm_putback_buffer_objects(dev);

	if (ret != -EAGAIN) {
		for (i = 0; i < num_buffers; ++i) {
			arg.handled = 1;
			arg.d.rep.ret = buffers->ret;
			arg.d.rep.bo_info = buffers->rep;
			if (__copy_to_user(buffers->data, &arg, sizeof(arg)))
				err = -EFAULT;
			buffers++;
		}
	}
	return err;
}

/*
 * Create a fence object, and if that fails, pretend that everything is
 * OK and just idle the GPU.
 */

void i915_fence_or_sync(struct drm_file *file_priv,
			uint32_t fence_flags,
			struct drm_fence_arg *fence_arg,
			struct drm_fence_object **fence_p)
{
	struct drm_device *dev = file_priv->minor->dev;
	int ret;
	struct drm_fence_object *fence;

	ret = drm_fence_buffer_objects(dev, NULL, fence_flags,
				       NULL, &fence);

	if (ret) {

		/*
		 * Fence creation failed.
		 * Fall back to synchronous operation and idle the engine.
		 */

		(void) i915_emit_mi_flush(dev, MI_READ_FLUSH);
		(void) i915_quiescent(dev);

		if (!(fence_flags & DRM_FENCE_FLAG_NO_USER)) {

			/*
			 * Communicate to user-space that
			 * fence creation has failed and that
			 * the engine is idle.
			 */

			fence_arg->handle = ~0;
			fence_arg->error = ret;
		}

		drm_putback_buffer_objects(dev);
		if (fence_p)
			*fence_p = NULL;
		return;
	}

	if (!(fence_flags & DRM_FENCE_FLAG_NO_USER)) {

		ret = drm_fence_add_user_object(file_priv, fence,
						fence_flags &
						DRM_FENCE_FLAG_SHAREABLE);
		if (!ret)
			drm_fence_fill_arg(fence, fence_arg);
		else {
			/*
			 * Fence user object creation failed.
			 * We must idle the engine here as well, as user-
			 * space expects a fence object to wait on. Since we
			 * have a fence object we wait for it to signal
			 * to indicate engine "sufficiently" idle.
			 */

			(void) drm_fence_object_wait(fence, 0, 1,
						     fence->type);
			drm_fence_usage_deref_unlocked(&fence);
			fence_arg->handle = ~0;
			fence_arg->error = ret;
		}
	}

	if (fence_p)
		*fence_p = fence;
	else if (fence)
		drm_fence_usage_deref_unlocked(&fence);
}


static int i915_execbuffer(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
		master_priv->sarea_priv;
	struct drm_i915_execbuffer *exec_buf = data;
	struct drm_i915_batchbuffer *batch = &exec_buf->batch;
	struct drm_fence_arg *fence_arg = &exec_buf->fence_arg;
	int num_buffers;
	int ret;

	if (!dev_priv->allow_batchbuffer) {
		DRM_ERROR("Batchbuffer ioctl disabled\n");
		return -EINVAL;
	}


	if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
							batch->num_cliprects *
							sizeof(struct drm_clip_rect)))
		return -EFAULT;

	if (exec_buf->num_buffers > dev_priv->max_validate_buffers)
		return -EINVAL;

	ret = drm_bo_read_lock(&dev->bm.bm_lock);
	if (ret)
		return ret;

	/*
	 * The cmdbuf_mutex makes sure the validate-submit-fence
	 * operation is atomic.
	 */

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (ret) {
		drm_bo_read_unlock(&dev->bm.bm_lock);
		return -EAGAIN;
	}

	num_buffers = exec_buf->num_buffers;

	if (!dev_priv->val_bufs) {
		dev_priv->val_bufs =
			vmalloc(sizeof(struct drm_i915_validate_buffer)*
				dev_priv->max_validate_buffers);
	}
	if (!dev_priv->val_bufs) {
		drm_bo_read_unlock(&dev->bm.bm_lock);
		mutex_unlock(&dev_priv->cmdbuf_mutex);
		return -ENOMEM;
	}

	/* validate buffer list + fixup relocations */
	ret = i915_validate_buffer_list(file_priv, 0, exec_buf->ops_list,
					dev_priv->val_bufs, &num_buffers);
	if (ret)
		goto out_err0;

	/* make sure all previous memory operations have passed */
	DRM_MEMORYBARRIER();
	drm_agp_chipset_flush(dev);

	/* submit buffer */
	batch->start = dev_priv->val_bufs[num_buffers-1].buffer->offset;

	DRM_DEBUG("i915 exec batchbuffer, start %x used %d cliprects %d\n",
		  batch->start, batch->used, batch->num_cliprects);

	ret = i915_dispatch_batchbuffer(dev, batch);
	if (ret)
		goto out_err0;

	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

	i915_fence_or_sync(file_priv, fence_arg->flags, fence_arg, NULL);

out_err0:

	/* handle errors */
	ret = i915_handle_copyback(dev, dev_priv->val_bufs, num_buffers, ret);
	mutex_lock(&dev->struct_mutex);
	i915_dereference_buffers_locked(dev_priv->val_bufs, num_buffers);
	mutex_unlock(&dev->struct_mutex);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
	drm_bo_read_unlock(&dev->bm.bm_lock);
	return ret;
}
#endif

int i915_do_cleanup_pageflip(struct drm_device * dev)
{
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

@ -1548,6 +990,9 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
	struct drm_i915_private *dev_priv = dev->dev_private;
	drm_i915_hws_addr_t *hws = data;

	if (!I915_NEED_GFX_HWS(dev))
		return -EINVAL;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;

@ -362,13 +362,28 @@ typedef struct drm_i915_hws_addr {
 * 2 - buffer handle
 * 3 - reserved (for optimisations later).
 */
/*
 * type 1 relocation has 4-uint32_t stride.
 * Hangs off the first item in the op list.
 * Performed after all validations are done.
 * Try to group relocs into the same relocatee together for
 * performance reasons.
 * 0 - offset into buffer
 * 1 - delta to add in
 * 2 - buffer index in op list.
 * 3 - relocatee index in op list.
 */
#define I915_RELOC_TYPE_0 0
#define I915_RELOC0_STRIDE 4
#define I915_RELOC_TYPE_1 1
#define I915_RELOC1_STRIDE 4


struct drm_i915_op_arg {
	uint64_t next;
	uint64_t reloc_ptr;
	int handled;
	unsigned int pad64;
	union {
		struct drm_bo_op_req req;
		struct drm_bo_arg_rep rep;

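A hypothetical user-space image of one such type-1 list; the 4-dword
header size (count, type, 64-bit pointer to the next list) is an
assumption consistent with the execbuf code reading the next-list pointer
out of dwords 2-3:

static uint32_t example_reloc_list[4 + 1 * I915_RELOC1_STRIDE] = {
	1,			/* number of relocations in this list */
	I915_RELOC_TYPE_1,	/* relocation type */
	0, 0,			/* user pointer to the next list: none */
	/* entry 0 */
	0x80,			/* 0: byte offset into the relocatee */
	0x0,			/* 1: delta to add to the target offset */
	2,			/* 2: buffer index in the op list */
	0,			/* 3: relocatee index in the op list */
};
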
@ -37,7 +37,7 @@

#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
#define DRIVER_DATE "20070209"
#define DRIVER_DATE "20080312"

#if defined(__linux__)
#define I915_HAVE_FENCE

@ -61,7 +61,7 @@
 */
#define DRIVER_MAJOR 1
#if defined(I915_HAVE_FENCE) && defined(I915_HAVE_BUFFER)
#define DRIVER_MINOR 12
#define DRIVER_MINOR 13
#else
#define DRIVER_MINOR 6
#endif

@ -175,6 +175,7 @@ struct drm_i915_private {
	u8 saveLBB;
	u32 saveDSPACNTR;
	u32 saveDSPBCNTR;
	u32 saveDSPARB;
	u32 savePIPEACONF;
	u32 savePIPEBCONF;
	u32 savePIPEASRC;

@ -244,6 +245,7 @@ struct drm_i915_private {
	u32 saveIIR;
	u32 saveIMR;
	u32 saveCACHE_MODE_0;
	u32 saveD_STATE;
	u32 saveDSPCLK_GATE_D;
	u32 saveMI_ARB_STATE;
	u32 saveSWF0[16];

@ -287,6 +289,9 @@ extern int i915_emit_mi_flush(struct drm_device *dev, uint32_t flush);
extern int i915_driver_firstopen(struct drm_device *dev);
extern int i915_do_cleanup_pageflip(struct drm_device *dev);
extern int i915_dma_cleanup(struct drm_device *dev);
extern int i915_dispatch_batchbuffer(struct drm_device * dev,
				     drm_i915_batchbuffer_t * batch);
extern int i915_quiescent(struct drm_device *dev);

/* i915_irq.c */
extern int i915_irq_emit(struct drm_device *dev, void *data,

@ -344,6 +349,10 @@ extern uint64_t i915_evict_flags(struct drm_buffer_object *bo);
extern int i915_move(struct drm_buffer_object *bo, int evict,
		     int no_wait, struct drm_bo_mem_reg *new_mem);
void i915_flush_ttm(struct drm_ttm *ttm);
/* i915_execbuf.c */
int i915_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file_priv);

#endif

#ifdef __linux__

@ -468,6 +477,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define GFX_OP_USER_INTERRUPT		((0<<29)|(2<<23))
#define GFX_OP_BREAKPOINT_INTERRUPT	((0<<29)|(1<<23))
#define CMD_REPORT_HEAD			(7<<23)
#define CMD_STORE_DWORD_IMM		((0x20<<23) | (0x1 << 22) | 0x1)
#define CMD_STORE_DWORD_IDX		((0x21<<23) | 0x1)
#define CMD_OP_BATCH_BUFFER		((0x0<<29)|(0x30<<23)|0x1)

@ -883,6 +893,8 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define BLT_DEPTH_16_1555		(2<<24)
#define BLT_DEPTH_32			(3<<24)
#define BLT_ROP_GXCOPY			(0xcc<<16)
#define XY_SRC_COPY_BLT_SRC_TILED	(1<<15)
#define XY_SRC_COPY_BLT_DST_TILED	(1<<11)

#define MI_BATCH_BUFFER		((0x30<<23)|1)
#define MI_BATCH_BUFFER_START	(0x31<<23)

@ -980,6 +992,8 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
/** P1 value is 2 greater than this field */
# define VGA0_PD_P1_MASK	(0x1f << 0)

/* PCI D state control register */
#define D_STATE		0x6104
#define DSPCLK_GATE_D	0x6200

/* I830 CRTC registers */

@ -1871,6 +1885,12 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define PIPECONF_INTERLACE_W_FIELD_INDICATION	(6 << 21)
#define PIPECONF_INTERLACE_FIELD_0_ONLY		(7 << 21)

#define DSPARB			0x70030
#define DSPARB_CSTART_MASK	(0x7f << 7)
#define DSPARB_CSTART_SHIFT	7
#define DSPARB_BSTART_MASK	(0x7f)
#define DSPARB_BSTART_SHIFT	0

#define PIPEBCONF		0x71008
#define PIPEBCONF_ENABLE	(1<<31)
#define PIPEBCONF_DISABLE	0

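Illustrative accessors (not in the source) showing how the display FIFO
split points come out of a saved DSPARB value using the masks above:

static inline u32 example_dsparb_bstart(u32 dsparb)
{
	return (dsparb & DSPARB_BSTART_MASK) >> DSPARB_BSTART_SHIFT;
}

static inline u32 example_dsparb_cstart(u32 dsparb)
{
	return (dsparb & DSPARB_CSTART_MASK) >> DSPARB_CSTART_SHIFT;
}
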
@ -1990,8 +2010,8 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a)
#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
#define IS_I945GM(dev) ((dev)->pci_device == 0x27A2)

#define IS_I945GM(dev) ((dev)->pci_device == 0x27A2 ||\
			(dev)->pci_device == 0x27AE)
#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \
		       (dev)->pci_device == 0x2982 || \
		       (dev)->pci_device == 0x2992 || \

@ -2014,6 +2034,8 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
|
|||
#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
|
||||
IS_I945GM(dev) || IS_I965GM(dev) || IS_IGD_GM(dev))
|
||||
|
||||
#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_IGD_GM(dev))
|
||||
|
||||
#define PRIMARY_RINGBUFFER_SIZE (128*1024)
|
||||
|
||||
#endif
|
||||
|
|
|
@@ -158,10 +158,21 @@ static void i915_vblank_tasklet(struct drm_device *dev)
			 XY_SRC_COPY_BLT_WRITE_ALPHA |
			 XY_SRC_COPY_BLT_WRITE_RGB)
			: XY_SRC_COPY_BLT_CMD;
	u32 pitchropcpp = (sarea_priv->pitch * cpp) | (0xcc << 16) |
			  (cpp << 23) | (1 << 24);
	u32 src_pitch = sarea_priv->pitch * cpp;
	u32 dst_pitch = sarea_priv->pitch * cpp;
	/* COPY rop (0xcc), map cpp to magic color depth constants */
	u32 ropcpp = (0xcc << 16) | ((cpp - 1) << 24);
	RING_LOCALS;

	if (sarea_priv->front_tiled) {
		cmd |= XY_SRC_COPY_BLT_DST_TILED;
		dst_pitch >>= 2;
	}
	if (sarea_priv->back_tiled) {
		cmd |= XY_SRC_COPY_BLT_SRC_TILED;
		src_pitch >>= 2;
	}

	counter[0] = drm_vblank_count(dev, 0);
	counter[1] = drm_vblank_count(dev, 1);
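The `(cpp - 1) << 24` expression above encodes the blit's colour depth from bytes-per-pixel. A standalone sanity check (plain C, not driver code; reading (1<<24) as the blitter's 16bpp/565 depth code is an inference, since only BLT_DEPTH_16_1555 and BLT_DEPTH_32 appear in the defines earlier in this diff):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Values from the i915_drv.h hunk earlier in this diff. */
	const uint32_t BLT_ROP_GXCOPY = 0xcc << 16;
	const uint32_t BLT_DEPTH_32   = 3 << 24;

	for (uint32_t cpp = 2; cpp <= 4; cpp += 2) {
		uint32_t ropcpp = (0xcc << 16) | ((cpp - 1) << 24);
		printf("cpp=%u -> ropcpp=0x%08x rop_ok=%d depth32=%d\n",
		       cpp, ropcpp,
		       (ropcpp & BLT_ROP_GXCOPY) == BLT_ROP_GXCOPY,
		       (ropcpp & (3u << 24)) == BLT_DEPTH_32);
	}
	return 0;
}

The `>>= 2` on tiled pitches likewise suggests the blitter takes a tiled surface's pitch in dwords rather than bytes, though the diff itself doesn't say so.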
@@ -190,9 +201,6 @@ static void i915_vblank_tasklet(struct drm_device *dev)
		master_priv = vbl_swap->minor->master->driver_priv;
		sarea_priv = master_priv->sarea_priv;

		pitchropcpp = (sarea_priv->pitch * cpp) | (0xcc << 16) |
			      (cpp << 23) | (1 << 24);

		list_del(list);
		dev_priv->swaps_pending--;
		drm_vblank_put(dev, pipe);
@@ -287,16 +295,29 @@ static void i915_vblank_tasklet(struct drm_device *dev)
	}

	if (init_drawrect) {
		BEGIN_LP_RING(6);
		int width = sarea_priv->width;
		int height = sarea_priv->height;
		if (IS_I965G(dev)) {
			BEGIN_LP_RING(4);

			OUT_RING(GFX_OP_DRAWRECT_INFO);
			OUT_RING(0);
			OUT_RING(0);
			OUT_RING(sarea_priv->width | sarea_priv->height << 16);
			OUT_RING(sarea_priv->width | sarea_priv->height << 16);
			OUT_RING(0);

			ADVANCE_LP_RING();
			OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
			OUT_RING(0);
			OUT_RING(((width - 1) & 0xffff) | ((height - 1) << 16));
			OUT_RING(0);

			ADVANCE_LP_RING();
		} else {
			BEGIN_LP_RING(6);

			OUT_RING(GFX_OP_DRAWRECT_INFO);
			OUT_RING(0);
			OUT_RING(0);
			OUT_RING(((width - 1) & 0xffff) | ((height - 1) << 16));
			OUT_RING(0);
			OUT_RING(0);

			ADVANCE_LP_RING();
		}

		sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT;
@@ -321,12 +342,12 @@ static void i915_vblank_tasklet(struct drm_device *dev)
		BEGIN_LP_RING(8);

		OUT_RING(cmd);
		OUT_RING(pitchropcpp);
		OUT_RING(ropcpp | dst_pitch);
		OUT_RING((y1 << 16) | rect->x1);
		OUT_RING((y2 << 16) | rect->x2);
		OUT_RING(offsets[front]);
		OUT_RING((y1 << 16) | rect->x1);
		OUT_RING(pitchropcpp & 0xffff);
		OUT_RING(src_pitch);
		OUT_RING(offsets[back]);

		ADVANCE_LP_RING();
@@ -87,6 +87,10 @@ struct drm_nouveau_gpuobj_free {
#define NOUVEAU_MEM_MAPPED 0x00000100
#define NOUVEAU_MEM_INSTANCE 0x00000200 /* internal */
#define NOUVEAU_MEM_NOTIFIER 0x00000400 /* internal */
#define NOUVEAU_MEM_NOVM 0x00000800 /* internal */
#define NOUVEAU_MEM_INTERNAL (NOUVEAU_MEM_INSTANCE | \
			      NOUVEAU_MEM_NOTIFIER | \
			      NOUVEAU_MEM_NOVM)

struct drm_nouveau_mem_alloc {
	int flags;
@@ -136,6 +136,7 @@ struct nouveau_channel
	/* NV50 VM */
	struct nouveau_gpuobj *vm_pd;
	struct nouveau_gpuobj_ref *vm_gart_pt;
	struct nouveau_gpuobj_ref *vm_vram_pt;

	/* Objects */
	struct nouveau_gpuobj_ref *ramin; /* Private instmem */

@@ -290,6 +291,9 @@ struct drm_nouveau_private {
		unsigned long sg_handle;
	} gart_info;

	/* G8x global VRAM page table */
	struct nouveau_gpuobj *vm_vram_pt;

	/* the mtrr covering the FB */
	int fb_mtrr;
@@ -446,6 +446,28 @@ nouveau_crtc_irq_handler(struct drm_device *dev, int crtc)
	}
}

static void
nouveau_nv50_display_irq_handler(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t val = NV_READ(NV50_DISPLAY_SUPERVISOR);

	DRM_INFO("NV50_DISPLAY_INTR - 0x%08X\n", val);

	NV_WRITE(NV50_DISPLAY_SUPERVISOR, val);
}

static void
nouveau_nv50_i2c_irq_handler(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	DRM_INFO("NV50_I2C_INTR - 0x%08X\n", NV_READ(NV50_I2C_CONTROLLER));

	/* This seems to be the way to acknowledge an interrupt. */
	NV_WRITE(NV50_I2C_CONTROLLER, 0x7FFF7FFF);
}

irqreturn_t
nouveau_irq_handler(DRM_IRQ_ARGS)
{

@@ -472,6 +494,16 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
		status &= ~NV_PMC_INTR_0_CRTCn_PENDING;
	}

	if (status & NV_PMC_INTR_0_NV50_DISPLAY_PENDING) {
		nouveau_nv50_display_irq_handler(dev);
		status &= ~NV_PMC_INTR_0_NV50_DISPLAY_PENDING;
	}

	if (status & NV_PMC_INTR_0_NV50_I2C_PENDING) {
		nouveau_nv50_i2c_irq_handler(dev);
		status &= ~NV_PMC_INTR_0_NV50_I2C_PENDING;
	}

	if (status)
		DRM_ERROR("Unhandled PMC INTR status bits 0x%08x\n", status);
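Both new handlers follow the same shape: read the controller's status, log it, then acknowledge by writing back to the same register, while the top-level nouveau_irq_handler() peels each understood bit off `status` and complains about whatever is left. A minimal standalone sketch of that dispatch pattern (hypothetical bit names, not the driver's):

#include <stdint.h>
#include <stdio.h>

#define INTR_DISPLAY (1u << 26)	/* stands in for NV_PMC_INTR_0_NV50_DISPLAY_PENDING */
#define INTR_I2C     (1u << 21)	/* stands in for NV_PMC_INTR_0_NV50_I2C_PENDING */

static void handle_display(void) { puts("display irq"); }
static void handle_i2c(void)     { puts("i2c irq"); }

static void dispatch(uint32_t status)
{
	if (status & INTR_DISPLAY) {
		handle_display();
		status &= ~INTR_DISPLAY;
	}
	if (status & INTR_I2C) {
		handle_i2c();
		status &= ~INTR_I2C;
	}
	if (status)	/* anything not understood gets reported, not dropped */
		fprintf(stderr, "unhandled bits 0x%08x\n", status);
}

int main(void)
{
	dispatch(INTR_DISPLAY | (1u << 3));	/* bit 3 is deliberately unknown */
	return 0;
}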
@@ -468,6 +468,11 @@ int nouveau_mem_init(struct drm_device *dev)
	/* Init FB */
	dev_priv->fb_phys=drm_get_resource_start(dev,1);
	fb_size = nouveau_mem_fb_amount(dev);
	/* On G80, limit VRAM to 512MiB temporarily due to limits in how
	 * we handle VRAM page tables.
	 */
	if (dev_priv->card_type >= NV_50 && fb_size > (512 * 1024 * 1024))
		fb_size = (512 * 1024 * 1024);
	/* On at least NV40, RAMIN is actually at the end of vram.
	 * We don't want to allocate this... */
	if (dev_priv->card_type >= NV_40)
@@ -540,6 +545,21 @@ int nouveau_mem_init(struct drm_device *dev)
		}
	}

	/* G8x: Allocate shared page table to map real VRAM pages into */
	if (dev_priv->card_type >= NV_50) {
		unsigned size = ((512 * 1024 * 1024) / 65536) * 8;

		ret = nouveau_gpuobj_new(dev, NULL, size, 0,
					 NVOBJ_FLAG_ZERO_ALLOC |
					 NVOBJ_FLAG_ALLOW_NO_REFS,
					 &dev_priv->vm_vram_pt);
		if (ret) {
			DRM_ERROR("Error creating VRAM page table: %d\n", ret);
			return ret;
		}
	}

	return 0;
}
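The size expression is easy to verify: 512MiB of VRAM divided into 64KiB GPU pages gives 8192 page-table entries, and at 8 bytes per entry (two 32-bit words, as the mapping loop further down writes) the table itself is exactly 64KiB. A standalone check of the arithmetic, not driver code:

#include <stdio.h>

int main(void)
{
	unsigned vm_span   = 512u * 1024 * 1024;	/* VRAM covered by the table */
	unsigned page_size = 65536;			/* 64KiB GPU pages */
	unsigned pte_bytes = 8;				/* two 32-bit words per PTE */
	unsigned size = (vm_span / page_size) * pte_bytes;

	printf("%u PTEs, table = %u bytes (%u KiB)\n",
	       vm_span / page_size, size, size / 1024);
	return 0;
}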
@@ -558,6 +578,12 @@ struct mem_block* nouveau_mem_alloc(struct drm_device *dev, int alignment,
	if (alignment < PAGE_SHIFT)
		alignment = PAGE_SHIFT;

	/* Align allocation sizes to 64KiB blocks on G8x. We use a 64KiB
	 * page size in the GPU VM.
	 */
	if (flags & NOUVEAU_MEM_FB && dev_priv->card_type >= NV_50)
		size = (size + (64 * 1024)) & ~((64 * 1024) - 1);

	/*
	 * Warn about 0 sized allocations, but let it go through. It'll return 1 page
	 */
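One detail worth noting: `(size + 65536) & ~65535` always advances to the next 64KiB boundary, so a size that is already 64KiB-aligned gains a whole extra block, unlike the conventional `(size + 65535) & ~65535` round-up. Whether that extra block is intentional headroom isn't stated in the commit; a standalone comparison:

#include <stdio.h>

int main(void)
{
	unsigned blk = 64 * 1024;
	unsigned sizes[] = { 1, 65536, 65537 };

	for (int i = 0; i < 3; i++) {
		unsigned s = sizes[i];
		unsigned as_committed = (s + blk) & ~(blk - 1);		/* form used above */
		unsigned round_up     = (s + blk - 1) & ~(blk - 1);	/* classic round-up */
		printf("%6u -> %6u (as committed) vs %6u (round-up)\n",
		       s, as_committed, round_up);
	}
	return 0;
}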
@@ -612,6 +638,30 @@ struct mem_block* nouveau_mem_alloc(struct drm_device *dev, int alignment,
alloc_ok:
	block->flags=type;

	/* On G8x, map memory into VM */
	if (block->flags & NOUVEAU_MEM_FB && dev_priv->card_type >= NV_50 &&
	    !(flags & NOUVEAU_MEM_NOVM)) {
		struct nouveau_gpuobj *pt = dev_priv->vm_vram_pt;
		unsigned offset = block->start;
		unsigned count = block->size / 65536;

		if (!pt) {
			DRM_ERROR("vm alloc without vm pt\n");
			nouveau_mem_free_block(block);
			return NULL;
		}

		while (count--) {
			unsigned pte = offset / 65536;

			INSTANCE_WR(pt, (pte * 2) + 0, offset | 1);
			INSTANCE_WR(pt, (pte * 2) + 1, 0x00000000);
			offset += 65536;
		}
	} else {
		block->flags |= NOUVEAU_MEM_NOVM;
	}

	if (flags&NOUVEAU_MEM_MAPPED)
	{
		struct drm_map_list *entry;
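The loop above fixes the table's layout: one PTE per 64KiB page, two 32-bit words per PTE, with the low word holding the page's byte offset into VRAM and bit 0 looking like a valid/present flag (that reading of bit 0 is an inference, not something the commit states). A toy stand-in for the same indexing, with a plain array in place of INSTANCE_WR():

#include <stdint.h>
#include <stdio.h>

#define GPU_PAGE 65536u

static uint32_t pt[8192 * 2];	/* toy table: 32-bit words, 2 per PTE */

static void map_block(uint32_t start, uint32_t size)
{
	uint32_t offset = start;
	uint32_t count = size / GPU_PAGE;

	while (count--) {
		uint32_t pte = offset / GPU_PAGE;
		pt[pte * 2 + 0] = offset | 1;	/* address + presumed valid bit */
		pt[pte * 2 + 1] = 0;
		offset += GPU_PAGE;
	}
}

int main(void)
{
	map_block(3 * GPU_PAGE, 2 * GPU_PAGE);	/* map pages 3 and 4 */
	printf("pte3 = 0x%08x, pte4 = 0x%08x\n", pt[6], pt[8]);
	return 0;
}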
@@ -653,9 +703,34 @@ alloc_ok:

void nouveau_mem_free(struct drm_device* dev, struct mem_block* block)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	DRM_DEBUG("freeing 0x%llx type=0x%08x\n", block->start, block->flags);

	if (block->flags&NOUVEAU_MEM_MAPPED)
		drm_rmmap(dev, block->map);

	/* G8x: Remove pages from vm */
	if (block->flags & NOUVEAU_MEM_FB && dev_priv->card_type >= NV_50 &&
	    !(block->flags & NOUVEAU_MEM_NOVM)) {
		struct nouveau_gpuobj *pt = dev_priv->vm_vram_pt;
		unsigned offset = block->start;
		unsigned count = block->size / 65536;

		if (!pt) {
			DRM_ERROR("vm free without vm pt\n");
			goto out_free;
		}

		while (count--) {
			unsigned pte = offset / 65536;
			INSTANCE_WR(pt, (pte * 2) + 0, 0);
			INSTANCE_WR(pt, (pte * 2) + 1, 0);
			offset += 65536;
		}
	}

out_free:
	nouveau_mem_free_block(block);
}
@@ -670,6 +745,9 @@ int nouveau_ioctl_mem_alloc(struct drm_device *dev, void *data, struct drm_file

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

	if (alloc->flags & NOUVEAU_MEM_INTERNAL)
		return -EINVAL;

	block=nouveau_mem_alloc(dev, alloc->alignment, alloc->size,
				alloc->flags, file_priv);
	if (!block)
@@ -983,7 +983,11 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
		return ret;
	}

	/* NV50 VM, point offset 0-512MiB at shared PCIEGART table */
	/* NV50 VM
	 *  - Allocate per-channel page-directory
	 *  - Point offset 0-512MiB at shared PCIEGART table
	 *  - Point offset 512-1024MiB at shared VRAM table
	 */
	if (dev_priv->card_type >= NV_50) {
		uint32_t vm_offset;

@@ -1004,6 +1008,14 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
		INSTANCE_WR(chan->vm_pd, (0+0)/4,
			    chan->vm_gart_pt->instance | 0x03);
		INSTANCE_WR(chan->vm_pd, (0+4)/4, 0x00000000);

		if ((ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
						  dev_priv->vm_vram_pt,
						  &chan->vm_vram_pt)))
			return ret;
		INSTANCE_WR(chan->vm_pd, (8+0)/4,
			    chan->vm_vram_pt->instance | 0x61);
		INSTANCE_WR(chan->vm_pd, (8+4)/4, 0x00000000);
	}

	/* RAMHT */
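Read together, the two pairs of writes give the page directory's layout: INSTANCE_WR() addresses 32-bit words (hence the /4), each directory entry is eight bytes, entry 0 (bytes 0-7) points the VM's first 512MiB at the GART page table, and entry 1 (bytes 8-15) points the next 512MiB at the shared VRAM table, matching the VRAM ctxdma that gets placed at 512MiB in the next hunk. The low bits 0x03 and 0x61 are carried over from traces; their individual meanings aren't spelled out here. A toy version of the indexing:

#include <stdint.h>
#include <stdio.h>

static uint32_t vm_pd[4];	/* toy page directory: 2 words (8 bytes) per entry */

static void set_pde(unsigned idx, uint32_t table_inst, uint32_t flags)
{
	unsigned byte = idx * 8;	/* 8 bytes per directory entry */
	vm_pd[(byte + 0) / 4] = table_inst | flags;
	vm_pd[(byte + 4) / 4] = 0x00000000;
}

int main(void)
{
	set_pde(0, 0x10000, 0x03);	/* VM 0-512MiB    -> GART page table */
	set_pde(1, 0x20000, 0x61);	/* VM 512MiB-1GiB -> VRAM page table */
	printf("pd[0]=0x%08x pd[2]=0x%08x\n", vm_pd[0], vm_pd[2]);
	return 0;
}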
@@ -1022,6 +1034,17 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
	}

	/* VRAM ctxdma */
	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     512*1024*1024,
					     dev_priv->fb_available_size,
					     NV_DMA_ACCESS_RW,
					     NV_DMA_TARGET_AGP, &vram);
		if (ret) {
			DRM_ERROR("Error creating VRAM ctxdma: %d\n", ret);
			return ret;
		}
	} else
	if ((ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					  0, dev_priv->fb_available_size,
					  NV_DMA_ACCESS_RW,
@@ -1084,6 +1107,7 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)

	nouveau_gpuobj_del(dev, &chan->vm_pd);
	nouveau_gpuobj_ref_del(dev, &chan->vm_gart_pt);
	nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt);

	if (chan->ramin_heap)
		nouveau_mem_takedown(&chan->ramin_heap);
@@ -85,8 +85,10 @@
#define NV03_PMC_INTR_0 0x00000100
# define NV_PMC_INTR_0_PFIFO_PENDING (1<< 8)
# define NV_PMC_INTR_0_PGRAPH_PENDING (1<<12)
# define NV_PMC_INTR_0_NV50_I2C_PENDING (1<<21)
# define NV_PMC_INTR_0_CRTC0_PENDING (1<<24)
# define NV_PMC_INTR_0_CRTC1_PENDING (1<<25)
# define NV_PMC_INTR_0_NV50_DISPLAY_PENDING (1<<26)
# define NV_PMC_INTR_0_CRTCn_PENDING (3<<24)
#define NV03_PMC_INTR_EN_0 0x00000140
# define NV_PMC_INTR_EN_0_MASTER_ENABLE (1<< 0)

@@ -123,6 +125,8 @@
#define NV04_PTIMER_TIME_1 0x00009410
#define NV04_PTIMER_ALARM_0 0x00009420

#define NV50_I2C_CONTROLLER 0x0000E054

#define NV04_PFB_CFG0 0x00100200
#define NV04_PFB_CFG1 0x00100204
#define NV40_PFB_020C 0x0010020C

@@ -535,6 +539,9 @@
#define NV_CRTC1_INTEN 0x00602140
# define NV_CRTC_INTR_VBLANK (1<<0)

/* This name is a partial guess. */
#define NV50_DISPLAY_SUPERVISOR 0x00610024

/* Fifo commands. These are not regs, neither masks */
#define NV03_FIFO_CMD_JUMP 0x20000000
#define NV03_FIFO_CMD_JUMP_OFFSET_MASK 0x1ffffffc
@@ -46,7 +46,7 @@ static int nouveau_init_card_mappings(struct drm_device *dev)
		DRM_ERROR("Unable to initialize the mmio mapping (%d). "
			  "Please report your setup to " DRIVER_EMAIL "\n",
			  ret);
		return 1;
		return -EINVAL;
	}
	DRM_DEBUG("regs mapped ok at 0x%lx\n", dev_priv->mmio->offset);
@@ -384,6 +384,7 @@ static void nouveau_card_takedown(struct drm_device *dev)
		nouveau_sgdma_takedown(dev);

		nouveau_gpuobj_takedown(dev);
		nouveau_gpuobj_del(dev, &dev_priv->vm_vram_pt);

		nouveau_mem_close(dev);
		engine->instmem.takedown(dev);
@@ -11,8 +11,17 @@ nv04_timer_init(struct drm_device *dev)
	NV_WRITE(NV04_PTIMER_INTR_EN_0, 0x00000000);
	NV_WRITE(NV04_PTIMER_INTR_0, 0xFFFFFFFF);

	NV_WRITE(NV04_PTIMER_NUMERATOR, 0x00000008);
	NV_WRITE(NV04_PTIMER_DENOMINATOR, 0x00000003);
	/* Just use the pre-existing values when possible for now; these regs
	 * are not written in nv (driver writer missed a /4 on the address), and
	 * writing 8 and 3 to the correct regs breaks the timings on the LVDS
	 * hardware sequencing microcode.
	 * A correct solution (involving calculations with the GPU PLL) can
	 * be done when kernel modesetting lands
	 */
	if (!NV_READ(NV04_PTIMER_NUMERATOR) || !NV_READ(NV04_PTIMER_DENOMINATOR)) {
		NV_WRITE(NV04_PTIMER_NUMERATOR, 0x00000008);
		NV_WRITE(NV04_PTIMER_DENOMINATOR, 0x00000003);
	}

	return 0;
}
@@ -28,22 +28,6 @@
#include "drm.h"
#include "nouveau_drv.h"

/* The sizes are taken from the difference between the start of two
 * grctx addresses while running the nvidia driver. Probably slightly
 * larger than they actually are, because of other objects being created
 * between the contexts
 */
#define NV40_GRCTX_SIZE (175*1024)
#define NV41_GRCTX_SIZE (92*1024)
#define NV43_GRCTX_SIZE (70*1024)
#define NV46_GRCTX_SIZE (70*1024) /* probably ~64KiB */
#define NV47_GRCTX_SIZE (125*1024)
#define NV49_GRCTX_SIZE (164640)
#define NV4A_GRCTX_SIZE (64*1024)
#define NV4B_GRCTX_SIZE (164640)
#define NV4C_GRCTX_SIZE (25*1024)
#define NV4E_GRCTX_SIZE (25*1024)

/*TODO: deciper what each offset in the context represents. The below
 * contexts are taken from dumps just after the 3D object is
 * created.
@@ -1471,61 +1455,60 @@ nv40_graph_create_context(struct nouveau_channel *chan)
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	void (*ctx_init)(struct drm_device *, struct nouveau_gpuobj *);
	unsigned int ctx_size;
	int ret;

	/* These functions populate the graphics context with a whole heap
	 * of default state. All these functions are very similar, with
	 * a minimal amount of chipset-specific changes. However, as we're
	 * currently dependant on the context programs used by the NVIDIA
	 * binary driver these functions must match the layout expected by
	 * them. Hopefully at some point this will all change.
	 */
	switch (dev_priv->chipset) {
	case 0x40:
		ctx_size = NV40_GRCTX_SIZE;
		ctx_init = nv40_graph_context_init;
		break;
	case 0x41:
	case 0x42:
		ctx_size = NV41_GRCTX_SIZE;
		ctx_init = nv41_graph_context_init;
		break;
	case 0x43:
		ctx_size = NV43_GRCTX_SIZE;
		ctx_init = nv43_graph_context_init;
		break;
	case 0x46:
		ctx_size = NV46_GRCTX_SIZE;
		ctx_init = nv46_graph_context_init;
		break;
	case 0x47:
		DRM_INFO("NV47 warning: If your card behaves strangely, please come to the irc channel\n");
		ctx_size = NV47_GRCTX_SIZE;
		ctx_init = nv47_graph_context_init;
		break;
	case 0x49:
		ctx_size = NV49_GRCTX_SIZE;
		ctx_init = nv49_graph_context_init;
		break;
	case 0x44:
	case 0x4a:
		ctx_size = NV4A_GRCTX_SIZE;
		ctx_init = nv4a_graph_context_init;
		break;
	case 0x4b:
		ctx_size = NV4B_GRCTX_SIZE;
		ctx_init = nv4b_graph_context_init;
		break;
	case 0x4c:
	case 0x67:
		ctx_size = NV4C_GRCTX_SIZE;
		ctx_init = nv4c_graph_context_init;
		break;
	case 0x4e:
		ctx_size = NV4E_GRCTX_SIZE;
		ctx_init = nv4e_graph_context_init;
		break;
	default:
		ctx_size = NV40_GRCTX_SIZE;
		ctx_init = nv40_graph_context_init;
		break;
	}

	if ((ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, ctx_size, 16,
	/* Allocate a 175KiB block of PRAMIN to store the context. This
	 * is massive overkill for a lot of chipsets, but it should be safe
	 * until we're able to implement this properly (will happen at more
	 * or less the same time we're able to write our own context programs.
	 */
	if ((ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 175*1024, 16,
					  NVOBJ_FLAG_ZERO_ALLOC,
					  &chan->ramin_grctx)))
		return ret;
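The fixed 175KiB figure is simply the largest of the per-chipset estimates being deleted above: NV40's own 175*1024 = 179200 bytes, with the next largest (NV49/NV4B) at 164640 bytes, so every known context fits. A one-liner to confirm:

#include <stdio.h>

int main(void)
{
	/* Sizes from the NVxx_GRCTX_SIZE defines removed in this commit. */
	unsigned sizes[] = { 175*1024, 92*1024, 70*1024, 125*1024,
			     164640, 64*1024, 25*1024 };
	unsigned max = 0;

	for (unsigned i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		if (sizes[i] > max)
			max = sizes[i];
	printf("largest ctx = %u bytes, allocation = %u bytes\n",
	       max, 175 * 1024);
	return 0;
}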
@@ -1634,25 +1617,12 @@ nv40_graph_load_context(struct nouveau_channel *chan)
	return 0;
}

/* Some voodoo that makes context switching work without the binary driver
 * initialising the card first.
 *
 * It is possible to effect how the context is saved from PGRAPH into a block
 * of instance memory by altering the values in these tables. This may mean
 * that the context layout of each chipset is slightly different (at least
 * NV40 and C51 are different). It would also be possible for chipsets to
 * have an identical context layout, but pull the data from different PGRAPH
 * registers.
 *
 * TODO: decode the meaning of the magic values, may provide clues about the
 *       differences between the various NV40 chipsets.
 * TODO: one we have a better idea of how each chipset differs, perhaps think
 *       about unifying these instead of providing a separate table for each
 *       chip.
 *
 * mmio-trace dumps from other nv4x/g7x/c5x cards very welcome :)
/* These blocks of "magic numbers" are actually a microcode that the GPU uses
 * to control how graphics contexts get saved and restored between PRAMIN
 * and PGRAPH during a context switch. We're currently using values seen
 * in mmio-traces of the binary driver.
 */
static uint32_t nv40_ctx_voodoo[] = {
static uint32_t nv40_ctx_prog[] = {
	0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
	0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00408f65, 0x00409406,
	0x0040a268, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080,
@@ -1684,7 +1654,7 @@ static uint32_t nv40_ctx_voodoo[] = {
	~0
};

static uint32_t nv41_ctx_voodoo[] = {
static uint32_t nv41_ctx_prog[] = {
	0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
	0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00408f65, 0x00409306,
	0x0040a068, 0x0040198f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042,

@@ -1715,7 +1685,7 @@ static uint32_t nv41_ctx_voodoo[] = {
	0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0
};

static uint32_t nv43_ctx_voodoo[] = {
static uint32_t nv43_ctx_prog[] = {
	0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
	0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409565, 0x00409a06,
	0x0040a868, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080,

@@ -1748,7 +1718,7 @@ static uint32_t nv43_ctx_voodoo[] = {
	~0
};

static uint32_t nv44_ctx_voodoo[] = {
static uint32_t nv44_ctx_prog[] = {
	0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
	0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409a65, 0x00409f06,
	0x0040ac68, 0x0040248f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042,

@@ -1781,7 +1751,7 @@ static uint32_t nv44_ctx_voodoo[] = {
	0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0
};

static uint32_t nv46_ctx_voodoo[] = {
static uint32_t nv46_ctx_prog[] = {
	0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
	0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00408f65, 0x00409306,
	0x0040a068, 0x0040198f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042,

@@ -1812,7 +1782,7 @@ static uint32_t nv46_ctx_voodoo[] = {
	0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0
};

static uint32_t nv47_ctx_voodoo[] = {
static uint32_t nv47_ctx_prog[] = {
	0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
	0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409265, 0x00409606,
	0x0040a368, 0x0040198f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042,

@@ -1845,7 +1815,7 @@ static uint32_t nv47_ctx_voodoo[] = {
};

//this is used for nv49 and nv4b
static uint32_t nv49_4b_ctx_voodoo[] ={
static uint32_t nv49_4b_ctx_prog[] ={
	0x00400564, 0x00400505, 0x00408165, 0x00408206, 0x00409e68, 0x00200020,
	0x0060000a, 0x00700080, 0x00104042, 0x00200020, 0x0060000a, 0x00700000,
	0x001040c5, 0x00400f26, 0x00401068, 0x0060000d, 0x0070008f, 0x0070000e,

@@ -1877,7 +1847,7 @@ static uint32_t nv49_4b_ctx_voodoo[] ={
};


static uint32_t nv4a_ctx_voodoo[] = {
static uint32_t nv4a_ctx_prog[] = {
	0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
	0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409965, 0x00409e06,
	0x0040ac68, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080,

@@ -1910,7 +1880,7 @@ static uint32_t nv4a_ctx_voodoo[] = {
	0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0
};

static uint32_t nv4c_ctx_voodoo[] = {
static uint32_t nv4c_ctx_prog[] = {
	0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
	0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409065, 0x00409406,
	0x0040a168, 0x0040198f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042,

@@ -1941,7 +1911,7 @@ static uint32_t nv4c_ctx_voodoo[] = {
	0x0040a405, 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0
};

static uint32_t nv4e_ctx_voodoo[] = {
static uint32_t nv4e_ctx_prog[] = {
	0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
	0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409565, 0x00409a06,
	0x0040a868, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080,
@@ -1988,7 +1958,7 @@ nv40_graph_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv =
		(struct drm_nouveau_private *)dev->dev_private;
	uint32_t *ctx_voodoo;
	uint32_t *ctx_prog;
	uint32_t vramsz, tmp;
	int i, j;

@@ -1998,34 +1968,34 @@ nv40_graph_init(struct drm_device *dev)
		 NV_PMC_ENABLE_PGRAPH);

	switch (dev_priv->chipset) {
	case 0x40: ctx_voodoo = nv40_ctx_voodoo; break;
	case 0x40: ctx_prog = nv40_ctx_prog; break;
	case 0x41:
	case 0x42: ctx_voodoo = nv41_ctx_voodoo; break;
	case 0x43: ctx_voodoo = nv43_ctx_voodoo; break;
	case 0x44: ctx_voodoo = nv44_ctx_voodoo; break;
	case 0x46: ctx_voodoo = nv46_ctx_voodoo; break;
	case 0x47: ctx_voodoo = nv47_ctx_voodoo; break;
	case 0x49: ctx_voodoo = nv49_4b_ctx_voodoo; break;
	case 0x4a: ctx_voodoo = nv4a_ctx_voodoo; break;
	case 0x4b: ctx_voodoo = nv49_4b_ctx_voodoo; break;
	case 0x42: ctx_prog = nv41_ctx_prog; break;
	case 0x43: ctx_prog = nv43_ctx_prog; break;
	case 0x44: ctx_prog = nv44_ctx_prog; break;
	case 0x46: ctx_prog = nv46_ctx_prog; break;
	case 0x47: ctx_prog = nv47_ctx_prog; break;
	case 0x49: ctx_prog = nv49_4b_ctx_prog; break;
	case 0x4a: ctx_prog = nv4a_ctx_prog; break;
	case 0x4b: ctx_prog = nv49_4b_ctx_prog; break;
	case 0x4c:
	case 0x67: ctx_voodoo = nv4c_ctx_voodoo; break;
	case 0x4e: ctx_voodoo = nv4e_ctx_voodoo; break;
	case 0x67: ctx_prog = nv4c_ctx_prog; break;
	case 0x4e: ctx_prog = nv4e_ctx_prog; break;
	default:
		DRM_ERROR("Unknown ctx_voodoo for chipset 0x%02x\n",
			  dev_priv->chipset);
		ctx_voodoo = NULL;
		DRM_ERROR("Context program for 0x%02x unavailable\n",
			  dev_priv->chipset);
		ctx_prog = NULL;
		break;
	}

	/* Load the context voodoo onto the card */
	if (ctx_voodoo) {
		DRM_DEBUG("Loading context-switch voodoo\n");
	/* Load the context program onto the card */
	if (ctx_prog) {
		DRM_DEBUG("Loading context program\n");
		i = 0;

		NV_WRITE(NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
		while (ctx_voodoo[i] != ~0) {
			NV_WRITE(NV40_PGRAPH_CTXCTL_UCODE_DATA, ctx_voodoo[i]);
		while (ctx_prog[i] != ~0) {
			NV_WRITE(NV40_PGRAPH_CTXCTL_UCODE_DATA, ctx_prog[i]);
			i++;
		}
	}
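The upload protocol visible in that last block is an index/data register pair: UCODE_INDEX is reset to 0 once, then words are streamed into UCODE_DATA until the ~0 sentinel that terminates every table; the index presumably auto-increments on each data write, since the loop never touches it again (that auto-increment is an inference from the loop's shape). A host-side mock of the same loop:

#include <stdint.h>
#include <stdio.h>

static uint32_t ucode[512];
static unsigned ucode_index;

/* Mock MMIO: data writes advance the index, as the hardware
 * appears to do for NV40_PGRAPH_CTXCTL_UCODE_DATA. */
static void wr_index(uint32_t v) { ucode_index = v; }
static void wr_data(uint32_t v)  { ucode[ucode_index++] = v; }

int main(void)
{
	static const uint32_t prog[] = { 0x00400889, 0x00200000, ~0u };
	unsigned i = 0;

	wr_index(0);
	while (prog[i] != ~0u) {
		wr_data(prog[i]);
		i++;
	}
	printf("uploaded %u words\n", i);
	return 0;
}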
@@ -147,44 +147,44 @@ static uint32_t nv84_ctx_voodoo[] = {
};

static uint32_t nv86_ctx_voodoo[] = {
	0x0070008e, 0x0070009c, 0x00200020, 0x00600008, 0x0050004c, 0x00400e89,
	0x00200000, 0x00600007, 0x00300000, 0x00c000ff, 0x00200000, 0x008000ff,
	0x00700009, 0x0040dd4d, 0x00402944, 0x00402905, 0x0040290d, 0x0040b906,
	0x00600005, 0x004015c5, 0x00600011, 0x0040270b, 0x004021c5, 0x00700000,
	0x00700081, 0x00600004, 0x0050004a, 0x00216d80, 0x00600007, 0x00c02801,
	0x0020002e, 0x00800001, 0x005000cb, 0x0090ffff, 0x0091ffff, 0x00200020,
	0x00600008, 0x0050004c, 0x00600009, 0x0040b945, 0x0040d44d, 0x0070009d,
	0x00402dcf, 0x0070009f, 0x0050009f, 0x00402ac0, 0x00200200, 0x00600008,
	0x00402a4f, 0x00402ac0, 0x004030cc, 0x00700081, 0x00200000, 0x00600006,
	0x00700000, 0x00111bfc, 0x00700083, 0x00300000, 0x00216d80, 0x00600007,
	0x00c00b01, 0x0020001e, 0x00800001, 0x005000cb, 0x00c000ff, 0x00700080,
	0x00700083, 0x00200047, 0x00600006, 0x0011020a, 0x00200280, 0x00600007,
	0x00300000, 0x00c000ff, 0x00c800ff, 0x0040c407, 0x00202916, 0x008000ff,
	0x0040508c, 0x005000cb, 0x00a0023f, 0x00200040, 0x00600006, 0x0070000f,
	0x00170202, 0x0011020a, 0x00200032, 0x0010020d, 0x001c0242, 0x00120302,
	0x00140402, 0x00180500, 0x00130509, 0x00150550, 0x00110605, 0x0020000f,
	0x00100607, 0x00110700, 0x00110900, 0x00120902, 0x00110a00, 0x00160b02,
	0x00120b28, 0x00140b2b, 0x00110c01, 0x00111400, 0x00111405, 0x00111407,
	0x00111409, 0x0011140b, 0x002000cb, 0x00101500, 0x0040790f, 0x0040794b,
	0x00214b40, 0x00600007, 0x00200442, 0x008800ff, 0x0070008f, 0x0040798c,
	0x005000cb, 0x00000000, 0x0020002b, 0x00101a05, 0x00131c00, 0x00121c04,
	0x00141c20, 0x00111c25, 0x00131c40, 0x00121c44, 0x00141c60, 0x00111c65,
	0x00131f00, 0x00191f40, 0x004099e0, 0x002001d9, 0x00600006, 0x00200044,
	0x00102080, 0x001120c6, 0x001520c9, 0x001920d0, 0x00122100, 0x00122103,
	0x00162200, 0x00122207, 0x00112280, 0x00112300, 0x00112302, 0x00122380,
	0x0011238b, 0x00112394, 0x0011239c, 0x00000000, 0x0040a00f, 0x005000cb,
	0x00214b40, 0x00600007, 0x00200442, 0x008800ff, 0x005000cb, 0x0040a387,
	0x0060000a, 0x00000000, 0x0040b200, 0x007000a0, 0x00700080, 0x00200280,
	0x00600007, 0x00200004, 0x00c000ff, 0x008000ff, 0x005000cb, 0x00700000,
	0x00200000, 0x00600006, 0x00111bfe, 0x0040d44d, 0x00700000, 0x00200000,
	0x00600006, 0x00111bfe, 0x00700080, 0x0070001d, 0x0040114d, 0x00700081,
	0x00600004, 0x0050004a, 0x0040be88, 0x0060000b, 0x00200000, 0x00600006,
	0x00700000, 0x0040d40b, 0x00111bfd, 0x0040424d, 0x00202916, 0x008000fd,
	0x005000cb, 0x00c00002, 0x00200280, 0x00600007, 0x00200160, 0x00800002,
	0x005000cb, 0x00c01802, 0x002027b6, 0x00800002, 0x005000cb, 0x00404e4d,
	0x0060000b, 0x0040d24d, 0x00700001, 0x00700003, 0x0040d806, 0x0040d905,
	0x0060000d, 0x00700005, 0x0070000d, 0x00700006, 0x0070000b, 0x0070000e,
	0x0070001c, 0x0060000c, ~0
	0x0070008e, 0x0070009c, 0x00200020, 0x00600008, 0x0050004c, 0x00400e89,
	0x00200000, 0x00600007, 0x00300000, 0x00c000ff, 0x00200000, 0x008000ff,
	0x00700009, 0x0040dd4d, 0x00402944, 0x00402905, 0x0040290d, 0x0040b906,
	0x00600005, 0x004015c5, 0x00600011, 0x0040270b, 0x004021c5, 0x00700000,
	0x00700081, 0x00600004, 0x0050004a, 0x00216d80, 0x00600007, 0x00c02801,
	0x0020002e, 0x00800001, 0x005000cb, 0x0090ffff, 0x0091ffff, 0x00200020,
	0x00600008, 0x0050004c, 0x00600009, 0x0040b945, 0x0040d44d, 0x0070009d,
	0x00402dcf, 0x0070009f, 0x0050009f, 0x00402ac0, 0x00200200, 0x00600008,
	0x00402a4f, 0x00402ac0, 0x004030cc, 0x00700081, 0x00200000, 0x00600006,
	0x00700000, 0x00111bfc, 0x00700083, 0x00300000, 0x00216d80, 0x00600007,
	0x00c00b01, 0x0020001e, 0x00800001, 0x005000cb, 0x00c000ff, 0x00700080,
	0x00700083, 0x00200047, 0x00600006, 0x0011020a, 0x00200280, 0x00600007,
	0x00300000, 0x00c000ff, 0x00c800ff, 0x0040c407, 0x00202916, 0x008000ff,
	0x0040508c, 0x005000cb, 0x00a0023f, 0x00200040, 0x00600006, 0x0070000f,
	0x00170202, 0x0011020a, 0x00200032, 0x0010020d, 0x001c0242, 0x00120302,
	0x00140402, 0x00180500, 0x00130509, 0x00150550, 0x00110605, 0x0020000f,
	0x00100607, 0x00110700, 0x00110900, 0x00120902, 0x00110a00, 0x00160b02,
	0x00120b28, 0x00140b2b, 0x00110c01, 0x00111400, 0x00111405, 0x00111407,
	0x00111409, 0x0011140b, 0x002000cb, 0x00101500, 0x0040790f, 0x0040794b,
	0x00214b40, 0x00600007, 0x00200442, 0x008800ff, 0x0070008f, 0x0040798c,
	0x005000cb, 0x00000000, 0x0020002b, 0x00101a05, 0x00131c00, 0x00121c04,
	0x00141c20, 0x00111c25, 0x00131c40, 0x00121c44, 0x00141c60, 0x00111c65,
	0x00131f00, 0x00191f40, 0x004099e0, 0x002001d9, 0x00600006, 0x00200044,
	0x00102080, 0x001120c6, 0x001520c9, 0x001920d0, 0x00122100, 0x00122103,
	0x00162200, 0x00122207, 0x00112280, 0x00112300, 0x00112302, 0x00122380,
	0x0011238b, 0x00112394, 0x0011239c, 0x00000000, 0x0040a00f, 0x005000cb,
	0x00214b40, 0x00600007, 0x00200442, 0x008800ff, 0x005000cb, 0x0040a387,
	0x0060000a, 0x00000000, 0x0040b200, 0x007000a0, 0x00700080, 0x00200280,
	0x00600007, 0x00200004, 0x00c000ff, 0x008000ff, 0x005000cb, 0x00700000,
	0x00200000, 0x00600006, 0x00111bfe, 0x0040d44d, 0x00700000, 0x00200000,
	0x00600006, 0x00111bfe, 0x00700080, 0x0070001d, 0x0040114d, 0x00700081,
	0x00600004, 0x0050004a, 0x0040be88, 0x0060000b, 0x00200000, 0x00600006,
	0x00700000, 0x0040d40b, 0x00111bfd, 0x0040424d, 0x00202916, 0x008000fd,
	0x005000cb, 0x00c00002, 0x00200280, 0x00600007, 0x00200160, 0x00800002,
	0x005000cb, 0x00c01802, 0x002027b6, 0x00800002, 0x005000cb, 0x00404e4d,
	0x0060000b, 0x0040d24d, 0x00700001, 0x00700003, 0x0040d806, 0x0040d905,
	0x0060000d, 0x00700005, 0x0070000d, 0x00700006, 0x0070000b, 0x0070000e,
	0x0060000c, ~0
};

static int
@@ -245,6 +245,692 @@ nv50_graph_takedown(struct drm_device *dev)
	DRM_DEBUG("\n");
}

static void
nv86_graph_init_ctxvals(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ctx = ref->gpuobj;

	INSTANCE_WR(ctx, 0x0/4, 0x1);
	INSTANCE_WR(ctx, 0x10C/4, 0x30);
	INSTANCE_WR(ctx, 0x1D4/4, 0x3);
	INSTANCE_WR(ctx, 0x1D8/4, 0x1000);
	INSTANCE_WR(ctx, 0x218/4, 0xFE0C);
	INSTANCE_WR(ctx, 0x22C/4, 0x1000);
	INSTANCE_WR(ctx, 0x258/4, 0x187);
	INSTANCE_WR(ctx, 0x26C/4, 0x1018);
	INSTANCE_WR(ctx, 0x270/4, 0xFF);
	INSTANCE_WR(ctx, 0x2AC/4, 0x4);
	INSTANCE_WR(ctx, 0x2B0/4, 0x44D00DF);
	INSTANCE_WR(ctx, 0x2B8/4, 0x600);
	INSTANCE_WR(ctx, 0x2D0/4, 0x1000000);
	INSTANCE_WR(ctx, 0x2D4/4, 0xFF);
	INSTANCE_WR(ctx, 0x2DC/4, 0x400);
	INSTANCE_WR(ctx, 0x2F4/4, 0x1);
	INSTANCE_WR(ctx, 0x2F8/4, 0x80);
	INSTANCE_WR(ctx, 0x2FC/4, 0x4);
	INSTANCE_WR(ctx, 0x318/4, 0x2);
	INSTANCE_WR(ctx, 0x31C/4, 0x1);
	INSTANCE_WR(ctx, 0x328/4, 0x1);
	INSTANCE_WR(ctx, 0x32C/4, 0x100);
	INSTANCE_WR(ctx, 0x344/4, 0x2);
	INSTANCE_WR(ctx, 0x348/4, 0x1);
	INSTANCE_WR(ctx, 0x34C/4, 0x1);
	INSTANCE_WR(ctx, 0x35C/4, 0x1);
	INSTANCE_WR(ctx, 0x360/4, 0x3FFFFF);
	INSTANCE_WR(ctx, 0x364/4, 0x1FFF);
	INSTANCE_WR(ctx, 0x36C/4, 0x1);
	INSTANCE_WR(ctx, 0x370/4, 0x1);
	INSTANCE_WR(ctx, 0x378/4, 0x1);
	INSTANCE_WR(ctx, 0x37C/4, 0x1);
	INSTANCE_WR(ctx, 0x380/4, 0x1);
	INSTANCE_WR(ctx, 0x384/4, 0x4);
	INSTANCE_WR(ctx, 0x388/4, 0x1);
	INSTANCE_WR(ctx, 0x38C/4, 0x1);
	INSTANCE_WR(ctx, 0x390/4, 0x1);
	INSTANCE_WR(ctx, 0x394/4, 0x7);
	INSTANCE_WR(ctx, 0x398/4, 0x1);
	INSTANCE_WR(ctx, 0x39C/4, 0x7);
	INSTANCE_WR(ctx, 0x3A0/4, 0x1);
	INSTANCE_WR(ctx, 0x3A4/4, 0x1);
	INSTANCE_WR(ctx, 0x3A8/4, 0x1);
	INSTANCE_WR(ctx, 0x3BC/4, 0x1);
	INSTANCE_WR(ctx, 0x3C0/4, 0x100);
	INSTANCE_WR(ctx, 0x3C8/4, 0x1);
	INSTANCE_WR(ctx, 0x3D4/4, 0x100);
	INSTANCE_WR(ctx, 0x3D8/4, 0x1);
	INSTANCE_WR(ctx, 0x3DC/4, 0x100);
	INSTANCE_WR(ctx, 0x3E4/4, 0x1);
	INSTANCE_WR(ctx, 0x3F0/4, 0x100);
	INSTANCE_WR(ctx, 0x404/4, 0x4);
	INSTANCE_WR(ctx, 0x408/4, 0x70);
	INSTANCE_WR(ctx, 0x40C/4, 0x80);
	INSTANCE_WR(ctx, 0x420/4, 0xC);
	INSTANCE_WR(ctx, 0x428/4, 0x8);
	INSTANCE_WR(ctx, 0x42C/4, 0x14);
	INSTANCE_WR(ctx, 0x434/4, 0x29);
	INSTANCE_WR(ctx, 0x438/4, 0x27);
	INSTANCE_WR(ctx, 0x43C/4, 0x26);
	INSTANCE_WR(ctx, 0x440/4, 0x8);
	INSTANCE_WR(ctx, 0x444/4, 0x4);
	INSTANCE_WR(ctx, 0x448/4, 0x27);
	INSTANCE_WR(ctx, 0x454/4, 0x1);
	INSTANCE_WR(ctx, 0x458/4, 0x2);
	INSTANCE_WR(ctx, 0x45C/4, 0x3);
	INSTANCE_WR(ctx, 0x460/4, 0x4);
	INSTANCE_WR(ctx, 0x464/4, 0x5);
	INSTANCE_WR(ctx, 0x468/4, 0x6);
	INSTANCE_WR(ctx, 0x46C/4, 0x7);
	INSTANCE_WR(ctx, 0x470/4, 0x1);
	INSTANCE_WR(ctx, 0x4B4/4, 0xCF);
	INSTANCE_WR(ctx, 0x4E4/4, 0x80);
	INSTANCE_WR(ctx, 0x4E8/4, 0x4);
	INSTANCE_WR(ctx, 0x4EC/4, 0x4);
	INSTANCE_WR(ctx, 0x4F0/4, 0x3);
	INSTANCE_WR(ctx, 0x4F4/4, 0x1);
	INSTANCE_WR(ctx, 0x500/4, 0x12);
	INSTANCE_WR(ctx, 0x504/4, 0x10);
	INSTANCE_WR(ctx, 0x508/4, 0xC);
	INSTANCE_WR(ctx, 0x50C/4, 0x1);
	INSTANCE_WR(ctx, 0x51C/4, 0x4);
	INSTANCE_WR(ctx, 0x520/4, 0x2);
	INSTANCE_WR(ctx, 0x524/4, 0x4);
	INSTANCE_WR(ctx, 0x530/4, 0x3FFFFF);
	INSTANCE_WR(ctx, 0x534/4, 0x1FFF);
	INSTANCE_WR(ctx, 0x55C/4, 0x4);
	INSTANCE_WR(ctx, 0x560/4, 0x14);
	INSTANCE_WR(ctx, 0x564/4, 0x1);
	INSTANCE_WR(ctx, 0x570/4, 0x2);
	INSTANCE_WR(ctx, 0x57C/4, 0x1);
	INSTANCE_WR(ctx, 0x584/4, 0x2);
	INSTANCE_WR(ctx, 0x588/4, 0x1000);
	INSTANCE_WR(ctx, 0x58C/4, 0xE00);
	INSTANCE_WR(ctx, 0x590/4, 0x1000);
	INSTANCE_WR(ctx, 0x594/4, 0x1E00);
	INSTANCE_WR(ctx, 0x59C/4, 0x1);
	INSTANCE_WR(ctx, 0x5A0/4, 0x1);
	INSTANCE_WR(ctx, 0x5A4/4, 0x1);
	INSTANCE_WR(ctx, 0x5A8/4, 0x1);
	INSTANCE_WR(ctx, 0x5AC/4, 0x1);
	INSTANCE_WR(ctx, 0x5BC/4, 0x200);
	INSTANCE_WR(ctx, 0x5C4/4, 0x1);
	INSTANCE_WR(ctx, 0x5C8/4, 0x70);
	INSTANCE_WR(ctx, 0x5CC/4, 0x80);
	INSTANCE_WR(ctx, 0x5D8/4, 0x1);
	INSTANCE_WR(ctx, 0x5DC/4, 0x70);
	INSTANCE_WR(ctx, 0x5E0/4, 0x80);
	INSTANCE_WR(ctx, 0x5F0/4, 0x1);
	INSTANCE_WR(ctx, 0x5F4/4, 0xCF);
	INSTANCE_WR(ctx, 0x5FC/4, 0x1);
	INSTANCE_WR(ctx, 0x60C/4, 0xCF);
	INSTANCE_WR(ctx, 0x614/4, 0x2);
	INSTANCE_WR(ctx, 0x61C/4, 0x1);
	INSTANCE_WR(ctx, 0x624/4, 0x1);
	INSTANCE_WR(ctx, 0x62C/4, 0xCF);
	INSTANCE_WR(ctx, 0x630/4, 0xCF);
	INSTANCE_WR(ctx, 0x634/4, 0x1);
	INSTANCE_WR(ctx, 0x63C/4, 0xF80);
	INSTANCE_WR(ctx, 0x684/4, 0x7F0080);
	INSTANCE_WR(ctx, 0x6C0/4, 0x7F0080);
	INSTANCE_WR(ctx, 0x6E4/4, 0x3B74F821);
	INSTANCE_WR(ctx, 0x6E8/4, 0x89058001);
	INSTANCE_WR(ctx, 0x6F0/4, 0x1000);
	INSTANCE_WR(ctx, 0x6F4/4, 0x1F);
	INSTANCE_WR(ctx, 0x6F8/4, 0x27C10FA);
	INSTANCE_WR(ctx, 0x6FC/4, 0x400000C0);
	INSTANCE_WR(ctx, 0x700/4, 0xB7892080);
	INSTANCE_WR(ctx, 0x70C/4, 0x3B74F821);
	INSTANCE_WR(ctx, 0x710/4, 0x89058001);
	INSTANCE_WR(ctx, 0x718/4, 0x1000);
	INSTANCE_WR(ctx, 0x71C/4, 0x1F);
	INSTANCE_WR(ctx, 0x720/4, 0x27C10FA);
	INSTANCE_WR(ctx, 0x724/4, 0x400000C0);
	INSTANCE_WR(ctx, 0x728/4, 0xB7892080);
	INSTANCE_WR(ctx, 0x734/4, 0x10040);
	INSTANCE_WR(ctx, 0x73C/4, 0x22);
	INSTANCE_WR(ctx, 0x748/4, 0x10040);
	INSTANCE_WR(ctx, 0x74C/4, 0x22);
	INSTANCE_WR(ctx, 0x764/4, 0x1800000);
	INSTANCE_WR(ctx, 0x768/4, 0x160000);
	INSTANCE_WR(ctx, 0x76C/4, 0x1800000);
	INSTANCE_WR(ctx, 0x77C/4, 0x3FFFF);
	INSTANCE_WR(ctx, 0x780/4, 0x8C0000);
	INSTANCE_WR(ctx, 0x7A4/4, 0x10401);
	INSTANCE_WR(ctx, 0x7AC/4, 0x78);
	INSTANCE_WR(ctx, 0x7B4/4, 0xBF);
	INSTANCE_WR(ctx, 0x7BC/4, 0x1210);
	INSTANCE_WR(ctx, 0x7C0/4, 0x8000080);
	INSTANCE_WR(ctx, 0x7E4/4, 0x1800000);
	INSTANCE_WR(ctx, 0x7E8/4, 0x160000);
	INSTANCE_WR(ctx, 0x7EC/4, 0x1800000);
	INSTANCE_WR(ctx, 0x7FC/4, 0x3FFFF);
	INSTANCE_WR(ctx, 0x800/4, 0x8C0000);
	INSTANCE_WR(ctx, 0x824/4, 0x10401);
	INSTANCE_WR(ctx, 0x82C/4, 0x78);
	INSTANCE_WR(ctx, 0x834/4, 0xBF);
	INSTANCE_WR(ctx, 0x83C/4, 0x1210);
	INSTANCE_WR(ctx, 0x840/4, 0x8000080);
	INSTANCE_WR(ctx, 0x868/4, 0x27070);
	INSTANCE_WR(ctx, 0x874/4, 0x3FFFFFF);
	INSTANCE_WR(ctx, 0x88C/4, 0x120407);
	INSTANCE_WR(ctx, 0x890/4, 0x5091507);
	INSTANCE_WR(ctx, 0x894/4, 0x5010202);
	INSTANCE_WR(ctx, 0x898/4, 0x30201);
	INSTANCE_WR(ctx, 0x8B4/4, 0x40);
	INSTANCE_WR(ctx, 0x8B8/4, 0xD0C0B0A);
	INSTANCE_WR(ctx, 0x8BC/4, 0x141210);
	INSTANCE_WR(ctx, 0x8C0/4, 0x1F0);
	INSTANCE_WR(ctx, 0x8C4/4, 0x1);
	INSTANCE_WR(ctx, 0x8C8/4, 0x3);
	INSTANCE_WR(ctx, 0x8D4/4, 0x39E00);
	INSTANCE_WR(ctx, 0x8D8/4, 0x100);
	INSTANCE_WR(ctx, 0x8DC/4, 0x3800);
	INSTANCE_WR(ctx, 0x8E0/4, 0x404040);
	INSTANCE_WR(ctx, 0x8E4/4, 0xFF0A);
	INSTANCE_WR(ctx, 0x8EC/4, 0x77F005);
	INSTANCE_WR(ctx, 0x8F0/4, 0x3F7FFF);
	INSTANCE_WR(ctx, 0x7BA0/4, 0x21);
	INSTANCE_WR(ctx, 0x7BC0/4, 0x1);
	INSTANCE_WR(ctx, 0x7BE0/4, 0x2);
	INSTANCE_WR(ctx, 0x7C00/4, 0x100);
	INSTANCE_WR(ctx, 0x7C20/4, 0x100);
	INSTANCE_WR(ctx, 0x7C40/4, 0x1);
	INSTANCE_WR(ctx, 0x7CA0/4, 0x1);
	INSTANCE_WR(ctx, 0x7CC0/4, 0x2);
	INSTANCE_WR(ctx, 0x7CE0/4, 0x100);
	INSTANCE_WR(ctx, 0x7D00/4, 0x100);
	INSTANCE_WR(ctx, 0x7D20/4, 0x1);
	INSTANCE_WR(ctx, 0x11640/4, 0x4);
	INSTANCE_WR(ctx, 0x11660/4, 0x4);
	INSTANCE_WR(ctx, 0x49FE0/4, 0x4);
	INSTANCE_WR(ctx, 0x4A000/4, 0x4);
	INSTANCE_WR(ctx, 0x4A020/4, 0x8100C12);
	INSTANCE_WR(ctx, 0x4A040/4, 0x3);
	INSTANCE_WR(ctx, 0x4A080/4, 0x8100C12);
	INSTANCE_WR(ctx, 0x4A0C0/4, 0x80C14);
	INSTANCE_WR(ctx, 0x4A0E0/4, 0x1);
	INSTANCE_WR(ctx, 0x4A100/4, 0x80C14);
	INSTANCE_WR(ctx, 0x4A160/4, 0x8100C12);
	INSTANCE_WR(ctx, 0x4A180/4, 0x27);
	INSTANCE_WR(ctx, 0x4A1E0/4, 0x1);
	INSTANCE_WR(ctx, 0x51A20/4, 0x1);
	INSTANCE_WR(ctx, 0x51D00/4, 0x8100C12);
	INSTANCE_WR(ctx, 0x51EA0/4, 0x4000000);
	INSTANCE_WR(ctx, 0x51EC0/4, 0x4000000);
	INSTANCE_WR(ctx, 0x51F00/4, 0x80);
	INSTANCE_WR(ctx, 0x51F80/4, 0x80);
	INSTANCE_WR(ctx, 0x51FC0/4, 0x3F);
	INSTANCE_WR(ctx, 0x52120/4, 0x2);
	INSTANCE_WR(ctx, 0x52140/4, 0x4000000);
	INSTANCE_WR(ctx, 0x52160/4, 0x4000000);
	INSTANCE_WR(ctx, 0x52280/4, 0x4);
	INSTANCE_WR(ctx, 0x52300/4, 0x4);
	INSTANCE_WR(ctx, 0x52540/4, 0x1);
	INSTANCE_WR(ctx, 0x52560/4, 0x1001);
	INSTANCE_WR(ctx, 0x52580/4, 0xFFFF);
	INSTANCE_WR(ctx, 0x525A0/4, 0xFFFF);
	INSTANCE_WR(ctx, 0x525C0/4, 0xFFFF);
	INSTANCE_WR(ctx, 0x525E0/4, 0xFFFF);
	INSTANCE_WR(ctx, 0x52A00/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x52A20/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x52A40/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x52A60/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x52A80/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x52AA0/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x52AC0/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x52AE0/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x52B00/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x52B20/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x52B40/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x52B60/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x52B80/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x52BA0/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x52BC0/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x52BE0/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x52C00/4, 0x10);
	INSTANCE_WR(ctx, 0x52C60/4, 0x3);
	INSTANCE_WR(ctx, 0xA84/4, 0xF);
	INSTANCE_WR(ctx, 0xB24/4, 0x20);
	INSTANCE_WR(ctx, 0xD04/4, 0x1A);
	INSTANCE_WR(ctx, 0xEC4/4, 0x4);
	INSTANCE_WR(ctx, 0xEE4/4, 0x4);
	INSTANCE_WR(ctx, 0xF24/4, 0x4);
	INSTANCE_WR(ctx, 0xF44/4, 0x8);
	INSTANCE_WR(ctx, 0xF84/4, 0x7FF);
	INSTANCE_WR(ctx, 0x1124/4, 0xF);
	INSTANCE_WR(ctx, 0x3604/4, 0xF);
	INSTANCE_WR(ctx, 0x3644/4, 0x1);
	INSTANCE_WR(ctx, 0x41A4/4, 0xF);
	INSTANCE_WR(ctx, 0x14844/4, 0xF);
	INSTANCE_WR(ctx, 0x14AE4/4, 0x1);
	INSTANCE_WR(ctx, 0x14B04/4, 0x100);
	INSTANCE_WR(ctx, 0x14B24/4, 0x100);
	INSTANCE_WR(ctx, 0x14B44/4, 0x11);
	INSTANCE_WR(ctx, 0x14B84/4, 0x8);
	INSTANCE_WR(ctx, 0x14C44/4, 0x1);
	INSTANCE_WR(ctx, 0x14C84/4, 0x1);
	INSTANCE_WR(ctx, 0x14CA4/4, 0x1);
	INSTANCE_WR(ctx, 0x14CC4/4, 0x1);
	INSTANCE_WR(ctx, 0x14CE4/4, 0xCF);
	INSTANCE_WR(ctx, 0x14D04/4, 0x2);
	INSTANCE_WR(ctx, 0x14DE4/4, 0x1);
	INSTANCE_WR(ctx, 0x14E24/4, 0x1);
	INSTANCE_WR(ctx, 0x14E44/4, 0x1);
	INSTANCE_WR(ctx, 0x14E64/4, 0x1);
	INSTANCE_WR(ctx, 0x14F04/4, 0x4);
	INSTANCE_WR(ctx, 0x14F44/4, 0x1);
	INSTANCE_WR(ctx, 0x14F64/4, 0x15);
	INSTANCE_WR(ctx, 0x14FE4/4, 0x4444480);
	INSTANCE_WR(ctx, 0x15764/4, 0x8100C12);
	INSTANCE_WR(ctx, 0x15804/4, 0x100);
	INSTANCE_WR(ctx, 0x15864/4, 0x10001);
	INSTANCE_WR(ctx, 0x158A4/4, 0x10001);
	INSTANCE_WR(ctx, 0x158C4/4, 0x1);
	INSTANCE_WR(ctx, 0x158E4/4, 0x10001);
	INSTANCE_WR(ctx, 0x15904/4, 0x1);
	INSTANCE_WR(ctx, 0x15924/4, 0x4);
	INSTANCE_WR(ctx, 0x15944/4, 0x2);
	INSTANCE_WR(ctx, 0x166C4/4, 0x4E3BFDF);
	INSTANCE_WR(ctx, 0x166E4/4, 0x4E3BFDF);
	INSTANCE_WR(ctx, 0x16784/4, 0xFAC6881);
	INSTANCE_WR(ctx, 0x16904/4, 0x4E3BFDF);
	INSTANCE_WR(ctx, 0x16924/4, 0x4E3BFDF);
	INSTANCE_WR(ctx, 0x15948/4, 0x3FFFFF);
	INSTANCE_WR(ctx, 0x159A8/4, 0x1FFF);
	INSTANCE_WR(ctx, 0x15B88/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x15C68/4, 0x4);
	INSTANCE_WR(ctx, 0x15C88/4, 0x1A);
	INSTANCE_WR(ctx, 0x15CE8/4, 0x1);
	INSTANCE_WR(ctx, 0x15F48/4, 0xFFFF00);
	INSTANCE_WR(ctx, 0x16028/4, 0xF);
	INSTANCE_WR(ctx, 0x16128/4, 0xFAC6881);
	INSTANCE_WR(ctx, 0x16148/4, 0x11);
	INSTANCE_WR(ctx, 0x16348/4, 0x4);
	INSTANCE_WR(ctx, 0x163E8/4, 0x2);
	INSTANCE_WR(ctx, 0x16408/4, 0x4000000);
	INSTANCE_WR(ctx, 0x16428/4, 0x4000000);
	INSTANCE_WR(ctx, 0x164A8/4, 0x5);
	INSTANCE_WR(ctx, 0x164C8/4, 0x52);
	INSTANCE_WR(ctx, 0x16568/4, 0x1);
	INSTANCE_WR(ctx, 0x16788/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x167A8/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x167C8/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x167E8/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x16808/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x16828/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x16848/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x16868/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x16888/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x168A8/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x168C8/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x168E8/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x16908/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x16928/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x16948/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x16968/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x16988/4, 0x10);
	INSTANCE_WR(ctx, 0x16E68/4, 0x8100C12);
	INSTANCE_WR(ctx, 0x16E88/4, 0x5);
	INSTANCE_WR(ctx, 0x16EE8/4, 0x1);
	INSTANCE_WR(ctx, 0x16F28/4, 0xFFFF);
	INSTANCE_WR(ctx, 0x16F48/4, 0xFFFF);
	INSTANCE_WR(ctx, 0x16F68/4, 0xFFFF);
	INSTANCE_WR(ctx, 0x16F88/4, 0xFFFF);
	INSTANCE_WR(ctx, 0x16FA8/4, 0x3);
	INSTANCE_WR(ctx, 0x173A8/4, 0xFFFF00);
	INSTANCE_WR(ctx, 0x173C8/4, 0x1A);
	INSTANCE_WR(ctx, 0x17408/4, 0x3);
	INSTANCE_WR(ctx, 0x178E8/4, 0x102);
	INSTANCE_WR(ctx, 0x17928/4, 0x4);
	INSTANCE_WR(ctx, 0x17948/4, 0x4);
	INSTANCE_WR(ctx, 0x17968/4, 0x4);
	INSTANCE_WR(ctx, 0x17988/4, 0x4);
	INSTANCE_WR(ctx, 0x179A8/4, 0x4);
	INSTANCE_WR(ctx, 0x179C8/4, 0x4);
	INSTANCE_WR(ctx, 0x17A08/4, 0x7FF);
	INSTANCE_WR(ctx, 0x17A48/4, 0x102);
	INSTANCE_WR(ctx, 0x17B88/4, 0x4);
	INSTANCE_WR(ctx, 0x17BA8/4, 0x4);
	INSTANCE_WR(ctx, 0x17BC8/4, 0x4);
	INSTANCE_WR(ctx, 0x17BE8/4, 0x4);
	INSTANCE_WR(ctx, 0x18228/4, 0x80C14);
	INSTANCE_WR(ctx, 0x18288/4, 0x804);
	INSTANCE_WR(ctx, 0x182C8/4, 0x4);
	INSTANCE_WR(ctx, 0x182E8/4, 0x4);
	INSTANCE_WR(ctx, 0x18308/4, 0x8100C12);
	INSTANCE_WR(ctx, 0x18348/4, 0x4);
	INSTANCE_WR(ctx, 0x18368/4, 0x4);
	INSTANCE_WR(ctx, 0x183A8/4, 0x10);
	INSTANCE_WR(ctx, 0x18448/4, 0x804);
	INSTANCE_WR(ctx, 0x18468/4, 0x1);
	INSTANCE_WR(ctx, 0x18488/4, 0x1A);
	INSTANCE_WR(ctx, 0x184A8/4, 0x7F);
	INSTANCE_WR(ctx, 0x184E8/4, 0x1);
	INSTANCE_WR(ctx, 0x18508/4, 0x80C14);
	INSTANCE_WR(ctx, 0x18548/4, 0x8100C12);
	INSTANCE_WR(ctx, 0x18568/4, 0x4);
	INSTANCE_WR(ctx, 0x18588/4, 0x4);
	INSTANCE_WR(ctx, 0x185C8/4, 0x10);
	INSTANCE_WR(ctx, 0x18648/4, 0x1);
	INSTANCE_WR(ctx, 0x18668/4, 0x8100C12);
	INSTANCE_WR(ctx, 0x18748/4, 0x7FF);
	INSTANCE_WR(ctx, 0x18768/4, 0x80C14);
	INSTANCE_WR(ctx, 0x18E88/4, 0x1);
	INSTANCE_WR(ctx, 0x18EE8/4, 0x10);
	INSTANCE_WR(ctx, 0x19608/4, 0x88);
	INSTANCE_WR(ctx, 0x19628/4, 0x88);
	INSTANCE_WR(ctx, 0x19688/4, 0x4);
	INSTANCE_WR(ctx, 0x19968/4, 0x26);
	INSTANCE_WR(ctx, 0x199C8/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x19A48/4, 0x1A);
	INSTANCE_WR(ctx, 0x19A68/4, 0x10);
	INSTANCE_WR(ctx, 0x19F88/4, 0x52);
	INSTANCE_WR(ctx, 0x19FC8/4, 0x26);
	INSTANCE_WR(ctx, 0x1A008/4, 0x4);
	INSTANCE_WR(ctx, 0x1A028/4, 0x4);
	INSTANCE_WR(ctx, 0x1A068/4, 0x1A);
	INSTANCE_WR(ctx, 0x1A0C8/4, 0xFFFF00);
	INSTANCE_WR(ctx, 0x1A108/4, 0x4);
	INSTANCE_WR(ctx, 0x1A128/4, 0x4);
	INSTANCE_WR(ctx, 0x1A168/4, 0x80);
	INSTANCE_WR(ctx, 0x1A188/4, 0x4);
	INSTANCE_WR(ctx, 0x1A1A8/4, 0x80C14);
	INSTANCE_WR(ctx, 0x1A1E8/4, 0x7FF);
	INSTANCE_WR(ctx, 0x24A48/4, 0x4);
	INSTANCE_WR(ctx, 0x24A68/4, 0x4);
	INSTANCE_WR(ctx, 0x24AA8/4, 0x80);
	INSTANCE_WR(ctx, 0x24AC8/4, 0x4);
	INSTANCE_WR(ctx, 0x24AE8/4, 0x1);
	INSTANCE_WR(ctx, 0x24B28/4, 0x27);
	INSTANCE_WR(ctx, 0x24B68/4, 0x26);
	INSTANCE_WR(ctx, 0x24BE8/4, 0x4000000);
	INSTANCE_WR(ctx, 0x24C08/4, 0x4000000);
	INSTANCE_WR(ctx, 0x24C28/4, 0x4000000);
	INSTANCE_WR(ctx, 0x24C48/4, 0x4000000);
	INSTANCE_WR(ctx, 0x24C68/4, 0x4000000);
	INSTANCE_WR(ctx, 0x24C88/4, 0x4000000);
	INSTANCE_WR(ctx, 0x24CA8/4, 0x4000000);
	INSTANCE_WR(ctx, 0x24CC8/4, 0x4000000);
	INSTANCE_WR(ctx, 0x24CE8/4, 0x4000000);
	INSTANCE_WR(ctx, 0x24D08/4, 0x4000000);
	INSTANCE_WR(ctx, 0x24D28/4, 0x4000000);
	INSTANCE_WR(ctx, 0x24D48/4, 0x4000000);
	INSTANCE_WR(ctx, 0x24D68/4, 0x4000000);
	INSTANCE_WR(ctx, 0x24D88/4, 0x4000000);
	INSTANCE_WR(ctx, 0x24DA8/4, 0x4000000);
	INSTANCE_WR(ctx, 0x24DC8/4, 0x4000000);
	INSTANCE_WR(ctx, 0x25268/4, 0x4E3BFDF);
	INSTANCE_WR(ctx, 0x25288/4, 0x4E3BFDF);
	INSTANCE_WR(ctx, 0x252E8/4, 0x1FE21);
	INSTANCE_WR(ctx, 0xB0C/4, 0x2);
	INSTANCE_WR(ctx, 0xB4C/4, 0x1FFE67);
	INSTANCE_WR(ctx, 0xCEC/4, 0x1);
	INSTANCE_WR(ctx, 0xD0C/4, 0x10);
	INSTANCE_WR(ctx, 0xD6C/4, 0x1);
	INSTANCE_WR(ctx, 0xE0C/4, 0x4);
	INSTANCE_WR(ctx, 0xE2C/4, 0x400);
	INSTANCE_WR(ctx, 0xE4C/4, 0x300);
	INSTANCE_WR(ctx, 0xE6C/4, 0x1001);
	INSTANCE_WR(ctx, 0xE8C/4, 0x15);
	INSTANCE_WR(ctx, 0xF4C/4, 0x2);
	INSTANCE_WR(ctx, 0x106C/4, 0x1);
	INSTANCE_WR(ctx, 0x108C/4, 0x10);
	INSTANCE_WR(ctx, 0x10CC/4, 0x1);
	INSTANCE_WR(ctx, 0x134C/4, 0x10);
	INSTANCE_WR(ctx, 0x156C/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x158C/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x15AC/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x15CC/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x15EC/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x160C/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x162C/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x164C/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x166C/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x168C/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x16AC/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x16CC/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x16EC/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x170C/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x172C/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x174C/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x1A8C/4, 0x10);
	INSTANCE_WR(ctx, 0x1ACC/4, 0x3F);
	INSTANCE_WR(ctx, 0x1BAC/4, 0x1);
	INSTANCE_WR(ctx, 0x1BEC/4, 0x1);
	INSTANCE_WR(ctx, 0x1C2C/4, 0x1);
	INSTANCE_WR(ctx, 0x1DCC/4, 0x11);
	INSTANCE_WR(ctx, 0x1ECC/4, 0xF);
	INSTANCE_WR(ctx, 0x1FCC/4, 0x11);
	INSTANCE_WR(ctx, 0x20AC/4, 0x1);
	INSTANCE_WR(ctx, 0x20CC/4, 0x1);
	INSTANCE_WR(ctx, 0x20EC/4, 0x1);
	INSTANCE_WR(ctx, 0x210C/4, 0x2);
	INSTANCE_WR(ctx, 0x212C/4, 0x1);
	INSTANCE_WR(ctx, 0x214C/4, 0x2);
	INSTANCE_WR(ctx, 0x216C/4, 0x1);
	INSTANCE_WR(ctx, 0x21AC/4, 0x1FFE67);
	INSTANCE_WR(ctx, 0x21EC/4, 0xFAC6881);
	INSTANCE_WR(ctx, 0x24AC/4, 0x1);
	INSTANCE_WR(ctx, 0x24CC/4, 0x2);
	INSTANCE_WR(ctx, 0x24EC/4, 0x1);
	INSTANCE_WR(ctx, 0x250C/4, 0x1);
	INSTANCE_WR(ctx, 0x252C/4, 0x2);
	INSTANCE_WR(ctx, 0x254C/4, 0x1);
	INSTANCE_WR(ctx, 0x256C/4, 0x1);
	INSTANCE_WR(ctx, 0x25EC/4, 0x11);
	INSTANCE_WR(ctx, 0x260C/4, 0x1);
	INSTANCE_WR(ctx, 0x328C/4, 0x2);
	INSTANCE_WR(ctx, 0x32CC/4, 0x1FFE67);
	INSTANCE_WR(ctx, 0x346C/4, 0x1);
	INSTANCE_WR(ctx, 0x348C/4, 0x10);
	INSTANCE_WR(ctx, 0x34EC/4, 0x1);
	INSTANCE_WR(ctx, 0x358C/4, 0x4);
	INSTANCE_WR(ctx, 0x35AC/4, 0x400);
	INSTANCE_WR(ctx, 0x35CC/4, 0x300);
	INSTANCE_WR(ctx, 0x35EC/4, 0x1001);
	INSTANCE_WR(ctx, 0x360C/4, 0x15);
	INSTANCE_WR(ctx, 0x36CC/4, 0x2);
	INSTANCE_WR(ctx, 0x37EC/4, 0x1);
	INSTANCE_WR(ctx, 0x380C/4, 0x10);
	INSTANCE_WR(ctx, 0x384C/4, 0x1);
	INSTANCE_WR(ctx, 0x3ACC/4, 0x10);
	INSTANCE_WR(ctx, 0x3CEC/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x3D0C/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x3D2C/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x3D4C/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x3D6C/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x3D8C/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x3DAC/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x3DCC/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x3DEC/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x3E0C/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x3E2C/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x3E4C/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x3E6C/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x3E8C/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x3EAC/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x3ECC/4, 0x3F800000);
	INSTANCE_WR(ctx, 0x420C/4, 0x10);
	INSTANCE_WR(ctx, 0x424C/4, 0x3F);
	INSTANCE_WR(ctx, 0x432C/4, 0x1);
	INSTANCE_WR(ctx, 0x436C/4, 0x1);
	INSTANCE_WR(ctx, 0x43AC/4, 0x1);
	INSTANCE_WR(ctx, 0x454C/4, 0x11);
	INSTANCE_WR(ctx, 0x464C/4, 0xF);
	INSTANCE_WR(ctx, 0x474C/4, 0x11);
	INSTANCE_WR(ctx, 0x482C/4, 0x1);
	INSTANCE_WR(ctx, 0x484C/4, 0x1);
	INSTANCE_WR(ctx, 0x486C/4, 0x1);
	INSTANCE_WR(ctx, 0x488C/4, 0x2);
	INSTANCE_WR(ctx, 0x48AC/4, 0x1);
	INSTANCE_WR(ctx, 0x48CC/4, 0x2);
	INSTANCE_WR(ctx, 0x48EC/4, 0x1);
	INSTANCE_WR(ctx, 0x492C/4, 0x1FFE67);
	INSTANCE_WR(ctx, 0x496C/4, 0xFAC6881);
	INSTANCE_WR(ctx, 0x4C2C/4, 0x1);
	INSTANCE_WR(ctx, 0x4C4C/4, 0x2);
	INSTANCE_WR(ctx, 0x4C6C/4, 0x1);
	INSTANCE_WR(ctx, 0x4C8C/4, 0x1);
	INSTANCE_WR(ctx, 0x4CAC/4, 0x2);
	INSTANCE_WR(ctx, 0x4CCC/4, 0x1);
	INSTANCE_WR(ctx, 0x4CEC/4, 0x1);
	INSTANCE_WR(ctx, 0x4D6C/4, 0x11);
	INSTANCE_WR(ctx, 0x4D8C/4, 0x1);
	INSTANCE_WR(ctx, 0xA30/4, 0x4);
	INSTANCE_WR(ctx, 0xCF0/4, 0x4);
	INSTANCE_WR(ctx, 0xD10/4, 0x4);
	INSTANCE_WR(ctx, 0xD30/4, 0x608080);
	INSTANCE_WR(ctx, 0xDD0/4, 0x4);
	INSTANCE_WR(ctx, 0xE30/4, 0x4);
	INSTANCE_WR(ctx, 0xE50/4, 0x4);
	INSTANCE_WR(ctx, 0xE70/4, 0x80);
	INSTANCE_WR(ctx, 0xE90/4, 0x1E00);
	INSTANCE_WR(ctx, 0xEB0/4, 0x4);
	INSTANCE_WR(ctx, 0x1350/4, 0x4);
	INSTANCE_WR(ctx, 0x1370/4, 0x80);
	INSTANCE_WR(ctx, 0x1390/4, 0x4);
	INSTANCE_WR(ctx, 0x13B0/4, 0x3020100);
	INSTANCE_WR(ctx, 0x13D0/4, 0x3);
	INSTANCE_WR(ctx, 0x13F0/4, 0x1E00);
	INSTANCE_WR(ctx, 0x1410/4, 0x4);
	INSTANCE_WR(ctx, 0x14B0/4, 0x4);
	INSTANCE_WR(ctx, 0x14D0/4, 0x3);
	INSTANCE_WR(ctx, 0x1550/4, 0x4);
	INSTANCE_WR(ctx, 0x159F0/4, 0x4);
	INSTANCE_WR(ctx, 0x15A10/4, 0x3);
	INSTANCE_WR(ctx, 0x15C50/4, 0xF);
	INSTANCE_WR(ctx, 0x15DD0/4, 0x4);
	INSTANCE_WR(ctx, 0x15DF0/4, 0xFFFF);
	INSTANCE_WR(ctx, 0x15E10/4, 0xFFFF);
	INSTANCE_WR(ctx, 0x15E30/4, 0xFFFF);
	INSTANCE_WR(ctx, 0x15E50/4, 0xFFFF);
	INSTANCE_WR(ctx, 0x15F70/4, 0x1);
	INSTANCE_WR(ctx, 0x15FF0/4, 0x1);
	INSTANCE_WR(ctx, 0x160B0/4, 0x1);
	INSTANCE_WR(ctx, 0x16250/4, 0x1);
	INSTANCE_WR(ctx, 0x16270/4, 0x1);
	INSTANCE_WR(ctx, 0x16290/4, 0x2);
	INSTANCE_WR(ctx, 0x162B0/4, 0x1);
	INSTANCE_WR(ctx, 0x162D0/4, 0x1);
	INSTANCE_WR(ctx, 0x162F0/4, 0x2);
	INSTANCE_WR(ctx, 0x16310/4, 0x1);
	INSTANCE_WR(ctx, 0x16350/4, 0x11);
	INSTANCE_WR(ctx, 0x16450/4, 0xFAC6881);
	INSTANCE_WR(ctx, 0x164B0/4, 0x4);
	INSTANCE_WR(ctx, 0x16530/4, 0x11);
	INSTANCE_WR(ctx, 0x16550/4, 0x1);
	INSTANCE_WR(ctx, 0x16590/4, 0xCF);
	INSTANCE_WR(ctx, 0x165B0/4, 0xCF);
	INSTANCE_WR(ctx, 0x165D0/4, 0xCF);
	INSTANCE_WR(ctx, 0x16730/4, 0x1);
	INSTANCE_WR(ctx, 0x16750/4, 0x1);
	INSTANCE_WR(ctx, 0x16770/4, 0x2);
	INSTANCE_WR(ctx, 0x16790/4, 0x1);
	INSTANCE_WR(ctx, 0x167B0/4, 0x1);
	INSTANCE_WR(ctx, 0x167D0/4, 0x2);
	INSTANCE_WR(ctx, 0x167F0/4, 0x1);
	INSTANCE_WR(ctx, 0x16830/4, 0x1);
	INSTANCE_WR(ctx, 0x16850/4, 0x1);
	INSTANCE_WR(ctx, 0x16870/4, 0x1);
	INSTANCE_WR(ctx, 0x16890/4, 0x1);
	INSTANCE_WR(ctx, 0x168B0/4, 0x1);
	INSTANCE_WR(ctx, 0x168D0/4, 0x1);
	INSTANCE_WR(ctx, 0x168F0/4, 0x1);
	INSTANCE_WR(ctx, 0x16910/4, 0x1);
	INSTANCE_WR(ctx, 0x16930/4, 0x11);
	INSTANCE_WR(ctx, 0x16A30/4, 0xFAC6881);
	INSTANCE_WR(ctx, 0x16A50/4, 0xF);
	INSTANCE_WR(ctx, 0x16B50/4, 0x1FFE67);
	INSTANCE_WR(ctx, 0x16BB0/4, 0x11);
	INSTANCE_WR(ctx, 0x16BD0/4, 0x1);
	INSTANCE_WR(ctx, 0x16C50/4, 0x4);
	INSTANCE_WR(ctx, 0x16D10/4, 0x1);
	INSTANCE_WR(ctx, 0x16DB0/4, 0x11);
	INSTANCE_WR(ctx, 0x16EB0/4, 0xFAC6881);
	INSTANCE_WR(ctx, 0x16F30/4, 0x11);
	INSTANCE_WR(ctx, 0x16F50/4, 0x1);
	INSTANCE_WR(ctx, 0x16F90/4, 0x1);
	INSTANCE_WR(ctx, 0x16FD0/4, 0x1);
	INSTANCE_WR(ctx, 0x17010/4, 0x7FF);
	INSTANCE_WR(ctx, 0x17050/4, 0x1);
	INSTANCE_WR(ctx, 0x17090/4, 0x1);
	INSTANCE_WR(ctx, 0x175F0/4, 0x8);
	INSTANCE_WR(ctx, 0x17610/4, 0x8);
	INSTANCE_WR(ctx, 0x17630/4, 0x8);
	INSTANCE_WR(ctx, 0x17650/4, 0x8);
	INSTANCE_WR(ctx, 0x17670/4, 0x8);
	INSTANCE_WR(ctx, 0x17690/4, 0x8);
	INSTANCE_WR(ctx, 0x176B0/4, 0x8);
	INSTANCE_WR(ctx, 0x176D0/4, 0x8);
	INSTANCE_WR(ctx, 0x176F0/4, 0x11);
	INSTANCE_WR(ctx, 0x177F0/4, 0xFAC6881);
	INSTANCE_WR(ctx, 0x17810/4, 0x400);
	INSTANCE_WR(ctx, 0x17830/4, 0x400);
	INSTANCE_WR(ctx, 0x17850/4, 0x400);
	INSTANCE_WR(ctx, 0x17870/4, 0x400);
	INSTANCE_WR(ctx, 0x17890/4, 0x400);
	INSTANCE_WR(ctx, 0x178B0/4, 0x400);
	INSTANCE_WR(ctx, 0x178D0/4, 0x400);
	INSTANCE_WR(ctx, 0x178F0/4, 0x400);
	INSTANCE_WR(ctx, 0x17910/4, 0x300);
	INSTANCE_WR(ctx, 0x17930/4, 0x300);
	INSTANCE_WR(ctx, 0x17950/4, 0x300);
	INSTANCE_WR(ctx, 0x17970/4, 0x300);
	INSTANCE_WR(ctx, 0x17990/4, 0x300);
	INSTANCE_WR(ctx, 0x179B0/4, 0x300);
	INSTANCE_WR(ctx, 0x179D0/4, 0x300);
	INSTANCE_WR(ctx, 0x179F0/4, 0x300);
	INSTANCE_WR(ctx, 0x17A10/4, 0x1);
	INSTANCE_WR(ctx, 0x17A30/4, 0xF);
	INSTANCE_WR(ctx, 0x17B30/4, 0x20);
	INSTANCE_WR(ctx, 0x17B50/4, 0x11);
	INSTANCE_WR(ctx, 0x17B70/4, 0x100);
	INSTANCE_WR(ctx, 0x17BB0/4, 0x1);
	INSTANCE_WR(ctx, 0x17C10/4, 0x40);
	INSTANCE_WR(ctx, 0x17C30/4, 0x100);
	INSTANCE_WR(ctx, 0x17C70/4, 0x3);
	INSTANCE_WR(ctx, 0x17D10/4, 0x1FFE67);
	INSTANCE_WR(ctx, 0x17D90/4, 0x2);
	INSTANCE_WR(ctx, 0x17DB0/4, 0xFAC6881);
|
||||
INSTANCE_WR(ctx, 0x17EF0/4, 0x1);
|
||||
INSTANCE_WR(ctx, 0x17F90/4, 0x4);
|
||||
INSTANCE_WR(ctx, 0x17FD0/4, 0x1);
|
||||
INSTANCE_WR(ctx, 0x17FF0/4, 0x400);
|
||||
INSTANCE_WR(ctx, 0x18010/4, 0x300);
|
||||
INSTANCE_WR(ctx, 0x18030/4, 0x1001);
|
||||
INSTANCE_WR(ctx, 0x180B0/4, 0x11);
|
||||
INSTANCE_WR(ctx, 0x181B0/4, 0xFAC6881);
|
||||
INSTANCE_WR(ctx, 0x181D0/4, 0xF);
|
||||
INSTANCE_WR(ctx, 0x184D0/4, 0x1FFE67);
|
||||
INSTANCE_WR(ctx, 0x18550/4, 0x11);
|
||||
INSTANCE_WR(ctx, 0x185B0/4, 0x4);
|
||||
INSTANCE_WR(ctx, 0x185F0/4, 0x1);
|
||||
INSTANCE_WR(ctx, 0x18610/4, 0x1);
|
||||
INSTANCE_WR(ctx, 0x18690/4, 0x1);
|
||||
INSTANCE_WR(ctx, 0x18730/4, 0x1);
|
||||
INSTANCE_WR(ctx, 0x18770/4, 0x1);
|
||||
INSTANCE_WR(ctx, 0x187F0/4, 0x2A712488);
|
||||
INSTANCE_WR(ctx, 0x18830/4, 0x4085C000);
|
||||
INSTANCE_WR(ctx, 0x18850/4, 0x40);
|
||||
INSTANCE_WR(ctx, 0x18870/4, 0x100);
|
||||
INSTANCE_WR(ctx, 0x18890/4, 0x10100);
|
||||
INSTANCE_WR(ctx, 0x188B0/4, 0x2800000);
|
||||
INSTANCE_WR(ctx, 0x18B10/4, 0x4E3BFDF);
|
||||
INSTANCE_WR(ctx, 0x18B30/4, 0x4E3BFDF);
|
||||
INSTANCE_WR(ctx, 0x18B50/4, 0x1);
|
||||
INSTANCE_WR(ctx, 0x18B90/4, 0xFFFF00);
|
||||
INSTANCE_WR(ctx, 0x18BB0/4, 0x1);
|
||||
INSTANCE_WR(ctx, 0x18C10/4, 0xFFFF00);
|
||||
INSTANCE_WR(ctx, 0x18D30/4, 0x1);
|
||||
INSTANCE_WR(ctx, 0x18D70/4, 0x1);
|
||||
INSTANCE_WR(ctx, 0x18D90/4, 0x30201000);
|
||||
INSTANCE_WR(ctx, 0x18DB0/4, 0x70605040);
|
||||
INSTANCE_WR(ctx, 0x18DD0/4, 0xB8A89888);
|
||||
INSTANCE_WR(ctx, 0x18DF0/4, 0xF8E8D8C8);
|
||||
INSTANCE_WR(ctx, 0x18E30/4, 0x1A);
|
||||
}
|
||||
|
||||
|
||||
int
|
||||
nv50_graph_create_context(struct nouveau_channel *chan)
|
||||
{
|
||||
|
@@ -272,10 +958,17 @@ nv50_graph_create_context(struct nouveau_channel *chan)
 	INSTANCE_WR(ramin, (hdr + 0x10)/4, 0);
 	INSTANCE_WR(ramin, (hdr + 0x14)/4, 0x00010000);
 
-	ret = engine->graph.load_context(chan);
-	if (ret) {
-		DRM_ERROR("Error hacking up initial context: %d\n", ret);
-		return ret;
+	switch (dev_priv->chipset) {
+	case 0x86:
+		nv86_graph_init_ctxvals(dev, chan->ramin_grctx);
+		break;
+	default:
+		ret = engine->graph.load_context(chan);
+		if (ret) {
+			DRM_ERROR("Error hacking up initial context: %d\n", ret);
+			return ret;
+		}
+		break;
 	}
 
 	INSTANCE_WR(chan->ramin_grctx->gpuobj, 0x00000/4,
@@ -243,7 +243,8 @@ nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, uint32_t *sz)
 		return -EINVAL;
 
 	gpuobj->im_backing = nouveau_mem_alloc(dev, NV50_INSTMEM_PAGE_SIZE,
-					       *sz, NOUVEAU_MEM_FB,
+					       *sz, NOUVEAU_MEM_FB |
+					       NOUVEAU_MEM_NOVM,
 					       (struct drm_file *)-2);
 	if (!gpuobj->im_backing) {
 		DRM_ERROR("Couldn't allocate vram to back PRAMIN pages\n");
@@ -558,6 +558,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
 #if __OS_HAS_AGP
 	if (dev_priv->is_pci) {
 #endif
+		dev_priv->gart_info.table_mask = DMA_BIT_MASK(32);
 		dev_priv->gart_info.gart_table_location = DRM_ATI_GART_MAIN;
 		dev_priv->gart_info.table_size = R128_PCIGART_TABLE_SIZE;
 		dev_priv->gart_info.addr = NULL;
@@ -189,18 +189,12 @@ void r300_init_reg_flags(struct drm_device *dev)
 	ADD_RANGE(R300_RE_CULL_CNTL, 1);
 	ADD_RANGE(0x42C0, 2);
 	ADD_RANGE(R300_RS_CNTL_0, 2);
-	ADD_RANGE(R300_RS_INTERP_0, 8);
-	ADD_RANGE(R300_RS_ROUTE_0, 8);
 
 	ADD_RANGE(0x43A4, 2);
 	ADD_RANGE(0x43E8, 1);
-	ADD_RANGE(R300_PFS_CNTL_0, 3);
-	ADD_RANGE(R300_PFS_NODE_0, 4);
-	ADD_RANGE(R300_PFS_TEXI_0, 64);
 
 	ADD_RANGE(0x46A4, 5);
-	ADD_RANGE(R300_PFS_INSTR0_0, 64);
-	ADD_RANGE(R300_PFS_INSTR1_0, 64);
-	ADD_RANGE(R300_PFS_INSTR2_0, 64);
-	ADD_RANGE(R300_PFS_INSTR3_0, 64);
 
 	ADD_RANGE(R300_RE_FOG_STATE, 1);
 	ADD_RANGE(R300_FOG_COLOR_R, 3);
 	ADD_RANGE(R300_PP_ALPHA_TEST, 2);
@@ -241,7 +235,19 @@ void r300_init_reg_flags(struct drm_device *dev)
 	ADD_RANGE(R300_VAP_INPUT_ROUTE_1_0, 8);
 
-	ADD_RANGE(0x4074, 16);
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
+		ADD_RANGE(R500_RS_IP_0, 16);
+		ADD_RANGE(R500_RS_INST_0, 16);
+	} else {
+		ADD_RANGE(R300_PFS_CNTL_0, 3);
+		ADD_RANGE(R300_PFS_NODE_0, 4);
+		ADD_RANGE(R300_PFS_TEXI_0, 64);
+		ADD_RANGE(R300_PFS_INSTR0_0, 64);
+		ADD_RANGE(R300_PFS_INSTR1_0, 64);
+		ADD_RANGE(R300_PFS_INSTR2_0, 64);
+		ADD_RANGE(R300_PFS_INSTR3_0, 64);
+		ADD_RANGE(R300_RS_INTERP_0, 8);
+		ADD_RANGE(R300_RS_ROUTE_0, 8);
+	}
 }
@@ -326,6 +332,7 @@ static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv,
 	sz = header.packet0.count;
 	reg = (header.packet0.reghi << 8) | header.packet0.reglo;
 
+	DRM_DEBUG("R300_CMD_PACKET0: reg %04x, sz %d\n", reg, sz);
 	if (!sz)
 		return 0;
@@ -729,6 +736,47 @@ static void r300_discard_buffer(struct drm_device * dev, struct drm_buf * buf)
 	buf->used = 0;
 }
 
+static void r300_cmd_wait(drm_radeon_private_t * dev_priv,
+			  drm_r300_cmd_header_t header)
+{
+	u32 wait_until;
+	RING_LOCALS;
+
+	if (!header.wait.flags)
+		return;
+
+	wait_until = 0;
+
+	switch (header.wait.flags) {
+	case R300_WAIT_2D:
+		wait_until = RADEON_WAIT_2D_IDLE;
+		break;
+	case R300_WAIT_3D:
+		wait_until = RADEON_WAIT_3D_IDLE;
+		break;
+	case R300_NEW_WAIT_2D_3D:
+		wait_until = RADEON_WAIT_2D_IDLE | RADEON_WAIT_3D_IDLE;
+		break;
+	case R300_NEW_WAIT_2D_2D_CLEAN:
+		wait_until = RADEON_WAIT_2D_IDLE | RADEON_WAIT_2D_IDLECLEAN;
+		break;
+	case R300_NEW_WAIT_3D_3D_CLEAN:
+		wait_until = RADEON_WAIT_3D_IDLE | RADEON_WAIT_3D_IDLECLEAN;
+		break;
+	case R300_NEW_WAIT_2D_2D_CLEAN_3D_3D_CLEAN:
+		wait_until = RADEON_WAIT_2D_IDLE | RADEON_WAIT_2D_IDLECLEAN;
+		wait_until |= RADEON_WAIT_3D_IDLE | RADEON_WAIT_3D_IDLECLEAN;
+		break;
+	default:
+		return;
+	}
+
+	BEGIN_RING(2);
+	OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
+	OUT_RING(wait_until);
+	ADVANCE_RING();
+}
 
 static int r300_scratch(drm_radeon_private_t *dev_priv,
 			drm_radeon_kcmd_buffer_t *cmdbuf,
 			drm_r300_cmd_header_t header)
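Editorial note: for orientation, a minimal userspace-side sketch (not part of this patch; it assumes only the drm_r300_cmd_header_t union from shared-core/radeon_drm.h quoted further below) of how a client would request the new combined wait:

/* Hypothetical client code: build a new-style wait command header. */
drm_r300_cmd_header_t header;
header.wait.cmd_type = R300_CMD_WAIT;
header.wait.flags = R300_NEW_WAIT_2D_2D_CLEAN_3D_3D_CLEAN;
/* header.u would then be written into the command stream handed to the
 * cmdbuf ioctl, where r300_do_cp_cmdbuf() dispatches it to r300_cmd_wait(). */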
@@ -786,6 +834,54 @@ static int r300_scratch(drm_radeon_private_t *dev_priv,
 	return 0;
 }
 
+/**
+ * Uploads user-supplied vertex program instructions or parameters onto
+ * the graphics card.
+ * Called by r300_do_cp_cmdbuf.
+ */
+static __inline__ int r300_emit_r500fp(drm_radeon_private_t *dev_priv,
+				       drm_radeon_kcmd_buffer_t *cmdbuf,
+				       drm_r300_cmd_header_t header)
+{
+	int sz;
+	int addr;
+	int type;
+	int clamp;
+	int stride;
+	RING_LOCALS;
+
+	sz = header.r500fp.count;
+	/* address is 9 bits 0 - 8; bit 0 of adrhi_flags is part of the address */
+	addr = ((header.r500fp.adrhi_flags & 1) << 8) | header.r500fp.adrlo;
+
+	type = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_TYPE);
+	clamp = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_CLAMP);
+
+	addr |= (type << 16);
+	addr |= (clamp << 17);
+
+	stride = type ? 4 : 6;
+
+	DRM_DEBUG("r500fp %d %d type: %d\n", sz, addr, type);
+	if (!sz)
+		return 0;
+	if (sz * stride * 4 > cmdbuf->bufsz)
+		return -EINVAL;
+
+	BEGIN_RING(3 + sz * stride);
+	OUT_RING_REG(R500_GA_US_VECTOR_INDEX, addr);
+	OUT_RING(CP_PACKET0_TABLE(R500_GA_US_VECTOR_DATA, sz * stride - 1));
+	OUT_RING_TABLE((int *)cmdbuf->buf, sz * stride);
+
+	ADVANCE_RING();
+
+	cmdbuf->buf += sz * stride * 4;
+	cmdbuf->bufsz -= sz * stride * 4;
+
+	return 0;
+}
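As a worked example of the address packing in r300_emit_r500fp() above (hypothetical values, not from the patch):

/* With header.r500fp.adrlo = 0x10 and adrhi_flags = R500FP_CONSTANT_TYPE:
 * addr = ((adrhi_flags & 1) << 8) | adrlo = 0x010 (the low flags bit is address bit 8),
 * type = 1 and clamp = 0, so addr |= 1 << 16, giving 0x10010,
 * stride = 4, so BEGIN_RING(3 + sz * stride) covers two dwords that set the
 * vector index, one table-write packet header, and sz * 4 data dwords. */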
 
 /**
  * Parses and validates a user-supplied command buffer and emits appropriate
  * commands on the DMA ring buffer.

@@ -824,7 +920,6 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
 
 		switch (header.header.cmd_type) {
 		case R300_CMD_PACKET0:
-			DRM_DEBUG("R300_CMD_PACKET0\n");
 			ret = r300_emit_packet0(dev_priv, cmdbuf, header);
 			if (ret) {
 				DRM_ERROR("r300_emit_packet0 failed\n");

@@ -908,19 +1003,8 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
 			break;
 
 		case R300_CMD_WAIT:
-			/* simple enough, we can do it here */
 			DRM_DEBUG("R300_CMD_WAIT\n");
-			if (header.wait.flags == 0)
-				break;	/* nothing to do */
-
-			{
-				RING_LOCALS;
-
-				BEGIN_RING(2);
-				OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
-				OUT_RING((header.wait.flags & 0xf) << 14);
-				ADVANCE_RING();
-			}
+			r300_cmd_wait(dev_priv, header);
 			break;
 
 		case R300_CMD_SCRATCH:

@@ -932,6 +1016,19 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
 			}
 			break;
 
+		case R300_CMD_R500FP:
+			if ((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_RV515) {
+				DRM_ERROR("Calling r500 command on r300 card\n");
+				ret = -EINVAL;
+				goto cleanup;
+			}
+			DRM_DEBUG("R300_CMD_R500FP\n");
+			ret = r300_emit_r500fp(dev_priv, cmdbuf, header);
+			if (ret) {
+				DRM_ERROR("r300_emit_r500fp failed\n");
+				goto cleanup;
+			}
+			break;
 		default:
 			DRM_ERROR("bad cmd_type %i at %p\n",
 				  header.header.cmd_type,
@@ -1626,6 +1626,12 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
 #define R300_CP_CMD_BITBLT_MULTI	0xC0009B00
 
+#define R500_GA_US_VECTOR_INDEX		0x4250
+#define R500_GA_US_VECTOR_DATA		0x4254
+
+#define R500_RS_IP_0			0x4074
+#define R500_RS_INST_0			0x4320
+
 #endif /* _R300_REG_H */
 
 /* *INDENT-ON* */
shared-core/radeon_cp.c (16890): file diff suppressed because it is too large.
@@ -225,9 +225,22 @@ typedef union {
 #define R300_CMD_WAIT			7
 #	define R300_WAIT_2D		0x1
 #	define R300_WAIT_3D		0x2
+/* These two defines are DOING IT WRONG; however,
+ * we have userspace which relies on using them.
+ * The wait interface is kept backwards compatible;
+ * new code should use the NEW_WAIT defines below.
+ * THESE ARE NOT BIT FIELDS.
+ */
 #	define R300_WAIT_2D_CLEAN	0x3
 #	define R300_WAIT_3D_CLEAN	0x4
+
+#	define R300_NEW_WAIT_2D_3D	0x3
+#	define R300_NEW_WAIT_2D_2D_CLEAN	0x4
+#	define R300_NEW_WAIT_3D_3D_CLEAN	0x6
+#	define R300_NEW_WAIT_2D_2D_CLEAN_3D_3D_CLEAN	0x8
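A short illustration (not from the patch) of the "NOT BIT FIELDS" warning above:

/* R300_WAIT_2D_CLEAN is the value 0x3, which aliases
 * R300_WAIT_2D | R300_WAIT_3D (0x1 | 0x2). Code that tests individual bits
 * would read "2D idle and clean" as "2D and 3D idle", which is why
 * r300_cmd_wait() switches on the whole flags byte and why new userspace
 * must use the distinct R300_NEW_WAIT_* values. */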
 #define R300_CMD_SCRATCH		8
+#define R300_CMD_R500FP			9
 
 typedef union {
 	unsigned int u;

@@ -256,6 +269,9 @@ typedef union {
 	struct {
 		unsigned char cmd_type, reg, n_bufs, flags;
 	} scratch;
+	struct {
+		unsigned char cmd_type, count, adrlo, adrhi_flags;
+	} r500fp;
 } drm_r300_cmd_header_t;
 
 #define RADEON_FRONT			0x1

@@ -266,6 +282,9 @@ typedef union {
 #define RADEON_USE_HIERZ		0x40000000
 #define RADEON_USE_COMP_ZBUF		0x20000000
 
+#define R500FP_CONSTANT_TYPE		(1 << 1)
+#define R500FP_CONSTANT_CLAMP		(1 << 2)
+
 /* Primitive types
  */
 #define RADEON_POINTS			0x1

@@ -501,6 +501,7 @@ extern int r300_do_cp_cmdbuf(struct drm_device *dev,
 #define RS690_MC_FB_LOCATION	0x100
 #define RS690_MC_AGP_LOCATION	0x101
 #define RS690_MC_AGP_BASE	0x102
+#define RS690_MC_AGP_BASE_2	0x103
 
 #define R520_MC_IND_INDEX	0x70
 #define R520_MC_IND_WR_EN	(1<<24)

@@ -74,7 +74,6 @@ static inline int amd_cmd_bo_validate(struct drm_device *dev,
 					bo->op_req.bo_req.mask,
 					bo->op_req.bo_req.hint,
 					bo->op_req.bo_req.fence_class,
-					0,
 					&bo->op_rep.bo_info,
 					&cmd_bo->bo);
 	if (ret) {

@@ -318,9 +317,19 @@ int amd_ioctl_cmd(struct drm_device *dev, void *data, struct drm_file *file)
 		DRM_ERROR("command dword count is 0.\n");
 		return -EINVAL;
 	}
-	/* FIXME: Lock buffer manager, is this really needed ? */
-	ret = drm_bo_read_lock(&dev->bm.bm_lock);
+
+	/* FIXME: Lock buffer manager. This is needed so the X server can
+	 * block DRI clients while VT switched. The X server will then
+	 * take the lock in write mode.
+	 */
+	ret = drm_bo_read_lock(&dev->bm.bm_lock, 1);
 	if (ret) {
+		/* FIXME: ret can be -EAGAIN here,
+		 * which really isn't an error.
+		 */
 		DRM_ERROR("bo read locking failed.\n");
 		return ret;
 	}
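The second FIXME notes that -EAGAIN from the interruptible read lock is a restartable condition rather than a failure; a plausible caller-side shape (an assumption about intended usage, not code from this patch) would be:

/* Hypothetical: pass -EAGAIN back for a restart instead of logging an error. */
ret = drm_bo_read_lock(&dev->bm.bm_lock, 1);
if (ret == -EAGAIN)
	return ret;	/* restartable, not a real error */
if (ret) {
	DRM_ERROR("bo read locking failed.\n");
	return ret;
}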
@@ -113,6 +113,8 @@ via_cmdbuf_wait(drm_via_private_t * dev_priv, unsigned int size)
 			  hw_addr, cur_addr, next_addr);
 		return -1;
 	}
+		if ((cur_addr < hw_addr) && (next_addr >= hw_addr))
+			msleep(1);
 	} while ((cur_addr < hw_addr) && (next_addr >= hw_addr));
 	return 0;
 }
@@ -406,27 +408,50 @@ static int via_hook_segment(drm_via_private_t * dev_priv,
 	int paused, count;
 	volatile uint32_t *paused_at = dev_priv->last_pause_ptr;
 	uint32_t reader, ptr;
+	uint32_t diff;
 
 	paused = 0;
 	via_flush_write_combine();
 	(void) *(volatile uint32_t *)(via_get_dma(dev_priv) - 1);
+
 	*paused_at = pause_addr_lo;
 	via_flush_write_combine();
 	(void) *paused_at;
+
 	reader = *(dev_priv->hw_addr_ptr);
 	ptr = ((volatile char *)paused_at - dev_priv->dma_ptr) +
 	      dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;
+
 	dev_priv->last_pause_ptr = via_get_dma(dev_priv) - 1;
 
-	if ((ptr - reader) <= dev_priv->dma_diff) {
-		count = 10000000;
-		while (!(paused = (VIA_READ(0x41c) & 0x80000000)) && count--);
+	/*
+	 * If there is a possibility that the command reader will
+	 * miss the new pause address and pause on the old one,
+	 * we need to program the new start address using PCI.
+	 */
+
+	diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
+	count = 10000000;
+	while (diff == 0 && count--) {
+		paused = (VIA_READ(0x41c) & 0x80000000);
+		if (paused)
+			break;
+		reader = *(dev_priv->hw_addr_ptr);
+		diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
 	}
 
+	paused = VIA_READ(0x41c) & 0x80000000;
+
 	if (paused && !no_pci_fire) {
 		reader = *(dev_priv->hw_addr_ptr);
-		if ((ptr - reader) == dev_priv->dma_diff) {
-
+		diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
+		diff &= (dev_priv->dma_high - 1);
+		if (diff != 0 && diff < (dev_priv->dma_high >> 1)) {
+			DRM_ERROR("Paused at incorrect address. "
+				  "0x%08x, 0x%08x 0x%08x\n",
+				  ptr, reader, dev_priv->dma_diff);
+		} else if (diff == 0) {
 			/*
 			 * There is a concern that these writes may stall the PCI bus
 			 * if the GPU is not idle. However, idling the GPU first
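A small worked example of the pause test above (hypothetical numbers, not from the patch):

/* dma_diff is the calibrated gap between our write pointer and the hardware
 * read pointer when the command regulator is fully caught up. Suppose
 * dma_diff = 8, ptr = 0x1000 and reader = 0x0ff8:
 * diff = (0x1000 - 0x0ff8) - 8 = 0, so the reader has consumed the buffer
 * and the loop polls bit 31 of register 0x41c to confirm that the
 * regulator has actually paused. */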
@@ -571,14 +596,14 @@ static void via_cmdbuf_jump(drm_via_private_t * dev_priv)
 	uint32_t pause_addr_lo, pause_addr_hi;
 	uint32_t jump_addr_lo, jump_addr_hi;
 	volatile uint32_t *last_pause_ptr;
+	uint32_t dma_low_save1, dma_low_save2;
 
 	agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
 	via_align_cmd(dev_priv, HC_HAGPBpID_JUMP, 0, &jump_addr_hi,
 		      &jump_addr_lo, 0);
 
 	dev_priv->dma_wrap = dev_priv->dma_low;
-
 
 	/*
 	 * Wrap command buffer to the beginning.
 	 */

@@ -590,15 +615,40 @@ static void via_cmdbuf_jump(drm_via_private_t * dev_priv)
 
 	via_dummy_bitblt(dev_priv);
 	via_dummy_bitblt(dev_priv);
-	last_pause_ptr = via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
-				       &pause_addr_lo, 0) - 1;
-	via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
-		      &pause_addr_lo, 0);
+
+	last_pause_ptr =
+		via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
+			      &pause_addr_lo, 0) - 1;
+	via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
+		      &pause_addr_lo, 0);
+
 	*last_pause_ptr = pause_addr_lo;
+	dma_low_save1 = dev_priv->dma_low;
+
+	/*
+	 * Now, set a trap that will pause the regulator if it tries to rerun the
+	 * old command buffer. (This may happen if via_hook_segment detects a
+	 * command regulator pause and reissues the jump command over PCI, while
+	 * the regulator has already taken the jump and actually paused at the
+	 * current buffer end.) There appears to be no other way to detect this
+	 * condition, since the hw_addr_pointer does not seem to get updated
+	 * immediately when a jump occurs.
+	 */
+
+	last_pause_ptr =
+		via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
+			      &pause_addr_lo, 0) - 1;
+	via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
+		      &pause_addr_lo, 0);
+	*last_pause_ptr = pause_addr_lo;
+
+	dma_low_save2 = dev_priv->dma_low;
+	dev_priv->dma_low = dma_low_save1;
+	via_hook_segment(dev_priv, jump_addr_hi, jump_addr_lo, 0);
+	dev_priv->dma_low = dma_low_save2;
 	via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0);
 }
 
 
 static void via_cmdbuf_rewind(drm_via_private_t * dev_priv)
 {
 	via_cmdbuf_jump(dev_priv);
@@ -190,11 +190,20 @@ int via_enable_vblank(struct drm_device *dev, int crtc)
 
 	status = VIA_READ(VIA_REG_INTERRUPT);
 	VIA_WRITE(VIA_REG_INTERRUPT, status & VIA_IRQ_VBLANK_ENABLE);
+
+	VIA_WRITE8(0x83d4, 0x11);
+	VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30);
+
 	return 0;
 }
 
 void via_disable_vblank(struct drm_device *dev, int crtc)
 {
+	drm_via_private_t *dev_priv = dev->dev_private;
+
+	VIA_WRITE8(0x83d4, 0x11);
+	VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30);
+
 	if (crtc != 0)
 		DRM_ERROR("%s: bad crtc %d\n", __FUNCTION__, crtc);
 }

@@ -311,6 +320,7 @@ int via_driver_irq_postinstall(struct drm_device * dev)
 	if (!dev_priv)
 		return -EINVAL;
 
+	drm_vblank_init(dev, 1);
 	status = VIA_READ(VIA_REG_INTERRUPT);
 	VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL
 		  | dev_priv->irq_enable_mask);