Merge branch 'master' into modesetting-101

Conflicts:

	linux-core/drm_bufs.c
	shared-core/i915_dma.c
	shared-core/i915_drv.h
	shared-core/i915_irq.c

commit 5ce43a346c
@@ -2487,6 +2487,7 @@ int drmFenceEmit(int fd, unsigned flags, drmFence *fence, unsigned emit_type)
     fence->fence_class = arg.fence_class;
     fence->type = arg.type;
     fence->signaled = arg.signaled;
+    fence->sequence = arg.sequence;
     return 0;
 }
 
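The hunk above fixes drmFenceEmit() to copy the kernel-filled sequence number back into the caller's fence, alongside the class, type and signaled fields. A minimal sketch of the same copy-back pattern, with illustrative stand-in types rather than libdrm's real definitions:

#include <stdint.h>

/* Illustrative stand-ins for the real drm_fence_arg / drmFence types. */
struct fence_arg { uint32_t fence_class, type, signaled, sequence; };
struct fence     { uint32_t fence_class, type, signaled, sequence; };

/* After the kernel fills 'arg', every output field must be mirrored into
 * the caller-visible fence; forgetting one (as 'sequence' was here)
 * leaves the caller with stale data. */
static void fence_copy_back(struct fence *f, const struct fence_arg *arg)
{
	f->fence_class = arg->fence_class;
	f->type        = arg->type;
	f->signaled    = arg->signaled;
	f->sequence    = arg->sequence;	/* the field the patch adds */
}

int main(void)
{
	struct fence_arg arg = { 0, 1, 0, 42 };
	struct fence f;
	fence_copy_back(&f, &arg);
	return f.sequence == 42 ? 0 : 1;
}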
@@ -25,7 +25,7 @@ i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \
 		i915_compat.o
 nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \
 		nouveau_object.o nouveau_irq.o nouveau_notifier.o nouveau_swmthd.o \
-		nouveau_sgdma.o nouveau_dma.o nouveau_buffer.o \
+		nouveau_sgdma.o nouveau_dma.o nouveau_buffer.o nouveau_fence.o \
 		nv04_timer.o \
 		nv04_mc.o nv40_mc.o nv50_mc.o \
 		nv04_fb.o nv10_fb.o nv40_fb.o \
 
@@ -267,12 +267,12 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct ati_pcigart_info *gart_i
 }
 EXPORT_SYMBOL(drm_ati_pcigart_init);
 
-static int ati_pcigart_needs_unbind_cache_adjust(drm_ttm_backend_t *backend)
+static int ati_pcigart_needs_unbind_cache_adjust(struct drm_ttm_backend *backend)
 {
 	return ((backend->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1);
 }
 
-static int ati_pcigart_populate(drm_ttm_backend_t *backend,
+static int ati_pcigart_populate(struct drm_ttm_backend *backend,
 				unsigned long num_pages,
 				struct page **pages)
 {

@@ -329,7 +329,7 @@ static int ati_pcigart_bind_ttm(struct drm_ttm_backend *backend,
 	return 0;
 }
 
-static int ati_pcigart_unbind_ttm(drm_ttm_backend_t *backend)
+static int ati_pcigart_unbind_ttm(struct drm_ttm_backend *backend)
 {
 	ati_pcigart_ttm_backend_t *atipci_be =
 		container_of(backend, ati_pcigart_ttm_backend_t, backend);

@@ -353,7 +353,7 @@ static int ati_pcigart_unbind_ttm(drm_ttm_backend_t *backend)
 	return 0;
 }
 
-static void ati_pcigart_clear_ttm(drm_ttm_backend_t *backend)
+static void ati_pcigart_clear_ttm(struct drm_ttm_backend *backend)
 {
 	ati_pcigart_ttm_backend_t *atipci_be =
 		container_of(backend, ati_pcigart_ttm_backend_t, backend);

@@ -367,7 +367,7 @@ static void ati_pcigart_clear_ttm(drm_ttm_backend_t *backend)
 	atipci_be->num_pages = 0;
 }
 
-static void ati_pcigart_destroy_ttm(drm_ttm_backend_t *backend)
+static void ati_pcigart_destroy_ttm(struct drm_ttm_backend *backend)
 {
 	ati_pcigart_ttm_backend_t *atipci_be;
 	if (backend) {
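The four ati_pcigart hooks above switch from the drm_ttm_backend_t typedef to a plain struct drm_ttm_backend, while still recovering the driver-private wrapper with container_of(). A self-contained sketch of that embed-and-recover pattern, with a userspace container_of and illustrative field names:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct drm_ttm_backend {
	unsigned flags;
};

/* Driver-private backend: embeds the generic struct as a member. */
struct atipci_backend {
	struct drm_ttm_backend backend;
	unsigned long num_pages;
};

static void clear_backend(struct drm_ttm_backend *be)
{
	/* Recover the enclosing driver object from the generic pointer. */
	struct atipci_backend *atipci =
		container_of(be, struct atipci_backend, backend);
	atipci->num_pages = 0;
}

int main(void)
{
	struct atipci_backend ati = { .num_pages = 42 };
	clear_backend(&ati.backend);
	printf("%lu\n", ati.num_pages);	/* prints 0 */
	return 0;
}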
@@ -1311,19 +1311,5 @@ static inline void drm_ctl_free(void *pt, size_t size, int area)
 
 /*@}*/
 
-/** Type for the OS's non-sleepable mutex lock */
-#define DRM_SPINTYPE		spinlock_t
-/**
- * Initialize the lock for use.  name is an optional string describing the
- * lock
- */
-#define DRM_SPININIT(l,name)	spin_lock_init(l)
-#define DRM_SPINUNINIT(l)
-#define DRM_SPINLOCK(l)		spin_lock(l)
-#define DRM_SPINUNLOCK(l)	spin_unlock(l)
-#define DRM_SPINLOCK_IRQSAVE(l, _flags)	spin_lock_irqsave(l, _flags);
-#define DRM_SPINUNLOCK_IRQRESTORE(l, _flags) spin_unlock_irqrestore(l, _flags);
-#define DRM_SPINLOCK_ASSERT(l)	do {} while (0)
-
 #endif /* __KERNEL__ */
 #endif
 
@@ -36,16 +36,16 @@
  * The buffer usage atomic_t needs to be protected by dev->struct_mutex
  * when there is a chance that it can be zero before or after the operation.
  *
- * dev->struct_mutex also protects all lists and list heads. Hash tables and hash
- * heads.
+ * dev->struct_mutex also protects all lists and list heads,
+ * Hash tables and hash heads.
  *
  * bo->mutex protects the buffer object itself excluding the usage field.
- * bo->mutex does also protect the buffer list heads, so to manipulate those, we need
- * both the bo->mutex and the dev->struct_mutex.
+ * bo->mutex does also protect the buffer list heads, so to manipulate those,
+ * we need both the bo->mutex and the dev->struct_mutex.
  *
- * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal is a bit
- * complicated. When dev->struct_mutex is released to grab bo->mutex, the list
- * traversal will, in general, need to be restarted.
+ * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal
+ * is a bit complicated. When dev->struct_mutex is released to grab bo->mutex,
+ * the list traversal will, in general, need to be restarted.
  *
  */
 
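The reflowed comment above spells out the ordering rule: bo->mutex is taken before dev->struct_mutex, so a traversal that drops struct_mutex to take bo->mutex generally has to restart. A simplified sketch of that restart pattern with pthread mutexes standing in for the kernel primitives; the list layout, the processed flag, and the names are mine, and the real code additionally pins each object with a usage refcount before dropping struct_mutex:

#include <pthread.h>

struct bo {
	pthread_mutex_t mutex;
	struct bo *next;	/* stand-in for the kernel list_head */
	int processed;
};

static pthread_mutex_t struct_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct bo *lru_head;

static void process_all(void (*fn)(struct bo *))
{
	pthread_mutex_lock(&struct_mutex);
restart:
	for (struct bo *bo = lru_head; bo; bo = bo->next) {
		if (bo->processed)
			continue;
		/* Lock order is bo->mutex before struct_mutex, so we must
		 * drop struct_mutex before taking bo->mutex. */
		pthread_mutex_unlock(&struct_mutex);
		pthread_mutex_lock(&bo->mutex);
		fn(bo);
		bo->processed = 1;
		pthread_mutex_unlock(&bo->mutex);
		/* The list may have changed while unlocked: retake the
		 * lock and restart the traversal from the head. */
		pthread_mutex_lock(&struct_mutex);
		goto restart;
	}
	pthread_mutex_unlock(&struct_mutex);
}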
@@ -286,11 +286,10 @@ int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals,
 			drm_fence_usage_deref_unlocked(&bo->fence);
 			return 0;
 		}
-		if (no_wait) {
+		if (no_wait)
 			return -EBUSY;
-		}
-		ret =
-		    drm_fence_object_wait(bo->fence, lazy, ignore_signals,
+
+		ret = drm_fence_object_wait(bo->fence, lazy, ignore_signals,
 					  bo->fence_type);
 		if (ret)
 			return ret;

@@ -358,9 +357,8 @@ static void drm_bo_cleanup_refs(struct drm_buffer_object * bo, int remove_all)
 
 	mutex_lock(&dev->struct_mutex);
 
-	if (!atomic_dec_and_test(&bo->usage)) {
+	if (!atomic_dec_and_test(&bo->usage))
 		goto out;
-	}
 
 	if (!bo->fence) {
 		list_del_init(&bo->lru);

@@ -465,11 +463,10 @@ static void drm_bo_delayed_delete(struct drm_device * dev, int remove_all)
 
 		drm_bo_cleanup_refs(entry, remove_all);
 
-		if (nentry) {
+		if (nentry)
 			atomic_dec(&nentry->usage);
-		}
 	}
 }
 
 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
 static void drm_bo_delayed_workqueue(void *data)

@@ -508,10 +505,9 @@ void drm_bo_usage_deref_locked(struct drm_buffer_object ** bo)
 
 	DRM_ASSERT_LOCKED(&tmp_bo->dev->struct_mutex);
 
-	if (atomic_dec_and_test(&tmp_bo->usage)) {
+	if (atomic_dec_and_test(&tmp_bo->usage))
 		drm_bo_destroy_locked(tmp_bo);
-	}
 }
 EXPORT_SYMBOL(drm_bo_usage_deref_locked);
 
 static void drm_bo_base_deref_locked(struct drm_file *file_priv,

@@ -686,7 +682,8 @@ static int drm_bo_evict(struct drm_buffer_object * bo, unsigned mem_type,
 	struct drm_bo_mem_reg evict_mem;
 
 	/*
-	 * Someone might have modified the buffer before we took the buffer mutex.
+	 * Someone might have modified the buffer before we took the
+	 * buffer mutex.
 	 */
 
 	if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)

@@ -940,7 +937,6 @@ int drm_bo_mem_space(struct drm_buffer_object * bo,
 	ret = (has_eagain) ? -EAGAIN : -ENOMEM;
 	return ret;
 }
-
 EXPORT_SYMBOL(drm_bo_mem_space);
 
 static int drm_bo_new_mask(struct drm_buffer_object *bo,

@@ -949,22 +945,19 @@ static int drm_bo_new_mask(struct drm_buffer_object * bo,
 	uint32_t new_props;
 
 	if (bo->type == drm_bo_type_user &&
-	    ((used_mask & (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING)) !=
+	    ((new_flags & (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING)) !=
 	     (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING))) {
 		DRM_ERROR("User buffers require cache-coherent memory.\n");
 		return -EINVAL;
 	}
 
 	if ((used_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
-		DRM_ERROR
-		    ("DRM_BO_FLAG_NO_EVICT is only available to priviliged "
-		     "processes.\n");
+		DRM_ERROR("DRM_BO_FLAG_NO_EVICT is only available to priviliged processes.\n");
 		return -EPERM;
 	}
 
 	if ((new_flags & DRM_BO_FLAG_NO_MOVE)) {
-		DRM_ERROR
-		    ("DRM_BO_FLAG_NO_MOVE is not properly implemented yet.\n");
+		DRM_ERROR("DRM_BO_FLAG_NO_MOVE is not properly implemented yet.\n");
 		return -EPERM;
 	}
 

@@ -1360,9 +1353,9 @@ static int drm_bo_mem_compat(struct drm_bo_mem_reg * mem)
 		return 0;
 	if ((flag_diff & DRM_BO_FLAG_CACHED) &&
 	    (/* !(mem->mask & DRM_BO_FLAG_CACHED) ||*/
-	     (mem->mask & DRM_BO_FLAG_FORCE_CACHING))) {
+	     (mem->mask & DRM_BO_FLAG_FORCE_CACHING)))
 		return 0;
-	}
+
 	if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
 	    ((mem->mask & DRM_BO_FLAG_MAPPABLE) ||
 	     (mem->mask & DRM_BO_FLAG_FORCE_MAPPABLE)))

@@ -1587,9 +1580,9 @@ static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle,
 	bo = drm_lookup_buffer_object(file_priv, handle, 1);
 	mutex_unlock(&dev->struct_mutex);
 
-	if (!bo) {
+	if (!bo)
 		return -EINVAL;
-	}
+
 	mutex_lock(&bo->mutex);
 	if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
 		(void)drm_bo_busy(bo);

@@ -1612,9 +1605,8 @@ static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle,
 	bo = drm_lookup_buffer_object(file_priv, handle, 1);
 	mutex_unlock(&dev->struct_mutex);
 
-	if (!bo) {
+	if (!bo)
 		return -EINVAL;
-	}
 
 	mutex_lock(&bo->mutex);
 	ret = drm_bo_wait_unfenced(bo, no_wait, 0);

@@ -2195,24 +2187,22 @@ int drm_bo_driver_finish(struct drm_device * dev)
 	}
 	mutex_unlock(&dev->struct_mutex);
 
-	if (!cancel_delayed_work(&bm->wq)) {
+	if (!cancel_delayed_work(&bm->wq))
 		flush_scheduled_work();
-	}
+
 	mutex_lock(&dev->struct_mutex);
 	drm_bo_delayed_delete(dev, 1);
-	if (list_empty(&bm->ddestroy)) {
+	if (list_empty(&bm->ddestroy))
 		DRM_DEBUG("Delayed destroy list was clean\n");
-	}
-	if (list_empty(&bm->man[0].lru)) {
+
+	if (list_empty(&bm->man[0].lru))
 		DRM_DEBUG("Swap list was clean\n");
-	}
-	if (list_empty(&bm->man[0].pinned)) {
+
+	if (list_empty(&bm->man[0].pinned))
 		DRM_DEBUG("NO_MOVE list was clean\n");
-	}
-	if (list_empty(&bm->unfenced)) {
+
+	if (list_empty(&bm->unfenced))
 		DRM_DEBUG("Unfenced list was clean\n");
-	}
-out:
 
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
 	unlock_page(bm->dummy_read_page);

@@ -2220,6 +2210,8 @@ int drm_bo_driver_finish(struct drm_device * dev)
 	ClearPageReserved(bm->dummy_read_page);
 #endif
 	__free_page(bm->dummy_read_page);
+
+out:
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
 }

@@ -2279,7 +2271,6 @@ int drm_bo_driver_init(struct drm_device * dev)
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
 }
-
 EXPORT_SYMBOL(drm_bo_driver_init);
 
 int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)

@@ -2450,7 +2441,6 @@ int drm_mem_reg_is_pci(struct drm_device * dev, struct drm_bo_mem_reg * mem)
 	}
 	return 1;
 }
-
 EXPORT_SYMBOL(drm_mem_reg_is_pci);
 
 /**
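Note that the two drm_bo_driver_finish() hunks also move the out: label below the dummy_read_page teardown, so the early goto now skips releasing a page that path never set up. The general goto-cleanup shape, as a self-contained sketch with illustrative resources:

#include <stdio.h>
#include <stdlib.h>

/* Generic goto-cleanup shape: resources are released in reverse order of
 * acquisition, and each label skips teardown of things not yet acquired. */
static int setup(void)
{
	int ret = -1;
	char *a = malloc(64), *b = NULL;

	if (!a)
		goto out;
	b = malloc(64);
	if (!b)
		goto out_free_a;

	printf("both allocations succeeded\n");
	ret = 0;
	free(b);
out_free_a:
	free(a);
out:
	return ret;
}

int main(void) { return setup(); }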
@@ -73,7 +73,6 @@ void drm_bo_read_unlock(struct drm_bo_lock *lock)
 	if (atomic_read(&lock->readers) == 0)
 		wake_up_interruptible(&lock->queue);
 }
-
 EXPORT_SYMBOL(drm_bo_read_unlock);
 
 int drm_bo_read_lock(struct drm_bo_lock *lock)

@@ -95,7 +94,6 @@ int drm_bo_read_lock(struct drm_bo_lock *lock)
 	}
 	return 0;
 }
-
 EXPORT_SYMBOL(drm_bo_read_lock);
 
 static int __drm_bo_write_unlock(struct drm_bo_lock *lock)

@@ -123,9 +121,8 @@ int drm_bo_write_lock(struct drm_bo_lock *lock, struct drm_file *file_priv)
 	int ret = 0;
 	struct drm_device *dev;
 
-	if (unlikely(atomic_cmpxchg(&lock->write_lock_pending, 0, 1) != 0)) {
+	if (unlikely(atomic_cmpxchg(&lock->write_lock_pending, 0, 1) != 0))
 		return -EINVAL;
-	}
 
 	while (unlikely(atomic_cmpxchg(&lock->readers, 0, -1) != 0)) {
 		ret = wait_event_interruptible

@@ -149,9 +146,9 @@ int drm_bo_write_lock(struct drm_bo_lock *lock, struct drm_file *file_priv)
 	ret = drm_add_user_object(file_priv, &lock->base, 0);
 	lock->base.remove = &drm_bo_write_lock_remove;
 	lock->base.type = drm_lock_type;
-	if (ret) {
+	if (ret)
 		(void)__drm_bo_write_unlock(lock);
-	}
+
 	mutex_unlock(&dev->struct_mutex);
 
 	return ret;
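drm_bo_write_lock() above claims the lock by flipping lock->readers from 0 to -1 with atomic_cmpxchg(), after first winning the write_lock_pending flag. The core compare-and-swap shape, transplanted to C11 atomics so it can be exercised standalone; the names and the reader loop are mine, not the kernel's:

#include <stdatomic.h>
#include <stdbool.h>

/* readers == 0: free; readers > 0: held shared; readers == -1: writer. */
static atomic_int readers = 0;

static bool try_read_lock(void)
{
	int cur = atomic_load(&readers);
	/* Only succeed while no writer (-1) holds the lock; on failure,
	 * 'cur' is reloaded with the current value and we retry. */
	while (cur >= 0) {
		if (atomic_compare_exchange_weak(&readers, &cur, cur + 1))
			return true;
	}
	return false;
}

static bool try_write_lock(void)
{
	int expected = 0;
	/* Exclusive claim: 0 -> -1, matching the kernel's
	 * atomic_cmpxchg(&lock->readers, 0, -1) in the hunk above. */
	return atomic_compare_exchange_strong(&readers, &expected, -1);
}

static void read_unlock(void)  { atomic_fetch_sub(&readers, 1); }
static void write_unlock(void) { atomic_store(&readers, 0); }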
@@ -82,7 +82,6 @@ int drm_bo_move_ttm(struct drm_buffer_object * bo,
 	DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
 	return 0;
 }
-
 EXPORT_SYMBOL(drm_bo_move_ttm);
 
 /**

@@ -145,10 +144,9 @@ void drm_mem_reg_iounmap(struct drm_device * dev, struct drm_bo_mem_reg * mem,
 	bm = &dev->bm;
 	man = &bm->man[mem->mem_type];
 
-	if (virtual && (man->flags & _DRM_FLAG_NEEDS_IOREMAP)) {
+	if (virtual && (man->flags & _DRM_FLAG_NEEDS_IOREMAP))
 		iounmap(virtual);
-	}
 }
 EXPORT_SYMBOL(drm_mem_reg_iounmap);
 
 static int drm_copy_io_page(void *dst, void *src, unsigned long page)

@@ -164,7 +162,8 @@ static int drm_copy_io_page(void *dst, void *src, unsigned long page)
 	return 0;
 }
 
-static int drm_copy_io_ttm_page(struct drm_ttm * ttm, void *src, unsigned long page)
+static int drm_copy_io_ttm_page(struct drm_ttm *ttm, void *src,
+				unsigned long page)
 {
 	struct page *d = drm_ttm_get_page(ttm, page);
 	void *dst;

@@ -272,7 +271,6 @@ int drm_bo_move_memcpy(struct drm_buffer_object * bo,
 	drm_mem_reg_iounmap(dev, &old_copy, old_iomap);
 	return ret;
 }
-
 EXPORT_SYMBOL(drm_bo_move_memcpy);
 
 /*

@@ -324,11 +322,9 @@ int drm_buffer_object_transfer(struct drm_buffer_object * bo,
  */
 
 int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo,
-			      int evict,
-			      int no_wait,
-			      uint32_t fence_class,
-			      uint32_t fence_type,
-			      uint32_t fence_flags, struct drm_bo_mem_reg * new_mem)
+			      int evict, int no_wait, uint32_t fence_class,
+			      uint32_t fence_type, uint32_t fence_flags,
+			      struct drm_bo_mem_reg *new_mem)
 {
 	struct drm_device *dev = bo->dev;
 	struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type];

@@ -408,7 +404,6 @@ int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo,
 	DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
 	return 0;
 }
-
 EXPORT_SYMBOL(drm_bo_move_accel_cleanup);
 
 int drm_bo_same_page(unsigned long offset,

@@ -421,13 +416,11 @@ EXPORT_SYMBOL(drm_bo_same_page);
 unsigned long drm_bo_offset_end(unsigned long offset,
 				unsigned long end)
 {
-
 	offset = (offset + PAGE_SIZE) & PAGE_MASK;
 	return (end < offset) ? end : offset;
 }
 EXPORT_SYMBOL(drm_bo_offset_end);
 
-
 static pgprot_t drm_kernel_io_prot(uint32_t map_type)
 {
 	pgprot_t tmp = PAGE_KERNEL;

@@ -476,8 +469,9 @@ static int drm_bo_ioremap(struct drm_buffer_object *bo, unsigned long bus_base,
 	return (!map->virtual) ? -ENOMEM : 0;
 }
 
-static int drm_bo_kmap_ttm(struct drm_buffer_object *bo, unsigned long start_page,
-			   unsigned long num_pages, struct drm_bo_kmap_obj *map)
+static int drm_bo_kmap_ttm(struct drm_buffer_object *bo,
+			   unsigned long start_page, unsigned long num_pages,
+			   struct drm_bo_kmap_obj *map)
 {
 	struct drm_device *dev = bo->dev;
 	struct drm_bo_mem_reg *mem = &bo->mem;

@@ -531,7 +525,8 @@ static int drm_bo_kmap_ttm(struct drm_buffer_object *bo, unsigned long start_pag
 * and caching policy the buffer currently has.
 * Mapping multiple pages or buffers that live in io memory is a bit slow and
 * consumes vmalloc space. Be restrictive with such mappings.
-* Mapping single pages usually returns the logical kernel address, (which is fast)
+* Mapping single pages usually returns the logical kernel address,
+* (which is fast)
 * BUG may use slower temporary mappings for high memory pages or
 * uncached / write-combined pages.
 *
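drm_bo_offset_end() above rounds an offset up to the next page boundary with (offset + PAGE_SIZE) & PAGE_MASK and clamps it to end; note that it advances even when the offset is already aligned, giving the end of the page that contains the offset. A standalone check of that arithmetic, assuming 4 KiB pages:

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Mirror of drm_bo_offset_end(): end of the page containing 'offset',
 * clamped to 'end'. Always advances, even on an aligned offset. */
static unsigned long bo_offset_end(unsigned long offset, unsigned long end)
{
	offset = (offset + PAGE_SIZE) & PAGE_MASK;
	return (end < offset) ? end : offset;
}

int main(void)
{
	assert(bo_offset_end(0x1234, 0x10000) == 0x2000);
	assert(bo_offset_end(0x1000, 0x10000) == 0x2000); /* aligned: next page */
	assert(bo_offset_end(0x1234, 0x1800)  == 0x1800); /* clamped by end */
	puts("ok");
	return 0;
}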
@@ -1601,5 +1601,3 @@ int drm_order(unsigned long size)
 	return order;
 }
 EXPORT_SYMBOL(drm_order);
-
-
@@ -65,7 +65,7 @@ int drm_dma_setup(struct drm_device * dev)
 * \param dev DRM device.
 *
 * Free all pages associated with DMA buffers, the buffers and pages lists, and
-* finally the the drm_device::dma structure itself.
+* finally the drm_device::dma structure itself.
 */
 void drm_dma_takedown(struct drm_device * dev)
 {
@@ -58,9 +58,8 @@ void drm_fence_handler(struct drm_device * dev, uint32_t fence_class,
 	diff = (sequence - fc->last_exe_flush) & driver->sequence_mask;
 	ge_last_exe = diff < driver->wrap_diff;
 
-	if (is_exe && ge_last_exe) {
+	if (is_exe && ge_last_exe)
 		fc->last_exe_flush = sequence;
-	}
 
 	if (list_empty(&fc->ring))
 		return;

@@ -141,7 +140,6 @@ void drm_fence_handler(struct drm_device * dev, uint32_t fence_class,
 		DRM_WAKEUP(&fc->fence_queue);
 	}
 }
-
 EXPORT_SYMBOL(drm_fence_handler);
 
 static void drm_fence_unring(struct drm_device *dev, struct list_head *ring)

@@ -212,7 +210,8 @@ void drm_fence_reference_unlocked(struct drm_fence_object **dst,
 }
 EXPORT_SYMBOL(drm_fence_reference_unlocked);
 
-static void drm_fence_object_destroy(struct drm_file *priv, struct drm_user_object * base)
+static void drm_fence_object_destroy(struct drm_file *priv,
+				     struct drm_user_object *base)
 {
 	struct drm_fence_object *fence =
 	    drm_user_object_entry(base, struct drm_fence_object, base);

@@ -241,7 +240,8 @@ int drm_fence_object_signaled(struct drm_fence_object * fence,
 EXPORT_SYMBOL(drm_fence_object_signaled);
 
 static void drm_fence_flush_exe(struct drm_fence_class_manager *fc,
-				struct drm_fence_driver * driver, uint32_t sequence)
+				struct drm_fence_driver *driver,
+				uint32_t sequence)
 {
 	uint32_t diff;
 

@@ -249,13 +249,11 @@ static void drm_fence_flush_exe(struct drm_fence_class_manager * fc,
 		fc->exe_flush_sequence = sequence;
 		fc->pending_exe_flush = 1;
 	} else {
-		diff =
-		    (sequence - fc->exe_flush_sequence) & driver->sequence_mask;
-		if (diff < driver->wrap_diff) {
+		diff = (sequence - fc->exe_flush_sequence) & driver->sequence_mask;
+		if (diff < driver->wrap_diff)
 			fc->exe_flush_sequence = sequence;
-		}
 	}
 }
 
 int drm_fence_object_flush(struct drm_fence_object *fence,
 			   uint32_t type)

@@ -296,7 +294,8 @@ int drm_fence_object_flush(struct drm_fence_object * fence,
 * wrapped around and reused.
 */
 
-void drm_fence_flush_old(struct drm_device * dev, uint32_t fence_class, uint32_t sequence)
+void drm_fence_flush_old(struct drm_device *dev, uint32_t fence_class,
+			 uint32_t sequence)
 {
 	struct drm_fence_manager *fm = &dev->fm;
 	struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];

@@ -328,12 +327,10 @@ void drm_fence_flush_old(struct drm_device * dev, uint32_t fence_class, uint32_t
 	mutex_unlock(&dev->struct_mutex);
 	diff = (old_sequence - fence->sequence) & driver->sequence_mask;
 	read_unlock_irqrestore(&fm->lock, flags);
-	if (diff < driver->wrap_diff) {
+	if (diff < driver->wrap_diff)
 		drm_fence_object_flush(fence, fence->type);
-	}
 	drm_fence_usage_deref_unlocked(&fence);
 }
-
 EXPORT_SYMBOL(drm_fence_flush_old);
 
 static int drm_fence_lazy_wait(struct drm_fence_object *fence,
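Both drm_fence_flush_exe() and drm_fence_flush_old() above compare 32-bit sequence numbers that wrap: diff = (a - b) & sequence_mask, then diff < wrap_diff decides whether a is at or ahead of b. A standalone demonstration using a full 32-bit mask and a half-range wrap_diff; the concrete mask and wrap values are per-driver in the real code:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SEQUENCE_MASK 0xffffffffu
#define WRAP_DIFF     0x80000000u	/* half the sequence space */

/* Non-zero if 'a' is at or ahead of 'b', modulo wraparound. */
static int seq_ge(uint32_t a, uint32_t b)
{
	uint32_t diff = (a - b) & SEQUENCE_MASK;
	return diff < WRAP_DIFF;
}

int main(void)
{
	assert(seq_ge(10, 5));			/* plainly ahead */
	assert(!seq_ge(5, 10));
	assert(seq_ge(3, 0xfffffffdu));		/* ahead across the wrap */
	assert(!seq_ge(0xfffffffdu, 3));
	puts("ok");
	return 0;
}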
@@ -431,10 +428,9 @@ int drm_fence_object_wait(struct drm_fence_object * fence,
 		/*
 		 * Avoid kernel-space busy-waits.
 		 */
-#if 1
 		if (!ignore_signals)
 			return -EAGAIN;
-#endif
+
 		do {
 			schedule();
 			signaled = drm_fence_object_signaled(fence, mask, 1);

@@ -447,9 +443,8 @@ int drm_fence_object_wait(struct drm_fence_object * fence,
 }
 EXPORT_SYMBOL(drm_fence_object_wait);
 
-int drm_fence_object_emit(struct drm_fence_object * fence,
-			  uint32_t fence_flags, uint32_t fence_class, uint32_t type)
+int drm_fence_object_emit(struct drm_fence_object *fence, uint32_t fence_flags,
+			  uint32_t fence_class, uint32_t type)
 {
 	struct drm_device *dev = fence->dev;
 	struct drm_fence_manager *fm = &dev->fm;

@@ -461,7 +456,8 @@ int drm_fence_object_emit(struct drm_fence_object * fence,
 	int ret;
 
 	drm_fence_unring(dev, &fence->ring);
-	ret = driver->emit(dev, fence_class, fence_flags, &sequence, &native_type);
+	ret = driver->emit(dev, fence_class, fence_flags, &sequence,
+			   &native_type);
 	if (ret)
 		return ret;
 

@@ -517,8 +513,8 @@ static int drm_fence_object_init(struct drm_device * dev, uint32_t fence_class,
 	return ret;
 }
 
-int drm_fence_add_user_object(struct drm_file * priv, struct drm_fence_object * fence,
-			      int shareable)
+int drm_fence_add_user_object(struct drm_file *priv,
+			      struct drm_fence_object *fence, int shareable)
 {
 	struct drm_device *dev = priv->head->dev;
 	int ret;

@@ -537,8 +533,9 @@ out:
 }
 EXPORT_SYMBOL(drm_fence_add_user_object);
 
-int drm_fence_object_create(struct drm_device * dev, uint32_t fence_class, uint32_t type,
-			    unsigned flags, struct drm_fence_object ** c_fence)
+int drm_fence_object_create(struct drm_device *dev, uint32_t fence_class,
+			    uint32_t type, unsigned flags,
+			    struct drm_fence_object **c_fence)
 {
 	struct drm_fence_object *fence;
 	int ret;

@@ -557,7 +554,6 @@ int drm_fence_object_create(struct drm_device * dev, uint32_t fence_class, uint3
 
 	return 0;
 }
-
 EXPORT_SYMBOL(drm_fence_object_create);
 
 void drm_fence_manager_init(struct drm_device *dev)

@@ -591,7 +587,8 @@ void drm_fence_manager_init(struct drm_device * dev)
 	write_unlock_irqrestore(&fm->lock, flags);
 }
 
-void drm_fence_fill_arg(struct drm_fence_object *fence, struct drm_fence_arg *arg)
+void drm_fence_fill_arg(struct drm_fence_object *fence,
+			struct drm_fence_arg *arg)
 {
 	struct drm_device *dev = fence->dev;
 	struct drm_fence_manager *fm = &dev->fm;

@@ -608,12 +605,12 @@ void drm_fence_fill_arg(struct drm_fence_object *fence, struct drm_fence_arg *ar
 }
 EXPORT_SYMBOL(drm_fence_fill_arg);
 
-
 void drm_fence_manager_takedown(struct drm_device *dev)
 {
 }
 
-struct drm_fence_object *drm_lookup_fence_object(struct drm_file * priv, uint32_t handle)
+struct drm_fence_object *drm_lookup_fence_object(struct drm_file *priv,
+						 uint32_t handle)
 {
 	struct drm_device *dev = priv->head->dev;
 	struct drm_user_object *uo;

@@ -663,7 +660,6 @@ int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file *
 
 	arg->handle = fence->base.hash.key;
 
-
 	drm_fence_fill_arg(fence, arg);
 	drm_fence_usage_deref_unlocked(&fence);
 
@@ -273,9 +273,8 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
 	}
 
 	if (ret) {
-		for(j=0; j<i; ++j) {
+		for(j = 0; j < i; ++j)
 			drm_ht_remove(&priv->refd_object_hash[j]);
-		}
 		goto out_free;
 	}
 

@@ -334,8 +333,8 @@ int drm_fasync(int fd, struct file *filp, int on)
 }
 EXPORT_SYMBOL(drm_fasync);
 
-static void drm_object_release(struct file *filp) {
-
+static void drm_object_release(struct file *filp)
+{
 	struct drm_file *priv = filp->private_data;
 	struct list_head *head;
 	struct drm_ref_object *ref_object;

@@ -530,4 +529,3 @@ unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
 	return 0;
 }
 EXPORT_SYMBOL(drm_poll);
-
@@ -65,4 +65,3 @@ extern void drm_ht_remove(struct drm_open_hash *ht);
 
-
 #endif
 
@@ -264,7 +264,7 @@ int drm_getstats(struct drm_device *dev, void *data,
 	struct drm_stats *stats = data;
 	int i;
 
-	memset(stats, 0, sizeof(stats));
+	memset(stats, 0, sizeof(*stats));
 
 	mutex_lock(&dev->struct_mutex);
 
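The drm_getstats() hunk is a genuine bug fix: sizeof(stats) is the size of the pointer (4 or 8 bytes), so the memset cleared only the first few bytes of the structure, while sizeof(*stats) clears the whole thing. A compact demonstration, with an illustrative struct in place of drm_stats:

#include <stdio.h>
#include <string.h>

struct drm_stats_like {
	int count;
	unsigned long data[30];
};

int main(void)
{
	struct drm_stats_like s, *stats = &s;

	memset(&s, 0xff, sizeof(s));
	memset(stats, 0, sizeof(stats));	/* BUG: pointer size only */
	printf("last word after buggy memset: %lx\n", s.data[29]); /* nonzero */

	memset(stats, 0, sizeof(*stats));	/* fixed: whole struct */
	printf("last word after fixed memset: %lx\n", s.data[29]); /* 0 */
	return 0;
}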
@@ -66,9 +66,9 @@ struct drm_user_object *drm_lookup_user_object(struct drm_file * priv, uint32_t
 	DRM_ASSERT_LOCKED(&dev->struct_mutex);
 
 	ret = drm_ht_find_item(&dev->object_hash, key, &hash);
-	if (ret) {
+	if (ret)
 		return NULL;
-	}
+
 	item = drm_hash_entry(hash, struct drm_user_object, hash);
 
 	if (priv != item->owner) {

@@ -234,6 +234,7 @@ void drm_remove_ref_object(struct drm_file * priv, struct drm_ref_object * item)
 	}
 
 }
+EXPORT_SYMBOL(drm_remove_ref_object);
 
 int drm_user_object_ref(struct drm_file *priv, uint32_t user_token,
 			enum drm_object_type type, struct drm_user_object **object)
@@ -183,18 +183,21 @@ struct drm_fence_driver {
 	int lazy_capable;
 	int (*has_irq) (struct drm_device *dev, uint32_t fence_class,
 			uint32_t flags);
-	int (*emit) (struct drm_device * dev, uint32_t fence_class, uint32_t flags,
-		     uint32_t * breadcrumb, uint32_t * native_type);
+	int (*emit) (struct drm_device *dev, uint32_t fence_class,
+		     uint32_t flags, uint32_t *breadcrumb,
+		     uint32_t *native_type);
 	void (*poke_flush) (struct drm_device *dev, uint32_t fence_class);
 };
 
 extern void drm_fence_handler(struct drm_device *dev, uint32_t fence_class,
-			      uint32_t sequence, uint32_t type, uint32_t error);
+			      uint32_t sequence, uint32_t type,
+			      uint32_t error);
 extern void drm_fence_manager_init(struct drm_device *dev);
 extern void drm_fence_manager_takedown(struct drm_device *dev);
 extern void drm_fence_flush_old(struct drm_device *dev, uint32_t fence_class,
 				uint32_t sequence);
-extern int drm_fence_object_flush(struct drm_fence_object * fence, uint32_t type);
+extern int drm_fence_object_flush(struct drm_fence_object *fence,
+				  uint32_t type);
 extern int drm_fence_object_signaled(struct drm_fence_object *fence,
 				     uint32_t type, int flush);
 extern void drm_fence_usage_deref_locked(struct drm_fence_object **fence);

@@ -214,7 +217,8 @@ extern void drm_fence_fill_arg(struct drm_fence_object *fence,
 			       struct drm_fence_arg *arg);
 
 extern int drm_fence_add_user_object(struct drm_file *priv,
-				     struct drm_fence_object * fence, int shareable);
+				     struct drm_fence_object *fence,
+				     int shareable);
 
 extern int drm_fence_create_ioctl(struct drm_device *dev, void *data,
 				  struct drm_file *file_priv);

@@ -241,7 +245,7 @@ extern int drm_fence_buffers_ioctl(struct drm_device *dev, void *data,
 /*
 * The ttm backend GTT interface. (In our case AGP).
 * Any similar type of device (PCIE?)
-* needs only to implement these functions to be usable with the "TTM" interface.
+* needs only to implement these functions to be usable with the TTM interface.
 * The AGP backend implementation lives in drm_agpsupport.c
 * basically maps these calls to available functions in agpgart.
 * Each drm device driver gets an

@@ -267,11 +271,11 @@ struct drm_ttm_backend_func {
 };
 
 
-typedef struct drm_ttm_backend {
+struct drm_ttm_backend {
 	struct drm_device *dev;
 	uint32_t flags;
 	struct drm_ttm_backend_func *func;
-} drm_ttm_backend_t;
+};
 
 struct drm_ttm {
 	struct mm_struct *user_mm;

@@ -309,9 +313,9 @@ extern int drm_ttm_set_user(struct drm_ttm *ttm,
 			    struct page *dummy_read_page);
 
 /*
-* Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do this,
-* which calls this function iff there are no vmas referencing it anymore. Otherwise it is called
-* when the last vma exits.
+* Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do
+* this which calls this function iff there are no vmas referencing it anymore.
+* Otherwise it is called when the last vma exits.
 */
 
 extern int drm_destroy_ttm(struct drm_ttm *ttm);

@@ -551,15 +555,14 @@ extern int drm_bo_do_validate(struct drm_buffer_object *bo,
 */
 
 extern int drm_bo_move_ttm(struct drm_buffer_object *bo,
-			   int evict, int no_wait, struct drm_bo_mem_reg * new_mem);
+			   int evict, int no_wait,
+			   struct drm_bo_mem_reg *new_mem);
 extern int drm_bo_move_memcpy(struct drm_buffer_object *bo,
 			      int evict,
 			      int no_wait, struct drm_bo_mem_reg *new_mem);
 extern int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo,
-				     int evict,
-				     int no_wait,
-				     uint32_t fence_class,
-				     uint32_t fence_type,
+				     int evict, int no_wait,
+				     uint32_t fence_class, uint32_t fence_type,
 				     uint32_t fence_flags,
 				     struct drm_bo_mem_reg *new_mem);
 extern int drm_bo_same_page(unsigned long offset, unsigned long offset2);
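drm_objects.h defines the backend as a small object carrying a func table, and the comment notes that any GTT-like device only has to fill in those functions to work with the TTM core. The general shape of that vtable pattern in C; the function list here is abbreviated and illustrative, not the full drm_ttm_backend_func:

#include <stdio.h>

struct ttm_backend;

/* Abbreviated stand-in for struct drm_ttm_backend_func: a table of
 * operations each memory backend (AGP, SG-DMA, ...) implements. */
struct ttm_backend_func {
	int  (*bind)(struct ttm_backend *be);
	int  (*unbind)(struct ttm_backend *be);
	void (*destroy)(struct ttm_backend *be);
};

struct ttm_backend {
	unsigned flags;
	const struct ttm_backend_func *func;
};

static int  agp_bind(struct ttm_backend *be)    { puts("agp bind");    return 0; }
static int  agp_unbind(struct ttm_backend *be)  { puts("agp unbind");  return 0; }
static void agp_destroy(struct ttm_backend *be) { puts("agp destroy"); }

static const struct ttm_backend_func agp_func = {
	.bind = agp_bind, .unbind = agp_unbind, .destroy = agp_destroy,
};

int main(void)
{
	struct ttm_backend be = { .func = &agp_func };
	/* Core code only ever calls through the table: */
	be.func->bind(&be);
	be.func->unbind(&be);
	be.func->destroy(&be);
	return 0;
}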
@@ -129,3 +129,17 @@ do { \
 
 #define DRM_WAKEUP( queue )		wake_up_interruptible( queue )
 #define DRM_INIT_WAITQUEUE( queue )	init_waitqueue_head( queue )
+
+/** Type for the OS's non-sleepable mutex lock */
+#define DRM_SPINTYPE		spinlock_t
+/**
+ * Initialize the lock for use.  name is an optional string describing the
+ * lock
+ */
+#define DRM_SPININIT(l,name)	spin_lock_init(l)
+#define DRM_SPINUNINIT(l)
+#define DRM_SPINLOCK(l)		spin_lock(l)
+#define DRM_SPINUNLOCK(l)	spin_unlock(l)
+#define DRM_SPINLOCK_IRQSAVE(l, _flags)	spin_lock_irqsave(l, _flags);
+#define DRM_SPINUNLOCK_IRQRESTORE(l, _flags) spin_unlock_irqrestore(l, _flags);
+#define DRM_SPINLOCK_ASSERT(l)	do {} while (0)
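The DRM_SPIN* wrappers deleted from drmP.h earlier in this diff reappear here in drm_os_linux.h: thin, OS-neutral aliases over the Linux spinlock API so shared code stays portable across the BSD and Linux trees. A sketch of how such wrappers are exercised, with userspace pthread stand-ins substituted for the kernel definitions so it can actually run (build with -lpthread):

#include <pthread.h>
#include <stdio.h>

/* Userspace stand-ins for the macros above, which map to spinlock_t
 * in the kernel; the wrapper style is the point, not the backing type. */
#define DRM_SPINTYPE		pthread_spinlock_t
#define DRM_SPININIT(l, name)	pthread_spin_init(l, 0)
#define DRM_SPINUNINIT(l)	pthread_spin_destroy(l)
#define DRM_SPINLOCK(l)		pthread_spin_lock(l)
#define DRM_SPINUNLOCK(l)	pthread_spin_unlock(l)

static DRM_SPINTYPE lock;
static int counter;

int main(void)
{
	DRM_SPININIT(&lock, "demo");
	DRM_SPINLOCK(&lock);
	counter++;		/* the protected critical section */
	DRM_SPINUNLOCK(&lock);
	DRM_SPINUNINIT(&lock);
	printf("%d\n", counter);
	return 0;
}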
@@ -54,18 +54,17 @@ static void ttm_alloc_pages(struct drm_ttm * ttm)
 	if (drm_alloc_memctl(size))
 		return;
 
-	if (size <= PAGE_SIZE) {
+	if (size <= PAGE_SIZE)
 		ttm->pages = drm_calloc(1, size, DRM_MEM_TTM);
-	}
+
 	if (!ttm->pages) {
 		ttm->pages = vmalloc_user(size);
 		if (ttm->pages)
 			ttm->page_flags |= DRM_TTM_PAGE_VMALLOC;
 	}
-	if (!ttm->pages) {
+	if (!ttm->pages)
 		drm_free_memctl(size);
-	}
 }
 
 static void ttm_free_pages(struct drm_ttm *ttm)
 {

@@ -85,9 +84,9 @@ static struct page *drm_ttm_alloc_page(void)
 {
 	struct page *page;
 
-	if (drm_alloc_memctl(PAGE_SIZE)) {
+	if (drm_alloc_memctl(PAGE_SIZE))
 		return NULL;
-	}
+
 	page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
 	if (!page) {
 		drm_free_memctl(PAGE_SIZE);

@@ -186,14 +185,10 @@ static void drm_ttm_free_alloced_pages(struct drm_ttm *ttm)
 #else
 			ClearPageReserved(*cur_page);
 #endif
-			if (page_count(*cur_page) != 1) {
-				DRM_ERROR("Erroneous page count. "
-					  "Leaking pages.\n");
-			}
-			if (page_mapped(*cur_page)) {
-				DRM_ERROR("Erroneous map count. "
-					  "Leaking page mappings.\n");
-			}
+			if (page_count(*cur_page) != 1)
+				DRM_ERROR("Erroneous page count. Leaking pages.\n");
+			if (page_mapped(*cur_page))
+				DRM_ERROR("Erroneous map count. Leaking page mappings.\n");
 			__free_page(*cur_page);
 			drm_free_memctl(PAGE_SIZE);
 			--bm->cur_pages;

@@ -284,10 +279,9 @@ int drm_ttm_set_user(struct drm_ttm *ttm,
 	}
 
 	for (i = 0; i < num_pages; ++i) {
-		if (ttm->pages[i] == NULL) {
+		if (ttm->pages[i] == NULL)
 			ttm->pages[i] = ttm->dummy_read_page;
-		}
 	}
 
 	return 0;
 }

@@ -380,9 +374,8 @@ void drm_ttm_fixup_caching(struct drm_ttm * ttm)
 
 	if (ttm->state == ttm_evicted) {
 		struct drm_ttm_backend *be = ttm->be;
-		if (be->func->needs_ub_cache_adjust(be)) {
+		if (be->func->needs_ub_cache_adjust(be))
 			drm_set_caching(ttm, 0);
-		}
 		ttm->state = ttm_unbound;
 	}
 }

@@ -412,13 +405,14 @@ int drm_bind_ttm(struct drm_ttm * ttm, struct drm_bo_mem_reg *bo_mem)
 	if (ret)
 		return ret;
 
-	if (ttm->state == ttm_unbound && !(bo_mem->flags & DRM_BO_FLAG_CACHED)) {
+	if (ttm->state == ttm_unbound && !(bo_mem->flags & DRM_BO_FLAG_CACHED))
 		drm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED);
-	} else if ((bo_mem->flags & DRM_BO_FLAG_CACHED_MAPPED) &&
+	else if ((bo_mem->flags & DRM_BO_FLAG_CACHED_MAPPED) &&
 		   bo_driver->ttm_cache_flush)
 		bo_driver->ttm_cache_flush(ttm);
 
-	if ((ret = be->func->bind(be, bo_mem))) {
+	ret = be->func->bind(be, bo_mem);
+	if (ret) {
 		ttm->state = ttm_evicted;
 		DRM_ERROR("Couldn't bind backend.\n");
 		return ret;

@@ -429,5 +423,4 @@ int drm_bind_ttm(struct drm_ttm * ttm, struct drm_bo_mem_reg *bo_mem)
 		ttm->page_flags |= DRM_TTM_PAGE_USER_DIRTY;
 	return 0;
 }
-
 EXPORT_SYMBOL(drm_bind_ttm);
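ttm_alloc_pages() above tries a small linear allocation first and falls back to vmalloc_user() for large page arrays, flagging which allocator was used so the matching free routine can pick the right release path. The same two-tier shape in portable C; malloc stands in for both kernel allocators, and the threshold and flag are illustrative:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE	4096u
#define PAGE_VMALLOC	0x1	/* remember which allocator was used */

struct ttm_like {
	void **pages;
	unsigned page_flags;
};

static void alloc_page_array(struct ttm_like *ttm, unsigned long num_pages)
{
	unsigned long size = num_pages * sizeof(void *);

	ttm->pages = NULL;
	if (size <= PAGE_SIZE)			/* small: cheap contiguous path */
		ttm->pages = calloc(1, size);
	if (!ttm->pages) {			/* large, or first try failed */
		ttm->pages = calloc(1, size);	/* vmalloc_user() in the kernel */
		if (ttm->pages)
			ttm->page_flags |= PAGE_VMALLOC;
	}
}

int main(void)
{
	struct ttm_like small = {0}, big = {0};
	alloc_page_array(&small, 8);
	alloc_page_array(&big, 100000);
	printf("small flags=%#x big flags=%#x\n",
	       small.page_flags, big.page_flags);
	free(small.pages);
	free(big.pages);
	return 0;
}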
@@ -166,7 +166,7 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
 * \param address access address.
 * \return pointer to the page structure.
 *
-* Get the the mapping, find the real physical page to map, get the page, and
+* Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
 static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
@ -1,5 +1,5 @@
|
||||||
/*
|
/*
|
||||||
* Copyright 2005 Stephane Marchesin.
|
* Copyright 2007 Dave Airlied
|
||||||
* All Rights Reserved.
|
* All Rights Reserved.
|
||||||
*
|
*
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||||
|
@ -22,27 +22,39 @@
|
||||||
* OTHER DEALINGS IN THE SOFTWARE.
|
* OTHER DEALINGS IN THE SOFTWARE.
|
||||||
*/
|
*/
|
||||||
/*
|
/*
|
||||||
* Authors: Jeremy Kolb <jkolb@brandeis.edu>
|
* Authors: Dave Airlied <airlied@linux.ie>
|
||||||
|
* Ben Skeggs <darktama@iinet.net.au>
|
||||||
|
* Jeremy Kolb <jkolb@brandeis.edu>
|
||||||
*/
|
*/
|
||||||
|
|
||||||
#include "drmP.h"
|
#include "drmP.h"
|
||||||
#include "nouveau_drm.h"
|
#include "nouveau_drm.h"
|
||||||
#include "nouveau_drv.h"
|
#include "nouveau_drv.h"
|
||||||
|
#include "nouveau_dma.h"
|
||||||
|
|
||||||
#ifdef NOUVEAU_HAVE_BUFFER
|
static struct drm_ttm_backend *
|
||||||
|
nouveau_bo_create_ttm_backend_entry(struct drm_device * dev)
|
||||||
struct drm_ttm_backend *nouveau_create_ttm_backend_entry(struct drm_device * dev)
|
|
||||||
{
|
{
|
||||||
struct drm_nouveau_private *dev_priv = dev->dev_private;
|
struct drm_nouveau_private *dev_priv = dev->dev_private;
|
||||||
|
|
||||||
|
switch (dev_priv->gart_info.type) {
|
||||||
|
case NOUVEAU_GART_AGP:
|
||||||
return drm_agp_init_ttm(dev);
|
return drm_agp_init_ttm(dev);
|
||||||
|
case NOUVEAU_GART_SGDMA:
|
||||||
|
return nouveau_sgdma_init_ttm(dev);
|
||||||
|
default:
|
||||||
|
DRM_ERROR("Unknown GART type %d\n", dev_priv->gart_info.type);
|
||||||
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
int nouveau_fence_types(struct drm_buffer_object *bo,
|
return NULL;
|
||||||
uint32_t *fclass,
|
}
|
||||||
uint32_t *type)
|
|
||||||
|
static int
|
||||||
|
nouveau_bo_fence_type(struct drm_buffer_object *bo,
|
||||||
|
uint32_t *fclass, uint32_t *type)
|
||||||
{
|
{
|
||||||
*fclass = 0;
|
/* When we get called, *fclass is set to the requested fence class */
|
||||||
|
|
||||||
if (bo->mem.mask & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
|
if (bo->mem.mask & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
|
||||||
*type = 3;
|
*type = 3;
|
||||||
|
@ -51,14 +63,16 @@ int nouveau_fence_types(struct drm_buffer_object *bo,
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
}
|
}
|
||||||
int nouveau_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags)
|
|
||||||
|
static int
|
||||||
|
nouveau_bo_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags)
|
||||||
{
|
{
|
||||||
/* We'll do this from user space. */
|
/* We'll do this from user space. */
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
int nouveau_init_mem_type(struct drm_device *dev,
|
static int
|
||||||
uint32_t type,
|
nouveau_bo_init_mem_type(struct drm_device *dev, uint32_t type,
|
||||||
struct drm_mem_type_manager *man)
|
struct drm_mem_type_manager *man)
|
||||||
{
|
{
|
||||||
struct drm_nouveau_private *dev_priv = dev->dev_private;
|
struct drm_nouveau_private *dev_priv = dev->dev_private;
|
||||||
|
@ -69,32 +83,46 @@ int nouveau_init_mem_type(struct drm_device *dev,
|
||||||
_DRM_FLAG_MEMTYPE_CACHED;
|
_DRM_FLAG_MEMTYPE_CACHED;
|
||||||
man->drm_bus_maptype = 0;
|
man->drm_bus_maptype = 0;
|
||||||
break;
|
break;
|
||||||
|
|
||||||
case DRM_BO_MEM_VRAM:
|
case DRM_BO_MEM_VRAM:
|
||||||
man->flags = _DRM_FLAG_MEMTYPE_FIXED |
|
man->flags = _DRM_FLAG_MEMTYPE_FIXED |
|
||||||
_DRM_FLAG_MEMTYPE_MAPPABLE |
|
_DRM_FLAG_MEMTYPE_MAPPABLE |
|
||||||
_DRM_FLAG_NEEDS_IOREMAP;
|
_DRM_FLAG_NEEDS_IOREMAP;
|
||||||
man->io_addr = NULL;
|
man->io_addr = NULL;
|
||||||
man->drm_bus_maptype = _DRM_FRAME_BUFFER;
|
man->drm_bus_maptype = _DRM_FRAME_BUFFER;
|
||||||
man->io_offset = drm_get_resource_start(dev, 0);
|
man->io_offset = drm_get_resource_start(dev, 1);
|
||||||
man->io_size = drm_get_resource_len(dev, 0);
|
man->io_size = drm_get_resource_len(dev, 1);
|
||||||
|
if (man->io_size > nouveau_mem_fb_amount(dev))
|
||||||
|
man->io_size = nouveau_mem_fb_amount(dev);
|
||||||
|
break;
|
||||||
|
case DRM_BO_MEM_PRIV0:
|
||||||
|
/* Unmappable VRAM */
|
||||||
|
man->flags = _DRM_FLAG_MEMTYPE_CMA;
|
||||||
|
man->drm_bus_maptype = 0;
|
||||||
break;
|
break;
|
||||||
|
|
||||||
case DRM_BO_MEM_TT:
|
case DRM_BO_MEM_TT:
|
||||||
if (!(drm_core_has_AGP(dev) && dev->agp)) {
|
switch (dev_priv->gart_info.type) {
|
||||||
DRM_ERROR("AGP is not enabled for memory type %u\n",
|
case NOUVEAU_GART_AGP:
|
||||||
(unsigned)type);
|
man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
|
||||||
|
_DRM_FLAG_MEMTYPE_CSELECT |
|
||||||
|
_DRM_FLAG_NEEDS_IOREMAP;
|
||||||
|
man->drm_bus_maptype = _DRM_AGP;
|
||||||
|
break;
|
||||||
|
case NOUVEAU_GART_SGDMA:
|
||||||
|
man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
|
||||||
|
_DRM_FLAG_MEMTYPE_CSELECT |
|
||||||
|
_DRM_FLAG_MEMTYPE_CMA;
|
||||||
|
man->drm_bus_maptype = _DRM_SCATTER_GATHER;
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
DRM_ERROR("Unknown GART type: %d\n",
|
||||||
|
dev_priv->gart_info.type);
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
|
||||||
man->io_offset = dev->agp->agp_info.aper_base;
|
man->io_offset = dev_priv->gart_info.aper_base;
|
||||||
man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024;
|
man->io_size = dev_priv->gart_info.aper_size;
|
||||||
man->io_addr = NULL;
|
man->io_addr = NULL;
|
||||||
man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
|
|
||||||
_DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_NEEDS_IOREMAP;
|
|
||||||
man->drm_bus_maptype = _DRM_AGP;
|
|
||||||
break;
|
break;
|
||||||
|
|
||||||
default:
|
default:
|
||||||
DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
|
DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
@@ -102,44 +130,127 @@ int nouveau_init_mem_type(struct drm_device *dev,
 	return 0;
 }
 
-uint32_t nouveau_evict_mask(struct drm_buffer_object *bo)
+static uint32_t
+nouveau_bo_evict_mask(struct drm_buffer_object *bo)
 {
 	switch (bo->mem.mem_type) {
 	case DRM_BO_MEM_LOCAL:
 	case DRM_BO_MEM_TT:
 		return DRM_BO_FLAG_MEM_LOCAL;
-	case DRM_BO_MEM_VRAM:
-		if (bo->mem.num_pages > 128)
-			return DRM_BO_MEM_TT;
-		else
-			return DRM_BO_MEM_LOCAL;
 	default:
 		return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED;
 	}
+	return 0;
 }
 
-int nouveau_move(struct drm_buffer_object *bo,
-		 int evict,
-		 int no_wait,
+/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
+ * DRM_BO_MEM_{VRAM,PRIV0,TT} directly.
+ */
+static int
+nouveau_bo_move_m2mf(struct drm_buffer_object *bo, int evict, int no_wait,
+		     struct drm_bo_mem_reg *new_mem)
+{
+	struct drm_device *dev = bo->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm_channel *dchan = &dev_priv->channel;
+	struct drm_bo_mem_reg *old_mem = &bo->mem;
+	uint32_t srch, dsth, page_count;
+
+	/* Can happen during init/takedown */
+	if (!dchan->chan)
+		return -EINVAL;
+
+	srch = old_mem->mem_type == DRM_BO_MEM_TT ? NvDmaTT : NvDmaFB;
+	dsth = new_mem->mem_type == DRM_BO_MEM_TT ? NvDmaTT : NvDmaFB;
+	if (srch != dchan->m2mf_dma_source || dsth != dchan->m2mf_dma_destin) {
+		dchan->m2mf_dma_source = srch;
+		dchan->m2mf_dma_destin = dsth;
+
+		BEGIN_RING(NvSubM2MF,
+			   NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_SOURCE, 2);
+		OUT_RING  (dchan->m2mf_dma_source);
+		OUT_RING  (dchan->m2mf_dma_destin);
+	}
+
+	page_count = new_mem->num_pages;
+	while (page_count) {
+		int line_count = (page_count > 2047) ? 2047 : page_count;
+
+		BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
+		OUT_RING  (old_mem->mm_node->start << PAGE_SHIFT);
+		OUT_RING  (new_mem->mm_node->start << PAGE_SHIFT);
+		OUT_RING  (PAGE_SIZE); /* src_pitch */
+		OUT_RING  (PAGE_SIZE); /* dst_pitch */
+		OUT_RING  (PAGE_SIZE); /* line_length */
+		OUT_RING  (line_count);
+		OUT_RING  ((1<<8)|(1<<0));
+		OUT_RING  (0);
+		BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
+		OUT_RING  (0);
+
+		page_count -= line_count;
+	}
+
+	return drm_bo_move_accel_cleanup(bo, evict, no_wait, dchan->chan->id,
+					 DRM_FENCE_TYPE_EXE, 0, new_mem);
+}
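The M2MF path above feeds the copy to the hardware in bursts: each NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN packet transfers one page per line and at most 2047 lines, presumably because of the width of the class's line-count field, so the loop repeats until every page is covered. A minimal standalone sketch of the same batching arithmetic, outside the ring macros (the function name and printf reporting are illustrative assumptions, not driver code):

	#include <stdio.h>

	/* Mirrors the while (page_count) loop in nouveau_bo_move_m2mf():
	 * one page per line, at most 2047 lines per burst. */
	static void m2mf_batches(unsigned int page_count)
	{
		while (page_count) {
			unsigned int line_count =
				(page_count > 2047) ? 2047 : page_count;
			printf("burst: %u lines\n", line_count);
			page_count -= line_count;
		}
	}

	int main(void)
	{
		m2mf_batches(5000);	/* prints 2047, 2047, 906 */
		return 0;
	}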
+
+static int
+nouveau_bo_move(struct drm_buffer_object *bo, int evict, int no_wait,
 		struct drm_bo_mem_reg *new_mem)
 {
 	struct drm_bo_mem_reg *old_mem = &bo->mem;
 
-	if (old_mem->mem_type == DRM_BO_MEM_LOCAL) {
+	if (new_mem->mem_type == DRM_BO_MEM_LOCAL) {
+		if (old_mem->mem_type == DRM_BO_MEM_LOCAL)
 			return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
-	else if (new_mem->mem_type == DRM_BO_MEM_LOCAL) {
+#if 0
+		if (!nouveau_bo_move_flipd(bo, evict, no_wait, new_mem))
+#endif
+			return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+	}
+	else
+	if (old_mem->mem_type == DRM_BO_MEM_LOCAL) {
+#if 0
+		if (nouveau_bo_move_flips(bo, evict, no_wait, new_mem))
+#endif
 			return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
 	}
 	else {
+//		if (nouveau_bo_move_m2mf(bo, evict, no_wait, new_mem))
 			return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
 	}
 
 	return 0;
 }
 
-void nouveau_flush_ttm(struct drm_ttm *ttm)
+static void
+nouveau_bo_flush_ttm(struct drm_ttm *ttm)
 {
 }
 
-#endif
+static uint32_t nouveau_mem_prios[]  = {
+	DRM_BO_MEM_PRIV0,
+	DRM_BO_MEM_VRAM,
+	DRM_BO_MEM_TT,
+	DRM_BO_MEM_LOCAL
+};
+static uint32_t nouveau_busy_prios[] = {
+	DRM_BO_MEM_TT,
+	DRM_BO_MEM_PRIV0,
+	DRM_BO_MEM_VRAM,
+	DRM_BO_MEM_LOCAL
+};
+
+struct drm_bo_driver nouveau_bo_driver = {
+	.mem_type_prio = nouveau_mem_prios,
+	.mem_busy_prio = nouveau_busy_prios,
+	.num_mem_type_prio = sizeof(nouveau_mem_prios)/sizeof(uint32_t),
+	.num_mem_busy_prio = sizeof(nouveau_busy_prios)/sizeof(uint32_t),
+	.create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
+	.fence_type = nouveau_bo_fence_type,
+	.invalidate_caches = nouveau_bo_invalidate_caches,
+	.init_mem_type = nouveau_bo_init_mem_type,
+	.evict_mask = nouveau_bo_evict_mask,
+	.move = nouveau_bo_move,
+	.ttm_cache_flush= nouveau_bo_flush_ttm
+};
@@ -41,25 +41,6 @@ static struct pci_device_id pciidlist[] = {
 	}
 };
 
-#ifdef NOUVEAU_HAVE_BUFFER
-static uint32_t nouveau_mem_prios[] = { DRM_BO_MEM_VRAM, DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL };
-static uint32_t nouveau_busy_prios[] = { DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL };
-
-static struct drm_bo_driver nouveau_bo_driver = {
-	.mem_type_prio = nouveau_mem_prios,
-	.mem_busy_prio = nouveau_busy_prios,
-	.num_mem_type_prio = sizeof(nouveau_mem_prios)/sizeof(uint32_t),
-	.num_mem_busy_prio = sizeof(nouveau_busy_prios)/sizeof(uint32_t),
-	.create_ttm_backend_entry = nouveau_create_ttm_backend_entry,
-	.fence_type = nouveau_fence_types,
-	.invalidate_caches = nouveau_invalidate_caches,
-	.init_mem_type = nouveau_init_mem_type,
-	.evict_mask = nouveau_evict_mask,
-	.move = nouveau_move,
-	.ttm_cache_flush= nouveau_flush_ttm
-};
-#endif
-
 extern struct drm_ioctl_desc nouveau_ioctls[];
 extern int nouveau_max_ioctl;
 
@@ -99,9 +80,9 @@ static struct drm_driver driver = {
 		.probe = probe,
 		.remove = __devexit_p(drm_cleanup_pci),
 	},
-#ifdef NOUVEAU_HAVE_BUFFER
 	.bo_driver = &nouveau_bo_driver,
-#endif
+	.fence_driver = &nouveau_fence_driver,
 
 	.name = DRIVER_NAME,
 	.desc = DRIVER_DESC,
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+
+static int
+nouveau_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t flags)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	DRM_DEBUG("class=%d, flags=0x%08x\n", class, flags);
+
+	/* DRM's channel always uses IRQs to signal fences */
+	if (class == dev_priv->channel.chan->id)
+		return 1;
+
+	/* Other channels don't use IRQs at all yet */
+	return 0;
+}
+
+static int
+nouveau_fence_emit(struct drm_device *dev, uint32_t class, uint32_t flags,
+		   uint32_t *breadcrumb, uint32_t *native_type)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan = dev_priv->fifos[class];
+	struct nouveau_drm_channel *dchan = &dev_priv->channel;
+
+	DRM_DEBUG("class=%d, flags=0x%08x\n", class, flags);
+
+	/* We can't emit fences on client channels, update sequence number
+	 * and userspace will emit the fence
+	 */
+	*breadcrumb  = ++chan->next_sequence;
+	*native_type = DRM_FENCE_TYPE_EXE;
+	if (chan != dchan->chan) {
+		DRM_DEBUG("user fence 0x%08x\n", *breadcrumb);
+		return 0;
+	}
+
+	DRM_DEBUG("emit 0x%08x\n", *breadcrumb);
+	BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_SET_REF, 1);
+	OUT_RING  (*breadcrumb);
+	BEGIN_RING(NvSubM2MF, 0x0150, 1);
+	OUT_RING  (0);
+	FIRE_RING ();
+
+	return 0;
+}
+
+static void
+nouveau_fence_perform_flush(struct drm_device *dev, uint32_t class)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct drm_fence_class_manager *fc = &dev->fm.fence_class[class];
+	uint32_t pending_types = 0;
+
+	DRM_DEBUG("class=%d\n", class);
+
+	pending_types = fc->pending_flush |
+			((fc->pending_exe_flush) ? DRM_FENCE_TYPE_EXE : 0);
+	DRM_DEBUG("pending: 0x%08x 0x%08x\n", pending_types,
+		  fc->pending_flush);
+
+	if (pending_types) {
+		uint32_t sequence = NV_READ(NV03_FIFO_REGS(class) + 0x48);
+
+		DRM_DEBUG("got 0x%08x\n", sequence);
+		drm_fence_handler(dev, class, sequence, pending_types, 0);
+	}
+}
+
+static void
+nouveau_fence_poke_flush(struct drm_device *dev, uint32_t class)
+{
+	struct drm_fence_manager *fm = &dev->fm;
+	unsigned long flags;
+
+	DRM_DEBUG("class=%d\n", class);
+
+	write_lock_irqsave(&fm->lock, flags);
+	nouveau_fence_perform_flush(dev, class);
+	write_unlock_irqrestore(&fm->lock, flags);
+}
+
+void
+nouveau_fence_handler(struct drm_device *dev, int channel)
+{
+	struct drm_fence_manager *fm = &dev->fm;
+
+	DRM_DEBUG("class=%d\n", channel);
+
+	write_lock(&fm->lock);
+	nouveau_fence_perform_flush(dev, channel);
+	write_unlock(&fm->lock);
+}
+
+struct drm_fence_driver nouveau_fence_driver = {
+	.num_classes	= 8,
+	.wrap_diff	= (1 << 30),
+	.flush_diff	= (1 << 29),
+	.sequence_mask	= 0xffffffffU,
+	.lazy_capable	= 1,
+	.has_irq	= nouveau_fence_has_irq,
+	.emit		= nouveau_fence_emit,
+	.poke_flush	= nouveau_fence_poke_flush
+};
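On the DRM kernel channel, nouveau_fence_emit() writes the breadcrumb into the FIFO REF register with NV_MEMORY_TO_MEMORY_FORMAT_SET_REF and then fires software method 0x0150; the resulting PGRAPH trap (see the nouveau_pgraph_intr_error() change further down) is what ends up calling drm_fence_handler(). Client channels only receive a fresh sequence number and are expected to emit the fence themselves. A sketch of what that client-side emission could look like, mirroring the kernel's own sequence; the macros stand in for equivalent userspace ring helpers and are an assumption, not something this commit defines:

	/* Sketch only: client-side fence emission, assuming userspace ring
	 * macros shaped like the kernel's BEGIN_RING/OUT_RING/FIRE_RING. */
	static void user_fence_emit(uint32_t breadcrumb)
	{
		BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_SET_REF, 1);
		OUT_RING  (breadcrumb);	/* lands in the per-channel REF register */
		BEGIN_RING(NvSubM2MF, 0x0150, 1); /* software method, traps to the kernel */
		OUT_RING  (0);
		FIRE_RING ();
	}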
@@ -336,4 +336,3 @@ nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
 	DRM_ERROR("Unimplemented on NV50\n");
 	return -EINVAL;
 }
-
@@ -2,10 +2,10 @@
 # script to create a Linux Kernel tree from the DRM tree for diffing etc..
 #
 # Original author - Dave Airlie (C) 2004 - airlied@linux.ie
-#
+# kernel_version to remove below (e.g. 2.6.24)
 
-if [ $# -lt 1 ] ;then
-	echo usage: $0 output_dir
+if [ $# -lt 2 ] ;then
+	echo usage: $0 output_dir kernel_version
 	exit 1
 fi
 
@@ -15,43 +15,23 @@ if [ ! -d shared-core -o ! -d linux-core ] ;then
 fi
 
 OUTDIR=$1/drivers/char/drm/
+KERNEL_VERS=$2
 echo "Copying kernel independent files"
-mkdir -p $OUTDIR
+mkdir -p $OUTDIR/.tmp
 
 ( cd linux-core/ ; make drm_pciids.h )
-cp shared-core/*.[ch] $OUTDIR
-cp linux-core/*.[ch] $OUTDIR
-cp linux-core/Makefile.kernel $OUTDIR/Makefile
+cp shared-core/*.[ch] $OUTDIR/.tmp
+cp linux-core/*.[ch] $OUTDIR/.tmp
+cp linux-core/Makefile.kernel $OUTDIR/.tmp/Makefile
 
 echo "Copying 2.6 Kernel files"
-cp linux-core/Kconfig $OUTDIR/
+cp linux-core/Kconfig $OUTDIR/.tmp
 
+./scripts/drm-scripts-gentree.pl $KERNEL_VERS $OUTDIR/.tmp $OUTDIR
 cd $OUTDIR
 
+rm -rf .tmp
 rm via_ds.[ch]
-for i in via*.[ch]
-do
-unifdef -D__linux__ -DVIA_HAVE_DMABLIT -DVIA_HAVE_CORE_MM $i > $i.tmp
-mv $i.tmp $i
-done
-
 rm sis_ds.[ch]
-for i in sis*.[ch]
-do
-unifdef -D__linux__ -DVIA_HAVE_DMABLIT -DSIS_HAVE_CORE_MM $i > $i.tmp
-mv $i.tmp $i
-done
-
-for i in i915*.[ch]
-do
-unifdef -D__linux__ -DI915_HAVE_FENCE -DI915_HAVE_BUFFER $i > $i.tmp
-mv $i.tmp $i
-done
-
-for i in drm*.[ch]
-do
-unifdef -UDRM_ODD_MM_COMPAT -D__linux__ $i > $i.tmp
-mv $i.tmp $i
-done
 cd -
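With the extra kernel_version argument the script now stages all sources in $OUTDIR/.tmp and hands the #ifdef stripping to drm-scripts-gentree.pl (added below), replacing the per-driver unifdef loops. A typical invocation, run from the top of the DRM tree; the script filename and kernel checkout path are assumptions for illustration:

	# assumed name and path:
	sh scripts/create-kernel-tree.sh ~/linux-2.6 2.6.24
	# sources land in ~/linux-2.6/drivers/char/drm/, expanded for 2.6.24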
@@ -0,0 +1,254 @@
+#!/usr/bin/perl
+#
+# Original version was part of Gerd Knorr's v4l scripts.
+#
+# Several improvements by (c) 2005-2007 Mauro Carvalho Chehab
+#
+# Largely re-written (C) 2007 Trent Piepho <xyzzy@speakeasy.org>
+# Stolen for DRM usage by airlied
+#
+# Theory of Operation
+#
+# This acts as a sort of mini version of cpp, which will process
+# #if/#elif/#ifdef/etc directives to strip out code used to support
+# multiple kernel versions or otherwise not wanted to be sent upstream to
+# git.
+#
+# Conditional compilation directives fall into two categories,
+# "processed" and "other".  The "other" directives are ignored and simply
+# output as they come in without changes (see 'keep' exception).  The
+# "processed" variety are evaluated and only the lines in the 'true' part
+# are kept, like cpp would do.
+#
+# If gentree knows the result of an expression, that directive will be
+# "processed", otherwise it will be an "other".  gentree knows the value
+# of LINUX_VERSION_CODE, BTTV_VERSION_CODE, the KERNEL_VERSION(x,y,z)
+# macro, numeric constants like 0 and 1, and a few defines like MM_KERNEL
+# and STV0297_CS2.
+#
+# An exception is if the comment "/*KEEP*/" appears after the expression,
+# in which case that directive will be considered an "other" and not
+# processed, other than to remove the keep comment.
+#
+# Known bugs:
+# don't specify the root directory e.g. '/' or even '////'
+# directives continued with a back-slash will always be ignored
+# you can't modify a source tree in-place, i.e. source dir == dest dir
+
+use strict;
+use File::Find;
+use Fcntl ':mode';
+
+my $VERSION = shift;
+my $SRC = shift;
+my $DESTDIR = shift;
+
+if (!defined($DESTDIR)) {
+	print "Usage:\ngentree.pl\t<version> <source dir> <dest dir>\n\n";
+	exit;
+}
+
+my $BTTVCODE = KERNEL_VERSION(0,9,17);
+my ($LINUXCODE, $extra) = kernel_version($VERSION);
+my $DEBUG = 0;
+
+my %defs = (
+	'LINUX_VERSION_CODE' => $LINUXCODE,
+	'MM_KERNEL' => ($extra =~ /-mm/)?1:0,
+	'DRM_ODD_MM_COMPAT' => 0,
+	'I915_HAVE_FENCE' => 1,
+	'I915_HAVE_BUFFER' => 1,
+	'VIA_HAVE_DMABLIT' => 1,
+	'VIA_HAVE_CORE_MM' => 1,
+	'VIA_HAVE_FENCE' => 1,
+	'VIA_HAVE_BUFFER' => 1,
+	'SIS_HAVE_CORE_MM' => 1,
+	'DRM_FULL_MM_COMPAT' => 1,
+	'__linux__' => 1,
+);
+
+#################################################################
+# helpers
+
+sub kernel_version($) {
+	$_[0] =~ m/(\d+)\.(\d+)\.(\d+)(.*)/;
+	return ($1*65536 + $2*256 + $3, $4);
+}
+
+# used in eval()
+sub KERNEL_VERSION($$$) { return $_[0]*65536 + $_[1]*256 + $_[2]; }
+
+sub evalexp($) {
+	local $_ = shift;
+	s|/\*.*?\*/||go;	# delete /* */ comments
+	s|//.*$||o;		# delete // comments
+	s/\bdefined\s*\(/(/go;	# defined(foo) to (foo)
+	while (/\b([_A-Za-z]\w*)\b/go) {
+		if (exists $defs{$1}) {
+			my $id = $1; my $pos = $-[0];
+			s/$id/$defs{$id}/;
+			pos = $-[0];
+		} elsif ($1 ne 'KERNEL_VERSION') {
+			return(undef);
+		}
+	}
+	return(eval($_) ? 1 : 0);
+}
+
+#################################################################
+# filter out version-specific code
+
+sub filter_source ($$) {
+	my ($in,$out) = @_;
+	my $line;
+	my $level=0;
+	my %if = ();
+	my %state = ();
+
+	my @dbgargs = \($level, %state, %if, $line);
+	sub dbgline($\@) {
+		my $level = ${$_[1][0]};
+		printf STDERR ("/* BP %4d $_[0] state=$_[1][1]->{$level} if=$_[1][2]->{$level} level=$level (${$_[1][3]}) */\n", $.) if $DEBUG;
+	}
+
+	open IN, '<', $in or die "Error opening $in: $!\n";
+	open OUT, '>', $out or die "Error opening $out: $!\n";
+
+	print STDERR "File: $in, for kernel $VERSION($LINUXCODE)/\n" if $DEBUG;
+
+	while ($line = <IN>) {
+		chomp $line;
+		next if ($line =~ m/^#include \"compat.h\"/o);
+#		next if ($line =~ m/[\$]Id:/);
+
+		# For "#if 0 /*KEEP*/;" the ; should be dropped too
+		if ($line =~ m@^\s*#\s*if(n?def)?\s.*?(\s*/\*\s*(?i)keep\s*\*/;?)@) {
+			$state{$level} = "ifother";
+			$if{$level} = 1;
+			dbgline "#if$1 (keep)", @dbgargs;
+			$line =~ s/\Q$2\E//;
+			$level++;
+		}
+		# handle all ifdef/ifndef lines
+		elsif ($line =~ /^\s*#\s*if(n?)def\s*(\w+)/o) {
+			if (exists $defs{$2}) {
+				$state{$level} = 'if';
+				$if{$level} = ($1 eq 'n') ? !$defs{$2} : $defs{$2};
+				dbgline "#if$1def $2", @dbgargs;
+				$level++;
+				next;
+			}
+			$state{$level} = "ifother";
+			$if{$level} = 1;
+			dbgline "#if$1def (other)", @dbgargs;
+			$level++;
+		}
+		# handle all ifs
+		elsif ($line =~ /^\s*#\s*if\s+(.*)$/o) {
+			my $res = evalexp($1);
+			if (defined $res) {
+				$state{$level} = 'if';
+				$if{$level} = $res;
+				dbgline '#if '.($res?'(yes)':'(no)'), @dbgargs;
+				$level++;
+				next;
+			} else {
+				$state{$level} = 'ifother';
+				$if{$level} = 1;
+				dbgline '#if (other)', @dbgargs;
+				$level++;
+			}
+		}
+		# handle all elifs
+		elsif ($line =~ /^\s*#\s*elif\s+(.*)$/o) {
+			my $exp = $1;
+			$level--;
+			$level < 0 and die "more elifs than ifs";
+			$state{$level} =~ /if/ or die "unmatched elif";
+
+			if ($state{$level} eq 'if' && !$if{$level}) {
+				my $res = evalexp($exp);
+				defined $res or die 'moving from if to ifother';
+				$state{$level} = 'if';
+				$if{$level} = $res;
+				dbgline '#elif1 '.($res?'(yes)':'(no)'), @dbgargs;
+				$level++;
+				next;
+			} elsif ($state{$level} ne 'ifother') {
+				$if{$level} = 0;
+				$state{$level} = 'elif';
+				dbgline '#elif0', @dbgargs;
+				$level++;
+				next;
+			}
+			$level++;
+		}
+		elsif ($line =~ /^\s*#\s*else/o) {
+			$level--;
+			$level < 0 and die "more elses than ifs";
+			$state{$level} =~ /if/ or die "unmatched else";
+			$if{$level} = !$if{$level} if ($state{$level} eq 'if');
+			$state{$level} =~ s/^if/else/o;	# if -> else, ifother -> elseother, elif -> elif
+			dbgline '#else', @dbgargs;
+			$level++;
+			next if $state{$level-1} !~ /other$/o;
+		}
+		elsif ($line =~ /^\s*#\s*endif/o) {
+			$level--;
+			$level < 0 and die "more endifs than ifs";
+			dbgline '#endif', @dbgargs;
+			next if $state{$level} !~ /other$/o;
+		}
+
+		my $print = 1;
+		for (my $i=0;$i<$level;$i++) {
+			next if $state{$i} =~ /other$/o;	# keep code in ifother/elseother blocks
+			if (!$if{$i}) {
+				$print = 0;
+				dbgline 'DEL', @{[\$i, \%state, \%if, \$line]};
+				last;
+			}
+		}
+		print OUT "$line\n" if $print;
+	}
+	close IN;
+	close OUT;
+}
+
+#################################################################
+
+sub parse_dir {
+	my $file = $File::Find::name;
+
+	return if ($file =~ /CVS/);
+	return if ($file =~ /~$/);
+
+	my $f2 = $file;
+	$f2 =~ s/^\Q$SRC\E/$DESTDIR/;
+
+	my $mode = (stat($file))[2];
+	if ($mode & S_IFDIR) {
+		print("mkdir -p '$f2'\n");
+		system("mkdir -p '$f2'");	# should check for error
+		return;
+	}
+	print "from $file to $f2\n";
+
+	if ($file =~ m/.*\.[ch]$/) {
+		filter_source($file, $f2);
+	} else {
+		system("cp $file $f2");
+	}
+}
+
+
+# main
+
+printf "kernel is %s (0x%x)\n",$VERSION,$LINUXCODE;
+
+# remove any trailing slashes from dir names.  don't pass in just '/'
+$SRC =~ s|/*$||; $DESTDIR =~ s|/*$||;
+
+print "finding files at $SRC\n";
+
+find({wanted => \&parse_dir, no_chdir => 1}, $SRC);
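The net effect on driver sources: directives whose symbols are in %defs are evaluated and removed, unknown directives pass through untouched, and a /*KEEP*/ marker forces pass-through minus the marker itself. A small hypothetical C fragment showing all three cases under the table above (I915_HAVE_FENCE=1; CONFIG_AGP is not in %defs):

	/* input (hypothetical): */
	#ifdef I915_HAVE_FENCE		/* known, true: body kept, directives dropped */
		i915_fence_handler(dev);
	#endif
	#if 0 /*KEEP*/			/* KEEP: emitted as "#if 0", body untouched */
		experimental_path(dev);
	#endif
	#ifdef CONFIG_AGP		/* unknown: whole block passes through verbatim */
		setup_agp(dev);
	#endif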
@@ -528,6 +528,7 @@ void i915_enable_interrupt (struct drm_device *dev)
 	struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
 
 	dev_priv->irq_enable_reg = USER_INT_FLAG;
 
 	if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A)
 		dev_priv->irq_enable_reg |= VSYNC_PIPEA_FLAG;
 	if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)
@@ -177,4 +177,3 @@ nouveau_dma_wait(struct drm_device *dev, int size)
 
 	return 0;
 }
-
@@ -95,4 +95,3 @@ typedef enum {
 } while(0)
 
 #endif
-
@@ -158,4 +158,3 @@ struct drm_nouveau_sarea {
 #define DRM_NOUVEAU_MEM_FREE	0x09
 
 #endif /* __NOUVEAU_DRM_H__ */
-
@@ -39,16 +39,9 @@
 #define NOUVEAU_FAMILY   0x0000FFFF
 #define NOUVEAU_FLAGS    0xFFFF0000
 
-#if 0
-#if defined(__linux__)
-#define NOUVEAU_HAVE_BUFFER
-#endif
-#endif
-
 #include "nouveau_drm.h"
 #include "nouveau_reg.h"
 
 struct mem_block {
 	struct mem_block *next;
 	struct mem_block *prev;
@@ -113,6 +106,9 @@ struct nouveau_channel
 	/* mapping of the regs controling the fifo */
 	drm_local_map_t *regs;
 
+	/* Fencing */
+	uint32_t next_sequence;
+
 	/* DMA push buffer */
 	struct nouveau_gpuobj_ref *pushbuf;
 	struct mem_block          *pushbuf_mem;
@@ -232,6 +228,8 @@ struct drm_nouveau_private {
 		NOUVEAU_CARD_INIT_FAILED
 	} init_state;
 
+	int ttm;
+
 	/* the card type, takes NV_* as values */
 	int card_type;
 	/* exact chipset, derived from NV_PMC_BOOT_0 */
@@ -351,6 +349,7 @@ extern struct mem_block* nouveau_mem_alloc(struct drm_device *,
 					   int flags, struct drm_file *);
 extern void nouveau_mem_free(struct drm_device *dev, struct mem_block*);
 extern int  nouveau_mem_init(struct drm_device *);
+extern int  nouveau_mem_init_ttm(struct drm_device *);
 extern void nouveau_mem_close(struct drm_device *);
 
 /* nouveau_notifier.c */
@@ -560,16 +559,12 @@ extern void nv04_timer_takedown(struct drm_device *);
 extern long nouveau_compat_ioctl(struct file *file, unsigned int cmd,
 				 unsigned long arg);
 
-#ifdef NOUVEAU_HAVE_BUFFER
 /* nouveau_buffer.c */
-extern struct drm_ttm_backend *nouveau_create_ttm_backend_entry(struct drm_device *dev);
-extern int nouveau_fence_types(struct drm_buffer_object *bo, uint32_t *fclass, uint32_t *type);
-extern int nouveau_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags);
-extern int nouveau_init_mem_type(struct drm_device *dev, uint32_t type, struct drm_mem_type_manager *man);
-extern uint32_t nouveau_evict_mask(struct drm_buffer_object *bo);
-extern int nouveau_move(struct drm_buffer_object *bo, int evict, int no_wait, struct drm_bo_mem_reg *new_mem);
-void nouveau_flush_ttm(struct drm_ttm *ttm);
-#endif
+extern struct drm_bo_driver nouveau_bo_driver;
+
+/* nouveau_fence.c */
+extern struct drm_fence_driver nouveau_fence_driver;
+extern void nouveau_fence_handler(struct drm_device *dev, int channel);
 
 #if defined(__powerpc__)
 #define NV_READ(reg) in_be32((void __iomem *)(dev_priv->mmio)->handle + (reg) )
@@ -592,4 +587,3 @@ void nouveau_flush_ttm(struct drm_ttm *ttm);
 #define INSTANCE_WR(o,i,v) NV_WI32((o)->im_pramin->start + ((i)<<2), (v))
 
 #endif /* __NOUVEAU_DRV_H__ */
-
@@ -115,7 +115,7 @@ nouveau_fifo_irq_handler(struct drm_device *dev)
 	}
 
 	if (status) {
-		DRM_INFO("Unhandled PFIFO_INTR - 0x%8x\n", status);
+		DRM_INFO("Unhandled PFIFO_INTR - 0x%08x\n", status);
 		NV_WRITE(NV03_PFIFO_INTR_0, status);
 	}
 
@@ -244,39 +244,53 @@ nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret)
 	return 0;
 }
 
+struct nouveau_pgraph_trap {
+	int channel;
+	int class;
+	int subc, mthd, size;
+	uint32_t data, data2;
+};
+
 static void
-nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id)
+nouveau_graph_trap_info(struct drm_device *dev,
+			struct nouveau_pgraph_trap *trap)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	uint32_t address;
-	uint32_t channel, class;
-	uint32_t method, subc, data, data2;
+
+	if (nouveau_graph_trapped_channel(dev, &trap->channel))
+		trap->channel = -1;
+	address = NV_READ(NV04_PGRAPH_TRAPPED_ADDR);
+
+	trap->mthd = address & 0x1FFC;
+	trap->data = NV_READ(NV04_PGRAPH_TRAPPED_DATA);
+	if (dev_priv->card_type < NV_10) {
+		trap->subc = (address >> 13) & 0x7;
+	} else {
+		trap->subc = (address >> 16) & 0x7;
+		trap->data2 = NV_READ(NV10_PGRAPH_TRAPPED_DATA_HIGH);
+	}
+
+	if (dev_priv->card_type < NV_10) {
+		trap->class = NV_READ(0x400180 + trap->subc*4) & 0xFF;
+	} else if (dev_priv->card_type < NV_40) {
+		trap->class = NV_READ(0x400160 + trap->subc*4) & 0xFFF;
+	} else if (dev_priv->card_type < NV_50) {
+		trap->class = NV_READ(0x400160 + trap->subc*4) & 0xFFFF;
+	} else {
+		trap->class = NV_READ(0x400814);
+	}
+}
+
+static void
+nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id,
+			     struct nouveau_pgraph_trap *trap)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	uint32_t nsource, nstatus;
 
-	if (nouveau_graph_trapped_channel(dev, &channel))
-		channel = -1;
-
-	data = NV_READ(NV04_PGRAPH_TRAPPED_DATA);
-	address = NV_READ(NV04_PGRAPH_TRAPPED_ADDR);
-	method = address & 0x1FFC;
-	if (dev_priv->card_type < NV_10) {
-		subc = (address >> 13) & 0x7;
-		data2= 0;
-	} else {
-		subc = (address >> 16) & 0x7;
-		data2= NV_READ(NV10_PGRAPH_TRAPPED_DATA_HIGH);
-	}
 	nsource = NV_READ(NV03_PGRAPH_NSOURCE);
 	nstatus = NV_READ(NV03_PGRAPH_NSTATUS);
-	if (dev_priv->card_type < NV_10) {
-		class = NV_READ(0x400180 + subc*4) & 0xFF;
-	} else if (dev_priv->card_type < NV_40) {
-		class = NV_READ(0x400160 + subc*4) & 0xFFF;
-	} else if (dev_priv->card_type < NV_50) {
-		class = NV_READ(0x400160 + subc*4) & 0xFFFF;
-	} else {
-		class = NV_READ(0x400814);
-	}
 
 	DRM_INFO("%s - nSource:", id);
 	nouveau_print_bitfield_names(nsource, nouveau_nsource_names,
@@ -291,45 +305,60 @@ nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id)
 	printk("\n");
 
 	DRM_INFO("%s - Ch %d/%d Class 0x%04x Mthd 0x%04x Data 0x%08x:0x%08x\n",
-		 id, channel, subc, class, method, data2, data);
+		 id, trap->channel, trap->subc, trap->class, trap->mthd,
+		 trap->data2, trap->data);
 }
 
 static inline void
 nouveau_pgraph_intr_notify(struct drm_device *dev, uint32_t nsource)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int handled = 0;
+	struct nouveau_pgraph_trap trap;
+	int unhandled = 0;
 
-	DRM_DEBUG("PGRAPH notify interrupt\n");
-	if (dev_priv->card_type == NV_04 &&
-	    (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD)) {
-		uint32_t class, mthd;
-
+	nouveau_graph_trap_info(dev, &trap);
+
+	if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
 		/* NV4 (nvidia TNT 1) reports software methods with
 		 * PGRAPH NOTIFY ILLEGAL_MTHD
 		 */
-		mthd = NV_READ(NV04_PGRAPH_TRAPPED_ADDR) & 0x1FFC;
-		class = NV_READ(NV04_PGRAPH_CTX_SWITCH1) & 0xFFF;
 		DRM_DEBUG("Got NV04 software method method %x for class %#x\n",
-			  mthd, class);
+			  trap.mthd, trap.class);
 
-		if (nouveau_sw_method_execute(dev, class, mthd)) {
+		if (nouveau_sw_method_execute(dev, trap.class, trap.mthd)) {
 			DRM_ERROR("Unable to execute NV04 software method %x "
 				  "for object class %x. Please report.\n",
-				  mthd, class);
-		} else {
-			handled = 1;
+				  trap.mthd, trap.class);
+			unhandled = 1;
 		}
+	} else {
+		unhandled = 1;
 	}
 
-	if (!handled)
-		nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY");
+	if (unhandled)
+		nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY", &trap);
 }
 
 static inline void
 nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource)
 {
-	nouveau_graph_dump_trap_info(dev, "PGRAPH_ERROR");
+	struct nouveau_pgraph_trap trap;
+	int unhandled = 0;
+
+	nouveau_graph_trap_info(dev, &trap);
+
+	if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
+		if (trap.channel >= 0 && trap.mthd == 0x0150) {
+			nouveau_fence_handler(dev, trap.channel);
+		} else
+		if (nouveau_sw_method_execute(dev, trap.class, trap.mthd)) {
+			unhandled = 1;
+		}
+	} else {
+		unhandled = 1;
+	}
+
+	if (unhandled)
+		nouveau_graph_dump_trap_info(dev, "PGRAPH_ERROR", &trap);
 }
 
 static inline void
@@ -389,7 +418,7 @@ nouveau_pgraph_irq_handler(struct drm_device *dev)
 	}
 
 	if (status) {
-		DRM_INFO("Unhandled PGRAPH_INTR - 0x%8x\n", status);
+		DRM_INFO("Unhandled PGRAPH_INTR - 0x%08x\n", status);
 		NV_WRITE(NV03_PGRAPH_INTR, status);
 	}
 
@@ -445,4 +474,3 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
 
 	return IRQ_HANDLED;
 }
-
@@ -301,13 +301,11 @@ uint64_t nouveau_mem_fb_amount(struct drm_device *dev)
 }
 
 static int
-nouveau_mem_init_agp(struct drm_device *dev)
+nouveau_mem_init_agp(struct drm_device *dev, int ttm)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct drm_agp_info info;
 	struct drm_agp_mode mode;
-	struct drm_agp_buffer agp_req;
-	struct drm_agp_binding bind_req;
 	int ret;
 
 	ret = drm_agp_acquire(dev);
@@ -330,6 +328,10 @@ nouveau_mem_init_agp(struct drm_device *dev)
 		return ret;
 	}
 
+	if (!ttm) {
+		struct drm_agp_buffer agp_req;
+		struct drm_agp_binding bind_req;
+
 	agp_req.size = info.aperture_size;
 	agp_req.type = 0;
 	ret = drm_agp_alloc(dev, &agp_req);
@@ -345,6 +347,7 @@ nouveau_mem_init_agp(struct drm_device *dev)
 		DRM_ERROR("Unable to bind AGP: %d\n", ret);
 		return ret;
 	}
+	}
 
 	dev_priv->gart_info.type	= NOUVEAU_GART_AGP;
 	dev_priv->gart_info.aper_base	= info.aperture_base;
@@ -352,6 +355,73 @@ nouveau_mem_init_agp(struct drm_device *dev)
 	return 0;
 }
 
+#define HACK_OLD_MM
+int
+nouveau_mem_init_ttm(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	uint32_t vram_size, bar1_size;
+	int ret;
+
+	dev_priv->agp_heap = dev_priv->pci_heap = dev_priv->fb_heap = NULL;
+	dev_priv->fb_phys = drm_get_resource_start(dev,1);
+	dev_priv->gart_info.type = NOUVEAU_GART_NONE;
+
+	drm_bo_driver_init(dev);
+
+	/* non-mappable vram */
+	dev_priv->fb_available_size = nouveau_mem_fb_amount(dev);
+	dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
+	vram_size = dev_priv->fb_available_size >> PAGE_SHIFT;
+	bar1_size = drm_get_resource_len(dev, 1) >> PAGE_SHIFT;
+	if (bar1_size < vram_size) {
+		if ((ret = drm_bo_init_mm(dev, DRM_BO_MEM_PRIV0,
+					  bar1_size, vram_size - bar1_size))) {
+			DRM_ERROR("Failed PRIV0 mm init: %d\n", ret);
+			return ret;
+		}
+		vram_size = bar1_size;
+	}
+
+	/* mappable vram */
+#ifdef HACK_OLD_MM
+	vram_size /= 4;
+#endif
+	if ((ret = drm_bo_init_mm(dev, DRM_BO_MEM_VRAM, 0, vram_size))) {
+		DRM_ERROR("Failed VRAM mm init: %d\n", ret);
+		return ret;
+	}
+
+	/* GART */
+#ifndef __powerpc__
+	if (drm_device_is_agp(dev) && dev->agp) {
+		if ((ret = nouveau_mem_init_agp(dev, 1)))
+			DRM_ERROR("Error initialising AGP: %d\n", ret);
+	}
+#endif
+
+	if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) {
+		if ((ret = nouveau_sgdma_init(dev)))
+			DRM_ERROR("Error initialising PCI SGDMA: %d\n", ret);
+	}
+
+	if ((ret = drm_bo_init_mm(dev, DRM_BO_MEM_TT, 0,
+				  dev_priv->gart_info.aper_size >>
+				  PAGE_SHIFT))) {
+		DRM_ERROR("Failed TT mm init: %d\n", ret);
+		return ret;
+	}
+
+#ifdef HACK_OLD_MM
+	vram_size <<= PAGE_SHIFT;
+	DRM_INFO("Old MM using %dKiB VRAM\n", (vram_size * 3) >> 10);
+	if (nouveau_mem_init_heap(&dev_priv->fb_heap, vram_size, vram_size * 3))
+		return -ENOMEM;
+#endif
+
+	return 0;
+}
+
 int nouveau_mem_init(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -395,7 +465,7 @@ int nouveau_mem_init(struct drm_device *dev)
 #ifndef __powerpc__
 	/* Init AGP / NV50 PCIEGART */
 	if (drm_device_is_agp(dev) && dev->agp) {
-		if ((ret = nouveau_mem_init_agp(dev)))
+		if ((ret = nouveau_mem_init_agp(dev, 0)))
 			DRM_ERROR("Error initialising AGP: %d\n", ret);
 	}
 #endif
@@ -606,5 +676,3 @@ int nouveau_ioctl_mem_free(struct drm_device *dev, void *data, struct drm_file *
 	nouveau_mem_free(dev, block);
 	return 0;
 }
-
-
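nouveau_mem_init_ttm() above splits VRAM at the BAR1 boundary: pages the CPU can reach through BAR1 become DRM_BO_MEM_VRAM, the remainder becomes DRM_BO_MEM_PRIV0, and the HACK_OLD_MM carve-out then leaves only a quarter of the mappable range to TTM, handing the other three quarters to the old heap allocator. A standalone sketch of that arithmetic with made-up sizes (all values in pages; not driver code):

	#include <stdio.h>

	int main(void)
	{
		unsigned long vram_size = 65536; /* total VRAM (256MiB @ 4KiB pages) */
		unsigned long bar1_size = 16384; /* CPU-visible BAR1 (64MiB) */
		unsigned long priv0_size = 0;

		if (bar1_size < vram_size) {
			priv0_size = vram_size - bar1_size; /* -> DRM_BO_MEM_PRIV0 */
			vram_size  = bar1_size;
		}
		vram_size /= 4;	/* HACK_OLD_MM: TTM keeps 1/4 of mappable VRAM */

		printf("PRIV0 %lu, TTM VRAM %lu, old MM %lu pages\n",
		       priv0_size, vram_size, vram_size * 3);
		return 0;
	}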
@@ -163,4 +163,3 @@ nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data,
 
 	return 0;
 }
-
@@ -1145,4 +1145,3 @@ int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
 
 	return 0;
 }
-
@@ -550,4 +550,3 @@
 #define NV40_RAMFC_UNK_48	0x48
 #define NV40_RAMFC_UNK_4C	0x4C
 #define NV40_RAMFC_UNK_50	0x50
-
@@ -278,6 +278,7 @@ nouveau_card_init(struct drm_device *dev)
 
 	if (dev_priv->init_state == NOUVEAU_CARD_INIT_DONE)
 		return 0;
+	dev_priv->ttm = 0;
 
 	/* Map any PCI resources we need on the card */
 	ret = nouveau_init_card_mappings(dev);
@@ -315,8 +316,13 @@ nouveau_card_init(struct drm_device *dev)
 	if (ret) return ret;
 
 	/* Setup the memory manager */
+	if (dev_priv->ttm) {
+		ret = nouveau_mem_init_ttm(dev);
+		if (ret) return ret;
+	} else {
 		ret = nouveau_mem_init(dev);
 		if (ret) return ret;
+	}
 
 	ret = nouveau_gpuobj_init(dev);
 	if (ret) return ret;
@@ -635,5 +641,3 @@ void nouveau_wait_for_idle(struct drm_device *dev)
 		}
 	}
 }
-
-
@@ -189,5 +189,3 @@ static void nouveau_NV04_setcontext_sw_method(struct drm_device *dev, uint32_t o
 
 	return 1;
 }
-
-
@@ -31,4 +31,3 @@
 */
 
 int nouveau_sw_method_execute(struct drm_device *dev, uint32_t oclass, uint32_t method); /* execute the given software method, returns 0 on success */
-
@@ -21,4 +21,3 @@ void
 nv04_fb_takedown(struct drm_device *dev)
 {
 }
-
@@ -126,4 +126,3 @@ nv04_fifo_save_context(struct nouveau_channel *chan)
 
 	return 0;
 }
-
@@ -156,4 +156,3 @@ nv04_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
 	gpuobj->im_bound = 0;
 	return 0;
 }
-
@@ -20,4 +20,3 @@ void
 nv04_mc_takedown(struct drm_device *dev)
 {
 }
-
@@ -42,4 +42,3 @@ void
 nv04_timer_takedown(struct drm_device *dev)
 {
 }
-
@@ -23,4 +23,3 @@ void
 nv10_fb_takedown(struct drm_device *dev)
 {
 }
-
@@ -157,4 +157,3 @@ nv10_fifo_save_context(struct nouveau_channel *chan)
 
 	return 0;
 }
-
@@ -907,4 +907,3 @@ int nv10_graph_init(struct drm_device *dev) {
 void nv10_graph_takedown(struct drm_device *dev)
 {
 }
-
@@ -667,10 +667,16 @@ int nv20_graph_save_context(struct nouveau_channel *chan)
 
 static void nv20_graph_rdi(struct drm_device *dev) {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int i;
+	int i, writecount = 32;
+	uint32_t rdi_index = 0x2c80000;
 
-	NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x2c80000);
-	for (i = 0; i < 32; i++)
+	if (dev_priv->chipset == 0x20) {
+		rdi_index = 0x3d0000;
+		writecount = 15;
+	}
+
+	NV_WRITE(NV10_PGRAPH_RDI_INDEX, rdi_index);
+	for (i = 0; i < writecount; i++)
 		NV_WRITE(NV10_PGRAPH_RDI_DATA, 0);
 
 	nouveau_wait_for_idle(dev);
@@ -706,7 +712,7 @@ int nv20_graph_init(struct drm_device *dev) {
 	NV_WRITE(NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
 	NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x00000000);
 	NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x00118700);
-	NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xF20E0435); /* 0x4 = auto ctx switch */
+	NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xF3CE0475); /* 0x4 = auto ctx switch */
 	NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x00000000);
 	NV_WRITE(0x40009C           , 0x00000040);
 
@@ -718,9 +724,9 @@ int nv20_graph_init(struct drm_device *dev) {
 		NV_WRITE(0x400098, 0x40000080);
 		NV_WRITE(0x400B88, 0x000000ff);
 	} else {
-		NV_WRITE(0x400880, 0x00080000);
+		NV_WRITE(0x400880, 0x00080000); /* 0x0008c7df */
 		NV_WRITE(0x400094, 0x00000005);
-		NV_WRITE(0x400B80, 0x45CAA208);
+		NV_WRITE(0x400B80, 0x45CAA208); /* 0x45eae20e */
 		NV_WRITE(0x400B84, 0x24000000);
 		NV_WRITE(0x400098, 0x00000040);
 		NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00E00038);
@@ -731,11 +737,27 @@ int nv20_graph_init(struct drm_device *dev) {
 
 	/* copy tile info from PFB */
 	for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
-		NV_WRITE(NV10_PGRAPH_TILE(i), NV_READ(NV10_PFB_TILE(i)));
-		NV_WRITE(NV10_PGRAPH_TLIMIT(i), NV_READ(NV10_PFB_TLIMIT(i)));
-		NV_WRITE(NV10_PGRAPH_TSIZE(i), NV_READ(NV10_PFB_TSIZE(i)));
-		NV_WRITE(NV10_PGRAPH_TSTATUS(i), NV_READ(NV10_PFB_TSTATUS(i)));
+		NV_WRITE(0x00400904 + i*0x10, NV_READ(NV10_PFB_TLIMIT(i)));
+			/* which is NV40_PGRAPH_TLIMIT0(i) ?? */
+		NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0030+i*4);
+		NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(NV10_PFB_TLIMIT(i)));
+		NV_WRITE(0x00400908 + i*0x10, NV_READ(NV10_PFB_TSIZE(i)));
+			/* which is NV40_PGRAPH_TSIZE0(i) ?? */
+		NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0050+i*4);
+		NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(NV10_PFB_TSIZE(i)));
+		NV_WRITE(0x00400900 + i*0x10, NV_READ(NV10_PFB_TILE(i)));
+			/* which is NV40_PGRAPH_TILE0(i) ?? */
+		NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0010+i*4);
+		NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(NV10_PFB_TILE(i)));
 	}
+	for (i = 0; i < 8; i++) {
+		NV_WRITE(0x400980+i*4, NV_READ(0x100300+i*4));
+		NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0090+i*4);
+		NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(0x100300+i*4));
+	}
+	NV_WRITE(0x4009a0, NV_READ(0x100324));
+	NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA000C);
+	NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(0x100324));
 
 	NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10010100);
 	NV_WRITE(NV10_PGRAPH_STATE      , 0xFFFFFFFF);
@@ -865,4 +887,3 @@ int nv30_graph_init(struct drm_device *dev)
 
 	return 0;
 }
-
@@ -53,4 +53,3 @@ void
 nv40_fb_takedown(struct drm_device *dev)
 {
 }
-
@@ -205,4 +205,3 @@ nv40_fifo_init(struct drm_device *dev)
 	NV_WRITE(NV04_PFIFO_DMA_TIMESLICE, 0x2101ffff);
 	return 0;
 }
-
@@ -2221,4 +2221,3 @@ nv40_graph_init(struct drm_device *dev)
 void nv40_graph_takedown(struct drm_device *dev)
 {
 }
-
@@ -36,4 +36,3 @@ void
 nv40_mc_takedown(struct drm_device *dev)
 {
 }
-
@@ -324,4 +324,3 @@ nv50_fifo_save_context(struct nouveau_channel *chan)
 	DRM_ERROR("stub!\n");
 	return 0;
 }
-
@@ -313,4 +313,3 @@ nv50_graph_save_context(struct nouveau_channel *chan)
 
 	return nv50_graph_transfer_context(dev, inst, 1);
 }
-
@@ -317,4 +317,3 @@ nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
 	gpuobj->im_bound = 0;
 	return 0;
 }
-
@@ -419,7 +419,7 @@ extern int radeon_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t
 
 #ifdef RADEON_HAVE_BUFFER
 /* radeon_buffer.c */
-extern drm_ttm_backend_t *radeon_create_ttm_backend_entry(struct drm_device *dev);
+extern struct drm_ttm_backend *radeon_create_ttm_backend_entry(struct drm_device *dev);
 extern int radeon_fence_types(struct drm_buffer_object *bo, uint32_t *class, uint32_t *type);
 extern int radeon_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags);
 extern uint32_t radeon_evict_mask(struct drm_buffer_object *bo);
@@ -1092,4 +1092,3 @@ struct drm_ioctl_desc savage_ioctls[] = {
 };
 
 int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls);
-
@@ -140,4 +140,3 @@ int via_driver_unload(struct drm_device *dev)
 
 	return 0;
 }
-